Linux 2.6.31-rc6
[linux-2.6] / arch / ia64 / xen / xen_pv_ops.c
1 /******************************************************************************
2  * arch/ia64/xen/xen_pv_ops.c
3  *
4  * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5  *                    VA Linux Systems Japan K.K.
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation; either version 2 of the License, or
10  * (at your option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
20  *
21  */
22
23 #include <linux/console.h>
24 #include <linux/irq.h>
25 #include <linux/kernel.h>
26 #include <linux/pm.h>
27 #include <linux/unistd.h>
28
29 #include <asm/xen/hypervisor.h>
30 #include <asm/xen/xencomm.h>
31 #include <asm/xen/privop.h>
32
33 #include "irq_xen.h"
34 #include "time.h"
35
36 /***************************************************************************
37  * general info
38  */
/* Paravirt identification for this port.  kernel_rpl here is only a
 * placeholder; xen_info_init() fixes it up at boot. */
static struct pv_info xen_info __initdata = {
	.kernel_rpl = 2,	/* or 1: determined at runtime */
	.paravirt_enabled = 1,
	.name = "Xen/ia64",
};

/* Position of the privilege-level field (pl) inside ar.rsc. */
#define IA64_RSC_PL_SHIFT	2
#define IA64_RSC_PL_BIT_SIZE	2
#define IA64_RSC_PL_MASK	\
	(((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)

static void __init
xen_info_init(void)
{
	/* Xenified Linux/ia64 may run on pl = 1 or 2.
	 * Determine it at run time from the pl bits of ar.rsc. */
	unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
	unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
	xen_info.kernel_rpl = rpl;
}
59
60 /***************************************************************************
61  * pv_init_ops
62  * initialization hooks.
63  */
64
/* Record the panicking context's kernel stack pointer and ask the
 * hypervisor to crash this domain.  Runs via unw_init_running() so a
 * valid unwind frame (info->sw) is available. */
static void
xen_panic_hypercall(struct unw_frame_info *info, void *arg)
{
	current->thread.ksp = (__u64)info->sw - 16;
	HYPERVISOR_shutdown(SHUTDOWN_crash);
	/* we're never actually going to get here... */
}

/* Panic-notifier callback: hand the crash to Xen via the hypercall above. */
static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	unw_init_running(xen_panic_hypercall, NULL);
	/* we're never actually going to get here... */
	return NOTIFY_DONE;
}
80
81 static struct notifier_block xen_panic_block = {
82         xen_panic_event, NULL, 0 /* try to go last */
83 };
84
/* pm_power_off hook: disable interrupts and ask Xen to power off
 * the domain.  Does not return. */
static void xen_pm_power_off(void)
{
	local_irq_disable();
	HYPERVISOR_shutdown(SHUTDOWN_poweroff);
}
90
/* Boot banner: report the runtime privilege level and the start_info
 * parameters handed over by the hypervisor. */
static void __init
xen_banner(void)
{
	printk(KERN_INFO
	       "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
	       "flags=0x%x\n",
	       xen_info.kernel_rpl,
	       HYPERVISOR_shared_info->arch.start_info_pfn,
	       xen_start_info->nr_pages, xen_start_info->flags);
}
101
/* Reserve the single page holding the Xen start_info so the kernel
 * never hands it out as free memory.  Returns the number of rsvd
 * regions filled in (always 1 here). */
static int __init
xen_reserve_memory(struct rsvd_region *region)
{
	region->start = (unsigned long)__va(
		(HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
	region->end   = region->start + PAGE_SIZE;
	return 1;
}
110
/* Early boot setup for a Xen PV domain: locate start_info, bring up
 * xencomm, then register panic/power-off hooks.  Statement order is
 * significant -- see the hypercall comment below. */
static void __init
xen_arch_setup_early(void)
{
	struct shared_info *s;
	BUG_ON(!xen_pv_domain());

	s = HYPERVISOR_shared_info;
	xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);

	/* Must be done before any hypercall.  */
	xencomm_initialize();

	xen_setup_features();
	/* Register a call for panic conditions. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &xen_panic_block);
	pm_power_off = xen_pm_power_off;

	xen_ia64_enable_opt_feature();
}
131
/* Register preferred consoles for a Xen guest.  Later registrations
 * take precedence, so hvc (the Xen PV console) ends up preferred. */
static void __init
xen_arch_setup_console(char **cmdline_p)
{
	add_preferred_console("xenboot", 0, NULL);
	add_preferred_console("tty", 0, NULL);
	/* use hvc_xen */
	add_preferred_console("hvc", 0, NULL);

#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = NULL;
#endif
}

/* Non-zero return tells the arch setup code to skip MCA initialization
 * (machine checks are presumably owned by the hypervisor -- confirm
 * against the pv_init_ops caller). */
static int __init
xen_arch_setup_nomca(void)
{
	return 1;
}
150
/* Hook run after the boot CPU's SMP preparation: switch to per-vcpu
 * info placement. */
static void __init
xen_post_smp_prepare_boot_cpu(void)
{
	xen_setup_vcpu_info_placement();
}
156
/* Forward declarations of the binary-patching entry points defined
 * near the bottom of this file. */
#ifdef ASM_SUPPORTED
static unsigned long __init_or_module
xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type);
#endif
static void __init
xen_patch_branch(unsigned long tag, unsigned long type);

/* Initialization hooks installed into pv_init_ops by xen_setup_pv_ops(). */
static const struct pv_init_ops xen_init_ops __initconst = {
	.banner = xen_banner,

	.reserve_memory = xen_reserve_memory,

	.arch_setup_early = xen_arch_setup_early,
	.arch_setup_console = xen_arch_setup_console,
	.arch_setup_nomca = xen_arch_setup_nomca,

	.post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
#ifdef ASM_SUPPORTED
	.patch_bundle = xen_patch_bundle,
#endif
	.patch_branch = xen_patch_branch,
};
179
180 /***************************************************************************
181  * pv_fsys_data
182  * addresses for fsys
183  */
184
/* fsys-mode entry points.  The symbols are defined in Xen-specific
 * assembly/linker sections outside this file. */
extern unsigned long xen_fsyscall_table[NR_syscalls];
extern char xen_fsys_bubble_down[];
struct pv_fsys_data xen_fsys_data __initdata = {
	.fsyscall_table = (unsigned long *)xen_fsyscall_table,
	.fsys_bubble_down = (void *)xen_fsys_bubble_down,
};
191
192 /***************************************************************************
193  * pv_patchdata
194  * patchdata addresses
195  */
196
/* Declare the start/end symbols of one gate patch list (emitted by
 * the Xen gate linker script, outside this file). */
#define DECLARE(name)							\
	extern unsigned long __xen_start_gate_##name##_patchlist[];	\
	extern unsigned long __xen_end_gate_##name##_patchlist[]

DECLARE(fsyscall);
DECLARE(brl_fsys_bubble_down);
DECLARE(vtop);
DECLARE(mckinley_e9);

extern unsigned long __xen_start_gate_section[];

/* Fill in the matching start/end pair of struct pv_patchdata. */
#define ASSIGN(name)							\
	.start_##name##_patchlist =					\
		(unsigned long)__xen_start_gate_##name##_patchlist,	\
	.end_##name##_patchlist =					\
		(unsigned long)__xen_end_gate_##name##_patchlist

/* Patch-list addresses consumed by the paravirt gate patching code. */
static struct pv_patchdata xen_patchdata __initdata = {
	ASSIGN(fsyscall),
	ASSIGN(brl_fsys_bubble_down),
	ASSIGN(vtop),
	ASSIGN(mckinley_e9),

	.gate_section = (void*)__xen_start_gate_section,
};
222
223 /***************************************************************************
224  * pv_cpu_ops
225  * intrinsics hooks.
226  */
227
#ifndef ASM_SUPPORTED
/* C fallbacks for the intrinsics; the #else branch below provides
 * hand-written assembly equivalents. */

/* Convert the kernel's itm target to the hypervisor's time base by
 * subtracting the per-domain itc offset before setting the virtual itm. */
static void
xen_set_itm_with_offset(unsigned long val)
{
	/* ia64_cpu_local_tick() calls this with interrupt enabled. */
	/* WARN_ON(!irqs_disabled()); */
	xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
}

/* Inverse of the above: native cr.itm plus the per-domain offset. */
static unsigned long
xen_get_itm_with_offset(void)
{
	/* unused at this moment */
	printk(KERN_DEBUG "%s is called.\n", __func__);

	WARN_ON(!irqs_disabled());
	return ia64_native_getreg(_IA64_REG_CR_ITM) +
		XEN_MAPPEDREGS->itc_offset;
}
247
/* ia64_set_itc() is only called by
 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
 * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
 */
static void
xen_set_itc(unsigned long val)
{
	unsigned long mitc;

	WARN_ON(!irqs_disabled());
	mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
	/* remember how far the requested itc is from the machine ar.itc */
	XEN_MAPPEDREGS->itc_offset = val - mitc;
	XEN_MAPPEDREGS->itc_last = val;
}
262
/* Virtualized ar.itc read: machine ar.itc plus the per-domain offset,
 * made monotonically increasing via cmpxchg on itc_last. */
static unsigned long
xen_get_itc(void)
{
	unsigned long res;
	unsigned long itc_offset;
	unsigned long itc_last;
	unsigned long ret_itc_last;

	itc_offset = XEN_MAPPEDREGS->itc_offset;
	do {
		itc_last = XEN_MAPPEDREGS->itc_last;
		res = ia64_native_getreg(_IA64_REG_AR_ITC);
		res += itc_offset;
		/* never return a value below the last one handed out */
		if (itc_last >= res)
			res = itc_last + 1;
		/* publish res only if itc_last is still what we read */
		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
				       itc_last, res);
	} while (unlikely(ret_itc_last != itc_last));
	return res;

#if 0
	/* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
	   Should it be paravirtualized instead? */
	WARN_ON(!irqs_disabled());
	itc_offset = XEN_MAPPEDREGS->itc_offset;
	itc_last = XEN_MAPPEDREGS->itc_last;
	res = ia64_native_getreg(_IA64_REG_AR_ITC);
	res += itc_offset;
	if (itc_last >= res)
		res = itc_last + 1;
	XEN_MAPPEDREGS->itc_last = res;
	return res;
#endif
}
297
298 static void xen_setreg(int regnum, unsigned long val)
299 {
300         switch (regnum) {
301         case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
302                 xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
303                 break;
304 #ifdef CONFIG_IA32_SUPPORT
305         case _IA64_REG_AR_EFLAG:
306                 xen_set_eflag(val);
307                 break;
308 #endif
309         case _IA64_REG_AR_ITC:
310                 xen_set_itc(val);
311                 break;
312         case _IA64_REG_CR_TPR:
313                 xen_set_tpr(val);
314                 break;
315         case _IA64_REG_CR_ITM:
316                 xen_set_itm_with_offset(val);
317                 break;
318         case _IA64_REG_CR_EOI:
319                 xen_eoi(val);
320                 break;
321         default:
322                 ia64_native_setreg_func(regnum, val);
323                 break;
324         }
325 }
326
327 static unsigned long xen_getreg(int regnum)
328 {
329         unsigned long res;
330
331         switch (regnum) {
332         case _IA64_REG_PSR:
333                 res = xen_get_psr();
334                 break;
335 #ifdef CONFIG_IA32_SUPPORT
336         case _IA64_REG_AR_EFLAG:
337                 res = xen_get_eflag();
338                 break;
339 #endif
340         case _IA64_REG_AR_ITC:
341                 res = xen_get_itc();
342                 break;
343         case _IA64_REG_CR_ITM:
344                 res = xen_get_itm_with_offset();
345                 break;
346         case _IA64_REG_CR_IVR:
347                 res = xen_get_ivr();
348                 break;
349         case _IA64_REG_CR_TPR:
350                 res = xen_get_tpr();
351                 break;
352         default:
353                 res = ia64_native_getreg_func(regnum);
354                 break;
355         }
356         return res;
357 }
358
/* turning on interrupts is a bit more complicated.. write to the
 * memory-mapped virtual psr.i bit first (to avoid race condition),
 * then if any interrupts were pending, we have to execute a hyperprivop
 * to ensure the pending interrupt gets delivered; else we're done! */
static void
xen_ssm_i(void)
{
	int old = xen_get_virtual_psr_i();
	xen_set_virtual_psr_i(1);
	barrier();	/* unmask before inspecting pending interrupts */
	if (!old && xen_get_virtual_pend())
		xen_hyper_ssm_i();
}

/* turning off interrupts can be paravirtualized simply by writing
 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
static void
xen_rsm_i(void)
{
	xen_set_virtual_psr_i(0);
	barrier();
}
381
382 static unsigned long
383 xen_get_psr_i(void)
384 {
385         return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
386 }
387
388 static void
389 xen_intrin_local_irq_restore(unsigned long mask)
390 {
391         if (mask & IA64_PSR_I)
392                 xen_ssm_i();
393         else
394                 xen_rsm_i();
395 }
#else
/*
 * Hand-written assembly stubs.  Each xen_<name> is a small leaf
 * routine that returns by branching to b6; the bytes between
 * <name>_direct_start and <name>_direct_end are available for
 * inline patching at call sites.
 */
#define __DEFINE_FUNC(name, code)					\
	extern const char xen_ ## name ## _direct_start[];		\
	extern const char xen_ ## name ## _direct_end[];		\
	asm (".align 32\n"						\
	     ".proc xen_" #name "\n"					\
	     "xen_" #name ":\n"						\
	     "xen_" #name "_direct_start:\n"				\
	     code							\
	     "xen_" #name "_direct_end:\n"				\
	     "br.cond.sptk.many b6\n"					\
	     ".endp xen_" #name "\n")

/* Variants that also declare a C prototype of matching arity. */
#define DEFINE_VOID_FUNC0(name, code)		\
	extern void				\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC1(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg);	\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC1_VOID(name, code)	\
	extern void				\
	xen_ ## name (void *arg);		\
	__DEFINE_FUNC(name, code)

#define DEFINE_VOID_FUNC2(name, code)		\
	extern void				\
	xen_ ## name (unsigned long arg0,	\
		      unsigned long arg1);	\
	__DEFINE_FUNC(name, code)

#define DEFINE_FUNC0(name, code)		\
	extern unsigned long			\
	xen_ ## name (void);			\
	__DEFINE_FUNC(name, code)

#define DEFINE_FUNC1(name, type, code)		\
	extern unsigned long			\
	xen_ ## name (type arg);		\
	__DEFINE_FUNC(name, code)

/* Location (in the XSI area) of the pointer to the psr.i mask byte. */
#define XEN_PSR_I_ADDR_ADDR	(XSI_BASE + XSI_PSR_I_ADDR_OFS)
441
/*
 * static void xen_set_itm_with_offset(unsigned long val)
 *        xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
 */
/* 2 bundles */
DEFINE_VOID_FUNC1(set_itm_with_offset,
		  /* r2 = &itc_offset in the XSI area */
		  "mov r2 = " __stringify(XSI_BASE) " + "
		  __stringify(XSI_ITC_OFFSET_OFS) "\n"
		  ";;\n"
		  "ld8 r3 = [r2]\n"	/* r3 = itc_offset */
		  ";;\n"
		  "sub r8 = r8, r3\n"	/* r8 = val - itc_offset */
		  "break " __stringify(HYPERPRIVOP_SET_ITM) "\n");
455
456 /*
457  * static unsigned long xen_get_itm_with_offset(void)
458  *    return ia64_native_getreg(_IA64_REG_CR_ITM) + XEN_MAPPEDREGS->itc_offset;
459  */
460 /* 2 bundles */
461 DEFINE_FUNC0(get_itm_with_offset,
462              "mov r2 = " __stringify(XSI_BASE) " + "
463              __stringify(XSI_ITC_OFFSET_OFS) "\n"
464              ";;\n"
465              "ld8 r3 = [r2]\n"
466              "mov r8 = cr.itm\n"
467              ";;\n"
468              "add r8 = r8, r2\n");
469
470 /*
471  * static void xen_set_itc(unsigned long val)
472  *      unsigned long mitc;
473  *
474  *      WARN_ON(!irqs_disabled());
475  *      mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
476  *      XEN_MAPPEDREGS->itc_offset = val - mitc;
477  *      XEN_MAPPEDREGS->itc_last = val;
478  */
479 /* 2 bundles */
480 DEFINE_VOID_FUNC1(set_itc,
481                   "mov r2 = " __stringify(XSI_BASE) " + "
482                   __stringify(XSI_ITC_LAST_OFS) "\n"
483                   "mov r3 = ar.itc\n"
484                   ";;\n"
485                   "sub r3 = r8, r3\n"
486                   "st8 [r2] = r8, "
487                   __stringify(XSI_ITC_LAST_OFS) " - "
488                   __stringify(XSI_ITC_OFFSET_OFS) "\n"
489                   ";;\n"
490                   "st8 [r2] = r3\n");
491
/*
 * static unsigned long xen_get_itc(void)
 *	unsigned long res;
 *	unsigned long itc_offset;
 *	unsigned long itc_last;
 *	unsigned long ret_itc_last;
 *
 *	itc_offset = XEN_MAPPEDREGS->itc_offset;
 *	do {
 *		itc_last = XEN_MAPPEDREGS->itc_last;
 *		res = ia64_native_getreg(_IA64_REG_AR_ITC);
 *		res += itc_offset;
 *		if (itc_last >= res)
 *			res = itc_last + 1;
 *		ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
 *				       itc_last, res);
 *	} while (unlikely(ret_itc_last != itc_last));
 *	return res;
 */
/* 5 bundles */
DEFINE_FUNC0(get_itc,
	     "mov r2 = " __stringify(XSI_BASE) " + "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
	     ";;\n"
	     "ld8 r9 = [r2], " __stringify(XSI_ITC_LAST_OFS) " - "
	     __stringify(XSI_ITC_OFFSET_OFS) "\n"
					/* r9 = itc_offset */
					/* r2 = &itc_last (post-incremented) */
	     "888:\n"
	     "mov r8 = ar.itc\n"	/* res = ar.itc */
	     ";;\n"
	     "ld8 r3 = [r2]\n"		/* r3 = itc_last */
	     "add r8 = r8, r9\n"	/* res = ar.itc + itc_offset */
	     ";;\n"
	     /* NOTE(review): strict '>' here while the C twin uses '>=';
	      * an exactly-equal value is returned unchanged -- confirm
	      * this asymmetry is intended. */
	     "cmp.gtu p6, p0 = r3, r8\n"
	     ";;\n"
	     "(p6) add r8 = 1, r3\n"	/* if (itc_last > res) itc_last + 1 */
	     ";;\n"
	     "mov ar.ccv = r8\n"
	     ";;\n"
	     /* publish res only if itc_last is still the value we read */
	     "cmpxchg8.acq r10 = [r2], r8, ar.ccv\n"
	     ";;\n"
	     "cmp.ne p6, p0 = r10, r3\n"
	     "(p6) hint @pause\n"
	     "(p6) br.cond.spnt 888b\n");	/* raced with another vcpu: retry */

/* fc (flush cache) intrinsic, forwarded as a hyperprivop. */
DEFINE_VOID_FUNC1_VOID(fc,
		       "break " __stringify(HYPERPRIVOP_FC) "\n");
540
/*
 * psr_i_addr_addr = XEN_PSR_I_ADDR_ADDR
 * masked_addr = *psr_i_addr_addr
 * pending_intr_addr = masked_addr - 1
 * if (val & IA64_PSR_I) {
 *   masked = *masked_addr
 *   *masked_addr = 0:xen_set_virtual_psr_i(1)
 *   compiler barrier
 *   if (masked) {
 *      uint8_t pending = *pending_intr_addr;
 *      if (pending)
 *              XEN_HYPER_SSM_I
 *   }
 * } else {
 *   *masked_addr = 1:xen_set_virtual_psr_i(0)
 * }
 */
/* 6 bundles */
DEFINE_VOID_FUNC1(intrin_local_irq_restore,
		  /* r8 = input value: 0 or IA64_PSR_I
		   * p6 =  (flags & IA64_PSR_I)
		   *    = if clause
		   * p7 = !(flags & IA64_PSR_I)
		   *    = else clause
		   */
		  "cmp.ne p6, p7 = r8, r0\n"
		  "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  /* r9 = XEN_PSR_I_ADDR */
		  "ld8 r9 = [r9]\n"
		  ";;\n"

		  /* r10 = masked previous value */
		  "(p6) ld1.acq r10 = [r9]\n"
		  ";;\n"

		  /* p8 = !masked interrupt masked previously? */
		  "(p6) cmp.ne.unc p8, p0 = r10, r0\n"

		  /* p7 = else clause */
		  "(p7) mov r11 = 1\n"
		  ";;\n"
		  /* masked = 1 */
		  "(p7) st1.rel [r9] = r11\n"

		  /* p6 = if clause */
		  /* masked = 0
		   * r9 = masked_addr - 1
		   *    = pending_intr_addr
		   */
		  "(p8) st1.rel [r9] = r0, -1\n"
		  ";;\n"
		  /* r8 = pending_intr */
		  "(p8) ld1.acq r11 = [r9]\n"
		  ";;\n"
		  /* p9 = interrupt pending? */
		  "(p8) cmp.ne.unc p9, p10 = r11, r0\n"
		  ";;\n"
		  "(p10) mf\n"
		  /* issue hypercall to trigger interrupt */
		  "(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n");

/* Global TLB purge and region-register write, each one hyperprivop. */
DEFINE_VOID_FUNC2(ptcga,
		  "break " __stringify(HYPERPRIVOP_PTC_GA) "\n");
DEFINE_VOID_FUNC2(set_rr,
		  "break " __stringify(HYPERPRIVOP_SET_RR) "\n");
607
/*
 * tmp = XEN_MAPPEDREGS->interrupt_mask_addr = XEN_PSR_I_ADDR_ADDR;
 * tmp = *tmp
 * tmp = *tmp;
 * psr_i = tmp? 0: IA64_PSR_I;
 */
/* 4 bundles */
DEFINE_FUNC0(get_psr_i,
	     "mov r9 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	     ";;\n"
	     "ld8 r9 = [r9]\n"			/* r9 = XEN_PSR_I_ADDR */
	     "mov r8 = 0\n"			/* psr_i = 0 */
	     ";;\n"
	     "ld1.acq r9 = [r9]\n"		/* r9 = masked flag */
	     ";;\n"
	     "cmp.eq.unc p6, p0 = r9, r0\n"	/* p6 = (flag == 0), i.e.
						 * interrupts not masked */
	     ";;\n"
	     "(p6) mov r8 = " __stringify(1 << IA64_PSR_I_BIT) "\n");

/* Simple single-hyperprivop intrinsics. */
DEFINE_FUNC1(thash, unsigned long,
	     "break " __stringify(HYPERPRIVOP_THASH) "\n");
DEFINE_FUNC1(get_cpuid, int,
	     "break " __stringify(HYPERPRIVOP_GET_CPUID) "\n");
DEFINE_FUNC1(get_pmd, int,
	     "break " __stringify(HYPERPRIVOP_GET_PMD) "\n");
DEFINE_FUNC1(get_rr, unsigned long,
	     "break " __stringify(HYPERPRIVOP_GET_RR) "\n");
635
/*
 * void xen_privop_ssm_i(void)
 *
 * int masked = !xen_get_virtual_psr_i();
 *	// masked = *(*XEN_MAPPEDREGS->interrupt_mask_addr)
 * xen_set_virtual_psr_i(1)
 *	// *(*XEN_MAPPEDREGS->interrupt_mask_addr) = 0
 * // compiler barrier
 * if (masked) {
 *	uint8_t* pend_int_addr =
 *		(uint8_t*)(*XEN_MAPPEDREGS->interrupt_mask_addr) - 1;
 *	uint8_t pending = *pend_int_addr;
 *	if (pending)
 *		XEN_HYPER_SSM_I
 * }
 */
/* 4 bundles */
DEFINE_VOID_FUNC0(ssm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "ld1.acq r9 = [r8]\n"		/* r9 = XEN_PSR_I */
		  ";;\n"
		  "st1.rel [r8] = r0, -1\n"	/* psr_i = 0. enable interrupt
						 * r8 = XEN_PSR_I_ADDR - 1
						 *    = pend_int_addr
						 */
		  "cmp.eq.unc p0, p6 = r9, r0\n"/* p6 = !XEN_PSR_I
						 * previously interrupt
						 * masked?
						 */
		  ";;\n"
		  "(p6) ld1.acq r8 = [r8]\n"	/* r8 = xen_pend_int */
		  ";;\n"
		  "(p6) cmp.eq.unc p6, p7 = r8, r0\n"	/*interrupt pending?*/
		  ";;\n"
		  /* issue hypercall to get interrupt */
		  "(p7) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
		  ";;\n");

/*
 * psr_i_addr_addr = XEN_MAPPEDREGS->interrupt_mask_addr
 *                 = XEN_PSR_I_ADDR_ADDR;
 * psr_i_addr = *psr_i_addr_addr;
 * *psr_i_addr = 1;
 */
/* 2 bundles */
DEFINE_VOID_FUNC0(rsm_i,
		  "mov r8 = " __stringify(XEN_PSR_I_ADDR_ADDR) "\n"
						/* r8 = XEN_PSR_I_ADDR_ADDR */
		  "mov r9 = 1\n"
		  ";;\n"
		  "ld8 r8 = [r8]\n"		/* r8 = XEN_PSR_I_ADDR */
		  ";;\n"
		  "st1.rel [r8] = r9\n");	/* masked flag = 1: psr.i off */

/* Set rr0..rr4 with one hyperprivop; five arguments, hence the
 * hand-rolled prototype instead of a DEFINE_* helper. */
extern void
xen_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
		   unsigned long val2, unsigned long val3,
		   unsigned long val4);
__DEFINE_FUNC(set_rr0_to_rr4,
	      "break " __stringify(HYPERPRIVOP_SET_RR0_TO_RR4) "\n");
699
700
extern unsigned long xen_getreg(int regnum);
/* Compare regnum (in r8) with a constant; on a match, issue the GET
 * hyperprivop (result in r8) and return via b6. */
#define __DEFINE_GET_REG(id, privop)					\
	"mov r2 = " __stringify(_IA64_REG_ ## id) "\n"			\
	";;\n"								\
	"cmp.eq p6, p0 = r2, r8\n"					\
	";;\n"								\
	"(p6) break " __stringify(HYPERPRIVOP_GET_ ## privop) "\n"	\
	"(p6) br.cond.sptk.many b6\n"					\
	";;\n"

__DEFINE_FUNC(getreg,
	      __DEFINE_GET_REG(PSR, PSR)
#ifdef CONFIG_IA32_SUPPORT
	      __DEFINE_GET_REG(AR_EFLAG, EFLAG)
#endif

	      /* get_itc: needs the offset logic, so tail-branch to
	       * the full xen_get_itc stub */
	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itc\n"
	      ";;\n"

	      /* get itm */
	      "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
	      ";;\n"
	      "cmp.eq p6, p0 = r2, r8\n"
	      ";;\n"
	      "(p6) br.cond.spnt xen_get_itm_with_offset\n"
	      ";;\n"

	      __DEFINE_GET_REG(CR_IVR, IVR)
	      __DEFINE_GET_REG(CR_TPR, TPR)

	      /* fall back */
	      "movl r2 = ia64_native_getreg_func\n"
	      ";;\n"
	      "mov b7 = r2\n"
	      ";;\n"
	      "br.cond.sptk.many b7\n");
742
743 extern void xen_setreg(int regnum, unsigned long val);
744 #define __DEFINE_SET_REG(id, privop)                                    \
745         "mov r2 = " __stringify(_IA64_REG_ ## id) "\n"                  \
746         ";;\n"                                                          \
747         "cmp.eq p6, p0 = r2, r9\n"                                      \
748         ";;\n"                                                          \
749         "(p6) break " __stringify(HYPERPRIVOP_ ## privop) "\n"          \
750         "(p6) br.cond.sptk.many b6\n"                                   \
751         ";;\n"
752
753 __DEFINE_FUNC(setreg,
754               /* kr0 .. kr 7*/
755               /*
756                * if (_IA64_REG_AR_KR0 <= regnum &&
757                *     regnum <= _IA64_REG_AR_KR7) {
758                *     register __index asm ("r8") = regnum - _IA64_REG_AR_KR0
759                *     register __val asm ("r9") = val
760                *    "break HYPERPRIVOP_SET_KR"
761                * }
762                */
763               "mov r17 = r9\n"
764               "mov r2 = " __stringify(_IA64_REG_AR_KR0) "\n"
765               ";;\n"
766               "cmp.ge p6, p0 = r9, r2\n"
767               "sub r17 = r17, r2\n"
768               ";;\n"
769               "(p6) cmp.ge.unc p7, p0 = "
770               __stringify(_IA64_REG_AR_KR7) " - " __stringify(_IA64_REG_AR_KR0)
771               ", r17\n"
772               ";;\n"
773               "(p7) mov r9 = r8\n"
774               ";;\n"
775               "(p7) mov r8 = r17\n"
776               "(p7) break " __stringify(HYPERPRIVOP_SET_KR) "\n"
777
778               /* set itm */
779               "mov r2 = " __stringify(_IA64_REG_CR_ITM) "\n"
780               ";;\n"
781               "cmp.eq p6, p0 = r2, r8\n"
782               ";;\n"
783               "(p6) br.cond.spnt xen_set_itm_with_offset\n"
784
785               /* set itc */
786               "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
787               ";;\n"
788               "cmp.eq p6, p0 = r2, r8\n"
789               ";;\n"
790               "(p6) br.cond.spnt xen_set_itc\n"
791
792 #ifdef CONFIG_IA32_SUPPORT
793               __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
794 #endif
795               __DEFINE_SET_REG(CR_TPR, SET_TPR)
796               __DEFINE_SET_REG(CR_EOI, EOI)
797
798               /* fall back */
799               "movl r2 = ia64_native_setreg_func\n"
800               ";;\n"
801               "mov b7 = r2\n"
802               ";;\n"
803               "br.cond.sptk.many b7\n");
804 #endif
805
/* Intrinsic replacement table installed into pv_cpu_ops by
 * xen_setup_pv_ops().  Entries resolve to either the C fallbacks or
 * the assembly stubs, depending on ASM_SUPPORTED. */
static const struct pv_cpu_ops xen_cpu_ops __initconst = {
	.fc		= xen_fc,
	.thash		= xen_thash,
	.get_cpuid	= xen_get_cpuid,
	.get_pmd	= xen_get_pmd,
	.getreg		= xen_getreg,
	.setreg		= xen_setreg,
	.ptcga		= xen_ptcga,
	.get_rr		= xen_get_rr,
	.set_rr		= xen_set_rr,
	.set_rr0_to_rr4	= xen_set_rr0_to_rr4,
	.ssm_i		= xen_ssm_i,
	.rsm_i		= xen_rsm_i,
	.get_psr_i	= xen_get_psr_i,
	.intrin_local_irq_restore
			= xen_intrin_local_irq_restore,
};
823
824 /******************************************************************************
825  * replacement of hand written assembly codes.
826  */
827
/* Entry points written in assembly elsewhere in the Xen port;
 * declared as char so only their addresses are taken. */
extern char xen_switch_to;
extern char xen_leave_syscall;
extern char xen_work_processed_syscall;
extern char xen_leave_kernel;

const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
	.switch_to		= (unsigned long)&xen_switch_to,
	.leave_syscall		= (unsigned long)&xen_leave_syscall,
	.work_processed_syscall	= (unsigned long)&xen_work_processed_syscall,
	.leave_kernel		= (unsigned long)&xen_leave_kernel,
};
839
840 /***************************************************************************
841  * pv_iosapic_ops
842  * iosapic read/write hooks.
843  */
/* No PC/AT compatibility setup is needed under Xen. */
static void
xen_pcat_compat_init(void)
{
	/* nothing */
}

/* No Xen-specific irq_chip; NULL presumably lets the iosapic caller
 * use its default -- confirm against the pv_iosapic_ops consumer. */
static struct irq_chip*
xen_iosapic_get_irq_chip(unsigned long trigger)
{
	return NULL;
}
855
856 static unsigned int
857 xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
858 {
859         struct physdev_apic apic_op;
860         int ret;
861
862         apic_op.apic_physbase = (unsigned long)iosapic -
863                                         __IA64_UNCACHED_OFFSET;
864         apic_op.reg = reg;
865         ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
866         if (ret)
867                 return ret;
868         return apic_op.value;
869 }
870
871 static void
872 xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
873 {
874         struct physdev_apic apic_op;
875
876         apic_op.apic_physbase = (unsigned long)iosapic -
877                                         __IA64_UNCACHED_OFFSET;
878         apic_op.reg = reg;
879         apic_op.value = val;
880         HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
881 }
882
/*
 * Xen iosapic hooks: register accesses are routed through
 * PHYSDEVOP_apic_* hypercalls rather than direct hardware access.
 * Copied into pv_iosapic_ops by xen_setup_pv_ops().
 */
static struct pv_iosapic_ops xen_iosapic_ops __initdata = {
	.pcat_compat_init = xen_pcat_compat_init,
	.__get_irq_chip = xen_iosapic_get_irq_chip,

	.__read = xen_iosapic_read,
	.__write = xen_iosapic_write,
};
890
891 /***************************************************************************
892  * pv_ops initialization
893  */
894
/*
 * Install the Xen paravirtualized operation tables, replacing the
 * native pv_* ops wholesale, then switch the hand-written assembly
 * entry points over to their Xen versions via paravirt_cpu_asm_init().
 */
void __init
xen_setup_pv_ops(void)
{
	/* determine kernel_rpl at runtime before publishing pv_info */
	xen_info_init();
	pv_info = xen_info;
	pv_init_ops = xen_init_ops;
	pv_fsys_data = xen_fsys_data;
	pv_patchdata = xen_patchdata;
	pv_cpu_ops = xen_cpu_ops;
	pv_iosapic_ops = xen_iosapic_ops;
	pv_irq_ops = xen_irq_ops;
	pv_time_ops = xen_time_ops;

	paravirt_cpu_asm_init(&xen_cpu_asm_switch);
}
910
911 #ifdef ASM_SUPPORTED
912 /***************************************************************************
 * binary patching
914  * pv_init_ops.patch_bundle
915  */
916
/*
 * Generate tiny functions whose entire body is a single hyperprivop
 * "break" instruction.  These serve as the direct-call templates that
 * xen_patch_bundle() below copies over paravirt patch sites.
 * DEFINE_FUNC0/DEFINE_VOID_FUNC0 are provided by a shared header
 * (NOTE(review): presumably asm/native/inst.h-style patch helpers --
 * confirm).
 */
#define DEFINE_FUNC_GETREG(name, privop)				\
	DEFINE_FUNC0(get_ ## name,					\
		     "break "__stringify(HYPERPRIVOP_GET_ ## privop) "\n")

DEFINE_FUNC_GETREG(psr, PSR);
DEFINE_FUNC_GETREG(eflag, EFLAG);
DEFINE_FUNC_GETREG(ivr, IVR);
DEFINE_FUNC_GETREG(tpr, TPR);

/*
 * HYPERPRIVOP_SET_KR takes the kr index in r8 and the value in r9;
 * the template moves the caller's value out of r8 first.
 */
#define DEFINE_FUNC_SET_KR(n)						\
	DEFINE_VOID_FUNC0(set_kr ## n,					\
			  ";;\n"					\
			  "mov r9 = r8\n"				\
			  "mov r8 = " #n "\n"				\
			  "break " __stringify(HYPERPRIVOP_SET_KR) "\n")

DEFINE_FUNC_SET_KR(0);
DEFINE_FUNC_SET_KR(1);
DEFINE_FUNC_SET_KR(2);
DEFINE_FUNC_SET_KR(3);
DEFINE_FUNC_SET_KR(4);
DEFINE_FUNC_SET_KR(5);
DEFINE_FUNC_SET_KR(6);
DEFINE_FUNC_SET_KR(7);

/*
 * Setter templates.  The __-prefixed variant is for hyperprivops whose
 * name does not follow the SET_<reg> pattern (e.g. EOI).
 */
#define __DEFINE_FUNC_SETREG(name, privop)				\
	DEFINE_VOID_FUNC0(name,						\
			  "break "__stringify(HYPERPRIVOP_ ## privop) "\n")

#define DEFINE_FUNC_SETREG(name, privop)			\
	__DEFINE_FUNC_SETREG(set_ ## name, SET_ ## privop)

DEFINE_FUNC_SETREG(eflag, EFLAG);
DEFINE_FUNC_SETREG(tpr, TPR);
__DEFINE_FUNC_SETREG(eoi, EOI);
952
/*
 * Code labels exported by the inline asm below.  The _direct_start/
 * _direct_end pair delimits the patch template for
 * intrin_local_irq_restore; _direct_reloc records the address of the
 * brl that must be relocated to xen_check_events at patch time (see
 * xen_patch_bundle()).
 */
extern const char xen_check_events[];
extern const char __xen_intrin_local_irq_restore_direct_start[];
extern const char __xen_intrin_local_irq_restore_direct_end[];
extern const unsigned long __xen_intrin_local_irq_restore_direct_reloc;

/*
 * xen_check_events: with r9 pointing at the masked byte, clear the
 * mask and, if an event is pending, issue HYPERPRIVOP_SSM_I so the
 * pending interrupt is delivered; returns via b6.
 *
 * __xen_intrin_local_irq_restore_direct: patch template for
 * intrin_local_irq_restore(r8).  If r8 is non-zero (enable) and
 * interrupts were previously masked, it branches to xen_check_events
 * (the brl at local label 99, relocated at patch time); otherwise it
 * sets masked = 1.  Trailing nops pad the template to the 5-bundle
 * size of the pv calling stub so the computed return address matches.
 */
asm (
	".align 32\n"
	".proc xen_check_events\n"
	"xen_check_events:\n"
	/* masked = 0
	 * r9 = masked_addr - 1
	 *    = pending_intr_addr
	 */
	"st1.rel [r9] = r0, -1\n"
	";;\n"
	/* r8 = pending_intr */
	"ld1.acq r11 = [r9]\n"
	";;\n"
	/* p9 = interrupt pending? */
	"cmp.ne p9, p10 = r11, r0\n"
	";;\n"
	"(p10) mf\n"
	/* issue hypercall to trigger interrupt */
	"(p9) break " __stringify(HYPERPRIVOP_SSM_I) "\n"
	"br.cond.sptk.many b6\n"
	".endp xen_check_events\n"
	"\n"
	".align 32\n"
	".proc __xen_intrin_local_irq_restore_direct\n"
	"__xen_intrin_local_irq_restore_direct:\n"
	"__xen_intrin_local_irq_restore_direct_start:\n"
	"1:\n"
	"{\n"
	"cmp.ne p6, p7 = r8, r0\n"
	"mov r17 = ip\n" /* get ip to calc return address */
	"mov r9 = "__stringify(XEN_PSR_I_ADDR_ADDR) "\n"
	";;\n"
	"}\n"
	"{\n"
	/* r9 = XEN_PSR_I_ADDR */
	"ld8 r9 = [r9]\n"
	";;\n"
	/* r10 = masked previous value */
	"(p6) ld1.acq r10 = [r9]\n"
	"adds r17 =  1f - 1b, r17\n" /* calculate return address */
	";;\n"
	"}\n"
	"{\n"
	/* p8 = !masked interrupt masked previously? */
	"(p6) cmp.ne.unc p8, p0 = r10, r0\n"
	"\n"
	/* p7 = else clause */
	"(p7) mov r11 = 1\n"
	";;\n"
	"(p8) mov b6 = r17\n" /* set return address */
	"}\n"
	"{\n"
	/* masked = 1 */
	"(p7) st1.rel [r9] = r11\n"
	"\n"
	"[99:]\n"
	"(p8) brl.cond.dptk.few xen_check_events\n"
	"}\n"
	/* pv calling stub is 5 bundles. fill nop to adjust return address */
	"{\n"
	"nop 0\n"
	"nop 0\n"
	"nop 0\n"
	"}\n"
	"1:\n"
	"__xen_intrin_local_irq_restore_direct_end:\n"
	".endp __xen_intrin_local_irq_restore_direct\n"
	"\n"
	".align 8\n"
	"__xen_intrin_local_irq_restore_direct_reloc:\n"
	"data8 99b\n"
);
1030
/*
 * Table mapping each paravirt patch type to the [start, end) range of
 * the corresponding xen_*_direct template code generated above.
 * Consumed by xen_patch_bundle() via __paravirt_patch_apply_bundle().
 */
static struct paravirt_patch_bundle_elem xen_patch_bundle_elems[]
__initdata_or_module =
{
#define XEN_PATCH_BUNDLE_ELEM(name, type)		\
	{						\
		(void*)xen_ ## name ## _direct_start,	\
		(void*)xen_ ## name ## _direct_end,	\
		PARAVIRT_PATCH_TYPE_ ## type,		\
	}

	XEN_PATCH_BUNDLE_ELEM(fc, FC),
	XEN_PATCH_BUNDLE_ELEM(thash, THASH),
	XEN_PATCH_BUNDLE_ELEM(get_cpuid, GET_CPUID),
	XEN_PATCH_BUNDLE_ELEM(get_pmd, GET_PMD),
	XEN_PATCH_BUNDLE_ELEM(ptcga, PTCGA),
	XEN_PATCH_BUNDLE_ELEM(get_rr, GET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr, SET_RR),
	XEN_PATCH_BUNDLE_ELEM(set_rr0_to_rr4, SET_RR0_TO_RR4),
	XEN_PATCH_BUNDLE_ELEM(ssm_i, SSM_I),
	XEN_PATCH_BUNDLE_ELEM(rsm_i, RSM_I),
	XEN_PATCH_BUNDLE_ELEM(get_psr_i, GET_PSR_I),
	/* the irq-restore template needs relocation; listed explicitly */
	{
		(void*)__xen_intrin_local_irq_restore_direct_start,
		(void*)__xen_intrin_local_irq_restore_direct_end,
		PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE,
	},

/* getter templates: patch type encodes the register being read */
#define XEN_PATCH_BUNDLE_ELEM_GETREG(name, reg)			\
	{							\
		xen_get_ ## name ## _direct_start,		\
		xen_get_ ## name ## _direct_end,		\
		PARAVIRT_PATCH_TYPE_GETREG + _IA64_REG_ ## reg,	\
	}

	XEN_PATCH_BUNDLE_ELEM_GETREG(psr, PSR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(eflag, AR_EFLAG),

	XEN_PATCH_BUNDLE_ELEM_GETREG(ivr, CR_IVR),
	XEN_PATCH_BUNDLE_ELEM_GETREG(tpr, CR_TPR),

	XEN_PATCH_BUNDLE_ELEM_GETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_GETREG(itm_with_offset, CR_ITM),


/* setter templates: patch type encodes the register being written */
#define __XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)		\
	{							\
		xen_ ## name ## _direct_start,			\
		xen_ ## name ## _direct_end,			\
		PARAVIRT_PATCH_TYPE_SETREG + _IA64_REG_ ## reg,	\
	}

#define XEN_PATCH_BUNDLE_ELEM_SETREG(name, reg)			\
	__XEN_PATCH_BUNDLE_ELEM_SETREG(set_ ## name, reg)

	XEN_PATCH_BUNDLE_ELEM_SETREG(kr0, AR_KR0),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr1, AR_KR1),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr2, AR_KR2),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr3, AR_KR3),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr4, AR_KR4),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr5, AR_KR5),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr6, AR_KR6),
	XEN_PATCH_BUNDLE_ELEM_SETREG(kr7, AR_KR7),

	XEN_PATCH_BUNDLE_ELEM_SETREG(eflag, AR_EFLAG),
	XEN_PATCH_BUNDLE_ELEM_SETREG(tpr, CR_TPR),
	__XEN_PATCH_BUNDLE_ELEM_SETREG(eoi, CR_EOI),

	XEN_PATCH_BUNDLE_ELEM_SETREG(itc, AR_ITC),
	XEN_PATCH_BUNDLE_ELEM_SETREG(itm_with_offset, CR_ITM),
};
1101
1102 static unsigned long __init_or_module
1103 xen_patch_bundle(void *sbundle, void *ebundle, unsigned long type)
1104 {
1105         const unsigned long nelems = sizeof(xen_patch_bundle_elems) /
1106                 sizeof(xen_patch_bundle_elems[0]);
1107         unsigned long used;
1108         const struct paravirt_patch_bundle_elem *found;
1109
1110         used = __paravirt_patch_apply_bundle(sbundle, ebundle, type,
1111                                              xen_patch_bundle_elems, nelems,
1112                                              &found);
1113
1114         if (found == NULL)
1115                 /* fallback */
1116                 return ia64_native_patch_bundle(sbundle, ebundle, type);
1117         if (used == 0)
1118                 return used;
1119
1120         /* relocation */
1121         switch (type) {
1122         case PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE: {
1123                 unsigned long reloc =
1124                         __xen_intrin_local_irq_restore_direct_reloc;
1125                 unsigned long reloc_offset = reloc - (unsigned long)
1126                         __xen_intrin_local_irq_restore_direct_start;
1127                 unsigned long tag = (unsigned long)sbundle + reloc_offset;
1128                 paravirt_patch_reloc_brl(tag, xen_check_events);
1129                 break;
1130         }
1131         default:
1132                 /* nothing */
1133                 break;
1134         }
1135         return used;
1136 }
#endif /* ASM_SUPPORTED */
1138
/*
 * Branch patch targets: maps each BR patch type to the address of the
 * corresponding Xen assembly entry point declared above.  Consumed by
 * xen_patch_branch().
 */
const struct paravirt_patch_branch_target xen_branch_target[]
__initconst = {
#define PARAVIRT_BR_TARGET(name, type)			\
	{						\
		&xen_ ## name,				\
		PARAVIRT_PATCH_TYPE_BR_ ## type,	\
	}
	PARAVIRT_BR_TARGET(switch_to, SWITCH_TO),
	PARAVIRT_BR_TARGET(leave_syscall, LEAVE_SYSCALL),
	PARAVIRT_BR_TARGET(work_processed_syscall, WORK_PROCESSED_SYSCALL),
	PARAVIRT_BR_TARGET(leave_kernel, LEAVE_KERNEL),
};
1151
1152 static void __init
1153 xen_patch_branch(unsigned long tag, unsigned long type)
1154 {
1155         const unsigned long nelem =
1156                 sizeof(xen_branch_target) / sizeof(xen_branch_target[0]);
1157         __paravirt_patch_apply_branch(tag, type, xen_branch_target, nelem);
1158 }