arch/ia64/kvm/vcpu.c
1 /*
2  * kvm_vcpu.c: handles all virtual CPU related things.
3  * Copyright (c) 2005, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  *  Shaofan Li (Susue Li) <susie.li@intel.com>
19  *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20  *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21  *  Xiantao Zhang <xiantao.zhang@intel.com>
22  */
23
24 #include <linux/kvm_host.h>
25 #include <linux/types.h>
26
27 #include <asm/processor.h>
28 #include <asm/ia64regs.h>
29 #include <asm/gcc_intrin.h>
30 #include <asm/kregs.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
33
34 #include "asm-offsets.h"
35 #include "vcpu.h"
36
37 /*
38  * Special notes:
39  * - Index by it/dt/rt sequence
40  * - Only existing mode transitions are allowed in this table
 41  * - RSE is placed in lazy mode when emulating guest partial mode
 42  * - If gva happens to fall in rr0 or rr4, the only allowed case is
 43  *   identity mapping (gva=gpa); otherwise panic! (How?)
44  */
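/*
 * mm_switch_action() below indexes this table with MODE_IND(), which packs
 * (it,dt,rt) into a 3-bit value: row/column 0 is the fully physical
 * (0,0,0) combination and row/column 7 the fully virtual (1,1,1) one.
 */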
45 int mm_switch_table[8][8] = {
46         /*  2004/09/12(Kevin): Allow switch to self */
47         /*
48          *  (it,dt,rt): (0,0,0) -> (1,1,1)
49          *  This kind of transition usually occurs in the very early
50          *  stage of Linux boot up procedure. Another case is in efi
51          *  and pal calls. (see "arch/ia64/kernel/head.S")
52          *
53          *  (it,dt,rt): (0,0,0) -> (0,1,1)
 54  *  This kind of transition is found when OSYa exits the efi boot
 55  *  service. Since gva = gpa in this case (same region), data
 56  *  accesses can be satisfied even though the itlb entry for
 57  *  physical emulation is hit.
58          */
59         {SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
60         {0,  0,  0,  0,  0,  0,  0,  0},
61         {0,  0,  0,  0,  0,  0,  0,  0},
62         /*
63          *  (it,dt,rt): (0,1,1) -> (1,1,1)
64          *  This kind of transition is found in OSYa.
65          *
66          *  (it,dt,rt): (0,1,1) -> (0,0,0)
67          *  This kind of transition is found in OSYa
68          */
69         {SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
70         /* (1,0,0)->(1,1,1) */
71         {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
72         /*
73          *  (it,dt,rt): (1,0,1) -> (1,1,1)
74          *  This kind of transition usually occurs when Linux returns
75          *  from the low level TLB miss handlers.
76          *  (see "arch/ia64/kernel/ivt.S")
77          */
78         {0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
79         {0,  0,  0,  0,  0,  0,  0,  0},
80         /*
81          *  (it,dt,rt): (1,1,1) -> (1,0,1)
82          *  This kind of transition usually occurs in Linux low level
83          *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84          *
85          *  (it,dt,rt): (1,1,1) -> (0,0,0)
86          *  This kind of transition usually occurs in pal and efi calls,
87          *  which requires running in physical mode.
88          *  (see "arch/ia64/kernel/head.S")
89          *  (1,1,1)->(1,0,0)
90          */
91
92         {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
93 };
94
95 void physical_mode_init(struct kvm_vcpu  *vcpu)
96 {
97         vcpu->arch.mode_flags = GUEST_IN_PHY;
98 }
99
100 void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101 {
102         unsigned long psr;
103
104         /* Load the metaphysical rr[0] and rr[4] used for physical mode emulation */
105         psr = ia64_clear_ic();
106         ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107         ia64_srlz_d();
108         ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109         ia64_srlz_d();
110
111         ia64_set_psr(psr);
112         return;
113 }
114
115
116 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
117 {
118         unsigned long psr;
119
120         psr = ia64_clear_ic();
121         ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
122         ia64_srlz_d();
123         ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
124         ia64_srlz_d();
125         ia64_set_psr(psr);
126         return;
127 }
128
129 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
130 {
131         return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
132 }
133
134 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
135                                         struct ia64_psr new_psr)
136 {
137         int act;
138         act = mm_switch_action(old_psr, new_psr);
139         switch (act) {
140         case SW_V2P:
141                 /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
142                 old_psr.val, new_psr.val);*/
143                 switch_to_physical_rid(vcpu);
144                 /*
145                  * Set rse to enforced lazy, to prevent active rse
146                  * save/restore while in guest physical mode.
147                  */
148                 vcpu->arch.mode_flags |= GUEST_IN_PHY;
149                 break;
150         case SW_P2V:
151                 switch_to_virtual_rid(vcpu);
152                 /*
153                  * recover the old mode, which was saved when entering
154                  * guest physical mode
155                  */
156                 vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
157                 break;
158         case SW_SELF:
159                 break;
160         case SW_NOP:
161                 break;
162         default:
163                 /* Sanity check */
164                 break;
165         }
166         return;
167 }
168
169
170
187 void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
188                                         struct ia64_psr new_psr)
189 {
190
191         if ((old_psr.dt != new_psr.dt)
192                         || (old_psr.it != new_psr.it)
193                         || (old_psr.rt != new_psr.rt))
194                 switch_mm_mode(vcpu, old_psr, new_psr);
195
196         return;
197 }
198
199
200 /*
201  * In physical mode, tc/tr inserts for region 0 and 4 would use
202  * RID[0] and RID[4], which are reserved for physical mode emulation.
203  * However, the inserted tc/tr entries need the rid of virtual mode, so
204  * the original virtual rid has to be restored before the insert.
205  *
206  * Operations which require such a switch include:
207  *  - insertions (itc.*, itr.*)
208  *  - purges (ptc.* and ptr.*)
209  *  - tpa
210  *  - tak
211  *  - thash?, ttag?
212  * All of the above need the actual virtual rid for the
213  * destination entry.
214  */
215
216 void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
217 {
218         if (is_physical_mode(vcpu)) {
219                 vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
220                 switch_to_virtual_rid(vcpu);
221         }
222         return;
223 }
224
225 /* Recover always follows prepare */
226 void recover_if_physical_mode(struct kvm_vcpu *vcpu)
227 {
228         if (is_physical_mode(vcpu))
229                 switch_to_physical_rid(vcpu);
230         vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
231         return;
232 }
233
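/*
 * RPT(x) is an open-coded offsetof(struct kvm_pt_regs, x); gr_info[] below
 * maps the static general registers r1-r31 to their byte offsets in the
 * saved register frame.
 */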
234 #define RPT(x)  ((u16) &((struct kvm_pt_regs *)0)->x)
235
236 static u16 gr_info[32] = {
237         0,      /* r0 is read-only : WE SHOULD NEVER GET THIS */
238         RPT(r1), RPT(r2), RPT(r3),
239         RPT(r4), RPT(r5), RPT(r6), RPT(r7),
240         RPT(r8), RPT(r9), RPT(r10), RPT(r11),
241         RPT(r12), RPT(r13), RPT(r14), RPT(r15),
242         RPT(r16), RPT(r17), RPT(r18), RPT(r19),
243         RPT(r20), RPT(r21), RPT(r22), RPT(r23),
244         RPT(r24), RPT(r25), RPT(r26), RPT(r27),
245         RPT(r28), RPT(r29), RPT(r30), RPT(r31)
246 };
247
248 #define IA64_FIRST_STACKED_GR   32
249 #define IA64_FIRST_ROTATING_FR  32
250
251 static inline unsigned long
252 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
253 {
254         reg += rrb;
255         if (reg >= sor)
256                 reg -= sor;
257         return reg;
258 }
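/*
 * Example: with a rotation size sor of 8 slots and rename base rrb of 3,
 * rotate_reg(8, 3, 6) wraps 6 + 3 = 9 around to 1.
 */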
259
260 /*
261  * Return the (rotated) index for floating point register REGNUM
262  * (REGNUM must be in the range 32-127; the result is in the
263  * range 0-95).
264  */
265 static inline unsigned long fph_index(struct kvm_pt_regs *regs,
266                                                 long regnum)
267 {
268         unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
269         return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
270 }
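/*
 * cr_ifs holds the CFM of the interrupted frame; bits 25-31 are rrb.fr,
 * and the 96 rotating FP registers are f32-f127, hence the modulus of 96.
 */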
271
272
273 /*
274  * Skip num_regs register slots from addr on the RSE backing store,
275  * stepping over the NaT collection slot that occupies every 64th word.
276  */
277 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
278                                                         long num_regs)
279 {
280         long delta = ia64_rse_slot_num(addr) + num_regs;
281         int i = 0;
282
283         if (num_regs < 0)
284                 delta -= 0x3e;
285         if (delta < 0) {
286                 while (delta <= -0x3f) {
287                         i--;
288                         delta += 0x3f;
289                 }
290         } else {
291                 while (delta >= 0x3f) {
292                         i++;
293                         delta -= 0x3f;
294                 }
295         }
296
297         return addr + num_regs + i;
298 }
299
300 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
301                                         unsigned long *val, int *nat)
302 {
303         unsigned long *bsp, *addr, *rnat_addr, *bspstore;
304         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
305         unsigned long nat_mask;
306         unsigned long old_rsc, new_rsc;
307         long sof = (regs->cr_ifs) & 0x7f;
308         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
309         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
310         long ridx = r1 - 32;
311
312         if (ridx < sor)
313                 ridx = rotate_reg(sor, rrb_gr, ridx);
314
315         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
316         new_rsc = old_rsc&(~(0x3));
317         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
318
319         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
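        /*
         * regs->loadrs holds the dirty-partition size shifted left by 16,
         * so >> 19 (16 + 3, see the note in set_rse_reg below) converts it
         * into a count of 8-byte slots above the VMM backing store kbs.
         */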
320         bsp = kbs + (regs->loadrs >> 19);
321
322         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
323         nat_mask = 1UL << ia64_rse_slot_num(addr);
324         rnat_addr = ia64_rse_rnat_addr(addr);
325
326         if (addr >= bspstore) {
327                 ia64_flushrs();
328                 ia64_mf();
329                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
330         }
331         *val = *addr;
332         if (nat) {
333                 if (bspstore < rnat_addr)
334                         *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
335                                                         & nat_mask);
336                 else
337                         *nat = (int)!!((*rnat_addr) & nat_mask);
338         }
339         /* restore RSC even when the NaT bit was not requested */
340         ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
341 }
341
342 void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
343                                 unsigned long val, unsigned long nat)
344 {
345         unsigned long *bsp, *bspstore, *addr, *rnat_addr;
346         unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
347         unsigned long nat_mask;
348         unsigned long old_rsc, new_rsc, psr;
349         unsigned long rnat;
350         long sof = (regs->cr_ifs) & 0x7f;
351         long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
352         long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
353         long ridx = r1 - 32;
354
355         if (ridx < sor)
356                 ridx = rotate_reg(sor, rrb_gr, ridx);
357
358         old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
359         /* put RSC to lazy mode, and set loadrs 0 */
360         new_rsc = old_rsc & (~0x3fff0003);
361         ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
362         bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
363
364         addr = kvm_rse_skip_regs(bsp, -sof + ridx);
365         nat_mask = 1UL << ia64_rse_slot_num(addr);
366         rnat_addr = ia64_rse_rnat_addr(addr);
367
368         local_irq_save(psr);
369         bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
370         if (addr >= bspstore) {
371
372                 ia64_flushrs();
373                 ia64_mf();
374                 *addr = val;
375                 bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
376                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
377                 if (bspstore < rnat_addr)
378                         rnat = rnat & (~nat_mask);
379                 else
380                         *rnat_addr = (*rnat_addr)&(~nat_mask);
381
382                 ia64_mf();
383                 ia64_loadrs();
384                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
385         } else {
386                 rnat = ia64_getreg(_IA64_REG_AR_RNAT);
387                 *addr = val;
388                 if (bspstore < rnat_addr)
389                         rnat = rnat&(~nat_mask);
390                 else
391                         *rnat_addr = (*rnat_addr) & (~nat_mask);
392
393                 ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
394                 ia64_setreg(_IA64_REG_AR_RNAT, rnat);
395         }
396         local_irq_restore(psr);
397         ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
398 }
399
400 void getreg(unsigned long regnum, unsigned long *val,
401                                 int *nat, struct kvm_pt_regs *regs)
402 {
403         unsigned long addr, *unat;
404         if (regnum >= IA64_FIRST_STACKED_GR) {
405                 get_rse_reg(regs, regnum, val, nat);
406                 return;
407         }
408
409         /*
410          * Now look at registers in [0-31] range and init correct UNAT
411          */
412         addr = (unsigned long)regs;
413         unat = &regs->eml_unat;
414
415         addr += gr_info[regnum];
416
417         *val  = *(unsigned long *)addr;
418         /*
419          * do it only when requested
420          */
421         if (nat)
422                 *nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
423 }
424
425 void setreg(unsigned long regnum, unsigned long val,
426                         int nat, struct kvm_pt_regs *regs)
427 {
428         unsigned long addr;
429         unsigned long bitmask;
430         unsigned long *unat;
431
432         /*
433          * First takes care of stacked registers
434          */
435         if (regnum >= IA64_FIRST_STACKED_GR) {
436                 set_rse_reg(regs, regnum, val, nat);
437                 return;
438         }
439
440         /*
441          * Now look at registers in [0-31] range and init correct UNAT
442          */
443         addr = (unsigned long)regs;
444         unat = &regs->eml_unat;
445         /*
446          * add offset from base of struct
447          * and do it !
448          */
449         addr += gr_info[regnum];
450
451         *(unsigned long *)addr = val;
452
453         /*
454          * Update the corresponding UNAT bit to fully emulate the access:
455          * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
456          */
457         bitmask   = 1UL << ((addr >> 3) & 0x3f);
458         if (nat)
459                 *unat |= bitmask;
460         else
461                 *unat &= ~bitmask;
462
463 }
464
465 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
466 {
467         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
468         u64 val;
469
470         if (!reg)
471                 return 0;
472         getreg(reg, &val, 0, regs);
473         return val;
474 }
475
476 void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
477 {
478         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
479         long sof = (regs->cr_ifs) & 0x7f;
480
481         if (!reg)
482                 return;
483         if (reg >= sof + 32)
484                 return;
485         setreg(reg, value, nat, regs);  /* FIXME: handle NATs later*/
486 }
487
488 void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
489                                 struct kvm_pt_regs *regs)
490 {
491         /* Take floating register rotation into consideration*/
492         if (regnum >= IA64_FIRST_ROTATING_FR)
493                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
494 #define CASE_FIXED_FP(reg)                      \
495         case  (reg) :                           \
496                 ia64_stf_spill(fpval, reg);     \
497         break
498
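        /* ia64_stf_spill() needs a constant register number, hence the full 128-way switch */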
499         switch (regnum) {
500                 CASE_FIXED_FP(0);
501                 CASE_FIXED_FP(1);
502                 CASE_FIXED_FP(2);
503                 CASE_FIXED_FP(3);
504                 CASE_FIXED_FP(4);
505                 CASE_FIXED_FP(5);
506
507                 CASE_FIXED_FP(6);
508                 CASE_FIXED_FP(7);
509                 CASE_FIXED_FP(8);
510                 CASE_FIXED_FP(9);
511                 CASE_FIXED_FP(10);
512                 CASE_FIXED_FP(11);
513
514                 CASE_FIXED_FP(12);
515                 CASE_FIXED_FP(13);
516                 CASE_FIXED_FP(14);
517                 CASE_FIXED_FP(15);
518                 CASE_FIXED_FP(16);
519                 CASE_FIXED_FP(17);
520                 CASE_FIXED_FP(18);
521                 CASE_FIXED_FP(19);
522                 CASE_FIXED_FP(20);
523                 CASE_FIXED_FP(21);
524                 CASE_FIXED_FP(22);
525                 CASE_FIXED_FP(23);
526                 CASE_FIXED_FP(24);
527                 CASE_FIXED_FP(25);
528                 CASE_FIXED_FP(26);
529                 CASE_FIXED_FP(27);
530                 CASE_FIXED_FP(28);
531                 CASE_FIXED_FP(29);
532                 CASE_FIXED_FP(30);
533                 CASE_FIXED_FP(31);
534                 CASE_FIXED_FP(32);
535                 CASE_FIXED_FP(33);
536                 CASE_FIXED_FP(34);
537                 CASE_FIXED_FP(35);
538                 CASE_FIXED_FP(36);
539                 CASE_FIXED_FP(37);
540                 CASE_FIXED_FP(38);
541                 CASE_FIXED_FP(39);
542                 CASE_FIXED_FP(40);
543                 CASE_FIXED_FP(41);
544                 CASE_FIXED_FP(42);
545                 CASE_FIXED_FP(43);
546                 CASE_FIXED_FP(44);
547                 CASE_FIXED_FP(45);
548                 CASE_FIXED_FP(46);
549                 CASE_FIXED_FP(47);
550                 CASE_FIXED_FP(48);
551                 CASE_FIXED_FP(49);
552                 CASE_FIXED_FP(50);
553                 CASE_FIXED_FP(51);
554                 CASE_FIXED_FP(52);
555                 CASE_FIXED_FP(53);
556                 CASE_FIXED_FP(54);
557                 CASE_FIXED_FP(55);
558                 CASE_FIXED_FP(56);
559                 CASE_FIXED_FP(57);
560                 CASE_FIXED_FP(58);
561                 CASE_FIXED_FP(59);
562                 CASE_FIXED_FP(60);
563                 CASE_FIXED_FP(61);
564                 CASE_FIXED_FP(62);
565                 CASE_FIXED_FP(63);
566                 CASE_FIXED_FP(64);
567                 CASE_FIXED_FP(65);
568                 CASE_FIXED_FP(66);
569                 CASE_FIXED_FP(67);
570                 CASE_FIXED_FP(68);
571                 CASE_FIXED_FP(69);
572                 CASE_FIXED_FP(70);
573                 CASE_FIXED_FP(71);
574                 CASE_FIXED_FP(72);
575                 CASE_FIXED_FP(73);
576                 CASE_FIXED_FP(74);
577                 CASE_FIXED_FP(75);
578                 CASE_FIXED_FP(76);
579                 CASE_FIXED_FP(77);
580                 CASE_FIXED_FP(78);
581                 CASE_FIXED_FP(79);
582                 CASE_FIXED_FP(80);
583                 CASE_FIXED_FP(81);
584                 CASE_FIXED_FP(82);
585                 CASE_FIXED_FP(83);
586                 CASE_FIXED_FP(84);
587                 CASE_FIXED_FP(85);
588                 CASE_FIXED_FP(86);
589                 CASE_FIXED_FP(87);
590                 CASE_FIXED_FP(88);
591                 CASE_FIXED_FP(89);
592                 CASE_FIXED_FP(90);
593                 CASE_FIXED_FP(91);
594                 CASE_FIXED_FP(92);
595                 CASE_FIXED_FP(93);
596                 CASE_FIXED_FP(94);
597                 CASE_FIXED_FP(95);
598                 CASE_FIXED_FP(96);
599                 CASE_FIXED_FP(97);
600                 CASE_FIXED_FP(98);
601                 CASE_FIXED_FP(99);
602                 CASE_FIXED_FP(100);
603                 CASE_FIXED_FP(101);
604                 CASE_FIXED_FP(102);
605                 CASE_FIXED_FP(103);
606                 CASE_FIXED_FP(104);
607                 CASE_FIXED_FP(105);
608                 CASE_FIXED_FP(106);
609                 CASE_FIXED_FP(107);
610                 CASE_FIXED_FP(108);
611                 CASE_FIXED_FP(109);
612                 CASE_FIXED_FP(110);
613                 CASE_FIXED_FP(111);
614                 CASE_FIXED_FP(112);
615                 CASE_FIXED_FP(113);
616                 CASE_FIXED_FP(114);
617                 CASE_FIXED_FP(115);
618                 CASE_FIXED_FP(116);
619                 CASE_FIXED_FP(117);
620                 CASE_FIXED_FP(118);
621                 CASE_FIXED_FP(119);
622                 CASE_FIXED_FP(120);
623                 CASE_FIXED_FP(121);
624                 CASE_FIXED_FP(122);
625                 CASE_FIXED_FP(123);
626                 CASE_FIXED_FP(124);
627                 CASE_FIXED_FP(125);
628                 CASE_FIXED_FP(126);
629                 CASE_FIXED_FP(127);
630         }
631 #undef CASE_FIXED_FP
632 }
633
634 void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
635                                         struct kvm_pt_regs *regs)
636 {
637         /* Take floating register rotation into consideration*/
638         if (regnum >= IA64_FIRST_ROTATING_FR)
639                 regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
640
641 #define CASE_FIXED_FP(reg)                      \
642         case (reg) :                            \
643                 ia64_ldf_fill(reg, fpval);      \
644         break
645
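        /* f0 (always +0.0) and f1 (always +1.0) are read-only, so writes start at f2 */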
646         switch (regnum) {
647                 CASE_FIXED_FP(2);
648                 CASE_FIXED_FP(3);
649                 CASE_FIXED_FP(4);
650                 CASE_FIXED_FP(5);
651
652                 CASE_FIXED_FP(6);
653                 CASE_FIXED_FP(7);
654                 CASE_FIXED_FP(8);
655                 CASE_FIXED_FP(9);
656                 CASE_FIXED_FP(10);
657                 CASE_FIXED_FP(11);
658
659                 CASE_FIXED_FP(12);
660                 CASE_FIXED_FP(13);
661                 CASE_FIXED_FP(14);
662                 CASE_FIXED_FP(15);
663                 CASE_FIXED_FP(16);
664                 CASE_FIXED_FP(17);
665                 CASE_FIXED_FP(18);
666                 CASE_FIXED_FP(19);
667                 CASE_FIXED_FP(20);
668                 CASE_FIXED_FP(21);
669                 CASE_FIXED_FP(22);
670                 CASE_FIXED_FP(23);
671                 CASE_FIXED_FP(24);
672                 CASE_FIXED_FP(25);
673                 CASE_FIXED_FP(26);
674                 CASE_FIXED_FP(27);
675                 CASE_FIXED_FP(28);
676                 CASE_FIXED_FP(29);
677                 CASE_FIXED_FP(30);
678                 CASE_FIXED_FP(31);
679                 CASE_FIXED_FP(32);
680                 CASE_FIXED_FP(33);
681                 CASE_FIXED_FP(34);
682                 CASE_FIXED_FP(35);
683                 CASE_FIXED_FP(36);
684                 CASE_FIXED_FP(37);
685                 CASE_FIXED_FP(38);
686                 CASE_FIXED_FP(39);
687                 CASE_FIXED_FP(40);
688                 CASE_FIXED_FP(41);
689                 CASE_FIXED_FP(42);
690                 CASE_FIXED_FP(43);
691                 CASE_FIXED_FP(44);
692                 CASE_FIXED_FP(45);
693                 CASE_FIXED_FP(46);
694                 CASE_FIXED_FP(47);
695                 CASE_FIXED_FP(48);
696                 CASE_FIXED_FP(49);
697                 CASE_FIXED_FP(50);
698                 CASE_FIXED_FP(51);
699                 CASE_FIXED_FP(52);
700                 CASE_FIXED_FP(53);
701                 CASE_FIXED_FP(54);
702                 CASE_FIXED_FP(55);
703                 CASE_FIXED_FP(56);
704                 CASE_FIXED_FP(57);
705                 CASE_FIXED_FP(58);
706                 CASE_FIXED_FP(59);
707                 CASE_FIXED_FP(60);
708                 CASE_FIXED_FP(61);
709                 CASE_FIXED_FP(62);
710                 CASE_FIXED_FP(63);
711                 CASE_FIXED_FP(64);
712                 CASE_FIXED_FP(65);
713                 CASE_FIXED_FP(66);
714                 CASE_FIXED_FP(67);
715                 CASE_FIXED_FP(68);
716                 CASE_FIXED_FP(69);
717                 CASE_FIXED_FP(70);
718                 CASE_FIXED_FP(71);
719                 CASE_FIXED_FP(72);
720                 CASE_FIXED_FP(73);
721                 CASE_FIXED_FP(74);
722                 CASE_FIXED_FP(75);
723                 CASE_FIXED_FP(76);
724                 CASE_FIXED_FP(77);
725                 CASE_FIXED_FP(78);
726                 CASE_FIXED_FP(79);
727                 CASE_FIXED_FP(80);
728                 CASE_FIXED_FP(81);
729                 CASE_FIXED_FP(82);
730                 CASE_FIXED_FP(83);
731                 CASE_FIXED_FP(84);
732                 CASE_FIXED_FP(85);
733                 CASE_FIXED_FP(86);
734                 CASE_FIXED_FP(87);
735                 CASE_FIXED_FP(88);
736                 CASE_FIXED_FP(89);
737                 CASE_FIXED_FP(90);
738                 CASE_FIXED_FP(91);
739                 CASE_FIXED_FP(92);
740                 CASE_FIXED_FP(93);
741                 CASE_FIXED_FP(94);
742                 CASE_FIXED_FP(95);
743                 CASE_FIXED_FP(96);
744                 CASE_FIXED_FP(97);
745                 CASE_FIXED_FP(98);
746                 CASE_FIXED_FP(99);
747                 CASE_FIXED_FP(100);
748                 CASE_FIXED_FP(101);
749                 CASE_FIXED_FP(102);
750                 CASE_FIXED_FP(103);
751                 CASE_FIXED_FP(104);
752                 CASE_FIXED_FP(105);
753                 CASE_FIXED_FP(106);
754                 CASE_FIXED_FP(107);
755                 CASE_FIXED_FP(108);
756                 CASE_FIXED_FP(109);
757                 CASE_FIXED_FP(110);
758                 CASE_FIXED_FP(111);
759                 CASE_FIXED_FP(112);
760                 CASE_FIXED_FP(113);
761                 CASE_FIXED_FP(114);
762                 CASE_FIXED_FP(115);
763                 CASE_FIXED_FP(116);
764                 CASE_FIXED_FP(117);
765                 CASE_FIXED_FP(118);
766                 CASE_FIXED_FP(119);
767                 CASE_FIXED_FP(120);
768                 CASE_FIXED_FP(121);
769                 CASE_FIXED_FP(122);
770                 CASE_FIXED_FP(123);
771                 CASE_FIXED_FP(124);
772                 CASE_FIXED_FP(125);
773                 CASE_FIXED_FP(126);
774                 CASE_FIXED_FP(127);
775         }
776 }
777
778 void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
779                                                 struct ia64_fpreg *val)
780 {
781         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
782
783         getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
784 }
785
786 void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
787                                                 struct ia64_fpreg *val)
788 {
789         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
790
791         if (reg > 1)
792                 setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
793 }
794
795 /************************************************************************
796  * lsapic timer
797  ***********************************************************************/
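/*
 * The guest ITC is the host ar.itc plus a per-VM offset; last_itc keeps the
 * value returned to the guest monotonically non-decreasing across reads.
 */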
798 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
799 {
800         unsigned long guest_itc;
801         guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
802
803         if (guest_itc >= VMX(vcpu, last_itc)) {
804                 VMX(vcpu, last_itc) = guest_itc;
805                 return  guest_itc;
806         } else
807                 return VMX(vcpu, last_itc);
808 }
809
810 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
811 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
812 {
813         struct kvm_vcpu *v;
814         int i;
815         long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
816         unsigned long vitv = VCPU(vcpu, itv);
817
818         if (vcpu->vcpu_id == 0) {
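                /*
                 * vcpu 0 propagates the new offset to every vcpu; the other
                 * vcpu structures are reached by pointer arithmetic, which
                 * relies on them being laid out contiguously with a stride
                 * of sizeof(struct kvm_vcpu_data) in the VMM address space.
                 */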
819                 for (i = 0; i < KVM_MAX_VCPUS; i++) {
820                         v = (struct kvm_vcpu *)((char *)vcpu +
821                                         sizeof(struct kvm_vcpu_data) * i);
822                         VMX(v, itc_offset) = itc_offset;
823                         VMX(v, last_itc) = 0;
824                 }
825         }
826         VMX(vcpu, last_itc) = 0;
827         if (VCPU(vcpu, itm) <= val) {
828                 VMX(vcpu, itc_check) = 0;
829                 vcpu_unpend_interrupt(vcpu, vitv);
830         } else {
831                 VMX(vcpu, itc_check) = 1;
832                 vcpu_set_itm(vcpu, VCPU(vcpu, itm));
833         }
834
835 }
836
837 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
838 {
839         return ((u64)VCPU(vcpu, itm));
840 }
841
842 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
843 {
844         unsigned long vitv = VCPU(vcpu, itv);
845         VCPU(vcpu, itm) = val;
846
847         if (val > vcpu_get_itc(vcpu)) {
848                 VMX(vcpu, itc_check) = 1;
849                 vcpu_unpend_interrupt(vcpu, vitv);
850                 VMX(vcpu, timer_pending) = 0;
851         } else
852                 VMX(vcpu, itc_check) = 0;
853 }
854
855 #define  ITV_VECTOR(itv)    (itv&0xff)
856 #define  ITV_IRQ_MASK(itv)  (itv&(1<<16))
857
858 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
859 {
860         VCPU(vcpu, itv) = val;
861         if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
862                 vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
863                 vcpu->arch.timer_pending = 0;
864         }
865 }
866
867 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
868 {
869         int vec;
870
871         vec = highest_inservice_irq(vcpu);
872         if (vec == NULL_VECTOR)
873                 return;
874         VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
875         VCPU(vcpu, eoi) = 0;
876         vcpu->arch.irq_new_pending = 1;
877
878 }
879
880 /* See Table 5-8 in SDM vol2 for the definition */
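/*
 * Masking order checked below: an in-service NMI masks everything, a pending
 * NMI is never masked, ExtINT is gated by tpr.mmi, and ordinary vectors are
 * gated by the in-service priority and by tpr.mic/tpr.mmi.
 */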
881 int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
882 {
883         union ia64_tpr vtpr;
884
885         vtpr.val = VCPU(vcpu, tpr);
886
887         if (h_inservice == NMI_VECTOR)
888                 return IRQ_MASKED_BY_INSVC;
889
890         if (h_pending == NMI_VECTOR) {
891                 /* Non Maskable Interrupt */
892                 return IRQ_NO_MASKED;
893         }
894
895         if (h_inservice == ExtINT_VECTOR)
896                 return IRQ_MASKED_BY_INSVC;
897
898         if (h_pending == ExtINT_VECTOR) {
899                 if (vtpr.mmi) {
900                         /* mask all external IRQ */
901                         return IRQ_MASKED_BY_VTPR;
902                 } else
903                         return IRQ_NO_MASKED;
904         }
905
906         if (is_higher_irq(h_pending, h_inservice)) {
907                 if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
908                         return IRQ_NO_MASKED;
909                 else
910                         return IRQ_MASKED_BY_VTPR;
911         } else {
912                 return IRQ_MASKED_BY_INSVC;
913         }
914 }
915
916 void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
917 {
918         long spsr;
919         int ret;
920
921         local_irq_save(spsr);
922         ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
923         local_irq_restore(spsr);
924
925         vcpu->arch.irq_new_pending = 1;
926 }
927
928 void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
929 {
930         long spsr;
931         int ret;
932
933         local_irq_save(spsr);
934         ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
935         local_irq_restore(spsr);
936         if (ret) {
937                 vcpu->arch.irq_new_pending = 1;
938                 wmb();
939         }
940 }
941
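/*
 * vhpi encodes the highest pending interrupt for the VPD: 0 means none,
 * 32 NMI, 16 ExtINT, otherwise the vector's priority class (vector >> 4);
 * when vac.a_int is set, the update is pushed to the per-vcpu VPD through
 * PAL_VPS_SET_PENDING_INTERRUPT.
 */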
942 void update_vhpi(struct kvm_vcpu *vcpu, int vec)
943 {
944         u64 vhpi;
945
946         if (vec == NULL_VECTOR)
947                 vhpi = 0;
948         else if (vec == NMI_VECTOR)
949                 vhpi = 32;
950         else if (vec == ExtINT_VECTOR)
951                 vhpi = 16;
952         else
953                 vhpi = vec >> 4;
954
955         VCPU(vcpu, vhpi) = vhpi;
956         if (VCPU(vcpu, vac).a_int)
957                 ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
958                                 (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
959 }
960
961 u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
962 {
963         int vec, h_inservice, mask;
964
965         vec = highest_pending_irq(vcpu);
966         h_inservice = highest_inservice_irq(vcpu);
967         mask = irq_masked(vcpu, vec, h_inservice);
968         if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
969                 if (VCPU(vcpu, vhpi))
970                         update_vhpi(vcpu, NULL_VECTOR);
971                 return IA64_SPURIOUS_INT_VECTOR;
972         }
973         if (mask == IRQ_MASKED_BY_VTPR) {
974                 update_vhpi(vcpu, vec);
975                 return IA64_SPURIOUS_INT_VECTOR;
976         }
977         VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
978         vcpu_unpend_interrupt(vcpu, vec);
979         return  (u64)vec;
980 }
981
982 /**************************************************************************
983   Privileged operation emulation routines
984  **************************************************************************/
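/*
 * thash emulation: with pta.vf set the long-format hash is computed by the
 * vsa (PAL_VPS_THASH); otherwise the short-format linear VHPT address is
 * built from the region bits of vadr, the page-indexed offset and the PTA
 * base truncated to a 2^pta.size boundary.
 */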
985 u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
986 {
987         union ia64_pta vpta;
988         union ia64_rr vrr;
989         u64 pval;
990         u64 vhpt_offset;
991
992         vpta.val = vcpu_get_pta(vcpu);
993         vrr.val = vcpu_get_rr(vcpu, vadr);
994         vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
995         if (vpta.vf) {
996                 pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
997                                 vpta.val, 0, 0, 0, 0);
998         } else {
999                 pval = (vadr & VRN_MASK) | vhpt_offset |
1000                         (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
1001         }
1002         return  pval;
1003 }
1004
1005 u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1006 {
1007         union ia64_rr vrr;
1008         union ia64_pta vpta;
1009         u64 pval;
1010
1011         vpta.val = vcpu_get_pta(vcpu);
1012         vrr.val = vcpu_get_rr(vcpu, vadr);
1013         if (vpta.vf) {
1014                 pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1015                                                 0, 0, 0, 0, 0);
1016         } else
1017                 pval = 1;
1018
1019         return  pval;
1020 }
1021
1022 u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1023 {
1024         struct thash_data *data;
1025         union ia64_pta vpta;
1026         u64 key;
1027
1028         vpta.val = vcpu_get_pta(vcpu);
1029         if (vpta.vf == 0) {
1030                 key = 1;
1031                 return key;
1032         }
1033         data = vtlb_lookup(vcpu, vadr, D_TLB);
1034         if (!data || !data->p)
1035                 key = 1;
1036         else
1037                 key = data->key;
1038
1039         return key;
1040 }
1041
1042
1043
1044 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1045 {
1046         unsigned long thash, vadr;
1047
1048         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1049         thash = vcpu_thash(vcpu, vadr);
1050         vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1051 }
1052
1053
1054 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1055 {
1056         unsigned long tag, vadr;
1057
1058         vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1059         tag = vcpu_ttag(vcpu, vadr);
1060         vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1061 }
1062
1063 int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
1064 {
1065         struct thash_data *data;
1066         union ia64_isr visr, pt_isr;
1067         struct kvm_pt_regs *regs;
1068         struct ia64_psr vpsr;
1069
1070         regs = vcpu_regs(vcpu);
1071         pt_isr.val = VMX(vcpu, cr_isr);
1072         visr.val = 0;
1073         visr.ei = pt_isr.ei;
1074         visr.ir = pt_isr.ir;
1075         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1076         visr.na = 1;
1077
1078         data = vhpt_lookup(vadr);
1079         if (data) {
1080                 if (data->p == 0) {
1081                         vcpu_set_isr(vcpu, visr.val);
1082                         data_page_not_present(vcpu, vadr);
1083                         return IA64_FAULT;
1084                 } else if (data->ma == VA_MATTR_NATPAGE) {
1085                         vcpu_set_isr(vcpu, visr.val);
1086                         dnat_page_consumption(vcpu, vadr);
1087                         return IA64_FAULT;
1088                 } else {
1089                         *padr = (data->gpaddr >> data->ps << data->ps) |
1090                                 (vadr & (PSIZE(data->ps) - 1));
1091                         return IA64_NO_FAULT;
1092                 }
1093         }
1094
1095         data = vtlb_lookup(vcpu, vadr, D_TLB);
1096         if (data) {
1097                 if (data->p == 0) {
1098                         vcpu_set_isr(vcpu, visr.val);
1099                         data_page_not_present(vcpu, vadr);
1100                         return IA64_FAULT;
1101                 } else if (data->ma == VA_MATTR_NATPAGE) {
1102                         vcpu_set_isr(vcpu, visr.val);
1103                         dnat_page_consumption(vcpu, vadr);
1104                         return IA64_FAULT;
1105                 } else {
1106                         *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1107                                 | (vadr & (PSIZE(data->ps) - 1));
1108                         return IA64_NO_FAULT;
1109                 }
1110         }
1111         if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1112                 if (vpsr.ic) {
1113                         vcpu_set_isr(vcpu, visr.val);
1114                         alt_dtlb(vcpu, vadr);
1115                         return IA64_FAULT;
1116                 } else {
1117                         nested_dtlb(vcpu);
1118                         return IA64_FAULT;
1119                 }
1120         } else {
1121                 if (vpsr.ic) {
1122                         vcpu_set_isr(vcpu, visr.val);
1123                         dvhpt_fault(vcpu, vadr);
1124                         return IA64_FAULT;
1125                 } else {
1126                         nested_dtlb(vcpu);
1127                         return IA64_FAULT;
1128                 }
1129         }
1130
1131         return IA64_NO_FAULT;
1132 }
1133
1134
1135 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1136 {
1137         unsigned long r1, r3;
1138
1139         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1140
1141         if (vcpu_tpa(vcpu, r3, &r1))
1142                 return IA64_FAULT;
1143
1144         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1145         return(IA64_NO_FAULT);
1146 }
1147
1148 void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1149 {
1150         unsigned long r1, r3;
1151
1152         r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1153         r1 = vcpu_tak(vcpu, r3);
1154         vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1155 }
1156
1157
1158 /************************************
1159  * Insert/Purge translation register/cache
1160  ************************************/
1161 void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1162 {
1163         thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1164 }
1165
1166 void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1167 {
1168         thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1169 }
1170
1171 void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1172 {
1173         u64 ps, va, rid;
1174         struct thash_data *p_itr;
1175
1176         ps = itir_ps(itir);
1177         va = PAGEALIGN(ifa, ps);
1178         pte &= ~PAGE_FLAGS_RV_MASK;
1179         rid = vcpu_get_rr(vcpu, ifa);
1180         rid = rid & RR_RID_MASK;
1181         p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1182         vcpu_set_tr(p_itr, pte, itir, va, rid);
1183         vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1184 }
1185
1186
1187 void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1188 {
1189         u64 gpfn;
1190         u64 ps, va, rid;
1191         struct thash_data *p_dtr;
1192
1193         ps = itir_ps(itir);
1194         va = PAGEALIGN(ifa, ps);
1195         pte &= ~PAGE_FLAGS_RV_MASK;
1196
1197         if (ps != _PAGE_SIZE_16M)
1198                 thash_purge_entries(vcpu, va, ps);
1199         gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1200         if (__gpfn_is_io(gpfn))
1201                 pte |= VTLB_PTE_IO;
1202         rid = vcpu_get_rr(vcpu, va);
1203         rid = rid & RR_RID_MASK;
1204         p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1205         vcpu_set_tr(p_dtr, pte, itir, va, rid);
1207         vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1208 }
1209
1210 void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1211 {
1212         int index;
1213         u64 va;
1214
1215         va = PAGEALIGN(ifa, ps);
1216         while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1217                 vcpu->arch.dtrs[index].page_flags = 0;
1218
1219         thash_purge_entries(vcpu, va, ps);
1220 }
1221
1222 void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1223 {
1224         int index;
1225         u64 va;
1226
1227         va = PAGEALIGN(ifa, ps);
1228         while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1229                 vcpu->arch.itrs[index].page_flags = 0;
1230
1231         thash_purge_entries(vcpu, va, ps);
1232 }
1233
1234 void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1235 {
1236         va = PAGEALIGN(va, ps);
1237         thash_purge_entries(vcpu, va, ps);
1238 }
1239
1240 void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1241 {
1242         thash_purge_all(vcpu);
1243 }
1244
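/*
 * ptc.ga must be visible to every physical cpu the guest may run on, so the
 * request is handed to the host side (EXIT_REASON_PTC_G) via vmm_transition()
 * before the purge is applied to the local VHPT/VTLB.
 */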
1245 void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1246 {
1247         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1248         long psr;
1249         local_irq_save(psr);
1250         p->exit_reason = EXIT_REASON_PTC_G;
1251
1252         p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1253         p->u.ptc_g_data.vaddr = va;
1254         p->u.ptc_g_data.ps = ps;
1255         vmm_transition(vcpu);
1256         /* Do Local Purge Here*/
1257         vcpu_ptc_l(vcpu, va, ps);
1258         local_irq_restore(psr);
1259 }
1260
1261
1262 void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1263 {
1264         vcpu_ptc_ga(vcpu, va, ps);
1265 }
1266
1267 void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1268 {
1269         unsigned long ifa;
1270
1271         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1272         vcpu_ptc_e(vcpu, ifa);
1273 }
1274
1275 void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1276 {
1277         unsigned long ifa, itir;
1278
1279         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1280         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1281         vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1282 }
1283
1284 void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1285 {
1286         unsigned long ifa, itir;
1287
1288         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1289         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1290         vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1291 }
1292
1293 void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1294 {
1295         unsigned long ifa, itir;
1296
1297         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1298         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1299         vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1300 }
1301
1302 void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1303 {
1304         unsigned long ifa, itir;
1305
1306         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1307         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1308         vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1309 }
1310
1311 void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1312 {
1313         unsigned long ifa, itir;
1314
1315         ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1316         itir = vcpu_get_gr(vcpu, inst.M45.r2);
1317         vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1318 }
1319
1320 void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1321 {
1322         unsigned long itir, ifa, pte, slot;
1323
1324         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1325         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1326         itir = vcpu_get_itir(vcpu);
1327         ifa = vcpu_get_ifa(vcpu);
1328         vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1329 }
1330
1331
1332
1333 void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1334 {
1335         unsigned long itir, ifa, pte, slot;
1336
1337         slot = vcpu_get_gr(vcpu, inst.M45.r3);
1338         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1339         itir = vcpu_get_itir(vcpu);
1340         ifa = vcpu_get_ifa(vcpu);
1341         vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1342 }
1343
1344 void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1345 {
1346         unsigned long itir, ifa, pte;
1347
1348         itir = vcpu_get_itir(vcpu);
1349         ifa = vcpu_get_ifa(vcpu);
1350         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1351         vcpu_itc_d(vcpu, pte, itir, ifa);
1352 }
1353
1354 void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1355 {
1356         unsigned long itir, ifa, pte;
1357
1358         itir = vcpu_get_itir(vcpu);
1359         ifa = vcpu_get_ifa(vcpu);
1360         pte = vcpu_get_gr(vcpu, inst.M45.r2);
1361         vcpu_itc_i(vcpu, pte, itir, ifa);
1362 }
1363
1364 /*************************************
1365  * Moves to semi-privileged registers
1366  *************************************/
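/*
 * The only application register emulated here is ar.itc; both the immediate
 * and the register forms of "mov to ar" funnel into vcpu_set_itc() so the
 * per-VM ITC offset stays consistent across vcpus.
 */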
1367
1368 void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1369 {
1370         unsigned long imm;
1371
1372         if (inst.M30.s)
1373                 imm = -inst.M30.imm;
1374         else
1375                 imm = inst.M30.imm;
1376
1377         vcpu_set_itc(vcpu, imm);
1378 }
1379
1380 void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1381 {
1382         unsigned long r2;
1383
1384         r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1385         vcpu_set_itc(vcpu, r2);
1386 }
1387
1388
1389 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1390 {
1391         unsigned long r1;
1392
1393         r1 = vcpu_get_itc(vcpu);
1394         vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1395 }
1396 /**************************************************************************
1397   struct kvm_vcpu protection key register access routines
1398  **************************************************************************/
1399
1400 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1401 {
1402         return ((unsigned long)ia64_get_pkr(reg));
1403 }
1404
1405 void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1406 {
1407         ia64_set_pkr(reg, val);
1408 }
1409
1410
1411 unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
1412 {
1413         union ia64_rr rr, rr1;
1414
1415         rr.val = vcpu_get_rr(vcpu, ifa);
1416         rr1.val = 0;
1417         rr1.ps = rr.ps;
1418         rr1.rid = rr.rid;
1419         return (rr1.val);
1420 }
1421
1422
1423
1424 /********************************
1425  * Moves to privileged registers
1426  ********************************/
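/*
 * Writes to rr0 and rr4 are latched into metaphysical_saved_rr0/rr4 and only
 * loaded into the hardware region registers while the guest is in virtual
 * mode; in physical mode those regions carry the metaphysical RIDs set up by
 * switch_to_physical_rid().  rr6 is special: changing it needs a transition
 * to the host side (EXIT_REASON_SWITCH_RR6).
 */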
1427 unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1428                                         unsigned long val)
1429 {
1430         union ia64_rr oldrr, newrr;
1431         unsigned long rrval;
1432         struct exit_ctl_data *p = &vcpu->arch.exit_data;
1433         unsigned long psr;
1434
1435         oldrr.val = vcpu_get_rr(vcpu, reg);
1436         newrr.val = val;
1437         vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1438
1439         switch ((unsigned long)(reg >> VRN_SHIFT)) {
1440         case VRN6:
1441                 vcpu->arch.vmm_rr = vrrtomrr(val);
1442                 local_irq_save(psr);
1443                 p->exit_reason = EXIT_REASON_SWITCH_RR6;
1444                 vmm_transition(vcpu);
1445                 local_irq_restore(psr);
1446                 break;
1447         case VRN4:
1448                 rrval = vrrtomrr(val);
1449                 vcpu->arch.metaphysical_saved_rr4 = rrval;
1450                 if (!is_physical_mode(vcpu))
1451                         ia64_set_rr(reg, rrval);
1452                 break;
1453         case VRN0:
1454                 rrval = vrrtomrr(val);
1455                 vcpu->arch.metaphysical_saved_rr0 = rrval;
1456                 if (!is_physical_mode(vcpu))
1457                         ia64_set_rr(reg, rrval);
1458                 break;
1459         default:
1460                 ia64_set_rr(reg, vrrtomrr(val));
1461                 break;
1462         }
1463
1464         return (IA64_NO_FAULT);
1465 }
1466
1467
1468
1469 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1470 {
1471         unsigned long r3, r2;
1472
1473         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1474         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1475         vcpu_set_rr(vcpu, r3, r2);
1476 }
1477
1478 void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1479 {
1480 }
1481
1482 void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1483 {
1484 }
1485
1486 void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1487 {
1488         unsigned long r3, r2;
1489
1490         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1491         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1492         vcpu_set_pmc(vcpu, r3, r2);
1493 }
1494
1495 void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1496 {
1497         unsigned long r3, r2;
1498
1499         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1500         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1501         vcpu_set_pmd(vcpu, r3, r2);
1502 }
1503
1504 void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1505 {
1506         u64 r3, r2;
1507
1508         r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1509         r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1510         vcpu_set_pkr(vcpu, r3, r2);
1511 }
1512
1513
1514
1515 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1516 {
1517         unsigned long r3, r1;
1518
1519         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1520         r1 = vcpu_get_rr(vcpu, r3);
1521         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1522 }
1523
1524 void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1525 {
1526         unsigned long r3, r1;
1527
1528         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1529         r1 = vcpu_get_pkr(vcpu, r3);
1530         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1531 }
1532
1533 void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1534 {
1535         unsigned long r3, r1;
1536
1537         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1538         r1 = vcpu_get_dbr(vcpu, r3);
1539         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1540 }
1541
1542 void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1543 {
1544         unsigned long r3, r1;
1545
1546         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1547         r1 = vcpu_get_ibr(vcpu, r3);
1548         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1549 }
1550
1551 void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1552 {
1553         unsigned long r3, r1;
1554
1555         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1556         r1 = vcpu_get_pmc(vcpu, r3);
1557         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1558 }
1559
1560
1561 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1562 {
1563         /* FIXME: This could get called as a result of a rsvd-reg fault */
1564         if (reg > (ia64_get_cpuid(3) & 0xff))
1565                 return 0;
1566         else
1567                 return ia64_get_cpuid(reg);
1568 }
1569
1570 void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1571 {
1572         unsigned long r3, r1;
1573
1574         r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1575         r1 = vcpu_get_cpuid(vcpu, r3);
1576         vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1577 }
1578
1579 void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1580 {
1581         VCPU(vcpu, tpr) = val;
1582         vcpu->arch.irq_check = 1;
1583 }
1584
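/*
 * Control register writes are mirrored into the vcr[] shadow; the registers
 * with side effects handled here are cr.dcr (0), cr.itm (1), cr.tpr (66) and
 * cr.eoi (67).
 */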
1585 unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1586 {
1587         unsigned long r2;
1588
1589         r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1590         VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1591
1592         switch (inst.M32.cr3) {
1593         case 0:
1594                 vcpu_set_dcr(vcpu, r2);
1595                 break;
1596         case 1:
1597                 vcpu_set_itm(vcpu, r2);
1598                 break;
1599         case 66:
1600                 vcpu_set_tpr(vcpu, r2);
1601                 break;
1602         case 67:
1603                 vcpu_set_eoi(vcpu, r2);
1604                 break;
1605         default:
1606                 break;
1607         }
1608
1609         return 0;
1610 }
1611
1612
1613 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1614 {
1615         unsigned long tgt = inst.M33.r1;
1616         unsigned long val;
1617
1618         switch (inst.M33.cr3) {
1619         case 65:
1620                 val = vcpu_get_ivr(vcpu);
1621                 vcpu_set_gr(vcpu, tgt, val, 0);
1622                 break;
1623
1624         case 67:
1625                 vcpu_set_gr(vcpu, tgt, 0L, 0);
1626                 break;
1627         default:
1628                 val = VCPU(vcpu, vcr[inst.M33.cr3]);
1629                 vcpu_set_gr(vcpu, tgt, val, 0);
1630                 break;
1631         }
1632
1633         return 0;
1634 }
1635
1636
1637
1638 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1639 {
1640
1641         unsigned long mask;
1642         struct kvm_pt_regs *regs;
1643         struct ia64_psr old_psr, new_psr;
1644
1645         old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1646
1647         regs = vcpu_regs(vcpu);
1648         /* We only support guests with:
1649          *  vpsr.pk = 0
1650          *  vpsr.is = 0
1651          * Otherwise panic
1652          */
1653         if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1654                 panic_vm(vcpu, "Only support guests with vpsr.pk = 0 "
1655                                 "and vpsr.is = 0\n");
1656
1657         /*
1658          * For the IA64_PSR bits id/da/dd/ss/ed/ia:
1659          * since these bits become 0 after successful execution of each
1660          * instruction, they are not kept in the guest vpsr but are set
1661          * in mIA64_PSR below.
1661          */
1662         VCPU(vcpu, vpsr) = val
1663                 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1664                         IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1665
1666         if (!old_psr.i && (val & IA64_PSR_I)) {
1667                 /* vpsr.i 0->1 */
1668                 vcpu->arch.irq_check = 1;
1669         }
1670         new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1671
1672         /*
1673          * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1674          * except for the following bits:
1675          *  ic/i/dt/si/rt/mc/it/bn/vm
1676          */
1677         mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1678                 IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1679                 IA64_PSR_VM;
1680
1681         regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1682
1683         check_mm_mode_switch(vcpu, old_psr, new_psr);
1684
1685         return;
1686 }
1687
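/*
 * Emulate cover: allocate an empty current frame (cr_ifs = valid bit only).
 * With interruption collection disabled (psr.ic = 0), the old frame state
 * is first latched into the guest's IFS, as the architecture requires.
 */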
1688 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1689 {
1690         struct ia64_psr vpsr;
1691
1692         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1693         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1694
1695         if (!vpsr.ic)
1696                 VCPU(vcpu, ifs) = regs->cr_ifs;
1697         regs->cr_ifs = IA64_IFS_V;
1698         return (IA64_NO_FAULT);
1699 }
1700
1701
1702
1703 /**************************************************************************
1704   VCPU banked general register access routines
1705  **************************************************************************/
1706 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1707         do {                                                            \
1708                 __asm__ __volatile__ (                                  \
1709                                 ";;extr.u %0 = %3,%6,16;;\n"            \
1710                                 "dep %1 = %0, %1, 0, 16;;\n"            \
1711                                 "st8 [%4] = %1\n"                       \
1712                                 "extr.u %0 = %2, 16, 16;;\n"            \
1713                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1714                                 "st8 [%5] = %3\n"                       \
1715                                 ::"r"(i), "r"(*b1unat), "r"(*b0unat),   \
1716                                 "r"(*runat), "r"(b1unat), "r"(runat),   \
1717                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1718         } while (0)
1719
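/*
 * Switch the guest to register bank 0: save the live bank-1 values of
 * r16-r31 into vgr[], load the bank-0 shadow from vbgr[], swap the
 * matching NaT bits via vcpu_bsw0_unat(), and clear vpsr.bn.  This is a
 * no-op if bank 0 is already selected.
 */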
1720 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1721 {
1722         unsigned long i;
1723
1724         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1725         unsigned long *r = &regs->r16;
1726         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1727         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1728         unsigned long *runat = &regs->eml_unat;
1729         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1730         unsigned long *b1unat = &VCPU(vcpu, vnat);
1731
1732
1733         if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1734                 for (i = 0; i < 16; i++) {
1735                         *b1++ = *r;
1736                         *r++ = *b0++;
1737                 }
1738                 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1739                                 VMM_PT_REGS_R16_SLOT);
1740                 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1741         }
1742 }
1743
1744 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1745         do {                                                            \
1746                 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"    \
1747                                 "dep %1 = %0, %1, 16, 16;;\n"           \
1748                                 "st8 [%4] = %1\n"                       \
1749                                 "extr.u %0 = %2, 0, 16;;\n"             \
1750                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1751                                 "st8 [%5] = %3\n"                       \
1752                                 ::"r"(i), "r"(*b0unat), "r"(*b1unat),   \
1753                                 "r"(*runat), "r"(b0unat), "r"(runat),   \
1754                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1755         } while (0)
1756
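/*
 * Mirror of vcpu_bsw0(): switch to register bank 1 by saving r16-r31
 * into vbgr[], loading them from vgr[], swapping the NaT bits, and
 * setting vpsr.bn.
 */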
1757 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1758 {
1759         unsigned long i;
1760         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1761         unsigned long *r = &regs->r16;
1762         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1763         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1764         unsigned long *runat = &regs->eml_unat;
1765         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1766         unsigned long *b1unat = &VCPU(vcpu, vnat);
1767
1768         if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1769                 for (i = 0; i < 16; i++) {
1770                         *b0++ = *r;
1771                         *r++ = *b1++;
1772                 }
1773                 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1774                                 VMM_PT_REGS_R16_SLOT);
1775                 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1776         }
1777 }
1778
1779
1780
1781
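/*
 * Emulate rfi: select the register bank requested by IPSR.bn, restore the
 * guest PSR from IPSR, reload IFS when its valid bit (bit 63) is set, and
 * resume execution at IIP.
 */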
1782 void vcpu_rfi(struct kvm_vcpu *vcpu)
1783 {
1784         unsigned long ifs, psr;
1785         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1786
1787         psr = VCPU(vcpu, ipsr);
1788         if (psr & IA64_PSR_BN)
1789                 vcpu_bsw1(vcpu);
1790         else
1791                 vcpu_bsw0(vcpu);
1792         vcpu_set_psr(vcpu, psr);
1793         ifs = VCPU(vcpu, ifs);
1794         if (ifs >> 63)
1795                 regs->cr_ifs = ifs;
1796         regs->cr_iip = VCPU(vcpu, iip);
1797 }
1798
1799
1800 /*
1801    VPSR can't keep track of the bits of the guest PSR listed in "mask" below.
1802    This function reconstructs the full guest PSR from vpsr and the machine ipsr.
1803  */
1804
1805 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1806 {
1807         unsigned long mask;
1808         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1809
1810         mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1811                 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1812         return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1813 }
1814
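/*
 * rsm/ssm: the 24-bit immediate is scattered across the M44 encoding as
 * imm (bits 0-20), i2 (bits 21-22) and i (bit 23); rsm clears and ssm
 * sets the selected PSR bits.
 */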
1815 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1816 {
1817         unsigned long vpsr;
1818         unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1819                                         | inst.M44.imm;
1820
1821         vpsr = vcpu_get_psr(vcpu);
1822         vpsr &= (~imm24);
1823         vcpu_set_psr(vcpu, vpsr);
1824 }
1825
1826 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1827 {
1828         unsigned long vpsr;
1829         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1830                                 | inst.M44.imm;
1831
1832         vpsr = vcpu_get_psr(vcpu);
1833         vpsr |= imm24;
1834         vcpu_set_psr(vcpu, vpsr);
1835 }
1836
1837 /* Generate a mask of 1s.
1838  * Parameters:
1839  *  bit -- starting bit
1840  *  len -- how many bits
1841  */
1842 #define MASK(bit,len)                                   \
1843 ({                                                      \
1844                 __u64   ret;                            \
1845                                                         \
1846                 __asm __volatile("dep %0=-1, r0, %1, %2"\
1847                                 : "=r" (ret):           \
1848                   "M" (bit),                            \
1849                   "M" (len));                           \
1850                 ret;                                    \
1851 })
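/*
 * For example, MASK(0, 32) is 0x00000000ffffffff and MASK(32, 32) is
 * 0xffffffff00000000, so vcpu_set_psr_l() below only replaces the low
 * 32 bits of the guest PSR.
 */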
1852
1853 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1854 {
1855         val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1856         vcpu_set_psr(vcpu, val);
1857 }
1858
1859 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1860 {
1861         unsigned long val;
1862
1863         val = vcpu_get_gr(vcpu, inst.M35.r2);
1864         vcpu_set_psr_l(vcpu, val);
1865 }
1866
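/*
 * mov-from-PSR makes only PSR{31:0} and PSR{36:35} visible (the remaining
 * bits read as zero), hence the two MASK() terms below.
 */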
1867 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1868 {
1869         unsigned long val;
1870
1871         val = vcpu_get_psr(vcpu);
1872         val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1873         vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1874 }
1875
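/*
 * An IA-64 bundle is 16 bytes and holds three instruction slots; psr.ri
 * selects the slot, so stepping past slot 2 advances iip to the next bundle.
 */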
1876 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1877 {
1878         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1879         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1880         if (ipsr->ri == 2) {
1881                 ipsr->ri = 0;
1882                 regs->cr_iip += 16;
1883         } else
1884                 ipsr->ri++;
1885 }
1886
1887 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1888 {
1889         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1890         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1891
1892         if (ipsr->ri == 0) {
1893                 ipsr->ri = 2;
1894                 regs->cr_iip -= 16;
1895         } else
1896                 ipsr->ri--;
1897 }
1898
1899 /** Emulate a privileged operation.
1900  *
1901  * @param vcpu virtual cpu
1902  * @param regs saved guest registers at the time of the virtualization fault
1903  *
1904  * The fault cause and opcode are read from VMX(vcpu, cause) and VMX(vcpu, opcode).
1905  */
1906
1907 void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1908 {
1909         unsigned long status, cause, opcode;
1910         INST64 inst;
1911
1912         status = IA64_NO_FAULT;
1913         cause = VMX(vcpu, cause);
1914         opcode = VMX(vcpu, opcode);
1915         inst.inst = opcode;
1916         /*
1917          * Switch to actual virtual rid in rr0 and rr4,
1918          * which is required by some TLB-related instructions.
1919          */
1920         prepare_if_physical_mode(vcpu);
1921
1922         switch (cause) {
1923         case EVENT_RSM:
1924                 kvm_rsm(vcpu, inst);
1925                 break;
1926         case EVENT_SSM:
1927                 kvm_ssm(vcpu, inst);
1928                 break;
1929         case EVENT_MOV_TO_PSR:
1930                 kvm_mov_to_psr(vcpu, inst);
1931                 break;
1932         case EVENT_MOV_FROM_PSR:
1933                 kvm_mov_from_psr(vcpu, inst);
1934                 break;
1935         case EVENT_MOV_FROM_CR:
1936                 kvm_mov_from_cr(vcpu, inst);
1937                 break;
1938         case EVENT_MOV_TO_CR:
1939                 kvm_mov_to_cr(vcpu, inst);
1940                 break;
1941         case EVENT_BSW_0:
1942                 vcpu_bsw0(vcpu);
1943                 break;
1944         case EVENT_BSW_1:
1945                 vcpu_bsw1(vcpu);
1946                 break;
1947         case EVENT_COVER:
1948                 vcpu_cover(vcpu);
1949                 break;
1950         case EVENT_RFI:
1951                 vcpu_rfi(vcpu);
1952                 break;
1953         case EVENT_ITR_D:
1954                 kvm_itr_d(vcpu, inst);
1955                 break;
1956         case EVENT_ITR_I:
1957                 kvm_itr_i(vcpu, inst);
1958                 break;
1959         case EVENT_PTR_D:
1960                 kvm_ptr_d(vcpu, inst);
1961                 break;
1962         case EVENT_PTR_I:
1963                 kvm_ptr_i(vcpu, inst);
1964                 break;
1965         case EVENT_ITC_D:
1966                 kvm_itc_d(vcpu, inst);
1967                 break;
1968         case EVENT_ITC_I:
1969                 kvm_itc_i(vcpu, inst);
1970                 break;
1971         case EVENT_PTC_L:
1972                 kvm_ptc_l(vcpu, inst);
1973                 break;
1974         case EVENT_PTC_G:
1975                 kvm_ptc_g(vcpu, inst);
1976                 break;
1977         case EVENT_PTC_GA:
1978                 kvm_ptc_ga(vcpu, inst);
1979                 break;
1980         case EVENT_PTC_E:
1981                 kvm_ptc_e(vcpu, inst);
1982                 break;
1983         case EVENT_MOV_TO_RR:
1984                 kvm_mov_to_rr(vcpu, inst);
1985                 break;
1986         case EVENT_MOV_FROM_RR:
1987                 kvm_mov_from_rr(vcpu, inst);
1988                 break;
1989         case EVENT_THASH:
1990                 kvm_thash(vcpu, inst);
1991                 break;
1992         case EVENT_TTAG:
1993                 kvm_ttag(vcpu, inst);
1994                 break;
1995         case EVENT_TPA:
1996                 status = kvm_tpa(vcpu, inst);
1997                 break;
1998         case EVENT_TAK:
1999                 kvm_tak(vcpu, inst);
2000                 break;
2001         case EVENT_MOV_TO_AR_IMM:
2002                 kvm_mov_to_ar_imm(vcpu, inst);
2003                 break;
2004         case EVENT_MOV_TO_AR:
2005                 kvm_mov_to_ar_reg(vcpu, inst);
2006                 break;
2007         case EVENT_MOV_FROM_AR:
2008                 kvm_mov_from_ar_reg(vcpu, inst);
2009                 break;
2010         case EVENT_MOV_TO_DBR:
2011                 kvm_mov_to_dbr(vcpu, inst);
2012                 break;
2013         case EVENT_MOV_TO_IBR:
2014                 kvm_mov_to_ibr(vcpu, inst);
2015                 break;
2016         case EVENT_MOV_TO_PMC:
2017                 kvm_mov_to_pmc(vcpu, inst);
2018                 break;
2019         case EVENT_MOV_TO_PMD:
2020                 kvm_mov_to_pmd(vcpu, inst);
2021                 break;
2022         case EVENT_MOV_TO_PKR:
2023                 kvm_mov_to_pkr(vcpu, inst);
2024                 break;
2025         case EVENT_MOV_FROM_DBR:
2026                 kvm_mov_from_dbr(vcpu, inst);
2027                 break;
2028         case EVENT_MOV_FROM_IBR:
2029                 kvm_mov_from_ibr(vcpu, inst);
2030                 break;
2031         case EVENT_MOV_FROM_PMC:
2032                 kvm_mov_from_pmc(vcpu, inst);
2033                 break;
2034         case EVENT_MOV_FROM_PKR:
2035                 kvm_mov_from_pkr(vcpu, inst);
2036                 break;
2037         case EVENT_MOV_FROM_CPUID:
2038                 kvm_mov_from_cpuid(vcpu, inst);
2039                 break;
2040         case EVENT_VMSW:
2041                 status = IA64_FAULT;
2042                 break;
2043         default:
2044                 break;
2045         }
2046         /* status stays IA64_NO_FAULT except for kvm_tpa() and EVENT_VMSW. */
2047         if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2048                 vcpu_increment_iip(vcpu);
2049
2050         recover_if_physical_mode(vcpu);
2051 }
2052
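/*
 * Architectural reset state of the virtual cpu.  rr = 0x38 encodes rid 0
 * with a 16KB preferred page size (ps = 14) and the VHPT walker disabled;
 * 0x10000 sets bit 16 (the mask bit) of tpr/itv/pmv/cmcv/lrr*, so all of
 * these interrupt sources start out masked.
 */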
2053 void init_vcpu(struct kvm_vcpu *vcpu)
2054 {
2055         int i;
2056
2057         vcpu->arch.mode_flags = GUEST_IN_PHY;
2058         VMX(vcpu, vrr[0]) = 0x38;
2059         VMX(vcpu, vrr[1]) = 0x38;
2060         VMX(vcpu, vrr[2]) = 0x38;
2061         VMX(vcpu, vrr[3]) = 0x38;
2062         VMX(vcpu, vrr[4]) = 0x38;
2063         VMX(vcpu, vrr[5]) = 0x38;
2064         VMX(vcpu, vrr[6]) = 0x38;
2065         VMX(vcpu, vrr[7]) = 0x38;
2066         VCPU(vcpu, vpsr) = IA64_PSR_BN;
2067         VCPU(vcpu, dcr) = 0;
2068         /* pta.size must not be 0.  The minimum is 15 (32k) */
2069         VCPU(vcpu, pta) = 15 << 2;
2070         VCPU(vcpu, itv) = 0x10000;
2071         VCPU(vcpu, itm) = 0;
2072         VMX(vcpu, last_itc) = 0;
2073
2074         VCPU(vcpu, lid) = VCPU_LID(vcpu);
2075         VCPU(vcpu, ivr) = 0;
2076         VCPU(vcpu, tpr) = 0x10000;
2077         VCPU(vcpu, eoi) = 0;
2078         VCPU(vcpu, irr[0]) = 0;
2079         VCPU(vcpu, irr[1]) = 0;
2080         VCPU(vcpu, irr[2]) = 0;
2081         VCPU(vcpu, irr[3]) = 0;
2082         VCPU(vcpu, pmv) = 0x10000;
2083         VCPU(vcpu, cmcv) = 0x10000;
2084         VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
2085         VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
2086         update_vhpi(vcpu, NULL_VECTOR);
2087         VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
2088
2089         for (i = 0; i < 4; i++)
2090                 VLSAPIC_INSVC(vcpu, i) = 0;
2091 }
2092
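/*
 * Load the machine region registers from the guest's vrr values (converted
 * by vrrtomrr()), with interrupts disabled.  rr0 and rr4 get the
 * metaphysical values while the guest is in physical addressing mode, and
 * the translated guest values otherwise.  Note that rr6 is not reloaded here.
 */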
2093 void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2094 {
2095         unsigned long psr;
2096
2097         local_irq_save(psr);
2098
2099         /* WARNING: virtual mode and physical mode must not co-exist
2100          * in the same region.
2101          */
2102
2103         vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2104         vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2105
2106         if (is_physical_mode(vcpu)) {
2107                 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2108                         panic_vm(vcpu, "Machine Status conflicts!\n");
2109
2110                 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2111                 ia64_dv_serialize_data();
2112                 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2113                 ia64_dv_serialize_data();
2114         } else {
2115                 ia64_set_rr((VRN0 << VRN_SHIFT),
2116                                 vcpu->arch.metaphysical_saved_rr0);
2117                 ia64_dv_serialize_data();
2118                 ia64_set_rr((VRN4 << VRN_SHIFT),
2119                                 vcpu->arch.metaphysical_saved_rr4);
2120                 ia64_dv_serialize_data();
2121         }
2122         ia64_set_rr((VRN1 << VRN_SHIFT),
2123                         vrrtomrr(VMX(vcpu, vrr[VRN1])));
2124         ia64_dv_serialize_data();
2125         ia64_set_rr((VRN2 << VRN_SHIFT),
2126                         vrrtomrr(VMX(vcpu, vrr[VRN2])));
2127         ia64_dv_serialize_data();
2128         ia64_set_rr((VRN3 << VRN_SHIFT),
2129                         vrrtomrr(VMX(vcpu, vrr[VRN3])));
2130         ia64_dv_serialize_data();
2131         ia64_set_rr((VRN5 << VRN_SHIFT),
2132                         vrrtomrr(VMX(vcpu, vrr[VRN5])));
2133         ia64_dv_serialize_data();
2134         ia64_set_rr((VRN7 << VRN_SHIFT),
2135                         vrrtomrr(VMX(vcpu, vrr[VRN7])));
2136         ia64_dv_serialize_data();
2137         ia64_srlz_d();
2138         ia64_set_psr(psr);
2139 }
2140
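/*
 * (Re)initialization entry on the VMM side: restore the VPD through the
 * PAL_VPS_RESTORE service, set up the software VTLB and VHPT, initialize
 * the vcpu's architectural state and region registers, then branch to the
 * guest resume path via vmm_reset_entry().
 */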
2141 int vmm_entry(void)
2142 {
2143         struct kvm_vcpu *v;
2144         v = current_vcpu;
2145
2146         ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2147                                                 0, 0, 0, 0, 0, 0);
2148         kvm_init_vtlb(v);
2149         kvm_init_vhpt(v);
2150         init_vcpu(v);
2151         kvm_init_all_rr(v);
2152         vmm_reset_entry();
2153
2154         return 0;
2155 }
2156
2157 static void kvm_show_registers(struct kvm_pt_regs *regs)
2158 {
2159         unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
2160
2161         struct kvm_vcpu *vcpu = current_vcpu;
2162         if (vcpu != NULL)
2163                 printk("vcpu 0x%p vcpu_id %d\n",
2164                        vcpu, vcpu->vcpu_id);
2165
2166         printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
2167                regs->cr_ipsr, regs->cr_ifs, ip);
2168
2169         printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
2170                regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
2171         printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
2172                regs->ar_rnat, regs->ar_bspstore, regs->pr);
2173         printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
2174                regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
2175         printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
2176         printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
2177                                                         regs->b6, regs->b7);
2178         printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
2179                regs->f6.u.bits[1], regs->f6.u.bits[0],
2180                regs->f7.u.bits[1], regs->f7.u.bits[0]);
2181         printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
2182                regs->f8.u.bits[1], regs->f8.u.bits[0],
2183                regs->f9.u.bits[1], regs->f9.u.bits[0]);
2184         printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
2185                regs->f10.u.bits[1], regs->f10.u.bits[0],
2186                regs->f11.u.bits[1], regs->f11.u.bits[0]);
2187
2188         printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
2189                                                         regs->r2, regs->r3);
2190         printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
2191                                                         regs->r9, regs->r10);
2192         printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
2193                                                         regs->r12, regs->r13);
2194         printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
2195                                                         regs->r15, regs->r16);
2196         printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
2197                                                         regs->r18, regs->r19);
2198         printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
2199                                                         regs->r21, regs->r22);
2200         printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
2201                                                         regs->r24, regs->r25);
2202         printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
2203                                                         regs->r27, regs->r28);
2204         printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
2205                                                         regs->r30, regs->r31);
2206
2207 }
2208
2209 void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
2210 {
2211         va_list args;
2212         char buf[256];
2213
2214         struct kvm_pt_regs *regs = vcpu_regs(v);
2215         struct exit_ctl_data *p = &v->arch.exit_data;
2216         va_start(args, fmt);
2217         vsnprintf(buf, sizeof(buf), fmt, args);
2218         va_end(args);
2219         printk("%s", buf);
2220         kvm_show_registers(regs);
2221         p->exit_reason = EXIT_REASON_VM_PANIC;
2222         vmm_transition(v);
2223         /* Never returns */
2224         while (1);
2225 }