/*
 * kvm_vcpu.c: handling all virtual cpu related things.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *  Shaofan Li (Susue Li) <susie.li@intel.com>
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 */

#include <linux/kvm_host.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/ia64regs.h>
#include <asm/gcc_intrin.h>
#include <asm/kregs.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include "asm-offsets.h"
#include "vcpu.h"

/*
 * Special notes:
 * - Indexed by (it, dt, rt) sequence
 * - Only existing mode transitions are allowed in this table
 * - The RSE is placed in lazy mode when emulating guest partial mode
 * - If gva happens to be in rr0 or rr4, the only allowed case is identity
 *   mapping (gva = gpa), or panic! (How?)
 */
int mm_switch_table[8][8] = {
        /*  2004/09/12(Kevin): Allow switch to self */
        /*
         *  (it,dt,rt): (0,0,0) -> (1,1,1)
         *  This kind of transition usually occurs in the very early
         *  stage of the Linux boot up procedure. Another case is in efi
         *  and pal calls. (see "arch/ia64/kernel/head.S")
         *
         *  (it,dt,rt): (0,0,0) -> (0,1,1)
         *  This kind of transition is found when OSYa exits the efi boot
         *  service. Due to gva = gpa in this case (same region), data
         *  access can be satisfied through the itlb entry installed for
         *  physical emulation.
         */
        {SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
        {0,  0,  0,  0,  0,  0,  0,  0},
        {0,  0,  0,  0,  0,  0,  0,  0},
        /*
         *  (it,dt,rt): (0,1,1) -> (1,1,1)
         *  This kind of transition is found in OSYa.
         *
         *  (it,dt,rt): (0,1,1) -> (0,0,0)
         *  This kind of transition is found in OSYa.
         */
        {SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
        /* (1,0,0)->(1,1,1) */
        {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
        /*
         *  (it,dt,rt): (1,0,1) -> (1,1,1)
         *  This kind of transition usually occurs when Linux returns
         *  from the low level TLB miss handlers.
         *  (see "arch/ia64/kernel/ivt.S")
         */
        {0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
        {0,  0,  0,  0,  0,  0,  0,  0},
        /*
         *  (it,dt,rt): (1,1,1) -> (1,0,1)
         *  This kind of transition usually occurs in the Linux low level
         *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
         *
         *  (it,dt,rt): (1,1,1) -> (0,0,0)
         *  This kind of transition usually occurs in pal and efi calls,
         *  which require running in physical mode.
         *  (see "arch/ia64/kernel/head.S")
         *  (1,1,1) -> (1,0,0)
         */
        {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
};
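
/*
 * Illustrative example (assuming MODE_IND() builds a 3-bit index from
 * (it, dt, rt) with it as the most significant bit, which matches the
 * table layout above):
 *
 *   old vpsr: it=0, dt=1, rt=1  ->  MODE_IND(old) == 3
 *   new vpsr: it=1, dt=1, rt=1  ->  MODE_IND(new) == 7
 *
 *   mm_switch_table[3][7] == SW_P2V, so switch_mm_mode() switches back
 *   to the guest's virtual RIDs and clears GUEST_IN_PHY.
 */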

void physical_mode_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.mode_flags = GUEST_IN_PHY;
}

void switch_to_physical_rid(struct kvm_vcpu *vcpu)
{
        unsigned long psr;

        /*
         * Load the metaphysical rr[0] and rr[4] used for physical mode
         * emulation; the guest's virtual-mode values remain available in
         * metaphysical_saved_rr0/rr4.
         */
        psr = ia64_clear_ic();
        ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_rr0);
        ia64_srlz_d();
        ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_rr4);
        ia64_srlz_d();

        ia64_set_psr(psr);
        return;
}

/* Restore the guest's virtual-mode rr[0] and rr[4]. */
void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
{
        unsigned long psr;

        psr = ia64_clear_ic();
        ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
        ia64_srlz_d();
        ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
        ia64_srlz_d();
        ia64_set_psr(psr);
        return;
}

static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
{
        return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
}

void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
                                        struct ia64_psr new_psr)
{
        int act;
        act = mm_switch_action(old_psr, new_psr);
        switch (act) {
        case SW_V2P:
                /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
                old_psr.val, new_psr.val);*/
                switch_to_physical_rid(vcpu);
                /*
                 * Set rse to enforced lazy, to prevent active rse
                 * save/restore when in guest physical mode.
                 */
                vcpu->arch.mode_flags |= GUEST_IN_PHY;
                break;
        case SW_P2V:
                switch_to_virtual_rid(vcpu);
                /*
                 * Recover the old mode, which was saved when entering
                 * guest physical mode.
                 */
                vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
                break;
        case SW_SELF:
                break;
        case SW_NOP:
                break;
        default:
                /* Sanity check */
                break;
        }
        return;
}

void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
                                        struct ia64_psr new_psr)
{
        if ((old_psr.dt != new_psr.dt)
                        || (old_psr.it != new_psr.it)
                        || (old_psr.rt != new_psr.rt))
                switch_mm_mode(vcpu, old_psr, new_psr);

        return;
}

/*
 * In physical mode, tc/tr inserts for region 0 and 4 use RID[0] and
 * RID[4], which are reserved for physical mode emulation. However, what
 * those inserted tc/tr entries want is the rid for virtual mode. So the
 * original virtual rid needs to be restored before the insert.
 *
 * Operations which require such a switch include:
 *  - insertions (itc.*, itr.*)
 *  - purges (ptc.* and ptr.*)
 *  - tpa
 *  - tak
 *  - thash?, ttag?
 * All of the above need the actual virtual rid for the destination entry.
 */

void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
{
        if (is_physical_mode(vcpu)) {
                vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
                switch_to_virtual_rid(vcpu);
        }
        return;
}

/* Recover always follows prepare */
void recover_if_physical_mode(struct kvm_vcpu *vcpu)
{
        if (is_physical_mode(vcpu))
                switch_to_physical_rid(vcpu);
        vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
        return;
}

#define RPT(x)  ((u16) &((struct kvm_pt_regs *)0)->x)

static u16 gr_info[32] = {
        0,      /* r0 is read-only : WE SHOULD NEVER GET THIS */
        RPT(r1), RPT(r2), RPT(r3),
        RPT(r4), RPT(r5), RPT(r6), RPT(r7),
        RPT(r8), RPT(r9), RPT(r10), RPT(r11),
        RPT(r12), RPT(r13), RPT(r14), RPT(r15),
        RPT(r16), RPT(r17), RPT(r18), RPT(r19),
        RPT(r20), RPT(r21), RPT(r22), RPT(r23),
        RPT(r24), RPT(r25), RPT(r26), RPT(r27),
        RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

#define IA64_FIRST_STACKED_GR   32
#define IA64_FIRST_ROTATING_FR  32

static inline unsigned long
rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
{
        reg += rrb;
        if (reg >= sor)
                reg -= sor;
        return reg;
}
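
/*
 * Worked example (a sketch; sor and rrb come from the current frame's
 * cr.ifs fields): with a rotating region of sor = 16 registers and a
 * rename base of rrb = 5, logical register 13 maps to
 *
 *   rotate_reg(16, 5, 13) == (13 + 5) - 16 == 2
 *
 * i.e. the access wraps around inside the rotating portion of the frame.
 */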

/*
 * Return the (rotated) index of the floating point register REGNUM
 * (REGNUM must be in the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long fph_index(struct kvm_pt_regs *regs,
                                                long regnum)
{
        unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
        return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
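
/*
 * Example (assuming the caller adds IA64_FIRST_ROTATING_FR back, as
 * getfpreg()/setfpreg() below do): with rrb.fr == 2 taken from cr.ifs,
 * an access to f34 yields
 *
 *   fph_index(regs, 34) == rotate_reg(96, 2, 34 - 32) == 4
 *
 * so the physical register actually touched is f(32 + 4) == f36.
 */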

/*
 * Skip NUM_REGS stacked registers starting at ADDR, taking the
 * intervening RNAT collection slots (one per 63 registers) into
 * account, and return the resulting backing-store address.
 */
static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
                                                        long num_regs)
{
        long delta = ia64_rse_slot_num(addr) + num_regs;
        int i = 0;

        if (num_regs < 0)
                delta -= 0x3e;
        if (delta < 0) {
                while (delta <= -0x3f) {
                        i--;
                        delta += 0x3f;
                }
        } else {
                while (delta >= 0x3f) {
                        i++;
                        delta -= 0x3f;
                }
        }

        return addr + num_regs + i;
}
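
/*
 * Worked example (a sketch of the RNAT-slot arithmetic): the RSE backing
 * store dedicates every 64th slot to a NaT collection, so only 63 of
 * each 64 slots hold registers.  If ia64_rse_slot_num(addr) == 60 and
 * num_regs == 10, then delta == 70 >= 0x3f, so i becomes 1 and the
 * result is addr + 10 + 1: one extra slot is skipped for the RNAT
 * collection that lies between the two registers.
 */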

static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
                                        unsigned long *val, int *nat)
{
        unsigned long *bsp, *addr, *rnat_addr, *bspstore;
        unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
        unsigned long nat_mask;
        unsigned long old_rsc, new_rsc;
        long sof = (regs->cr_ifs) & 0x7f;
        long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
        long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
        long ridx = r1 - 32;

        if (ridx < sor)
                ridx = rotate_reg(sor, rrb_gr, ridx);

        /* Put the RSE into enforced lazy mode while we read the backing
         * store. */
        old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
        new_rsc = old_rsc & (~(0x3));
        ia64_setreg(_IA64_REG_AR_RSC, new_rsc);

        bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
        bsp = kbs + (regs->loadrs >> 19);

        addr = kvm_rse_skip_regs(bsp, -sof + ridx);
        nat_mask = 1UL << ia64_rse_slot_num(addr);
        rnat_addr = ia64_rse_rnat_addr(addr);

        if (addr >= bspstore) {
                ia64_flushrs();
                ia64_mf();
                bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
        }
        *val = *addr;
        if (nat) {
                if (bspstore < rnat_addr)
                        *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
                                                        & nat_mask);
                else
                        *nat = (int)!!((*rnat_addr) & nat_mask);
        }
        /* Restore the caller's RSC mode. */
        ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
                                unsigned long val, unsigned long nat)
{
        unsigned long *bsp, *bspstore, *addr, *rnat_addr;
        unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
        unsigned long nat_mask;
        unsigned long old_rsc, new_rsc, psr;
        unsigned long rnat;
        long sof = (regs->cr_ifs) & 0x7f;
        long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
        long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
        long ridx = r1 - 32;

        if (ridx < sor)
                ridx = rotate_reg(sor, rrb_gr, ridx);

        old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
        /* put RSC into enforced lazy mode, and set loadrs to 0 */
        new_rsc = old_rsc & (~0x3fff0003);
        ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
        /*
         * regs->loadrs is kept in ar.rsc format (value << 16) and each
         * backing-store slot is 8 bytes, hence the shift by 19 (16 + 3).
         */
        bsp = kbs + (regs->loadrs >> 19);

        addr = kvm_rse_skip_regs(bsp, -sof + ridx);
        nat_mask = 1UL << ia64_rse_slot_num(addr);
        rnat_addr = ia64_rse_rnat_addr(addr);

        local_irq_save(psr);
        bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
        if (addr >= bspstore) {
                ia64_flushrs();
                ia64_mf();
                *addr = val;
                bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
                rnat = ia64_getreg(_IA64_REG_AR_RNAT);
                if (bspstore < rnat_addr)
                        rnat = rnat & (~nat_mask);
                else
                        *rnat_addr = (*rnat_addr) & (~nat_mask);

                ia64_mf();
                ia64_loadrs();
                ia64_setreg(_IA64_REG_AR_RNAT, rnat);
        } else {
                rnat = ia64_getreg(_IA64_REG_AR_RNAT);
                *addr = val;
                if (bspstore < rnat_addr)
                        rnat = rnat & (~nat_mask);
                else
                        *rnat_addr = (*rnat_addr) & (~nat_mask);

                ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
                ia64_setreg(_IA64_REG_AR_RNAT, rnat);
        }
        local_irq_restore(psr);
        ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
}

void getreg(unsigned long regnum, unsigned long *val,
                                int *nat, struct kvm_pt_regs *regs)
{
        unsigned long addr, *unat;
        if (regnum >= IA64_FIRST_STACKED_GR) {
                get_rse_reg(regs, regnum, val, nat);
                return;
        }

        /*
         * Now look at registers in [0-31] range and init correct UNAT
         */
        addr = (unsigned long)regs;
        unat = &regs->eml_unat;

        addr += gr_info[regnum];

        *val  = *(unsigned long *)addr;
        /*
         * do it only when requested
         */
        if (nat)
                *nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
}

void setreg(unsigned long regnum, unsigned long val,
                        int nat, struct kvm_pt_regs *regs)
{
        unsigned long addr;
        unsigned long bitmask;
        unsigned long *unat;

        /*
         * First take care of stacked registers
         */
        if (regnum >= IA64_FIRST_STACKED_GR) {
                set_rse_reg(regs, regnum, val, nat);
                return;
        }

        /*
         * Now look at registers in [0-31] range and init correct UNAT
         */
        addr = (unsigned long)regs;
        unat = &regs->eml_unat;
        /*
         * add offset from base of struct
         * and do it !
         */
        addr += gr_info[regnum];

        *(unsigned long *)addr = val;

        /*
         * We need to update the corresponding UNAT bit to fully emulate
         * the load: UNAT bit_pos = GR[r3]{8:3}, from EAS-2.4
         */
        bitmask   = 1UL << ((addr >> 3) & 0x3f);
        if (nat)
                *unat |= bitmask;
        else
                *unat &= ~bitmask;
}
u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        u64 val;

        if (!reg)
                return 0;
        getreg(reg, &val, 0, regs);
        return val;
}

void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        long sof = (regs->cr_ifs) & 0x7f;

        if (!reg)
                return;
        if (reg >= sof + 32)
                return;
        setreg(reg, value, nat, regs);  /* FIXME: handle NATs later*/
}

void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
                                struct kvm_pt_regs *regs)
{
        /* Take floating register rotation into consideration */
        if (regnum >= IA64_FIRST_ROTATING_FR)
                regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
#define CASE_FIXED_FP(reg)                      \
        case  (reg) :                           \
                ia64_stf_spill(fpval, reg);     \
        break

        switch (regnum) {
                CASE_FIXED_FP(0);
                CASE_FIXED_FP(1);
                CASE_FIXED_FP(2);
                CASE_FIXED_FP(3);
                CASE_FIXED_FP(4);
                CASE_FIXED_FP(5);

                CASE_FIXED_FP(6);
                CASE_FIXED_FP(7);
                CASE_FIXED_FP(8);
                CASE_FIXED_FP(9);
                CASE_FIXED_FP(10);
                CASE_FIXED_FP(11);

                CASE_FIXED_FP(12);
                CASE_FIXED_FP(13);
                CASE_FIXED_FP(14);
                CASE_FIXED_FP(15);
                CASE_FIXED_FP(16);
                CASE_FIXED_FP(17);
                CASE_FIXED_FP(18);
                CASE_FIXED_FP(19);
                CASE_FIXED_FP(20);
                CASE_FIXED_FP(21);
                CASE_FIXED_FP(22);
                CASE_FIXED_FP(23);
                CASE_FIXED_FP(24);
                CASE_FIXED_FP(25);
                CASE_FIXED_FP(26);
                CASE_FIXED_FP(27);
                CASE_FIXED_FP(28);
                CASE_FIXED_FP(29);
                CASE_FIXED_FP(30);
                CASE_FIXED_FP(31);
                CASE_FIXED_FP(32);
                CASE_FIXED_FP(33);
                CASE_FIXED_FP(34);
                CASE_FIXED_FP(35);
                CASE_FIXED_FP(36);
                CASE_FIXED_FP(37);
                CASE_FIXED_FP(38);
                CASE_FIXED_FP(39);
                CASE_FIXED_FP(40);
                CASE_FIXED_FP(41);
                CASE_FIXED_FP(42);
                CASE_FIXED_FP(43);
                CASE_FIXED_FP(44);
                CASE_FIXED_FP(45);
                CASE_FIXED_FP(46);
                CASE_FIXED_FP(47);
                CASE_FIXED_FP(48);
                CASE_FIXED_FP(49);
                CASE_FIXED_FP(50);
                CASE_FIXED_FP(51);
                CASE_FIXED_FP(52);
                CASE_FIXED_FP(53);
                CASE_FIXED_FP(54);
                CASE_FIXED_FP(55);
                CASE_FIXED_FP(56);
                CASE_FIXED_FP(57);
                CASE_FIXED_FP(58);
                CASE_FIXED_FP(59);
                CASE_FIXED_FP(60);
                CASE_FIXED_FP(61);
                CASE_FIXED_FP(62);
                CASE_FIXED_FP(63);
                CASE_FIXED_FP(64);
                CASE_FIXED_FP(65);
                CASE_FIXED_FP(66);
                CASE_FIXED_FP(67);
                CASE_FIXED_FP(68);
                CASE_FIXED_FP(69);
                CASE_FIXED_FP(70);
                CASE_FIXED_FP(71);
                CASE_FIXED_FP(72);
                CASE_FIXED_FP(73);
                CASE_FIXED_FP(74);
                CASE_FIXED_FP(75);
                CASE_FIXED_FP(76);
                CASE_FIXED_FP(77);
                CASE_FIXED_FP(78);
                CASE_FIXED_FP(79);
                CASE_FIXED_FP(80);
                CASE_FIXED_FP(81);
                CASE_FIXED_FP(82);
                CASE_FIXED_FP(83);
                CASE_FIXED_FP(84);
                CASE_FIXED_FP(85);
                CASE_FIXED_FP(86);
                CASE_FIXED_FP(87);
                CASE_FIXED_FP(88);
                CASE_FIXED_FP(89);
                CASE_FIXED_FP(90);
                CASE_FIXED_FP(91);
                CASE_FIXED_FP(92);
                CASE_FIXED_FP(93);
                CASE_FIXED_FP(94);
                CASE_FIXED_FP(95);
                CASE_FIXED_FP(96);
                CASE_FIXED_FP(97);
                CASE_FIXED_FP(98);
                CASE_FIXED_FP(99);
                CASE_FIXED_FP(100);
                CASE_FIXED_FP(101);
                CASE_FIXED_FP(102);
                CASE_FIXED_FP(103);
                CASE_FIXED_FP(104);
                CASE_FIXED_FP(105);
                CASE_FIXED_FP(106);
                CASE_FIXED_FP(107);
                CASE_FIXED_FP(108);
                CASE_FIXED_FP(109);
                CASE_FIXED_FP(110);
                CASE_FIXED_FP(111);
                CASE_FIXED_FP(112);
                CASE_FIXED_FP(113);
                CASE_FIXED_FP(114);
                CASE_FIXED_FP(115);
                CASE_FIXED_FP(116);
                CASE_FIXED_FP(117);
                CASE_FIXED_FP(118);
                CASE_FIXED_FP(119);
                CASE_FIXED_FP(120);
                CASE_FIXED_FP(121);
                CASE_FIXED_FP(122);
                CASE_FIXED_FP(123);
                CASE_FIXED_FP(124);
                CASE_FIXED_FP(125);
                CASE_FIXED_FP(126);
                CASE_FIXED_FP(127);
        }
#undef CASE_FIXED_FP
}

void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
                                        struct kvm_pt_regs *regs)
{
        /* Take floating register rotation into consideration */
        if (regnum >= IA64_FIRST_ROTATING_FR)
                regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);

#define CASE_FIXED_FP(reg)                      \
        case (reg) :                            \
                ia64_ldf_fill(reg, fpval);      \
        break

        switch (regnum) {
                CASE_FIXED_FP(2);
                CASE_FIXED_FP(3);
                CASE_FIXED_FP(4);
                CASE_FIXED_FP(5);

                CASE_FIXED_FP(6);
                CASE_FIXED_FP(7);
                CASE_FIXED_FP(8);
                CASE_FIXED_FP(9);
                CASE_FIXED_FP(10);
                CASE_FIXED_FP(11);

                CASE_FIXED_FP(12);
                CASE_FIXED_FP(13);
                CASE_FIXED_FP(14);
                CASE_FIXED_FP(15);
                CASE_FIXED_FP(16);
                CASE_FIXED_FP(17);
                CASE_FIXED_FP(18);
                CASE_FIXED_FP(19);
                CASE_FIXED_FP(20);
                CASE_FIXED_FP(21);
                CASE_FIXED_FP(22);
                CASE_FIXED_FP(23);
                CASE_FIXED_FP(24);
                CASE_FIXED_FP(25);
                CASE_FIXED_FP(26);
                CASE_FIXED_FP(27);
                CASE_FIXED_FP(28);
                CASE_FIXED_FP(29);
                CASE_FIXED_FP(30);
                CASE_FIXED_FP(31);
                CASE_FIXED_FP(32);
                CASE_FIXED_FP(33);
                CASE_FIXED_FP(34);
                CASE_FIXED_FP(35);
                CASE_FIXED_FP(36);
                CASE_FIXED_FP(37);
                CASE_FIXED_FP(38);
                CASE_FIXED_FP(39);
                CASE_FIXED_FP(40);
                CASE_FIXED_FP(41);
                CASE_FIXED_FP(42);
                CASE_FIXED_FP(43);
                CASE_FIXED_FP(44);
                CASE_FIXED_FP(45);
                CASE_FIXED_FP(46);
                CASE_FIXED_FP(47);
                CASE_FIXED_FP(48);
                CASE_FIXED_FP(49);
                CASE_FIXED_FP(50);
                CASE_FIXED_FP(51);
                CASE_FIXED_FP(52);
                CASE_FIXED_FP(53);
                CASE_FIXED_FP(54);
                CASE_FIXED_FP(55);
                CASE_FIXED_FP(56);
                CASE_FIXED_FP(57);
                CASE_FIXED_FP(58);
                CASE_FIXED_FP(59);
                CASE_FIXED_FP(60);
                CASE_FIXED_FP(61);
                CASE_FIXED_FP(62);
                CASE_FIXED_FP(63);
                CASE_FIXED_FP(64);
                CASE_FIXED_FP(65);
                CASE_FIXED_FP(66);
                CASE_FIXED_FP(67);
                CASE_FIXED_FP(68);
                CASE_FIXED_FP(69);
                CASE_FIXED_FP(70);
                CASE_FIXED_FP(71);
                CASE_FIXED_FP(72);
                CASE_FIXED_FP(73);
                CASE_FIXED_FP(74);
                CASE_FIXED_FP(75);
                CASE_FIXED_FP(76);
                CASE_FIXED_FP(77);
                CASE_FIXED_FP(78);
                CASE_FIXED_FP(79);
                CASE_FIXED_FP(80);
                CASE_FIXED_FP(81);
                CASE_FIXED_FP(82);
                CASE_FIXED_FP(83);
                CASE_FIXED_FP(84);
                CASE_FIXED_FP(85);
                CASE_FIXED_FP(86);
                CASE_FIXED_FP(87);
                CASE_FIXED_FP(88);
                CASE_FIXED_FP(89);
                CASE_FIXED_FP(90);
                CASE_FIXED_FP(91);
                CASE_FIXED_FP(92);
                CASE_FIXED_FP(93);
                CASE_FIXED_FP(94);
                CASE_FIXED_FP(95);
                CASE_FIXED_FP(96);
                CASE_FIXED_FP(97);
                CASE_FIXED_FP(98);
                CASE_FIXED_FP(99);
                CASE_FIXED_FP(100);
                CASE_FIXED_FP(101);
                CASE_FIXED_FP(102);
                CASE_FIXED_FP(103);
                CASE_FIXED_FP(104);
                CASE_FIXED_FP(105);
                CASE_FIXED_FP(106);
                CASE_FIXED_FP(107);
                CASE_FIXED_FP(108);
                CASE_FIXED_FP(109);
                CASE_FIXED_FP(110);
                CASE_FIXED_FP(111);
                CASE_FIXED_FP(112);
                CASE_FIXED_FP(113);
                CASE_FIXED_FP(114);
                CASE_FIXED_FP(115);
                CASE_FIXED_FP(116);
                CASE_FIXED_FP(117);
                CASE_FIXED_FP(118);
                CASE_FIXED_FP(119);
                CASE_FIXED_FP(120);
                CASE_FIXED_FP(121);
                CASE_FIXED_FP(122);
                CASE_FIXED_FP(123);
                CASE_FIXED_FP(124);
                CASE_FIXED_FP(125);
                CASE_FIXED_FP(126);
                CASE_FIXED_FP(127);
        }
}

void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
                                                struct ia64_fpreg *val)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
}

void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
                                                struct ia64_fpreg *val)
{
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        if (reg > 1)
                setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
}

/************************************************************************
 * lsapic timer
 ***********************************************************************/
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
{
        unsigned long guest_itc;
        guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);

        if (guest_itc >= VMX(vcpu, last_itc)) {
                VMX(vcpu, last_itc) = guest_itc;
                return guest_itc;
        } else
                return VMX(vcpu, last_itc);
}
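
/*
 * Note on the last_itc clamp above (a sketch of the intent): the guest
 * ITC is the host ar.itc plus a per-vcpu itc_offset, so whenever the
 * offset is rewritten (see vcpu_set_itc() below) the raw sum could move
 * backwards; returning max(guest_itc, last_itc) keeps the guest's view
 * of time monotonically increasing.
 */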

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
{
        struct kvm_vcpu *v;
        int i;
        long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
        unsigned long vitv = VCPU(vcpu, itv);

        if (vcpu->vcpu_id == 0) {
                for (i = 0; i < MAX_VCPU_NUM; i++) {
                        v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
                        VMX(v, itc_offset) = itc_offset;
                        VMX(v, last_itc) = 0;
                }
        }
        VMX(vcpu, last_itc) = 0;
        if (VCPU(vcpu, itm) <= val) {
                VMX(vcpu, itc_check) = 0;
                vcpu_unpend_interrupt(vcpu, vitv);
        } else {
                VMX(vcpu, itc_check) = 1;
                vcpu_set_itm(vcpu, VCPU(vcpu, itm));
        }
}

static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
{
        return ((u64)VCPU(vcpu, itm));
}

static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
{
        unsigned long vitv = VCPU(vcpu, itv);
        VCPU(vcpu, itm) = val;

        if (val > vcpu_get_itc(vcpu)) {
                VMX(vcpu, itc_check) = 1;
                vcpu_unpend_interrupt(vcpu, vitv);
                VMX(vcpu, timer_pending) = 0;
        } else
                VMX(vcpu, itc_check) = 0;
}

#define  ITV_VECTOR(itv)    (itv&0xff)
#define  ITV_IRQ_MASK(itv)  (itv&(1<<16))

static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
{
        VCPU(vcpu, itv) = val;
        if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
                vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
                vcpu->arch.timer_pending = 0;
        }
}

static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
{
        int vec;

        vec = highest_inservice_irq(vcpu);
        if (vec == NULL_VECTOR)
                return;
        VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
        VCPU(vcpu, eoi) = 0;
        vcpu->arch.irq_new_pending = 1;
}

/* See Table 5-8 in SDM vol2 for the definition */
int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
{
        union ia64_tpr vtpr;

        vtpr.val = VCPU(vcpu, tpr);

        if (h_inservice == NMI_VECTOR)
                return IRQ_MASKED_BY_INSVC;

        if (h_pending == NMI_VECTOR) {
                /* Non Maskable Interrupt */
                return IRQ_NO_MASKED;
        }

        if (h_inservice == ExtINT_VECTOR)
                return IRQ_MASKED_BY_INSVC;

        if (h_pending == ExtINT_VECTOR) {
                if (vtpr.mmi) {
                        /* mask all external IRQ */
                        return IRQ_MASKED_BY_VTPR;
                } else
                        return IRQ_NO_MASKED;
        }

        if (is_higher_irq(h_pending, h_inservice)) {
                if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
                        return IRQ_NO_MASKED;
                else
                        return IRQ_MASKED_BY_VTPR;
        } else {
                return IRQ_MASKED_BY_INSVC;
        }
}
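
/*
 * Masking example (a sketch; it assumes is_higher_class() compares the
 * priority class vec >> 4 against its second argument, and that
 * vtpr.mmi == 0): with vtpr.mic == 5, a pending vector 0x62 (class 6)
 * is deliverable, while a pending vector 0x53 (class 5) reports
 * IRQ_MASKED_BY_VTPR until the guest lowers tpr.mic.
 */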

void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
        long spsr;
        int ret;

        local_irq_save(spsr);
        ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
        local_irq_restore(spsr);

        vcpu->arch.irq_new_pending = 1;
}

void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
{
        long spsr;
        int ret;

        local_irq_save(spsr);
        ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
        local_irq_restore(spsr);
        if (ret) {
                vcpu->arch.irq_new_pending = 1;
                wmb();
        }
}

void update_vhpi(struct kvm_vcpu *vcpu, int vec)
{
        u64 vhpi;

        if (vec == NULL_VECTOR)
                vhpi = 0;
        else if (vec == NMI_VECTOR)
                vhpi = 32;
        else if (vec == ExtINT_VECTOR)
                vhpi = 16;
        else
                vhpi = vec >> 4;

        VCPU(vcpu, vhpi) = vhpi;
        if (VCPU(vcpu, vac).a_int)
                ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
                                (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
}

u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
{
        int vec, h_inservice, mask;

        vec = highest_pending_irq(vcpu);
        h_inservice = highest_inservice_irq(vcpu);
        mask = irq_masked(vcpu, vec, h_inservice);
        if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
                if (VCPU(vcpu, vhpi))
                        update_vhpi(vcpu, NULL_VECTOR);
                return IA64_SPURIOUS_INT_VECTOR;
        }
        if (mask == IRQ_MASKED_BY_VTPR) {
                update_vhpi(vcpu, vec);
                return IA64_SPURIOUS_INT_VECTOR;
        }
        VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
        vcpu_unpend_interrupt(vcpu, vec);
        return (u64)vec;
}

/**************************************************************************
  Privileged operation emulation routines
 **************************************************************************/
u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
{
        union ia64_pta vpta;
        union ia64_rr vrr;
        u64 pval;
        u64 vhpt_offset;

        vpta.val = vcpu_get_pta(vcpu);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
        if (vpta.vf) {
                pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
                                vpta.val, 0, 0, 0, 0);
        } else {
                pval = (vadr & VRN_MASK) | vhpt_offset |
                        (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
        }
        return pval;
}
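
/*
 * Short-format thash example (a sketch; the field split follows the
 * computation above): with rr.ps == 14 (16KB pages) and pta.size == 15
 * (a 32KB VHPT), the returned address is assembled from
 *
 *   - the region bits of vadr         (vadr & VRN_MASK)
 *   - the VPN-derived table offset    ((vadr >> 14) << 3) & 0x7fff
 *   - the PTA base above pta.size     (vpta.val << 3 >> 18 << 15)
 *
 * i.e. an 8-byte VHPT entry slot inside the per-region hash table.
 */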

u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
{
        union ia64_rr vrr;
        union ia64_pta vpta;
        u64 pval;

        vpta.val = vcpu_get_pta(vcpu);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        if (vpta.vf) {
                pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
                                                0, 0, 0, 0, 0);
        } else
                pval = 1;

        return pval;
}

u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
{
        struct thash_data *data;
        union ia64_pta vpta;
        u64 key;

        vpta.val = vcpu_get_pta(vcpu);
        if (vpta.vf == 0) {
                key = 1;
                return key;
        }
        data = vtlb_lookup(vcpu, vadr, D_TLB);
        if (!data || !data->p)
                key = 1;
        else
                key = data->key;

        return key;
}

void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long thash, vadr;

        vadr = vcpu_get_gr(vcpu, inst.M46.r3);
        thash = vcpu_thash(vcpu, vadr);
        vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
}

void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long tag, vadr;

        vadr = vcpu_get_gr(vcpu, inst.M46.r3);
        tag = vcpu_ttag(vcpu, vadr);
        vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
}

int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
{
        struct thash_data *data;
        union ia64_isr visr, pt_isr;
        struct kvm_pt_regs *regs;
        struct ia64_psr vpsr;

        regs = vcpu_regs(vcpu);
        pt_isr.val = VMX(vcpu, cr_isr);
        visr.val = 0;
        visr.ei = pt_isr.ei;
        visr.ir = pt_isr.ir;
        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        visr.na = 1;

        data = vhpt_lookup(vadr);
        if (data) {
                if (data->p == 0) {
                        vcpu_set_isr(vcpu, visr.val);
                        data_page_not_present(vcpu, vadr);
                        return IA64_FAULT;
                } else if (data->ma == VA_MATTR_NATPAGE) {
                        vcpu_set_isr(vcpu, visr.val);
                        dnat_page_consumption(vcpu, vadr);
                        return IA64_FAULT;
                } else {
                        *padr = (data->gpaddr >> data->ps << data->ps) |
                                (vadr & (PSIZE(data->ps) - 1));
                        return IA64_NO_FAULT;
                }
        }

        data = vtlb_lookup(vcpu, vadr, D_TLB);
        if (data) {
                if (data->p == 0) {
                        vcpu_set_isr(vcpu, visr.val);
                        data_page_not_present(vcpu, vadr);
                        return IA64_FAULT;
                } else if (data->ma == VA_MATTR_NATPAGE) {
                        vcpu_set_isr(vcpu, visr.val);
                        dnat_page_consumption(vcpu, vadr);
                        return IA64_FAULT;
                } else {
                        *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
                                | (vadr & (PSIZE(data->ps) - 1));
                        return IA64_NO_FAULT;
                }
        }
        if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
                if (vpsr.ic) {
                        vcpu_set_isr(vcpu, visr.val);
                        alt_dtlb(vcpu, vadr);
                        return IA64_FAULT;
                } else {
                        nested_dtlb(vcpu);
                        return IA64_FAULT;
                }
        } else {
                if (vpsr.ic) {
                        vcpu_set_isr(vcpu, visr.val);
                        dvhpt_fault(vcpu, vadr);
                        return IA64_FAULT;
                } else {
                        nested_dtlb(vcpu);
                        return IA64_FAULT;
                }
        }

        return IA64_NO_FAULT;
}
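
/*
 * Example of the padr composition in the VHPT-hit path above (a sketch,
 * with made-up numbers): for a 16KB page (data->ps == 14), a guest
 * physical page at data->gpaddr == 0x4008000 and a vadr ending in 0x123,
 *
 *   *padr == (0x4008000 >> 14 << 14) | (vadr & 0x3fff) == 0x4008123
 *
 * i.e. the page-aligned guest physical address plus the page offset.
 */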

int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r1, r3;

        r3 = vcpu_get_gr(vcpu, inst.M46.r3);

        if (vcpu_tpa(vcpu, r3, &r1))
                return IA64_FAULT;

        vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
        return (IA64_NO_FAULT);
}

void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r1, r3;

        r3 = vcpu_get_gr(vcpu, inst.M46.r3);
        r1 = vcpu_tak(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
}

/************************************
 * Insert/Purge translation register/cache
 ************************************/
void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
        thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
}

void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
{
        thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
}

void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
        u64 ps, va, rid;
        struct thash_data *p_itr;

        ps = itir_ps(itir);
        va = PAGEALIGN(ifa, ps);
        pte &= ~PAGE_FLAGS_RV_MASK;
        rid = vcpu_get_rr(vcpu, ifa);
        rid = rid & RR_RID_MASK;
        p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
        vcpu_set_tr(p_itr, pte, itir, va, rid);
        vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
}

void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
{
        u64 gpfn;
        u64 ps, va, rid;
        struct thash_data *p_dtr;

        ps = itir_ps(itir);
        va = PAGEALIGN(ifa, ps);
        pte &= ~PAGE_FLAGS_RV_MASK;

        if (ps != _PAGE_SIZE_16M)
                thash_purge_entries(vcpu, va, ps);
        gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        if (__gpfn_is_io(gpfn))
                pte |= VTLB_PTE_IO;
        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
        vcpu_set_tr(p_dtr, pte, itir, va, rid);
        vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
}

void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
        int index;
        u64 va;

        va = PAGEALIGN(ifa, ps);
        while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
                vcpu->arch.dtrs[index].page_flags = 0;

        thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
{
        int index;
        u64 va;

        va = PAGEALIGN(ifa, ps);
        while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
                vcpu->arch.itrs[index].page_flags = 0;

        thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
        va = PAGEALIGN(va, ps);
        thash_purge_entries(vcpu, va, ps);
}

void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
{
        thash_purge_all(vcpu);
}

void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;
        long psr;

        local_irq_save(psr);
        p->exit_reason = EXIT_REASON_PTC_G;

        p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
        p->u.ptc_g_data.vaddr = va;
        p->u.ptc_g_data.ps = ps;
        vmm_transition(vcpu);
        /* Do Local Purge Here */
        vcpu_ptc_l(vcpu, va, ps);
        local_irq_restore(psr);
}

void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
{
        vcpu_ptc_ga(vcpu, va, ps);
}

void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        vcpu_ptc_e(vcpu, ifa);
}

void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa, itir;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        itir = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa, itir;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        itir = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
}

void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa, itir;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        itir = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa, itir;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        itir = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
}

void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long ifa, itir;

        ifa = vcpu_get_gr(vcpu, inst.M45.r3);
        itir = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
}

void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long itir, ifa, pte, slot;

        slot = vcpu_get_gr(vcpu, inst.M45.r3);
        pte = vcpu_get_gr(vcpu, inst.M45.r2);
        itir = vcpu_get_itir(vcpu);
        ifa = vcpu_get_ifa(vcpu);
        vcpu_itr_d(vcpu, slot, pte, itir, ifa);
}

void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long itir, ifa, pte, slot;

        slot = vcpu_get_gr(vcpu, inst.M45.r3);
        pte = vcpu_get_gr(vcpu, inst.M45.r2);
        itir = vcpu_get_itir(vcpu);
        ifa = vcpu_get_ifa(vcpu);
        vcpu_itr_i(vcpu, slot, pte, itir, ifa);
}

void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long itir, ifa, pte;

        itir = vcpu_get_itir(vcpu);
        ifa = vcpu_get_ifa(vcpu);
        pte = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_itc_d(vcpu, pte, itir, ifa);
}

void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long itir, ifa, pte;

        itir = vcpu_get_itir(vcpu);
        ifa = vcpu_get_ifa(vcpu);
        pte = vcpu_get_gr(vcpu, inst.M45.r2);
        vcpu_itc_i(vcpu, pte, itir, ifa);
}

/*************************************
 * Moves to semi-privileged registers
 *************************************/

void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long imm;

        if (inst.M30.s)
                imm = -inst.M30.imm;
        else
                imm = inst.M30.imm;

        vcpu_set_itc(vcpu, imm);
}

void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r2;

        r2 = vcpu_get_gr(vcpu, inst.M29.r2);
        vcpu_set_itc(vcpu, r2);
}

void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r1;

        r1 = vcpu_get_itc(vcpu);
        vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
}

/**************************************************************************
  struct kvm_vcpu protection key register access routines
 **************************************************************************/

unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
{
        return ((unsigned long)ia64_get_pkr(reg));
}

void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
{
        ia64_set_pkr(reg, val);
}

unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
{
        union ia64_rr rr, rr1;

        rr.val = vcpu_get_rr(vcpu, ifa);
        rr1.val = 0;
        rr1.ps = rr.ps;
        rr1.rid = rr.rid;
        return (rr1.val);
}

/********************************
 * Moves to privileged registers
 ********************************/
unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
                                        unsigned long val)
{
        union ia64_rr oldrr, newrr;
        unsigned long rrval;
        struct exit_ctl_data *p = &vcpu->arch.exit_data;
        unsigned long psr;

        oldrr.val = vcpu_get_rr(vcpu, reg);
        newrr.val = val;
        vcpu->arch.vrr[reg >> VRN_SHIFT] = val;

        switch ((unsigned long)(reg >> VRN_SHIFT)) {
        case VRN6:
                vcpu->arch.vmm_rr = vrrtomrr(val);
                local_irq_save(psr);
                p->exit_reason = EXIT_REASON_SWITCH_RR6;
                vmm_transition(vcpu);
                local_irq_restore(psr);
                break;
        case VRN4:
                rrval = vrrtomrr(val);
                vcpu->arch.metaphysical_saved_rr4 = rrval;
                if (!is_physical_mode(vcpu))
                        ia64_set_rr(reg, rrval);
                break;
        case VRN0:
                rrval = vrrtomrr(val);
                vcpu->arch.metaphysical_saved_rr0 = rrval;
                if (!is_physical_mode(vcpu))
                        ia64_set_rr(reg, rrval);
                break;
        default:
                ia64_set_rr(reg, vrrtomrr(val));
                break;
        }

        return (IA64_NO_FAULT);
}
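
/*
 * Region-handling note (a sketch of why the cases above differ, assuming
 * VRN_SHIFT == 61 so reg >> VRN_SHIFT is the region number 0-7): rr0 and
 * rr4 are only written to hardware when the guest is in virtual mode,
 * because in physical-mode emulation those two regions carry the
 * metaphysical RIDs instead; rr6 is the region the VMM itself runs in,
 * so changing it requires a transition back to the host side.
 */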

void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r2;

        r3 = vcpu_get_gr(vcpu, inst.M42.r3);
        r2 = vcpu_get_gr(vcpu, inst.M42.r2);
        vcpu_set_rr(vcpu, r3, r2);
}

void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
}

void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r2;

        r3 = vcpu_get_gr(vcpu, inst.M42.r3);
        r2 = vcpu_get_gr(vcpu, inst.M42.r2);
        vcpu_set_pmc(vcpu, r3, r2);
}

void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r2;

        r3 = vcpu_get_gr(vcpu, inst.M42.r3);
        r2 = vcpu_get_gr(vcpu, inst.M42.r2);
        vcpu_set_pmd(vcpu, r3, r2);
}

void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
        u64 r3, r2;

        r3 = vcpu_get_gr(vcpu, inst.M42.r3);
        r2 = vcpu_get_gr(vcpu, inst.M42.r2);
        vcpu_set_pkr(vcpu, r3, r2);
}

void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_rr(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_pkr(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_dbr(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_ibr(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_pmc(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
{
        /* FIXME: This could get called as a result of a rsvd-reg fault */
        if (reg > (ia64_get_cpuid(3) & 0xff))
                return 0;
        else
                return ia64_get_cpuid(reg);
}

void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r3, r1;

        r3 = vcpu_get_gr(vcpu, inst.M43.r3);
        r1 = vcpu_get_cpuid(vcpu, r3);
        vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
}

void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
{
        VCPU(vcpu, tpr) = val;
        vcpu->arch.irq_check = 1;
}

unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long r2;

        r2 = vcpu_get_gr(vcpu, inst.M32.r2);
        VCPU(vcpu, vcr[inst.M32.cr3]) = r2;

        switch (inst.M32.cr3) {
        case 0:
                vcpu_set_dcr(vcpu, r2);
                break;
        case 1:
                vcpu_set_itm(vcpu, r2);
                break;
        case 66:
                vcpu_set_tpr(vcpu, r2);
                break;
        case 67:
                vcpu_set_eoi(vcpu, r2);
                break;
        default:
                break;
        }

        return 0;
}

unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
{
        unsigned long tgt = inst.M33.r1;
        unsigned long val;

        switch (inst.M33.cr3) {
        case 65:
                val = vcpu_get_ivr(vcpu);
                vcpu_set_gr(vcpu, tgt, val, 0);
                break;

        case 67:
                vcpu_set_gr(vcpu, tgt, 0L, 0);
                break;
        default:
                val = VCPU(vcpu, vcr[inst.M33.cr3]);
                vcpu_set_gr(vcpu, tgt, val, 0);
                break;
        }

        return 0;
}

1634
1635
1636
1637 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1638 {
1639
1640         unsigned long mask;
1641         struct kvm_pt_regs *regs;
1642         struct ia64_psr old_psr, new_psr;
1643
1644         old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1645
1646         regs = vcpu_regs(vcpu);
1647         /* We only support guest as:
1648          *  vpsr.pk = 0
1649          *  vpsr.is = 0
1650          * Otherwise panic
1651          */
1652         if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1653                 panic_vm(vcpu);
1654
1655         /*
1656          * For those IA64_PSR bits: id/da/dd/ss/ed/ia
1657          * Since these bits will become 0, after success execution of each
1658          * instruction, we will change set them to mIA64_PSR
1659          */
1660         VCPU(vcpu, vpsr) = val
1661                 & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1662                         IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1663
1664         if (!old_psr.i && (val & IA64_PSR_I)) {
1665                 /* vpsr.i 0->1 */
1666                 vcpu->arch.irq_check = 1;
1667         }
1668         new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1669
1670         /*
1671          * All vIA64_PSR bits go to mPSR (v->tf->tf_special.psr),
1672          * except for the following bits:
1673          *  ic/i/dt/si/rt/mc/it/bn/vm
1674          */
1675         mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1676                 IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1677                 IA64_PSR_VM;
1678
1679         regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1680
1681         check_mm_mode_switch(vcpu, old_psr, new_psr);
1682
1683         return;
1684 }
1685
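/*
 * cover: if interruption collection is off (vpsr.ic == 0), the current
 * frame marker is first preserved in the virtual IFS, matching what the
 * hardware does for cr.ifs.
 */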
1686 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1687 {
1688         struct ia64_psr vpsr;
1689
1690         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1691         vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1692
1693         if (!vpsr.ic)
1694                 VCPU(vcpu, ifs) = regs->cr_ifs;
1695         regs->cr_ifs = IA64_IFS_V;
1696         return (IA64_NO_FAULT);
1697 }
1698
1699
1700
1701 /**************************************************************************
1702   VCPU banked general register access routines
1703  **************************************************************************/
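/*
 * ia64 has two banks of the general registers r16-r31, selected by PSR.bn.
 * vcpu_bsw0()/vcpu_bsw1() emulate the bsw.0/bsw.1 instructions by swapping
 * those 16 registers between pt_regs and the per-vcpu save areas, and by
 * moving the matching 16 UNaT bits with the inline-asm helpers below.
 */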
1704 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1705         do {                                                            \
1706                 __asm__ __volatile__ (                                  \
1707                                 ";;extr.u %0 = %3,%6,16;;\n"            \
1708                                 "dep %1 = %0, %1, 0, 16;;\n"            \
1709                                 "st8 [%4] = %1\n"                       \
1710                                 "extr.u %0 = %2, 16, 16;;\n"            \
1711                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1712                                 "st8 [%5] = %3\n"                       \
1713                                 ::"r"(i), "r"(*b1unat), "r"(*b0unat),   \
1714                                 "r"(*runat), "r"(b1unat), "r"(runat),   \
1715                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1716         } while (0)
1717
1718 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1719 {
1720         unsigned long i;
1721
1722         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1723         unsigned long *r = &regs->r16;
1724         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1725         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1726         unsigned long *runat = &regs->eml_unat;
1727         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1728         unsigned long *b1unat = &VCPU(vcpu, vnat);
1729
1730
1731         if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1732                 for (i = 0; i < 16; i++) {
1733                         *b1++ = *r;
1734                         *r++ = *b0++;
1735                 }
1736                 vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1737                                 VMM_PT_REGS_R16_SLOT);
1738                 VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1739         }
1740 }
1741
1742 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)  \
1743         do {                                                            \
1744                 __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"    \
1745                                 "dep %1 = %0, %1, 16, 16;;\n"           \
1746                                 "st8 [%4] = %1\n"                       \
1747                                 "extr.u %0 = %2, 0, 16;;\n"             \
1748                                 "dep %3 = %0, %3, %6, 16;;\n"           \
1749                                 "st8 [%5] = %3\n"                       \
1750                                 ::"r"(i), "r"(*b0unat), "r"(*b1unat),   \
1751                                 "r"(*runat), "r"(b0unat), "r"(runat),   \
1752                                 "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
1753         } while (0)
1754
1755 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1756 {
1757         unsigned long i;
1758         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1759         unsigned long *r = &regs->r16;
1760         unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1761         unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1762         unsigned long *runat = &regs->eml_unat;
1763         unsigned long *b0unat = &VCPU(vcpu, vbnat);
1764         unsigned long *b1unat = &VCPU(vcpu, vnat);
1765
1766         if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1767                 for (i = 0; i < 16; i++) {
1768                         *b0++ = *r;
1769                         *r++ = *b1++;
1770                 }
1771                 vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1772                                 VMM_PT_REGS_R16_SLOT);
1773                 VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1774         }
1775 }
1776
1777
1778
1779
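/*
 * rfi: restore the guest PSR, IIP and (if valid) IFS from the virtual
 * interruption resources and switch to the register bank recorded in the
 * saved ipsr.bn.
 */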
1780 void vcpu_rfi(struct kvm_vcpu *vcpu)
1781 {
1782         unsigned long ifs, psr;
1783         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1784
1785         psr = VCPU(vcpu, ipsr);
1786         if (psr & IA64_PSR_BN)
1787                 vcpu_bsw1(vcpu);
1788         else
1789                 vcpu_bsw0(vcpu);
1790         vcpu_set_psr(vcpu, psr);
1791         ifs = VCPU(vcpu, ifs);
1792         if (ifs >> 63)
1793                 regs->cr_ifs = ifs;
1794         regs->cr_iip = VCPU(vcpu, iip);
1795 }
1796
1797
1798 /*
1799  * vpsr cannot track the guest PSR bits masked off below; those are read
1800  * from the machine cr.ipsr.  This function assembles the full guest PSR.
1801  */
1802
1803 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1804 {
1805         unsigned long mask;
1806         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1807
1808         mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1809                 IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1810         return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1811 }
1812
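/*
 * rsm/ssm clear or set the PSR bits selected by a 24-bit immediate, which
 * is reassembled from the M44 fields as (i << 23) | (i2 << 21) | imm.
 */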
1813 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1814 {
1815         unsigned long vpsr;
1816         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1817                                         | inst.M44.imm;
1818
1819         vpsr = vcpu_get_psr(vcpu);
1820         vpsr &= (~imm24);
1821         vcpu_set_psr(vcpu, vpsr);
1822 }
1823
1824 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1825 {
1826         unsigned long vpsr;
1827         unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1828                                 | inst.M44.imm;
1829
1830         vpsr = vcpu_get_psr(vcpu);
1831         vpsr |= imm24;
1832         vcpu_set_psr(vcpu, vpsr);
1833 }
1834
1835 /* Generate a bit mask.
1836  * Parameters:
1837  *  bit -- starting bit position
1838  *  len -- number of bits to set
1839  */
1840 #define MASK(bit,len)                                   \
1841 ({                                                      \
1842                 __u64   ret;                            \
1843                                                         \
1844                 __asm __volatile("dep %0=-1, r0, %1, %2"\
1845                                 : "=r" (ret):           \
1846                   "M" (bit),                            \
1847                   "M" (len));                           \
1848                 ret;                                    \
1849 })
1850
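/*
 * For example, MASK(0, 32) is 0x00000000ffffffff and MASK(32, 32) is
 * 0xffffffff00000000, so vcpu_set_psr_l() merges the caller-supplied lower
 * half of the PSR with the preserved upper half.
 */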
1851 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1852 {
1853         val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1854         vcpu_set_psr(vcpu, val);
1855 }
1856
1857 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1858 {
1859         unsigned long val;
1860
1861         val = vcpu_get_gr(vcpu, inst.M35.r2);
1862         vcpu_set_psr_l(vcpu, val);
1863 }
1864
1865 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1866 {
1867         unsigned long val;
1868
1869         val = vcpu_get_psr(vcpu);
1870         val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1871         vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1872 }
1873
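/*
 * An ia64 bundle is 16 bytes and holds three instruction slots; ipsr.ri is
 * the slot index, so stepping past slot 2 advances iip to the next bundle.
 */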
1874 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1875 {
1876         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1877         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1878         if (ipsr->ri == 2) {
1879                 ipsr->ri = 0;
1880                 regs->cr_iip += 16;
1881         } else
1882                 ipsr->ri++;
1883 }
1884
1885 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1886 {
1887         struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1888         struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1889
1890         if (ipsr->ri == 0) {
1891                 ipsr->ri = 2;
1892                 regs->cr_iip -= 16;
1893         } else
1894                 ipsr->ri--;
1895 }
1896
1897 /** Emulate a privileged operation.
1898  *
1899  * @param vcpu the virtual cpu
1900  * @param regs guest registers at the time of the virtualization fault
1901  *
1902  * The fault cause and opcode are read from VMX(vcpu, cause) and VMX(vcpu, opcode).
1903  */
1904
1905 void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
1906 {
1907         unsigned long status, cause, opcode;
1908         INST64 inst;
1909
1910         status = IA64_NO_FAULT;
1911         cause = VMX(vcpu, cause);
1912         opcode = VMX(vcpu, opcode);
1913         inst.inst = opcode;
1914         /*
1915          * Switch rr0 and rr4 to the actual virtual RIDs, which some
1916          * TLB-related instructions require.
1917          */
1918         prepare_if_physical_mode(vcpu);
1919
1920         switch (cause) {
1921         case EVENT_RSM:
1922                 kvm_rsm(vcpu, inst);
1923                 break;
1924         case EVENT_SSM:
1925                 kvm_ssm(vcpu, inst);
1926                 break;
1927         case EVENT_MOV_TO_PSR:
1928                 kvm_mov_to_psr(vcpu, inst);
1929                 break;
1930         case EVENT_MOV_FROM_PSR:
1931                 kvm_mov_from_psr(vcpu, inst);
1932                 break;
1933         case EVENT_MOV_FROM_CR:
1934                 kvm_mov_from_cr(vcpu, inst);
1935                 break;
1936         case EVENT_MOV_TO_CR:
1937                 kvm_mov_to_cr(vcpu, inst);
1938                 break;
1939         case EVENT_BSW_0:
1940                 vcpu_bsw0(vcpu);
1941                 break;
1942         case EVENT_BSW_1:
1943                 vcpu_bsw1(vcpu);
1944                 break;
1945         case EVENT_COVER:
1946                 vcpu_cover(vcpu);
1947                 break;
1948         case EVENT_RFI:
1949                 vcpu_rfi(vcpu);
1950                 break;
1951         case EVENT_ITR_D:
1952                 kvm_itr_d(vcpu, inst);
1953                 break;
1954         case EVENT_ITR_I:
1955                 kvm_itr_i(vcpu, inst);
1956                 break;
1957         case EVENT_PTR_D:
1958                 kvm_ptr_d(vcpu, inst);
1959                 break;
1960         case EVENT_PTR_I:
1961                 kvm_ptr_i(vcpu, inst);
1962                 break;
1963         case EVENT_ITC_D:
1964                 kvm_itc_d(vcpu, inst);
1965                 break;
1966         case EVENT_ITC_I:
1967                 kvm_itc_i(vcpu, inst);
1968                 break;
1969         case EVENT_PTC_L:
1970                 kvm_ptc_l(vcpu, inst);
1971                 break;
1972         case EVENT_PTC_G:
1973                 kvm_ptc_g(vcpu, inst);
1974                 break;
1975         case EVENT_PTC_GA:
1976                 kvm_ptc_ga(vcpu, inst);
1977                 break;
1978         case EVENT_PTC_E:
1979                 kvm_ptc_e(vcpu, inst);
1980                 break;
1981         case EVENT_MOV_TO_RR:
1982                 kvm_mov_to_rr(vcpu, inst);
1983                 break;
1984         case EVENT_MOV_FROM_RR:
1985                 kvm_mov_from_rr(vcpu, inst);
1986                 break;
1987         case EVENT_THASH:
1988                 kvm_thash(vcpu, inst);
1989                 break;
1990         case EVENT_TTAG:
1991                 kvm_ttag(vcpu, inst);
1992                 break;
1993         case EVENT_TPA:
1994                 status = kvm_tpa(vcpu, inst);
1995                 break;
1996         case EVENT_TAK:
1997                 kvm_tak(vcpu, inst);
1998                 break;
1999         case EVENT_MOV_TO_AR_IMM:
2000                 kvm_mov_to_ar_imm(vcpu, inst);
2001                 break;
2002         case EVENT_MOV_TO_AR:
2003                 kvm_mov_to_ar_reg(vcpu, inst);
2004                 break;
2005         case EVENT_MOV_FROM_AR:
2006                 kvm_mov_from_ar_reg(vcpu, inst);
2007                 break;
2008         case EVENT_MOV_TO_DBR:
2009                 kvm_mov_to_dbr(vcpu, inst);
2010                 break;
2011         case EVENT_MOV_TO_IBR:
2012                 kvm_mov_to_ibr(vcpu, inst);
2013                 break;
2014         case EVENT_MOV_TO_PMC:
2015                 kvm_mov_to_pmc(vcpu, inst);
2016                 break;
2017         case EVENT_MOV_TO_PMD:
2018                 kvm_mov_to_pmd(vcpu, inst);
2019                 break;
2020         case EVENT_MOV_TO_PKR:
2021                 kvm_mov_to_pkr(vcpu, inst);
2022                 break;
2023         case EVENT_MOV_FROM_DBR:
2024                 kvm_mov_from_dbr(vcpu, inst);
2025                 break;
2026         case EVENT_MOV_FROM_IBR:
2027                 kvm_mov_from_ibr(vcpu, inst);
2028                 break;
2029         case EVENT_MOV_FROM_PMC:
2030                 kvm_mov_from_pmc(vcpu, inst);
2031                 break;
2032         case EVENT_MOV_FROM_PKR:
2033                 kvm_mov_from_pkr(vcpu, inst);
2034                 break;
2035         case EVENT_MOV_FROM_CPUID:
2036                 kvm_mov_from_cpuid(vcpu, inst);
2037                 break;
2038         case EVENT_VMSW:
2039                 status = IA64_FAULT;
2040                 break;
2041         default:
2042                 break;
2043         }
2044         /* Only tpa and vmsw update 'status'; the rest leave IA64_NO_FAULT. */
2045         if (status == IA64_NO_FAULT && cause != EVENT_RFI)
2046                 vcpu_increment_iip(vcpu);
2047
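        /* Undo the rr0/rr4 switch made by prepare_if_physical_mode() above. */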
2048         recover_if_physical_mode(vcpu);
2049 }
2050
2051 void init_vcpu(struct kvm_vcpu *vcpu)
2052 {
2053         int i;
2054
2055         vcpu->arch.mode_flags = GUEST_IN_PHY;
2056         VMX(vcpu, vrr[0]) = 0x38;
2057         VMX(vcpu, vrr[1]) = 0x38;
2058         VMX(vcpu, vrr[2]) = 0x38;
2059         VMX(vcpu, vrr[3]) = 0x38;
2060         VMX(vcpu, vrr[4]) = 0x38;
2061         VMX(vcpu, vrr[5]) = 0x38;
2062         VMX(vcpu, vrr[6]) = 0x38;
2063         VMX(vcpu, vrr[7]) = 0x38;
2064         VCPU(vcpu, vpsr) = IA64_PSR_BN;
2065         VCPU(vcpu, dcr) = 0;
2066         /* pta.size must not be 0.  The minimum is 15 (32k) */
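        /* pta.size occupies bits 7:2, so 15 << 2 encodes a 2^15-byte VHPT. */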
2067         VCPU(vcpu, pta) = 15 << 2;
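        /*
         * Bit 16 is the mask bit in the itv/pmv/cmcv/lrr register layouts
         * (and mmi in tpr), so the 0x10000 values below start these vectors
         * off masked.
         */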
2068         VCPU(vcpu, itv) = 0x10000;
2069         VCPU(vcpu, itm) = 0;
2070         VMX(vcpu, last_itc) = 0;
2071
2072         VCPU(vcpu, lid) = VCPU_LID(vcpu);
2073         VCPU(vcpu, ivr) = 0;
2074         VCPU(vcpu, tpr) = 0x10000;
2075         VCPU(vcpu, eoi) = 0;
2076         VCPU(vcpu, irr[0]) = 0;
2077         VCPU(vcpu, irr[1]) = 0;
2078         VCPU(vcpu, irr[2]) = 0;
2079         VCPU(vcpu, irr[3]) = 0;
2080         VCPU(vcpu, pmv) = 0x10000;
2081         VCPU(vcpu, cmcv) = 0x10000;
2082         VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
2083         VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
2084         update_vhpi(vcpu, NULL_VECTOR);
2085         VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
2086
2087         for (i = 0; i < 4; i++)
2088                 VLSAPIC_INSVC(vcpu, i) = 0;
2089 }
2090
2091 void kvm_init_all_rr(struct kvm_vcpu *vcpu)
2092 {
2093         unsigned long psr;
2094
2095         local_irq_save(psr);
2096
2097         /* WARNING: virtual mode and physical mode must not coexist
2098          * within the same region.
2099          */
2100
2101         vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
2102         vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
2103
2104         if (is_physical_mode(vcpu)) {
2105                 if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
2106                         panic_vm(vcpu);
2107
2108                 ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
2109                 ia64_dv_serialize_data();
2110                 ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
2111                 ia64_dv_serialize_data();
2112         } else {
2113                 ia64_set_rr((VRN0 << VRN_SHIFT),
2114                                 vcpu->arch.metaphysical_saved_rr0);
2115                 ia64_dv_serialize_data();
2116                 ia64_set_rr((VRN4 << VRN_SHIFT),
2117                                 vcpu->arch.metaphysical_saved_rr4);
2118                 ia64_dv_serialize_data();
2119         }
2120         ia64_set_rr((VRN1 << VRN_SHIFT),
2121                         vrrtomrr(VMX(vcpu, vrr[VRN1])));
2122         ia64_dv_serialize_data();
2123         ia64_set_rr((VRN2 << VRN_SHIFT),
2124                         vrrtomrr(VMX(vcpu, vrr[VRN2])));
2125         ia64_dv_serialize_data();
2126         ia64_set_rr((VRN3 << VRN_SHIFT),
2127                         vrrtomrr(VMX(vcpu, vrr[VRN3])));
2128         ia64_dv_serialize_data();
2129         ia64_set_rr((VRN5 << VRN_SHIFT),
2130                         vrrtomrr(VMX(vcpu, vrr[VRN5])));
2131         ia64_dv_serialize_data();
2132         ia64_set_rr((VRN7 << VRN_SHIFT),
2133                         vrrtomrr(VMX(vcpu, vrr[VRN7])));
2134         ia64_dv_serialize_data();
2135         ia64_srlz_d();
2136         ia64_set_psr(psr);
2137 }
2138
2139 int vmm_entry(void)
2140 {
2141         struct kvm_vcpu *v;
2142         v = current_vcpu;
2143
2144         ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
2145                                                 0, 0, 0, 0, 0, 0);
2146         kvm_init_vtlb(v);
2147         kvm_init_vhpt(v);
2148         init_vcpu(v);
2149         kvm_init_all_rr(v);
2150         vmm_reset_entry();
2151
2152         return 0;
2153 }
2154
2155 void panic_vm(struct kvm_vcpu *v)
2156 {
2157         struct exit_ctl_data *p = &v->arch.exit_data;
2158
2159         p->exit_reason = EXIT_REASON_VM_PANIC;
2160         vmm_transition(v);
2161         /* Never returns. */
2162         while (1);
2163 }