/*
 * process.c: handle interruption inject for guests.
 * Copyright (c) 2005, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 *      Shaofan Li (Susie Li) <susie.li@intel.com>
 *      Xiaoyan Feng (Fleming Feng) <fleming.feng@intel.com>
 *      Xuefei Xu (Anthony Xu) <Anthony.xu@intel.com>
 *      Xiantao Zhang <xiantao.zhang@intel.com>
 */
#include "vcpu.h"

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/fpswa.h>
#include <asm/kregs.h>
#include <asm/tlb.h>

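/*
 * Interface to the host's FPSWA (Floating-Point Software Assist) EFI
 * driver; used by vmm_fp_emulate() below to service guest floating-point
 * faults and traps in software.
 */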
fpswa_interface_t *vmm_fpswa_interface;

#define IA64_VHPT_TRANS_VECTOR                  0x0000
#define IA64_INST_TLB_VECTOR                    0x0400
#define IA64_DATA_TLB_VECTOR                    0x0800
#define IA64_ALT_INST_TLB_VECTOR                0x0c00
#define IA64_ALT_DATA_TLB_VECTOR                0x1000
#define IA64_DATA_NESTED_TLB_VECTOR             0x1400
#define IA64_INST_KEY_MISS_VECTOR               0x1800
#define IA64_DATA_KEY_MISS_VECTOR               0x1c00
#define IA64_DIRTY_BIT_VECTOR                   0x2000
#define IA64_INST_ACCESS_BIT_VECTOR             0x2400
#define IA64_DATA_ACCESS_BIT_VECTOR             0x2800
#define IA64_BREAK_VECTOR                       0x2c00
#define IA64_EXTINT_VECTOR                      0x3000
#define IA64_PAGE_NOT_PRESENT_VECTOR            0x5000
#define IA64_KEY_PERMISSION_VECTOR              0x5100
#define IA64_INST_ACCESS_RIGHTS_VECTOR          0x5200
#define IA64_DATA_ACCESS_RIGHTS_VECTOR          0x5300
#define IA64_GENEX_VECTOR                       0x5400
#define IA64_DISABLED_FPREG_VECTOR              0x5500
#define IA64_NAT_CONSUMPTION_VECTOR             0x5600
#define IA64_SPECULATION_VECTOR                 0x5700 /* UNUSED */
#define IA64_DEBUG_VECTOR                       0x5900
#define IA64_UNALIGNED_REF_VECTOR               0x5a00
#define IA64_UNSUPPORTED_DATA_REF_VECTOR        0x5b00
#define IA64_FP_FAULT_VECTOR                    0x5c00
#define IA64_FP_TRAP_VECTOR                     0x5d00
#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR     0x5e00
#define IA64_TAKEN_BRANCH_TRAP_VECTOR           0x5f00
#define IA64_SINGLE_STEP_TRAP_VECTOR            0x6000

/* SDM vol2 5.5 - IVA based interruption handling */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
                        IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT |      \
                        IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT)

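/*
 * Break immediates used by the guest firmware to request PAL/SAL
 * services from the VMM (see kvm_ia64_handle_break() below).
 */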
#define DOMN_PAL_REQUEST    0x110000
#define DOMN_SAL_REQUEST    0x110001

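/*
 * Map an interruption vector number (the "vec" argument of
 * reflect_interruption()) to its offset within the guest IVT.
 * Entries 0-19 cover the 0x400-granular vectors (0x0000-0x4c00);
 * from entry 20 on, vectors are 0x100 apart starting at 0x5000.
 */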
static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
        0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
        0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
        0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
        0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
        0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
        0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
        0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
};

static void collect_interruption(struct kvm_vcpu *vcpu)
{
        u64 ipsr;
        u64 vdcr;
        u64 vifs;
        unsigned long vpsr;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        vpsr = vcpu_get_psr(vcpu);
        vcpu_bsw0(vcpu);
        if (vpsr & IA64_PSR_IC) {

                /* Sync mpsr id/da/dd/ss/ed bits to vipsr, since after the
                 * guest does rfi we still want these bits set in mpsr.
                 */

                ipsr = regs->cr_ipsr;
                vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
                                        | IA64_PSR_DD | IA64_PSR_SS
                                        | IA64_PSR_ED));
                vcpu_set_ipsr(vcpu, vpsr);

                /* Currently, for a trap, we do not advance IIP to the next
                 * instruction; we assume the caller has already set up IIP
                 * correctly.
                 */

                vcpu_set_iip(vcpu, regs->cr_iip);

                /* set vifs.v to zero */
                vifs = VCPU(vcpu, ifs);
                vifs &= ~IA64_IFS_V;
                vcpu_set_ifs(vcpu, vifs);

                vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
        }

        vdcr = VCPU(vcpu, dcr);

        /* Set guest psr:
         * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
         * be: set to the value of dcr.be
         * pp: set to the value of dcr.pp
         */
        vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
        vpsr |= (vdcr & IA64_DCR_BE);

        /* The VDCR pp bit position differs from the VPSR pp bit */
        if (vdcr & IA64_DCR_PP)
                vpsr |= IA64_PSR_PP;
        else
                vpsr &= ~IA64_PSR_PP;

        vcpu_set_psr(vcpu, vpsr);
}

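/*
 * Inject an interruption into the guest: collect the interruption
 * state, then redirect execution to the guest IVT entry at offset "vec".
 */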
void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
{
        u64 viva;
        struct kvm_pt_regs *regs;
        union ia64_isr pt_isr;

        regs = vcpu_regs(vcpu);

        /* clear cr.isr.ir (incomplete register frame) */
        pt_isr.val = VMX(vcpu, cr_isr);
        pt_isr.ir = 0;
        VMX(vcpu, cr_isr) = pt_isr.val;

        collect_interruption(vcpu);

        viva = vcpu_get_iva(vcpu);
        regs->cr_iip = viva + vec;
}

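/*
 * Build the ITIR value for a fault at "ifa": page size and region id
 * are taken from the region register covering the faulting address.
 */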
static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
{
        union ia64_rr rr, rr1;

        rr.val = vcpu_get_rr(vcpu, ifa);
        rr1.val = 0;
        rr1.ps = rr.ps;
        rr1.rid = rr.rid;
        return rr1.val;
}

/*
 * Set vIFA & vITIR & vIHA, when vPSR.ic = 1
 * Parameter:
 *  set_ifa: if true, set vIFA
 *  set_itir: if true, set vITIR
 *  set_iha: if true, set vIHA
 */
void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
                int set_ifa, int set_itir, int set_iha)
{
        long vpsr;
        u64 value;

        vpsr = VCPU(vcpu, vpsr);
        /* Vol2, Table 8-1 */
        if (vpsr & IA64_PSR_IC) {
                if (set_ifa)
                        vcpu_set_ifa(vcpu, vadr);
                if (set_itir) {
                        value = vcpu_get_itir_on_fault(vcpu, vadr);
                        vcpu_set_itir(vcpu, value);
                }

                if (set_iha) {
                        value = vcpu_thash(vcpu, vadr);
                        vcpu_set_iha(vcpu, value);
                }
        }
}

/*
 * Data TLB Fault
 *  @ Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
}

/*
 * Instruction TLB Fault
 *  @ Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
}

/*
 * Data Nested TLB Fault
 *  @ Data Nested TLB Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void nested_dtlb(struct kvm_vcpu *vcpu)
{
        inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
}

/*
 * Alternate Data TLB Fault
 *  @ Alternate Data TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
{
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
}

/*
 * Alternate Instruction TLB Fault
 *  @ Alternate Instruction TLB vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
{
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
}

/* Deal with:
 *  VHPT Translation Vector
 */
static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR, IHA */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
        inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
}

/*
 * VHPT Instruction Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
        _vhpt_fault(vcpu, vadr);
}

/*
 * VHPT Data Fault
 *  @ VHPT Translation vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
{
        _vhpt_fault(vcpu, vadr);
}

/*
 * Deal with:
 *  General Exception vector
 */
void _general_exception(struct kvm_vcpu *vcpu)
{
        inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
}

/*
 * Illegal Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_op(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Illegal Dependency Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void illegal_dep(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Reserved Register/Field Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rsv_reg_field(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Privileged Operation Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_op(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Unimplemented Data Address Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void unimpl_daddr(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/*
 * Privileged Register Fault
 *  @ General Exception Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void privilege_reg(struct kvm_vcpu *vcpu)
{
        _general_exception(vcpu);
}

/* Deal with
 *  Nat consumption vector
 * Parameter:
 *  vadr: used only when t is DATA or INSTRUCTION
 */
static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
                                                enum tlb_miss_type t)
{
        /* If vPSR.ic && t == DATA/INST, IFA */
        if (t == DATA || t == INSTRUCTION) {
                /* IFA */
                set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
        }

        inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
}

/*
 * Instruction Nat Page Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
        _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
}

/*
 * Register Nat Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void rnat_consumption(struct kvm_vcpu *vcpu)
{
        _nat_consumption_fault(vcpu, 0, REGISTER);
}

/*
 * Data Nat Page Consumption Fault
 *  @ Nat Consumption Vector
 * Refer to SDM Vol2 Table 5-6 & 8-1
 */
void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
{
        _nat_consumption_fault(vcpu, vadr, DATA);
}

/* Deal with
 *  Page not present vector
 */
static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
}

void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
        __page_not_present(vcpu, vadr);
}

void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
{
        __page_not_present(vcpu, vadr);
}

/* Deal with
 *  Data access rights vector
 */
void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
{
        /* If vPSR.ic, IFA, ITIR */
        set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
        inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
}

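/*
 * Emulate a guest floating-point operation through the host FPSWA EFI
 * driver.  Region register 7 is temporarily switched to the host's
 * mapping so the driver runs with the translations it expects.
 */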
fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
                unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
                unsigned long *ifs, struct kvm_pt_regs *regs)
{
        fp_state_t fp_state;
        fpswa_ret_t ret;
        struct kvm_vcpu *vcpu = current_vcpu;

        uint64_t old_rr7 = ia64_get_rr(7UL << 61);

        if (!vmm_fpswa_interface)
                return (fpswa_ret_t) {-1, 0, 0, 0};

        /*
         * Just let the fpswa driver use the hardware fp registers.
         * No fp register is valid in memory.
         */
        memset(&fp_state, 0, sizeof(fp_state_t));

        /*
         * unsigned long (*EFI_FPSWA) (
         *      unsigned long    trap_type,
         *      void             *Bundle,
         *      unsigned long    *pipsr,
         *      unsigned long    *pfsr,
         *      unsigned long    *pisr,
         *      unsigned long    *ppreds,
         *      unsigned long    *pifs,
         *      void             *fp_state);
         */
        /* Call the host fpswa interface directly to virtualize the
         * guest's fpswa request.
         */
        ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
        ia64_srlz_d();

        ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
                        ipsr, fpsr, isr, pr, ifs, &fp_state);
        ia64_set_rr(7UL << 61, old_rr7);
        ia64_srlz_d();
        return ret;
}

/*
 * Handle floating-point assist faults and traps for the domain.
 */
unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
                                        unsigned long isr)
{
        struct kvm_vcpu *v = current_vcpu;
        IA64_BUNDLE bundle;
        unsigned long fault_ip;
        fpswa_ret_t ret;

        fault_ip = regs->cr_iip;
        /*
         * When an FP trap occurs, the trapping instruction has already
         * completed.  If ipsr.ri == 0, the trapping instruction is in
         * the previous bundle.
         */
        if (!fp_fault && (ia64_psr(regs)->ri == 0))
                fault_ip -= 16;

        if (fetch_code(v, fault_ip, &bundle))
                return -EAGAIN;

        if (!bundle.i64[0] && !bundle.i64[1])
                return -EACCES;

        ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
                        &isr, &regs->pr, &regs->cr_ifs, regs);
        return ret.status;
}

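/*
 * Reflect an interruption back into the guest.  FP faults and traps
 * (vectors 32/33) are first offered to the FPSWA emulation above; all
 * other vectors are injected through the guest IVT.
 */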
void reflect_interruption(u64 ifa, u64 isr, u64 iim,
                u64 vec, struct kvm_pt_regs *regs)
{
        u64 vector;
        int status;
        struct kvm_vcpu *vcpu = current_vcpu;
        u64 vpsr = VCPU(vcpu, vpsr);

        vector = vec2off[vec];

        if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
                panic_vm(vcpu);
                return;
        }

        switch (vec) {
        case 32:        /* IA64_FP_FAULT_VECTOR */
                status = vmm_handle_fpu_swa(1, regs, isr);
                if (!status) {
                        vcpu_increment_iip(vcpu);
                        return;
                } else if (status == -EAGAIN)
                        return;
                break;
        case 33:        /* IA64_FP_TRAP_VECTOR */
                status = vmm_handle_fpu_swa(0, regs, isr);
                if (!status)
                        return;
                else if (status == -EAGAIN) {
                        vcpu_decrement_iip(vcpu);
                        return;
                }
                break;
        }

        VCPU(vcpu, isr) = isr;
        VCPU(vcpu, iipa) = regs->cr_iip;
        if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
                VCPU(vcpu, iim) = iim;
        else
                set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);

        inject_guest_interruption(vcpu, vector);
}

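/*
 * PAL/SAL hypercall plumbing: stash the guest's call arguments in the
 * exit data, exit to the host to service the call, then write the
 * results back into the guest's registers.
 */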
static void set_pal_call_data(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;

        /* FIXME: For both the static and stacked conventions, the
         * firmware puts the parameters in gr28-gr31 before breaking
         * into the vmm!
         */
        p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
        p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
        p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
        p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
        p->exit_reason = EXIT_REASON_PAL_CALL;
}

static void set_pal_call_result(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;

        if (p->exit_reason == EXIT_REASON_PAL_CALL) {
                vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
                vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
                vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
                vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
        } else
                panic_vm(vcpu);
}

static void set_sal_call_data(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;

        p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
        p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
        p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
        p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
        p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
        p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
        p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
        p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
        p->exit_reason = EXIT_REASON_SAL_CALL;
}

static void set_sal_call_result(struct kvm_vcpu *vcpu)
{
        struct exit_ctl_data *p = &vcpu->arch.exit_data;

        if (p->exit_reason == EXIT_REASON_SAL_CALL) {
                vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
                vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
                vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
                vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
        } else
                panic_vm(vcpu);
}

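/*
 * Handle a break fault.  Breaks with the DOMN_PAL/SAL_REQUEST
 * immediates issued from cpl 0 are treated as hypercalls; anything
 * else is reflected to the guest as a break fault (vector 11,
 * offset 0x2c00).
 */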
void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
                unsigned long isr, unsigned long iim)
{
        struct kvm_vcpu *v = current_vcpu;

        if (ia64_psr(regs)->cpl == 0) {
                /* Allow hypercalls only when cpl == 0.  */
                if (iim == DOMN_PAL_REQUEST) {
                        set_pal_call_data(v);
                        vmm_transition(v);
                        set_pal_call_result(v);
                        vcpu_increment_iip(v);
                        return;
                } else if (iim == DOMN_SAL_REQUEST) {
                        set_sal_call_data(v);
                        vmm_transition(v);
                        set_sal_call_result(v);
                        vcpu_increment_iip(v);
                        return;
                }
        }
        reflect_interruption(ifa, isr, iim, 11, regs);
}

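/*
 * Check for a deliverable pending interrupt: if the highest pending
 * vector is unmasked and vpsr.i is set, reflect an external interrupt
 * to the guest; otherwise just keep VHPI up to date.
 */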
void check_pending_irq(struct kvm_vcpu *vcpu)
{
        int mask, h_pending, h_inservice;
        u64 isr;
        unsigned long vpsr;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        h_pending = highest_pending_irq(vcpu);
        if (h_pending == NULL_VECTOR) {
                update_vhpi(vcpu, NULL_VECTOR);
                return;
        }
        h_inservice = highest_inservice_irq(vcpu);

        vpsr = VCPU(vcpu, vpsr);
        mask = irq_masked(vcpu, h_pending, h_inservice);
        if ((vpsr & IA64_PSR_I) && mask == IRQ_NO_MASKED) {
                isr = vpsr & IA64_PSR_RI;
                update_vhpi(vcpu, h_pending);
                reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
        } else if (mask == IRQ_MASKED_BY_INSVC) {
                if (VCPU(vcpu, vhpi))
                        update_vhpi(vcpu, NULL_VECTOR);
        } else {
                /* masked by vpsr.i or vtpr. */
                update_vhpi(vcpu, h_pending);
        }
}

static void generate_exirq(struct kvm_vcpu *vcpu)
{
        unsigned vpsr;
        uint64_t isr;

        struct kvm_pt_regs *regs = vcpu_regs(vcpu);

        vpsr = VCPU(vcpu, vpsr);
        isr = vpsr & IA64_PSR_RI;
        if (!(vpsr & IA64_PSR_IC))
                panic_vm(vcpu);
        reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

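/*
 * Compare the highest pending interrupt (VHPI) against the masking
 * threshold derived from vpsr.i, vtpr.mmi and vtpr.mic, and inject an
 * external interrupt if it wins.
 */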
void vhpi_detection(struct kvm_vcpu *vcpu)
{
        uint64_t threshold, vhpi;
        union ia64_tpr vtpr;
        struct ia64_psr vpsr;

        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        vtpr.val = VCPU(vcpu, tpr);

        threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
        vhpi = VCPU(vcpu, vhpi);
        if (vhpi > threshold) {
                /* interrupt activated */
                generate_exirq(vcpu);
        }
}

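/*
 * Run on every return to the guest: fire the virtual interval timer
 * if ITC has passed ITM, then deliver any newly pending interrupts.
 */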
void leave_hypervisor_tail(void)
{
        struct kvm_vcpu *v = current_vcpu;

        if (VMX(v, timer_check)) {
                VMX(v, timer_check) = 0;
                if (VMX(v, itc_check)) {
                        if (vcpu_get_itc(v) > VCPU(v, itm)) {
                                if (!(VCPU(v, itv) & (1 << 16))) {
                                        vcpu_pend_interrupt(v, VCPU(v, itv)
                                                        & 0xff);
                                        VMX(v, itc_check) = 0;
                                } else {
                                        v->arch.timer_pending = 1;
                                }
                                VMX(v, last_itc) = VCPU(v, itm) + 1;
                        }
                }
        }

        rmb();
        if (v->arch.irq_new_pending) {
                v->arch.irq_new_pending = 0;
                VMX(v, irq_check) = 0;
                check_pending_irq(v);
                return;
        }
        if (VMX(v, irq_check)) {
                VMX(v, irq_check) = 0;
                vhpi_detection(v);
        }
}

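/*
 * Speculative load (ld.s) that misses: set psr.ed on the interrupted
 * context so the load completes with a deferred exception (the target
 * register gets a NaT) instead of raising the fault.
 */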
static inline void handle_lds(struct kvm_pt_regs *regs)
{
        regs->cr_ipsr |= IA64_PSR_ED;
}

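/*
 * TLB miss while the guest runs in physical (metaphysical) mode:
 * insert an identity mapping with write-back attribute into the VHPT.
 */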
void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
{
        unsigned long pte;
        union ia64_rr rr;

        rr.val = ia64_get_rr(vadr);
        pte = vadr & _PAGE_PPN_MASK;
        pte = pte | PHY_PAGE_WB;
        thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
}

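/*
 * Main TLB-miss handler.  Resolve the miss from the shadow TLB if
 * possible; otherwise walk the guest VHPT and either install the
 * translation or reflect the appropriate fault to the guest.
 */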
void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs)
{
        unsigned long vpsr;
        int type;

        u64 vhpt_adr, gppa, pteval, rr, itir;
        union ia64_isr misr;
        union ia64_pta vpta;
        struct thash_data *data;
        struct kvm_vcpu *v = current_vcpu;

        vpsr = VCPU(v, vpsr);
        misr.val = VMX(v, cr_isr);

        type = vec;

        if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
                if (vec == 2) {
                        if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
                                emulate_io_inst(v, ((vadr << 1) >> 1), 4);
                                return;
                        }
                }
                physical_tlb_miss(v, vadr, type);
                return;
        }
        data = vtlb_lookup(v, vadr, type);
        if (data) {
                if (type == D_TLB) {
                        gppa = (vadr & ((1UL << data->ps) - 1))
                                + (data->ppn >> (data->ps - 12) << data->ps);
                        if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
                                if (data->pl >= ((regs->cr_ipsr >>
                                                IA64_PSR_CPL0_BIT) & 3))
                                        emulate_io_inst(v, gppa, data->ma);
                                else {
                                        vcpu_set_isr(v, misr.val);
                                        data_access_rights(v, vadr);
                                }
                                return;
                        }
                }
                thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);

        } else if (type == D_TLB) {
                if (misr.sp) {
                        handle_lds(regs);
                        return;
                }

                rr = vcpu_get_rr(v, vadr);
                itir = rr & (RR_RID_MASK | RR_PS_MASK);

                if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
                        if (vpsr & IA64_PSR_IC) {
                                vcpu_set_isr(v, misr.val);
                                alt_dtlb(v, vadr);
                        } else {
                                nested_dtlb(v);
                        }
                        return;
                }

                vpta.val = vcpu_get_pta(v);
                /* avoid recursively walking (short format) VHPT */

                vhpt_adr = vcpu_thash(v, vadr);
                if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
                        /* VHPT successfully read.  */
                        if (!(pteval & _PAGE_P)) {
                                if (vpsr & IA64_PSR_IC) {
                                        vcpu_set_isr(v, misr.val);
                                        dtlb_fault(v, vadr);
                                } else {
                                        nested_dtlb(v);
                                }
                        } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
                                thash_purge_and_insert(v, pteval, itir,
                                                                vadr, D_TLB);
                        } else if (vpsr & IA64_PSR_IC) {
                                vcpu_set_isr(v, misr.val);
                                dtlb_fault(v, vadr);
                        } else {
                                nested_dtlb(v);
                        }
                } else {
                        /* Can't read VHPT.  */
                        if (vpsr & IA64_PSR_IC) {
                                vcpu_set_isr(v, misr.val);
                                dvhpt_fault(v, vadr);
                        } else {
                                nested_dtlb(v);
                        }
                }
        } else if (type == I_TLB) {
                if (!(vpsr & IA64_PSR_IC))
                        misr.ni = 1;
                if (!vhpt_enabled(v, vadr, INST_REF)) {
                        vcpu_set_isr(v, misr.val);
                        alt_itlb(v, vadr);
                        return;
                }

                vpta.val = vcpu_get_pta(v);

                vhpt_adr = vcpu_thash(v, vadr);
                if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
                        /* VHPT successfully read.  */
                        if (pteval & _PAGE_P) {
                                if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
                                        vcpu_set_isr(v, misr.val);
                                        itlb_fault(v, vadr);
                                        return;
                                }
                                rr = vcpu_get_rr(v, vadr);
                                itir = rr & (RR_RID_MASK | RR_PS_MASK);
                                thash_purge_and_insert(v, pteval, itir,
                                                        vadr, I_TLB);
                        } else {
                                vcpu_set_isr(v, misr.val);
                                inst_page_not_present(v, vadr);
                        }
                } else {
                        vcpu_set_isr(v, misr.val);
                        ivhpt_fault(v, vadr);
                }
        }
}

void kvm_vexirq(struct kvm_vcpu *vcpu)
{
        u64 vpsr, isr;
        struct kvm_pt_regs *regs;

        regs = vcpu_regs(vcpu);
        vpsr = VCPU(vcpu, vpsr);
        isr = vpsr & IA64_PSR_RI;
        reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
}

void kvm_ia64_handle_irq(struct kvm_vcpu *v)
{
        struct exit_ctl_data *p = &v->arch.exit_data;
        long psr;

        local_irq_save(psr);
        p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
        vmm_transition(v);
        local_irq_restore(psr);

        VMX(v, timer_check) = 1;
}

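/*
 * Purge entries for a remote ptc.ga request: temporarily adopt the
 * requester's region 0 mapping so the purge hits the right RID, then
 * restore our own state.
 */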
static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
{
        u64 oldrid, moldrid, oldpsbits, vaddr;
        struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];

        vaddr = p->vaddr;

        oldrid = VMX(v, vrr[0]);
        VMX(v, vrr[0]) = p->rr;
        oldpsbits = VMX(v, psbits[0]);
        VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
        moldrid = ia64_get_rr(0x0);
        ia64_set_rr(0x0, vrrtomrr(p->rr));
        ia64_srlz_d();

        vaddr = PAGEALIGN(vaddr, p->ps);
        thash_purge_entries_remote(v, vaddr, p->ps);

        VMX(v, vrr[0]) = oldrid;
        VMX(v, psbits[0]) = oldpsbits;
        ia64_set_rr(0x0, moldrid);
        ia64_dv_serialize_data();
}

static void vcpu_do_resume(struct kvm_vcpu *vcpu)
{
        /* Re-initialize the VHPT and VTLB on resume */
        vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
        thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
        vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
        thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);

        ia64_set_pta(vcpu->arch.vhpt.pta.val);
}

static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
{
        if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
                vcpu_do_resume(vcpu);
                return;
        }

        if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
                thash_purge_all(vcpu);
                return;
        }

        if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
                while (vcpu->arch.ptc_g_count > 0)
                        ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
        }
}

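/*
 * Exit to the host and back: save the guest's VPD state via the PAL
 * virtualization services, bounce through the trampoline to host
 * context, restore the VPD on return, then service any resume
 * requests posted while we were out.
 */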
void vmm_transition(struct kvm_vcpu *vcpu)
{
        ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
                        0, 0, 0, 0, 0, 0);
        vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
        ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
                                                0, 0, 0, 0, 0, 0);
        kvm_do_resume_op(vcpu);
}