/*
 * mmio.c: MMIO emulation components.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
 *
 * Copyright (c) 2007 Intel Corporation KVM support.
 *  Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
 *  Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/kvm_host.h>

#include "vcpu.h"
static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
{
	VLSAPIC_XTP(v) = val;
}
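/*
 * LSAPIC offsets within the guest's Processor Interrupt Block (PIB).
 * The lower half of the PIB (offset bit 20 clear) is the interrupt
 * delivery area targeted by IPI stores; the INTA byte and the external
 * task priority (XTP) register sit in the upper half at the fixed
 * offsets below.
 */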
#define PIB_LOW_HALF(ofst)	!(ofst & (1 << 20))
#define PIB_OFST_INTA		0x1E0000
#define PIB_OFST_XTP		0x1E0008
/*
 * execute write IPI op.
 */
static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
					uint64_t addr, uint64_t data)
{
	struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
	unsigned long psr;

	local_irq_save(psr);

	p->exit_reason = EXIT_REASON_IPI;
	p->u.ipi_data.addr.val = addr;
	p->u.ipi_data.data.val = data;
	vmm_transition(current_vcpu);

	local_irq_restore(psr);
}
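/*
 * Handle a guest store into the PIB range.  One-byte stores at the XTP
 * offset update the virtual XTP; eight-byte stores into the lower half
 * are IPI requests and are forwarded to vlsapic_write_ipi(); anything
 * else is an undefined access.
 */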
void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
			unsigned long length, unsigned long val)
{
	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		/*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
		panic_vm(v);
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			vlsapic_write_xtp(v, val);
		} else {
			/*panic_domain(NULL,
			"Undefined write on PIB XTP\n");*/
			panic_vm(v);
		}
		break;
	default:
		if (PIB_LOW_HALF(addr)) {
			/* lower half */
			if (length != 8)
				/*panic_domain(NULL,
				"Can't LHF write with size %ld!\n",
				length);*/
				panic_vm(v);
			else
				vlsapic_write_ipi(v, addr, val);
		} else {   /* upper half
			printk("IPI-UHF write %lx\n", addr);*/
			panic_vm(v);
		}
		break;
	}
}
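/*
 * Handle a guest load from the PIB range.  A one-byte INTA read is
 * silently ignored (there is no virtual i8259 to acknowledge), and a
 * one-byte XTP read returns the current virtual XTP value; anything
 * else is an undefined access.
 */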
unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
		unsigned long length)
{
	uint64_t result = 0;

	addr &= (PIB_SIZE - 1);

	switch (addr) {
	case PIB_OFST_INTA:
		if (length == 1)	/* 1 byte load */
			;	/* There is no i8259, there is no INTA access */
		else
			/*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
			panic_vm(v);
		break;
	case PIB_OFST_XTP:
		if (length == 1) {
			result = VLSAPIC_XTP(v);
			/* printk("read xtp %lx\n", result); */
		} else {
			/*panic_domain(NULL,
			"Undefined read on PIB XTP\n");*/
			panic_vm(v);
		}
		break;
	default:
		panic_vm(v);
		break;
	}

	return result;
}
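/*
 * Perform one MMIO access on behalf of the guest.  Accesses that fall
 * inside the PIB range are handled by the LSAPIC emulation above;
 * everything else is packed into an ioreq in the per-vcpu exit_ctl_data
 * and handed to the VMM via vmm_transition(), with the response copied
 * back into *dest for reads.
 */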
static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
					u16 s, int ma, int dir)
{
	unsigned long iot;
	struct exit_ctl_data *p = &vcpu->arch.exit_data;
	unsigned long psr;

	iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);

	local_irq_save(psr);

	/*Intercept the access for PIB range*/
	if (iot == GPFN_PIB) {
		if (!dir)
			lsapic_write(vcpu, src_pa, s, *dest);
		else
			*dest = lsapic_read(vcpu, src_pa, s);
		goto out;
	}
	p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
	p->u.ioreq.addr = src_pa;
	p->u.ioreq.size = s;
	p->u.ioreq.dir = dir;
	if (dir == IOREQ_WRITE)
		p->u.ioreq.data = *dest;
	p->u.ioreq.state = STATE_IOREQ_READY;
	vmm_transition(vcpu);

	if (p->u.ioreq.state == STATE_IORESP_READY) {
		if (dir == IOREQ_READ)
			/* it's necessary to ensure zero extending */
			*dest = p->u.ioreq.data & (~0UL >> (64 - (s * 8)));
	} else
		panic_vm(vcpu);
out:
	local_irq_restore(psr);
}
/*
   dir 1: read 0:write
   inst_type 0:integer 1:floating point
 */
#define SL_INTEGER	0	/* store/load integer */
#define SL_FLOATING	1	/* store/load floating */
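/*
 * Emulate the memory instruction that faulted on an MMIO address:
 * refetch the bundle at cr.iip, decode the M-unit load/store in the
 * current slot to get direction, size and source/target registers,
 * apply any base-register update, issue the access through
 * mmio_access(), and finally advance the guest IP past the instruction.
 */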
void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
{
	struct kvm_pt_regs *regs;
	IA64_BUNDLE bundle;
	int slot, dir = 0;
	int inst_type = -1;
	u16 size = 0;
	u64 data, slot1a, slot1b, temp, update_reg;
	s32 imm;
	INST64 inst;

	regs = vcpu_regs(vcpu);

	if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
		/* if fetch code fail, return and try again */
		return;
	}

	slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
	if (!slot)
		inst.inst = bundle.slot0;
	else if (slot == 1) {
		/* slot 1 straddles the two bundle halves */
		slot1a = bundle.slot1a;
		slot1b = bundle.slot1b;
		inst.inst = slot1a + (slot1b << 18);
	} else if (slot == 2)
		inst.inst = bundle.slot2;
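	/*
	 * For the M-format opcodes below, the low two bits of the x6
	 * opcode extension give the access size as a power of two
	 * (0 = 1 byte ... 3 = 8 bytes), and the remaining bits select
	 * between the load and store forms; post-increment variants
	 * additionally write the updated address back to r3.
	 */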
	/* Integer Load/Store */
	if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
		inst_type = SL_INTEGER;
		size = (inst.M1.x6 & 0x3);
		if ((inst.M1.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M4.r2);
		} else if ((inst.M1.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
		}
	} else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
		/* Integer Load + Reg update */
		inst_type = SL_INTEGER;
		dir = IOREQ_READ;
		size = (inst.M2.x6 & 0x3);
		temp = vcpu_get_gr(vcpu, inst.M2.r3);
		update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
		temp += update_reg;
		vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
	} else if (inst.M3.major == 5) {
		/*Integer Load/Store + Imm update*/
		inst_type = SL_INTEGER;
		size = (inst.M3.x6 & 0x3);
		if ((inst.M5.x6 >> 2) > 0xb) {
			/*write*/
			dir = IOREQ_WRITE;
			data = vcpu_get_gr(vcpu, inst.M5.r2);
			temp = vcpu_get_gr(vcpu, inst.M5.r3);
			imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
				(inst.M5.imm7 << 23);
			temp += imm >> 23;	/* sign-extended 9-bit immediate */
			vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
		} else if ((inst.M3.x6 >> 2) < 0xb) {
			/*read*/
			dir = IOREQ_READ;
			temp = vcpu_get_gr(vcpu, inst.M3.r3);
			imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
				(inst.M3.imm7 << 23);
			temp += imm >> 23;
			vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
		}
	} else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
			&& inst.M9.m == 0 && inst.M9.x == 0) {
		/* Floating-point spill */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
		/*
		 * The 16-byte spill image holds the 64-bit significand in
		 * the low quadword and the sign/exponent bits in the high
		 * quadword, so the upper part is written separately here.
		 * Write high word. FIXME: this is a kludge!
		 */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
		/* Floating-point spill + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);

		/* Write high word. FIXME: this is a kludge! */
		v.u.bits[1] &= 0x3ffff;
		mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
		data = v.u.bits[0];
		size = 3;
	} else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
		/* Floating-point stf8 + Imm update */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_WRITE;
		size = 3;
		vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
		data = v.u.bits[0]; /* Significand. */
		temp = vcpu_get_gr(vcpu, inst.M10.r3);
		imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
			(inst.M10.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
	} else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
			&& inst.M15.x6 <= 0x2f) {
		/*
		 * lfetch + Imm update: nothing to fetch from MMIO space,
		 * just apply the base-register update and move on.
		 */
		temp = vcpu_get_gr(vcpu, inst.M15.r3);
		imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
			(inst.M15.imm7 << 23);
		temp += imm >> 23;
		vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);

		vcpu_increment_iip(vcpu);
		return;
	} else if (inst.M12.major == 6 && inst.M12.m == 1
			&& inst.M12.x == 1 && inst.M12.x6 == 1) {
		/* Floating-point Load Pair + Imm ldfp8 M12 */
		struct ia64_fpreg v;

		inst_type = SL_FLOATING;
		dir = IOREQ_READ;
		size = 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;	/* exponent for an 8-byte integer load */
		vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
		padr += 8;
		mmio_access(vcpu, padr, &data, size, ma, dir);
		v.u.bits[0] = data;
		v.u.bits[1] = 0x1003E;
		vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
		padr += 8;
		vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
		vcpu_increment_iip(vcpu);
		return;
	} else {
		inst_type = -1;
		panic_vm(vcpu);
	}

	size = 1 << size;	/* convert log2 encoding to bytes */
	if (dir == IOREQ_WRITE) {
		mmio_access(vcpu, padr, &data, size, ma, dir);
	} else {
		mmio_access(vcpu, padr, &data, size, ma, dir);
		if (inst_type == SL_INTEGER)
			vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
		else
			panic_vm(vcpu);
	}
	vcpu_increment_iip(vcpu);
}