/* arch/powerpc/kvm/emulate.c */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.tcr & TCR_DIE) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */
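                /* Worked example (illustrative figures, not taken from this
                 * file): with a 400 MHz timebase and HZ=250,
                 * tb_ticks_per_jiffy is 1,600,000, so a guest DEC value of
                 * 8,000,000 ticks arms the host timer 5 jiffies out. */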
                unsigned long nr_jiffies;

                nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
                mod_timer(&vcpu->arch.dec_timer,
                          get_jiffies_64() + nr_jiffies);
        } else {
                del_timer(&vcpu->arch.dec_timer);
        }
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
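/* For illustration only (not existing code): a to-do entry such as lha
 * (primary opcode 42) would presumably decode rt with get_rt(), issue a
 * 2-byte kvmppc_handle_load(), and then sign-extend the value that lands
 * in gpr[rt]. The load path used below has no sign-extension flag, so the
 * algebraic forms need extra plumbing before they can be added. */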
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = vcpu->arch.last_inst;
        u32 ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

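        /*
         * Decode is driven by the primary opcode (get_op(), the top six
         * bits of the instruction); primary opcode 31 carries the X-form
         * instructions, which are distinguished further below by their
         * extended opcode (get_xop()).
         */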
        switch (get_op(inst)) {
        case 3:                                             /* trap */
                vcpu->arch.esr |= ESR_PTR;
                kvmppc_core_queue_program(vcpu);
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

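                /* The MMIO helpers take the destination/source GPR (or its
                 * value, for stores), the access width in bytes, and a
                 * byte-order flag: 1 for big-endian accesses, 0 for the
                 * byte-reversed forms (lwbrx, lhbrx, stwbrx, sthbrx). */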
                case 23:                                        /* lwzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case 87:                                        /* lbzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

                case 151:                                       /* stwx */
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 1);
                        break;

                case 215:                                       /* stbx */
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        break;

                case 247:                                       /* stbux */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        /* Update form: the effective address is written back
                         * to rA, not rS. */
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case 279:                                       /* lhzx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case 311:                                       /* lhzux */
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case 339:                                       /* mfspr */
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
                        case SPRN_SRR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
                        case SPRN_PVR:
                                vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyway.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                vcpu->arch.gpr[rt] = mftbl(); break;
                        case SPRN_TBWU:
                                vcpu->arch.gpr[rt] = mftbu(); break;

                        case SPRN_SPRG0:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
                        case SPRN_SPRG1:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
                        case SPRN_SPRG2:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
                        case SPRN_SPRG3:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk("mfspr: unknown spr %x\n", sprn);
                                        vcpu->arch.gpr[rt] = 0;
                                }
                                break;
                        }
                        break;

                case 407:                                       /* sthx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        break;

                case 439:                                       /* sthux */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case 467:                                       /* mtspr */
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SRR1:
                                vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

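                        /* A guest write to DEC re-arms the host-side
                         * decrementer timer via kvmppc_emulate_dec(). */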
                        case SPRN_DEC:
                                vcpu->arch.dec = vcpu->arch.gpr[rs];
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG1:
                                vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG2:
                                vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG3:
                                vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk("mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        break;

                case 470:                                       /* dcbi */
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case 534:                                       /* lwbrx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case 566:                                       /* tlbsync */
                        break;

                case 662:                                       /* stwbrx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 0);
                        break;

                case 790:                                       /* lhbrx */
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case 918:                                       /* sthbrx */
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

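        /*
         * For the D-form update instructions below (lwzu, stbu, ...), rA is
         * written back from vcpu->arch.paddr_accessed, which is assumed to
         * have been filled in by the MMU exit path that triggered this
         * emulation.
         */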
        case 32:                                                /* lwz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case 33:                                                /* lwzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 34:                                                /* lbz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case 35:                                                /* lbzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 36:                                                /* stw */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                break;

        case 37:                                                /* stwu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 38:                                                /* stb */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                break;

        case 39:                                                /* stbu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 40:                                                /* lhz */
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case 41:                                                /* lhzu */
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case 44:                                                /* sth */
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                break;

        case 45:                                                /* sthu */
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        default:
                emulated = EMULATE_FAIL;
        }

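        /* Anything the generic decoder above could not handle gets one more
         * chance with the core-specific emulator before we give up. */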
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                }
        }

        KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit);

        if (advance)
                vcpu->arch.pc += 4; /* Advance past emulated instruction. */

        return emulated;
}