Add desktop PowerPC specific emulation
arch/powerpc/kvm/emulate.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

#define OP_TRAP 3
#define OP_TRAP_64 2

#define OP_31_XOP_LWZX      23
#define OP_31_XOP_LBZX      87
#define OP_31_XOP_STWX      151
#define OP_31_XOP_STBX      215
#define OP_31_XOP_STBUX     247
#define OP_31_XOP_LHZX      279
#define OP_31_XOP_LHZUX     311
#define OP_31_XOP_MFSPR     339
#define OP_31_XOP_STHX      407
#define OP_31_XOP_STHUX     439
#define OP_31_XOP_MTSPR     467
#define OP_31_XOP_DCBI      470
#define OP_31_XOP_LWBRX     534
#define OP_31_XOP_TLBSYNC   566
#define OP_31_XOP_STWBRX    662
#define OP_31_XOP_LHBRX     790
#define OP_31_XOP_STHBRX    918

#define OP_LWZ  32
#define OP_LWZU 33
#define OP_LBZ  34
#define OP_LBZU 35
#define OP_STW  36
#define OP_STWU 37
#define OP_STB  38
#define OP_STBU 39
#define OP_LHZ  40
#define OP_LHZU 41
#define OP_STH  44
#define OP_STHU 45

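/* The 64-bit server decrementer has no separate enable bit, so it is treated
 * as always enabled; BookE only takes decrementer interrupts when TCR[DIE]
 * is set. */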
#ifdef CONFIG_PPC64
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return 1;
}
#else
static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.tcr & TCR_DIE;
}
#endif

void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
        unsigned long nr_jiffies;

#ifdef CONFIG_PPC64
        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
                del_timer(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
#endif
        if (kvmppc_dec_enabled(vcpu)) {
                /* The decrementer ticks at the same rate as the timebase, so
                 * that's how we convert the guest DEC value to the number of
                 * host ticks. */

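                /* Snapshot the timebase now; the mfspr(DEC) path below uses
                 * it to work out how far the decrementer has counted down
                 * since it was loaded (despite its name, dec_jiffies holds
                 * timebase ticks). */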
                vcpu->arch.dec_jiffies = get_tb();
                nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
                mod_timer(&vcpu->arch.dec_timer,
                          get_jiffies_64() + nr_jiffies);
        } else {
                del_timer(&vcpu->arch.dec_timer);
        }
}

/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        u32 inst = vcpu->arch.last_inst;
        ulong ea;
        int ra;
        int rb;
        int rs;
        int rt;
        int sprn;
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;

        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

        pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

        switch (get_op(inst)) {
        case OP_TRAP:
#ifdef CONFIG_PPC64
        case OP_TRAP_64:
#else
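                /* BookE: flag the program interrupt as caused by a trap
                 * instruction before it is queued. */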
                vcpu->arch.esr |= ESR_PTR;
#endif
                kvmppc_core_queue_program(vcpu);
                advance = 0;
                break;

        case 31:
                switch (get_xop(inst)) {

                case OP_31_XOP_LWZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;

                case OP_31_XOP_LBZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;

                case OP_31_XOP_STWX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 1);
                        break;

                case OP_31_XOP_STBX:
                        rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        break;

                case OP_31_XOP_STBUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

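                        /* Update-form store: EA = (rA|0) + rB, and rA is
                         * written back with the EA afterwards. */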
                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       1, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case OP_31_XOP_LHZX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;

                case OP_31_XOP_LHZUX:
                        rt = get_rt(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case OP_31_XOP_MFSPR:
                        sprn = get_sprn(inst);
                        rt = get_rt(inst);

                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
                        case SPRN_SRR1:
                                vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
                        case SPRN_PVR:
                                vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
                        case SPRN_PIR:
                                vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
                        case SPRN_MSSSR0:
                                vcpu->arch.gpr[rt] = 0; break;

                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
                                vcpu->arch.gpr[rt] = get_tb() >> 32; break;
                        case SPRN_TBWU:
                                vcpu->arch.gpr[rt] = get_tb(); break;

                        case SPRN_SPRG0:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
                        case SPRN_SPRG1:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
                        case SPRN_SPRG2:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
                        case SPRN_SPRG3:
                                vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */

                        case SPRN_DEC:
                        {
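                                /* Return the value the decrementer holds now:
                                 * what the guest last wrote minus the timebase
                                 * ticks elapsed since it was loaded. */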
                                u64 jd = get_tb() - vcpu->arch.dec_jiffies;
                                vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
                                pr_debug("mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
                                break;
                        }
                        default:
                                emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
                                if (emulated == EMULATE_FAIL) {
                                        printk(KERN_INFO "mfspr: unknown spr %x\n", sprn);
                                        vcpu->arch.gpr[rt] = 0;
                                }
                                break;
                        }
                        break;

                case OP_31_XOP_STHX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        break;

                case OP_31_XOP_STHUX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        ea = vcpu->arch.gpr[rb];
                        if (ra)
                                ea += vcpu->arch.gpr[ra];

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 1);
                        vcpu->arch.gpr[ra] = ea;
                        break;

                case OP_31_XOP_MTSPR:
                        sprn = get_sprn(inst);
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
                                vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SRR1:
                                vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;

                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
                        case SPRN_TBWL: break;
                        case SPRN_TBWU: break;

                        case SPRN_MSSSR0: break;

                        case SPRN_DEC:
                                vcpu->arch.dec = vcpu->arch.gpr[rs];
                                kvmppc_emulate_dec(vcpu);
                                break;

                        case SPRN_SPRG0:
                                vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG1:
                                vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG2:
                                vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
                        case SPRN_SPRG3:
                                vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;

                        default:
                                emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
                                if (emulated == EMULATE_FAIL)
                                        printk(KERN_INFO "mtspr: unknown spr %x\n", sprn);
                                break;
                        }
                        break;

                case OP_31_XOP_DCBI:
                        /* Do nothing. The guest is performing dcbi because
                         * hardware DMA is not snooped by the dcache, but
                         * emulated DMA either goes through the dcache as
                         * normal writes, or the host kernel has handled dcache
                         * coherence. */
                        break;

                case OP_31_XOP_LWBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;

                case OP_31_XOP_TLBSYNC:
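                        /* Nothing to do; the host manages the hardware TLB on
                         * the guest's behalf. */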
                        break;

                case OP_31_XOP_STWBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       4, 0);
                        break;

                case OP_31_XOP_LHBRX:
                        rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;

                case OP_31_XOP_STHBRX:
                        rs = get_rs(inst);
                        ra = get_ra(inst);
                        rb = get_rb(inst);

                        emulated = kvmppc_handle_store(run, vcpu,
                                                       vcpu->arch.gpr[rs],
                                                       2, 0);
                        break;

                default:
                        /* Attempt core-specific emulation below. */
                        emulated = EMULATE_FAIL;
                }
                break;

        case OP_LWZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;

        case OP_LWZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case OP_LBZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;

        case OP_LBZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case OP_STW:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                break;

        case OP_STWU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               4, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case OP_STB:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                break;

        case OP_STBU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               1, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case OP_LHZ:
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;

        case OP_LHZU:
                ra = get_ra(inst);
                rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        case OP_STH:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                break;

        case OP_STHU:
                ra = get_ra(inst);
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
                                               2, 1);
                vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
                break;

        default:
                emulated = EMULATE_FAIL;
        }

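        /* Anything not handled above gets a second chance from the
         * core-specific backend; if that also fails, report the failure and
         * leave the PC on the offending instruction. */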
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
                if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
                }
        }

        trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);

        if (advance)
                vcpu->arch.pc += 4; /* Advance past emulated instruction. */

        return emulated;
}