Merge branch 'for-linus-3.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...

diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 8b0ba0b..ee04aba 100644
@@ -13,6 +13,7 @@
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  *
  * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
  *
  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
  */
@@ -22,6 +23,7 @@
 #include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kvm_host.h>
+#include <linux/clockchips.h>
 
 #include <asm/reg.h>
 #include <asm/time.h>
 #define OP_TRAP 3
 #define OP_TRAP_64 2
 
+#define OP_31_XOP_TRAP      4
 #define OP_31_XOP_LWZX      23
+#define OP_31_XOP_TRAP_64   68
 #define OP_31_XOP_LBZX      87
 #define OP_31_XOP_STWX      151
 #define OP_31_XOP_STBX      215
+#define OP_31_XOP_LBZUX     119
 #define OP_31_XOP_STBUX     247
 #define OP_31_XOP_LHZX      279
 #define OP_31_XOP_LHZUX     311
 #define OP_31_XOP_MFSPR     339
+#define OP_31_XOP_LHAX      343
 #define OP_31_XOP_STHX      407
 #define OP_31_XOP_STHUX     439
 #define OP_31_XOP_MTSPR     467
 #define OP_31_XOP_STHBRX    918
 
 #define OP_LWZ  32
+#define OP_LD   58
 #define OP_LWZU 33
 #define OP_LBZ  34
 #define OP_LBZU 35
 #define OP_STW  36
 #define OP_STWU 37
+#define OP_STD  62
 #define OP_STB  38
 #define OP_STBU 39
 #define OP_LHZ  40
 #define OP_LHZU 41
+#define OP_LHA  42
+#define OP_LHAU 43
 #define OP_STH  44
 #define OP_STHU 45
 
-#ifdef CONFIG_PPC64
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-       return 1;
-}
-#else
-static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.tcr & TCR_DIE;
-}
-#endif
-
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
        unsigned long dec_nsec;
+       unsigned long long dec_time;
 
        pr_debug("mtDEC: %x\n", vcpu->arch.dec);
-#ifdef CONFIG_PPC64
+       hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+
+#ifdef CONFIG_PPC_BOOK3S
        /* mtdec lowers the interrupt line when positive. */
        kvmppc_core_dequeue_dec(vcpu);
 
        /* POWER4+ triggers a dec interrupt if the value is < 0 */
        if (vcpu->arch.dec & 0x80000000) {
-               hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
                kvmppc_core_queue_dec(vcpu);
                return;
        }
 #endif
-       if (kvmppc_dec_enabled(vcpu)) {
-               /* The decrementer ticks at the same rate as the timebase, so
-                * that's how we convert the guest DEC value to the number of
-                * host ticks. */
-
-               hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-               dec_nsec = vcpu->arch.dec;
-               dec_nsec *= 1000;
-               dec_nsec /= tb_ticks_per_usec;
-               hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-                             HRTIMER_MODE_REL);
-               vcpu->arch.dec_jiffies = get_tb();
-       } else {
-               hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-       }
+
+#ifdef CONFIG_BOOKE
+       /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
+       if (vcpu->arch.dec == 0)
+               return;
+#endif
+
+       /*
+        * The decrementer ticks at the same rate as the timebase, so
+        * that's how we convert the guest DEC value to the number of
+        * host ticks.
+        */
+
+       dec_time = vcpu->arch.dec;
+       /*
+        * Guest timebase ticks at the same frequency as host decrementer.
+        * So use the host decrementer calculations for decrementer emulation.
+        */
+       dec_time = dec_time << decrementer_clockevent.shift;
+       do_div(dec_time, decrementer_clockevent.mult);
+       dec_nsec = do_div(dec_time, NSEC_PER_SEC);
+       hrtimer_start(&vcpu->arch.dec_timer,
+               ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
+       vcpu->arch.dec_jiffies = get_tb();
+}
+
+u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
+{
+       u64 jd = tb - vcpu->arch.dec_jiffies;
+
+#ifdef CONFIG_BOOKE
+       if (vcpu->arch.dec < jd)
+               return 0;
+#endif
+
+       return vcpu->arch.dec - jd;
 }
 
 /* XXX to do:
@@ -128,183 +149,173 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
  * from opcode tables in the future. */
 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 {
-       u32 inst = vcpu->arch.last_inst;
-       u32 ea;
-       int ra;
-       int rb;
-       int rs;
-       int rt;
-       int sprn;
+       u32 inst = kvmppc_get_last_inst(vcpu);
+       int ra = get_ra(inst);
+       int rs = get_rs(inst);
+       int rt = get_rt(inst);
+       int sprn = get_sprn(inst);
        enum emulation_result emulated = EMULATE_DONE;
        int advance = 1;
+       ulong spr_val = 0;
 
        /* this default type might be overwritten by subcategories */
        kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
-       pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
-
-       /* Try again next time */
-       if (inst == KVM_INST_FETCH_FAILED)
-               return EMULATE_DONE;
+       pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
        switch (get_op(inst)) {
        case OP_TRAP:
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
        case OP_TRAP_64:
+               kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
 #else
-               vcpu->arch.esr |= ESR_PTR;
+               kvmppc_core_queue_program(vcpu,
+                                         vcpu->arch.shared->esr | ESR_PTR);
 #endif
-               kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
                advance = 0;
                break;
 
        case 31:
                switch (get_xop(inst)) {
 
+               case OP_31_XOP_TRAP:
+#ifdef CONFIG_64BIT
+               case OP_31_XOP_TRAP_64:
+#endif
+#ifdef CONFIG_PPC_BOOK3S
+                       kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
+#else
+                       kvmppc_core_queue_program(vcpu,
+                                       vcpu->arch.shared->esr | ESR_PTR);
+#endif
+                       advance = 0;
+                       break;
                case OP_31_XOP_LWZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                        break;
 
                case OP_31_XOP_LBZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                        break;
 
+               case OP_31_XOP_LBZUX:
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+                       break;
+
                case OP_31_XOP_STWX:
-                       rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 1);
                        break;
 
                case OP_31_XOP_STBX:
-                       rs = get_rs(inst);
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
                        break;
 
                case OP_31_XOP_STBUX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       1, 1);
-                       kvmppc_set_gpr(vcpu, rs, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+                       break;
+
+               case OP_31_XOP_LHAX:
+                       emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
                        break;
 
                case OP_31_XOP_LHZX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                        break;
 
                case OP_31_XOP_LHZUX:
-                       rt = get_rt(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-                       kvmppc_set_gpr(vcpu, ra, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_MFSPR:
-                       sprn = get_sprn(inst);
-                       rt = get_rt(inst);
-
                        switch (sprn) {
                        case SPRN_SRR0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
+                               spr_val = vcpu->arch.shared->srr0;
+                               break;
                        case SPRN_SRR1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
+                               spr_val = vcpu->arch.shared->srr1;
+                               break;
                        case SPRN_PVR:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
+                               spr_val = vcpu->arch.pvr;
+                               break;
                        case SPRN_PIR:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
+                               spr_val = vcpu->vcpu_id;
+                               break;
                        case SPRN_MSSSR0:
-                               kvmppc_set_gpr(vcpu, rt, 0); break;
+                               spr_val = 0;
+                               break;
 
                        /* Note: mftb and TBRL/TBWL are user-accessible, so
                         * the guest can always access the real TB anyways.
                         * In fact, we probably will never see these traps. */
                        case SPRN_TBWL:
-                               kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
+                               spr_val = get_tb() >> 32;
+                               break;
                        case SPRN_TBWU:
-                               kvmppc_set_gpr(vcpu, rt, get_tb()); break;
+                               spr_val = get_tb();
+                               break;
 
                        case SPRN_SPRG0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
+                               spr_val = vcpu->arch.shared->sprg0;
+                               break;
                        case SPRN_SPRG1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
+                               spr_val = vcpu->arch.shared->sprg1;
+                               break;
                        case SPRN_SPRG2:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
+                               spr_val = vcpu->arch.shared->sprg2;
+                               break;
                        case SPRN_SPRG3:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
+                               spr_val = vcpu->arch.shared->sprg3;
+                               break;
                        /* Note: SPRG4-7 are user-readable, so we don't get
                         * a trap. */
 
                        case SPRN_DEC:
-                       {
-                               u64 jd = get_tb() - vcpu->arch.dec_jiffies;
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
-                               pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
-                                        vcpu->arch.dec, jd,
-                                        kvmppc_get_gpr(vcpu, rt));
+                               spr_val = kvmppc_get_dec(vcpu, get_tb());
                                break;
-                       }
                        default:
-                               emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
-                               if (emulated == EMULATE_FAIL) {
-                                       printk("mfspr: unknown spr %x\n", sprn);
-                                       kvmppc_set_gpr(vcpu, rt, 0);
+                               emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
+                                                                    &spr_val);
+                               if (unlikely(emulated == EMULATE_FAIL)) {
+                                       printk(KERN_INFO "mfspr: unknown spr "
+                                               "0x%x\n", sprn);
                                }
                                break;
                        }
+                       kvmppc_set_gpr(vcpu, rt, spr_val);
+                       kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
                        break;
 
                case OP_31_XOP_STHX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
                        break;
 
                case OP_31_XOP_STHUX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
-                       ea = kvmppc_get_gpr(vcpu, rb);
-                       if (ra)
-                               ea += kvmppc_get_gpr(vcpu, ra);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 1);
-                       kvmppc_set_gpr(vcpu, ra, ea);
+                       kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                        break;
 
                case OP_31_XOP_MTSPR:
-                       sprn = get_sprn(inst);
-                       rs = get_rs(inst);
+                       spr_val = kvmppc_get_gpr(vcpu, rs);
                        switch (sprn) {
                        case SPRN_SRR0:
-                               vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr0 = spr_val;
+                               break;
                        case SPRN_SRR1:
-                               vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr1 = spr_val;
+                               break;
 
                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */
@@ -314,25 +325,32 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        case SPRN_MSSSR0: break;
 
                        case SPRN_DEC:
-                               vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
+                               vcpu->arch.dec = spr_val;
                                kvmppc_emulate_dec(vcpu);
                                break;
 
                        case SPRN_SPRG0:
-                               vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg0 = spr_val;
+                               break;
                        case SPRN_SPRG1:
-                               vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg1 = spr_val;
+                               break;
                        case SPRN_SPRG2:
-                               vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg2 = spr_val;
+                               break;
                        case SPRN_SPRG3:
-                               vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->sprg3 = spr_val;
+                               break;
 
                        default:
-                               emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
+                               emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
+                                                                    spr_val);
                                if (emulated == EMULATE_FAIL)
-                                       printk("mtspr: unknown spr %x\n", sprn);
+                                       printk(KERN_INFO "mtspr: unknown spr "
+                                               "0x%x\n", sprn);
                                break;
                        }
+                       kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
                        break;
 
                case OP_31_XOP_DCBI:
@@ -344,7 +362,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        break;
 
                case OP_31_XOP_LWBRX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
                        break;
 
@@ -352,25 +369,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        break;
 
                case OP_31_XOP_STWBRX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       4, 0);
                        break;
 
                case OP_31_XOP_LHBRX:
-                       rt = get_rt(inst);
                        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
                        break;
 
                case OP_31_XOP_STHBRX:
-                       rs = get_rs(inst);
-                       ra = get_ra(inst);
-                       rb = get_rb(inst);
-
                        emulated = kvmppc_handle_store(run, vcpu,
                                                       kvmppc_get_gpr(vcpu, rs),
                                                       2, 0);
@@ -383,87 +391,92 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                break;
 
        case OP_LWZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
                break;
 
-       case OP_LWZU:
-               ra = get_ra(inst);
+       /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+       case OP_LD:
                rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+               break;
+
+       case OP_LWZU:
                emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_LBZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
                break;
 
        case OP_LBZU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STW:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
                break;
 
-       case OP_STWU:
-               ra = get_ra(inst);
+       /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
+       case OP_STD:
                rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
+                                              8, 1);
+               break;
+
+       case OP_STWU:
+               emulated = kvmppc_handle_store(run, vcpu,
+                                              kvmppc_get_gpr(vcpu, rs),
                                               4, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STB:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
                break;
 
        case OP_STBU:
-               ra = get_ra(inst);
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               1, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_LHZ:
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
                break;
 
        case OP_LHZU:
-               ra = get_ra(inst);
-               rt = get_rt(inst);
                emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+               break;
+
+       case OP_LHA:
+               emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+               break;
+
+       case OP_LHAU:
+               emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        case OP_STH:
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
                break;
 
        case OP_STHU:
-               ra = get_ra(inst);
-               rs = get_rs(inst);
                emulated = kvmppc_handle_store(run, vcpu,
                                               kvmppc_get_gpr(vcpu, rs),
                                               2, 1);
-               kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
+               kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
                break;
 
        default:
@@ -472,17 +485,21 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
        if (emulated == EMULATE_FAIL) {
                emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
-               if (emulated == EMULATE_FAIL) {
+               if (emulated == EMULATE_AGAIN) {
+                       advance = 0;
+               } else if (emulated == EMULATE_FAIL) {
                        advance = 0;
                        printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                               "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+                       kvmppc_core_queue_program(vcpu, 0);
                }
        }
 
-       trace_kvm_ppc_instr(inst, vcpu->arch.pc, emulated);
+       trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
 
+       /* Advance past emulated instruction. */
        if (advance)
-               vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+               kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
 
        return emulated;
 }
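
For reference, the mult/shift arithmetic that kvmppc_emulate_dec() uses above to turn a guest DEC value into an hrtimer expiry can be sketched as a small standalone C program. The DEC_MULT/DEC_SHIFT values below are made up for illustration only; the real ones live in decrementer_clockevent and depend on the host timebase frequency.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the DEC -> hrtimer conversion done in
 * kvmppc_emulate_dec() above.  DEC_MULT/DEC_SHIFT are hypothetical
 * stand-ins for decrementer_clockevent.mult/.shift (here chosen so one
 * timebase tick is ~8 ns, i.e. a 125 MHz timebase).
 */
#define DEC_SHIFT 32
#define DEC_MULT  0x20000000ULL

int main(void)
{
	uint64_t dec = 0x00100000;                    /* guest DEC, in timebase ticks */
	uint64_t ns  = (dec << DEC_SHIFT) / DEC_MULT; /* ticks -> nanoseconds */
	uint64_t sec  = ns / 1000000000ULL;           /* do_div(dec_time, NSEC_PER_SEC): quotient  */
	uint64_t nsec = ns % 1000000000ULL;           /*                                 remainder */

	printf("DEC %llu ticks -> %llu s + %llu ns\n",
	       (unsigned long long)dec,
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}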