KVM: VMX: Separate saving pre-realmode state from setting segments
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f5b49c7..4e49caf 100644
@@ -18,6 +18,7 @@
 
 #include "irq.h"
 #include "mmu.h"
+#include "cpuid.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -26,6 +27,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include <linux/mod_devicetable.h>
 #include <linux/ftrace_event.h>
 #include <linux/slab.h>
 #include <linux/tboot.h>
@@ -39,6 +41,7 @@
 #include <asm/mce.h>
 #include <asm/i387.h>
 #include <asm/xcr.h>
+#include <asm/perf_event.h>
 
 #include "trace.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-static int __read_mostly bypass_guest_pf = 1;
-module_param(bypass_guest_pf, bool, S_IRUGO);
+static const struct x86_cpu_id vmx_cpu_id[] = {
+       X86_FEATURE_MATCH(X86_FEATURE_VMX),
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
 
-static int __read_mostly enable_vpid = 1;
+static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
-static int __read_mostly flexpriority_enabled = 1;
+static bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
 
-static int __read_mostly enable_ept = 1;
+static bool __read_mostly enable_ept = 1;
 module_param_named(ept, enable_ept, bool, S_IRUGO);
 
-static int __read_mostly enable_unrestricted_guest = 1;
+static bool __read_mostly enable_unrestricted_guest = 1;
 module_param_named(unrestricted_guest,
                        enable_unrestricted_guest, bool, S_IRUGO);
 
-static int __read_mostly emulate_invalid_guest_state = 0;
+static bool __read_mostly enable_ept_ad_bits = 1;
+module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
+
+static bool __read_mostly emulate_invalid_guest_state = true;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
-static int __read_mostly vmm_exclusive = 1;
+static bool __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
-static int __read_mostly yield_on_hlt = 1;
-module_param(yield_on_hlt, bool, S_IRUGO);
+static bool __read_mostly fasteoi = 1;
+module_param(fasteoi, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
  * VMX and be a hypervisor for its own guests. If nested=0, guests may not
  * use VMX instructions.
  */
-static int __read_mostly nested = 0;
+static bool __read_mostly nested = 0;
 module_param(nested, bool, S_IRUGO);
 
 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                          \
@@ -118,7 +127,7 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
-#define NR_AUTOLOAD_MSRS 1
+#define NR_AUTOLOAD_MSRS 8
 #define VMCS02_POOL_SIZE 1
 
 struct vmcs {
@@ -387,6 +396,9 @@ struct vcpu_vmx {
        struct {
                int           loaded;
                u16           fs_sel, gs_sel, ldt_sel;
+#ifdef CONFIG_X86_64
+               u16           ds_sel, es_sel;
+#endif
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
        } host_state;
@@ -584,10 +596,9 @@ static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
 {
        struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
-       if (is_error_page(page)) {
-               kvm_release_page_clean(page);
+       if (is_error_page(page))
                return NULL;
-       }
+
        return page;
 }
 
@@ -606,6 +617,10 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+                           struct kvm_segment *var, int seg);
+static void vmx_get_segment(struct kvm_vcpu *vcpu,
+                           struct kvm_segment *var, int seg);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -622,6 +637,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
 
 static bool cpu_has_load_ia32_efer;
+static bool cpu_has_load_perf_global_ctrl;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -779,6 +795,11 @@ static inline bool cpu_has_vmx_ept_4levels(void)
        return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
 }
 
+static inline bool cpu_has_vmx_ept_ad_bits(void)
+{
+       return vmx_capability.ept & VMX_EPT_AD_BIT;
+}
+
 static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
        return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
@@ -839,6 +860,12 @@ static inline bool cpu_has_vmx_rdtscp(void)
                SECONDARY_EXEC_RDTSCP;
 }
 
+static inline bool cpu_has_vmx_invpcid(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_ENABLE_INVPCID;
+}
+
 static inline bool cpu_has_virtual_nmis(void)
 {
        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -1191,15 +1218,34 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
 
+static void clear_atomic_switch_msr_special(unsigned long entry,
+               unsigned long exit)
+{
+       vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
+       vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 {
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
-       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-               vmcs_clear_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-               vmcs_clear_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-               return;
+       switch (msr) {
+       case MSR_EFER:
+               if (cpu_has_load_ia32_efer) {
+                       clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                       VM_EXIT_LOAD_IA32_EFER);
+                       return;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (cpu_has_load_perf_global_ctrl) {
+                       clear_atomic_switch_msr_special(
+                                       VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+                       return;
+               }
+               break;
        }
 
        for (i = 0; i < m->nr; ++i)
@@ -1215,25 +1261,55 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
 }
 
+static void add_atomic_switch_msr_special(unsigned long entry,
+               unsigned long exit, unsigned long guest_val_vmcs,
+               unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+{
+       vmcs_write64(guest_val_vmcs, guest_val);
+       vmcs_write64(host_val_vmcs, host_val);
+       vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
+       vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                  u64 guest_val, u64 host_val)
 {
        unsigned i;
        struct msr_autoload *m = &vmx->msr_autoload;
 
-       if (msr == MSR_EFER && cpu_has_load_ia32_efer) {
-               vmcs_write64(GUEST_IA32_EFER, guest_val);
-               vmcs_write64(HOST_IA32_EFER, host_val);
-               vmcs_set_bits(VM_ENTRY_CONTROLS, VM_ENTRY_LOAD_IA32_EFER);
-               vmcs_set_bits(VM_EXIT_CONTROLS, VM_EXIT_LOAD_IA32_EFER);
-               return;
+       switch (msr) {
+       case MSR_EFER:
+               if (cpu_has_load_ia32_efer) {
+                       add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+                                       VM_EXIT_LOAD_IA32_EFER,
+                                       GUEST_IA32_EFER,
+                                       HOST_IA32_EFER,
+                                       guest_val, host_val);
+                       return;
+               }
+               break;
+       case MSR_CORE_PERF_GLOBAL_CTRL:
+               if (cpu_has_load_perf_global_ctrl) {
+                       add_atomic_switch_msr_special(
+                                       VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
+                                       GUEST_IA32_PERF_GLOBAL_CTRL,
+                                       HOST_IA32_PERF_GLOBAL_CTRL,
+                                       guest_val, host_val);
+                       return;
+               }
+               break;
        }
 
        for (i = 0; i < m->nr; ++i)
                if (m->guest[i].index == msr)
                        break;
 
-       if (i == m->nr) {
+       if (i == NR_AUTOLOAD_MSRS) {
+               printk_once(KERN_WARNING "Not enough msr switch entries. "
+                               "Can't add msr %x\n", msr);
+               return;
+       } else if (i == m->nr) {
                ++m->nr;
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
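Not part of the patch: the hunk above caps the atomic-switch list at NR_AUTOLOAD_MSRS (now 8) and warns once when it overflows. Below is a minimal, compilable sketch of the same bookkeeping; the VMCS count updates are reduced to a comment, and everything except NR_AUTOLOAD_MSRS and the array walk is illustrative.

#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct msr_entry { unsigned index; unsigned long long value; };

struct msr_autoload {
        unsigned nr;
        struct msr_entry guest[NR_AUTOLOAD_MSRS];
        struct msr_entry host[NR_AUTOLOAD_MSRS];
};

/* Add or update an MSR pair; refuse, with a warning, when the list is full. */
static void add_switch_msr(struct msr_autoload *m, unsigned msr,
                           unsigned long long guest_val,
                           unsigned long long host_val)
{
        unsigned i;

        for (i = 0; i < m->nr; ++i)
                if (m->guest[i].index == msr)
                        break;

        if (i == NR_AUTOLOAD_MSRS) {
                fprintf(stderr, "no room for msr %#x\n", msr);
                return;
        } else if (i == m->nr) {
                ++m->nr;        /* the real code also updates the VMCS counts here */
        }

        m->guest[i].index = msr;
        m->guest[i].value = guest_val;
        m->host[i].index  = msr;
        m->host[i].value  = host_val;
}

int main(void)
{
        struct msr_autoload m = { 0 };

        add_switch_msr(&m, 0x38f /* e.g. IA32_PERF_GLOBAL_CTRL */, 0, ~0ULL);
        printf("entries in use: %u\n", m.nr);
        return 0;
}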
@@ -1266,7 +1342,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        guest_efer = vmx->vcpu.arch.efer;
 
        /*
-        * NX is emulated; LMA and LME handled by hardware; SCE meaninless
+        * NX is emulated; LMA and LME handled by hardware; SCE meaningless
         * outside long mode
         */
        ignore_bits = EFER_NX | EFER_SCE;
@@ -1362,6 +1438,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
        }
 
 #ifdef CONFIG_X86_64
+       savesegment(ds, vmx->host_state.ds_sel);
+       savesegment(es, vmx->host_state.es_sel);
+#endif
+
+#ifdef CONFIG_X86_64
        vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
        vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
 #else
@@ -1401,11 +1482,17 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
        }
        if (vmx->host_state.fs_reload_needed)
                loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+       if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
+               loadsegment(ds, vmx->host_state.ds_sel);
+               loadsegment(es, vmx->host_state.es_sel);
+       }
+#endif
        reload_tss();
 #ifdef CONFIG_X86_64
        wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
-       if (current_thread_info()->status & TS_USEDFPU)
+       if (user_has_fpu())
                clts();
        load_gdt(&__get_cpu_var(host_gdt));
 }
@@ -1603,17 +1690,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        vmx_set_interrupt_shadow(vcpu, 0);
 }
 
-static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
-{
-       /* Ensure that we clear the HLT state in the VMCS.  We don't need to
-        * explicitly skip the instruction because if the HLT state is set, then
-        * the instruction is already executing and RIP has already been
-        * advanced. */
-       if (!yield_on_hlt &&
-           vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
-               vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
-}
-
 /*
  * KVM wants to inject page-faults which it got to the guest. This function
  * checks whether in a nested guest, we need to inject them to L1 or L2.
@@ -1626,7 +1702,7 @@ static int nested_pf_handled(struct kvm_vcpu *vcpu)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
        /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
-       if (!(vmcs12->exception_bitmap & PF_VECTOR))
+       if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
                return 0;
 
        nested_vmx_vmexit(vcpu);
@@ -1666,7 +1742,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
                intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
-       vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -1674,6 +1749,11 @@ static bool vmx_rdtscp_supported(void)
        return cpu_has_vmx_rdtscp();
 }
 
+static bool vmx_invpcid_supported(void)
+{
+       return cpu_has_vmx_invpcid() && enable_ept;
+}
+
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
@@ -1696,7 +1776,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
-       vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
@@ -1751,13 +1830,34 @@ static u64 guest_read_tsc(void)
 }
 
 /*
- * Empty call-back. Needs to be implemented when VMX enables the SET_TSC_KHZ
- * ioctl. In this case the call-back should update internal vmx state to make
- * the changes effective.
+ * Like guest_read_tsc, but always returns L1's notion of the timestamp
+ * counter, even if a nested guest (L2) is currently running.
+ */
+u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu)
+{
+       u64 host_tsc, tsc_offset;
+
+       rdtscll(host_tsc);
+       tsc_offset = is_guest_mode(vcpu) ?
+               to_vmx(vcpu)->nested.vmcs01_tsc_offset :
+               vmcs_read64(TSC_OFFSET);
+       return host_tsc + tsc_offset;
+}
+
+/*
+ * Engage any workarounds for mis-matched TSC rates.  Currently limited to
+ * software catchup for faster rates on slower CPUs.
  */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
+static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
-       /* Nothing to do here */
+       if (!scale)
+               return;
+
+       if (user_tsc_khz > tsc_khz) {
+               vcpu->arch.tsc_catchup = 1;
+               vcpu->arch.tsc_always_catchup = 1;
+       } else
+               WARN(1, "user requested TSC rate below hardware speed\n");
 }
 
 /*
@@ -1765,18 +1865,26 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
  */
 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       vmcs_write64(TSC_OFFSET, offset);
-       if (is_guest_mode(vcpu))
+       if (is_guest_mode(vcpu)) {
                /*
-                * We're here if L1 chose not to trap the TSC MSR. Since
-                * prepare_vmcs12() does not copy tsc_offset, we need to also
-                * set the vmcs12 field here.
+                * We're here if L1 chose not to trap WRMSR to TSC. According
+                * to the spec, this should set L1's TSC; The offset that L1
+                * set for L2 remains unchanged, and still needs to be added
+                * to the newly set TSC to get L2's TSC.
                 */
-               get_vmcs12(vcpu)->tsc_offset = offset -
-                       to_vmx(vcpu)->nested.vmcs01_tsc_offset;
+               struct vmcs12 *vmcs12;
+               to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
+               /* recalculate vmcs02.TSC_OFFSET: */
+               vmcs12 = get_vmcs12(vcpu);
+               vmcs_write64(TSC_OFFSET, offset +
+                       (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
+                        vmcs12->tsc_offset : 0));
+       } else {
+               vmcs_write64(TSC_OFFSET, offset);
+       }
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
        u64 offset = vmcs_read64(TSC_OFFSET);
        vmcs_write64(TSC_OFFSET, offset + adjustment);
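Not part of the patch: the arithmetic implemented by vmx_read_l1_tsc() and the reworked vmx_write_tsc_offset() can be checked in isolation. A small standalone sketch, with all values made up, of how the two offsets combine:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* What L1 observes: host TSC plus the offset kept in vmcs01. */
static uint64_t l1_tsc(uint64_t host_tsc, uint64_t vmcs01_offset)
{
        return host_tsc + vmcs01_offset;
}

/* What L2 observes: vmcs02.TSC_OFFSET as recomputed in vmx_write_tsc_offset(). */
static uint64_t l2_tsc(uint64_t host_tsc, uint64_t vmcs01_offset,
                       uint64_t vmcs12_offset, bool l1_uses_tsc_offsetting)
{
        uint64_t vmcs02_offset = vmcs01_offset +
                (l1_uses_tsc_offsetting ? vmcs12_offset : 0);
        return host_tsc + vmcs02_offset;
}

int main(void)
{
        uint64_t host = 1000000, off01 = 500, off12 = 30;

        printf("L1 sees %llu, L2 sees %llu\n",
               (unsigned long long)l1_tsc(host, off01),
               (unsigned long long)l2_tsc(host, off01, off12, true));
        return 0;
}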
@@ -1882,6 +1990,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
                CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
                CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+               CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        /*
         * We can allow some features even when not supported by the
@@ -2068,12 +2177,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                        return 1;
                /* Otherwise falls through */
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
                        return 0;
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
-                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -2097,7 +2204,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
        switch (msr_index) {
        case MSR_EFER:
-               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                break;
 #ifdef CONFIG_X86_64
@@ -2146,8 +2252,13 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
-                       vmx_load_host_state(vmx);
                        msr->data = data;
+                       if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
+                               preempt_disable();
+                               kvm_set_shared_msr(msr->index, msr->data,
+                                                  msr->mask);
+                               preempt_enable();
+                       }
                        break;
                }
                ret = kvm_set_msr_common(vcpu, msr_index, data);
@@ -2328,7 +2439,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                &_pin_based_exec_control) < 0)
                return -EIO;
 
-       min =
+       min = CPU_BASED_HLT_EXITING |
 #ifdef CONFIG_X86_64
              CPU_BASED_CR8_LOAD_EXITING |
              CPU_BASED_CR8_STORE_EXITING |
@@ -2340,10 +2451,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_TSC_OFFSETING |
              CPU_BASED_MWAIT_EXITING |
              CPU_BASED_MONITOR_EXITING |
-             CPU_BASED_INVLPG_EXITING;
-
-       if (yield_on_hlt)
-               min |= CPU_BASED_HLT_EXITING;
+             CPU_BASED_INVLPG_EXITING |
+             CPU_BASED_RDPMC_EXITING;
 
        opt = CPU_BASED_TPR_SHADOW |
              CPU_BASED_USE_MSR_BITMAPS |
@@ -2364,7 +2473,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_ENABLE_EPT |
                        SECONDARY_EXEC_UNRESTRICTED_GUEST |
                        SECONDARY_EXEC_PAUSE_LOOP_EXITING |
-                       SECONDARY_EXEC_RDTSCP;
+                       SECONDARY_EXEC_RDTSCP |
+                       SECONDARY_EXEC_ENABLE_INVPCID;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -2432,6 +2542,42 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
                                   VM_EXIT_LOAD_IA32_EFER);
 
+       cpu_has_load_perf_global_ctrl =
+               allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
+                               VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+               && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
+                                  VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
+
+       /*
+        * Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL,
+        * but due to the errata below it can't be used. The workaround is
+        * to use the MSR-load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+        *
+        * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
+        *
+        * AAK155             (model 26)
+        * AAP115             (model 30)
+        * AAT100             (model 37)
+        * BC86,AAY89,BD102   (model 44)
+        * BA97               (model 46)
+        *
+        */
+       if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
+               switch (boot_cpu_data.x86_model) {
+               case 26:
+               case 30:
+               case 37:
+               case 44:
+               case 46:
+                       cpu_has_load_perf_global_ctrl = false;
+                       printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+                                       "does not work properly. Using workaround\n");
+                       break;
+               default:
+                       break;
+               }
+       }
+
        return 0;
 }
 
@@ -2515,8 +2661,12 @@ static __init int hardware_setup(void)
            !cpu_has_vmx_ept_4levels()) {
                enable_ept = 0;
                enable_unrestricted_guest = 0;
+               enable_ept_ad_bits = 0;
        }
 
+       if (!cpu_has_vmx_ept_ad_bits())
+               enable_ept_ad_bits = 0;
+
        if (!cpu_has_vmx_unrestricted_guest())
                enable_unrestricted_guest = 0;
 
@@ -2606,17 +2756,19 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
                struct kvm_memslots *slots;
+               struct kvm_memory_slot *slot;
                gfn_t base_gfn;
 
                slots = kvm_memslots(kvm);
-               base_gfn = slots->memslots[0].base_gfn +
-                                kvm->memslots->memslots[0].npages - 3;
+               slot = id_to_memslot(slots, 0);
+               base_gfn = slot->base_gfn + slot->npages - 3;
+
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
 }
 
-static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+static void save_rmode_seg(int seg, struct kvm_save_segment *save)
 {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
 
@@ -2624,6 +2776,12 @@ static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
        save->base = vmcs_readl(sf->base);
        save->limit = vmcs_read32(sf->limit);
        save->ar = vmcs_read32(sf->ar_bytes);
+}
+
+static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
+{
+       struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
+
        vmcs_write16(sf->selector, save->base >> 4);
        vmcs_write32(sf->base, save->base & 0xffff0);
        vmcs_write32(sf->limit, 0xffff);
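Not part of the patch: fix_rmode_seg() above encodes the vm86 constraint that a real-mode segment base equals selector * 16. A standalone restatement of the values it writes (struct and field names are illustrative):

#include <stdio.h>
#include <stdint.h>

struct rmode_seg { uint16_t selector; uint32_t base, limit, ar; };

/*
 * Mirror of what fix_rmode_seg() puts into the VMCS: in vm86 mode the
 * selector must be base/16, the base must fit in 20 bits, the limit is
 * 64K and the access rights describe a present, writable data segment.
 */
static struct rmode_seg fix_rmode(uint32_t saved_base)
{
        struct rmode_seg s;

        s.selector = saved_base >> 4;
        s.base     = saved_base & 0xffff0;
        s.limit    = 0xffff;
        s.ar       = 0xf3;
        return s;
}

int main(void)
{
        struct rmode_seg s = fix_rmode(0xb8000);

        printf("sel=%#x base=%#x limit=%#x ar=%#x\n",
               s.selector, s.base, s.limit, s.ar);
        return 0;
}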
@@ -2638,6 +2796,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 {
        unsigned long flags;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_segment var;
 
        if (enable_unrestricted_guest)
                return;
@@ -2645,6 +2804,12 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmx->emulation_required = 1;
        vmx->rmode.vm86_active = 1;
 
+       save_rmode_seg(VCPU_SREG_TR, &vmx->rmode.tr);
+       save_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+       save_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+       save_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+       save_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+
        /*
         * Very old userspace does not call KVM_SET_TSS_ADDR before entering
         * vcpu. Call it here with phys address pointing 16M below 4G.
@@ -2659,14 +2824,8 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 
        vmx_segment_cache_clear(vmx);
 
-       vmx->rmode.tr.selector = vmcs_read16(GUEST_TR_SELECTOR);
-       vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
        vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
-
-       vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
        vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
-
-       vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
@@ -2681,20 +2840,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        if (emulate_invalid_guest_state)
                goto continue_rmode;
 
-       vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
-       vmcs_write32(GUEST_SS_LIMIT, 0xffff);
-       vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+       vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
+
+       vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
+
+       vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
 
-       vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
-       vmcs_write32(GUEST_CS_LIMIT, 0xffff);
-       if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
-               vmcs_writel(GUEST_CS_BASE, 0xf0000);
-       vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
+       vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
 
-       fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
-       fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
-       fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
-       fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+       vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
+
+       vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
+       vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
 
 continue_rmode:
        kvm_mmu_reset_context(vcpu);
@@ -2739,8 +2901,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 
        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
-               printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
-                      __func__);
+               pr_debug_ratelimited("%s: tss fixup for long mode. \n",
+                                    __func__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
@@ -2895,6 +3057,8 @@ static u64 construct_eptp(unsigned long root_hpa)
        /* TODO write the value reading from MSR */
        eptp = VMX_EPT_DEFAULT_MT |
                VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
+       if (enable_ept_ad_bits)
+               eptp |= VMX_EPT_AD_ENABLE_BIT;
        eptp |= (root_hpa & PAGE_MASK);
 
        return eptp;
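Not part of the patch: a standalone sketch of the EPTP layout that construct_eptp() fills in, including the new A/D-enable bit. The numeric constants mirror the architectural EPTP format (memory type in bits 2:0, page-walk length minus one in bits 5:3, A/D enable in bit 6) and are written here as assumptions, not taken from the kernel's headers:

#include <stdio.h>
#include <stdint.h>

#define EPT_MT_WB       6ULL            /* write-back memory type        */
#define EPT_WALK_4      (3ULL << 3)     /* 4-level page walk (length-1)  */
#define EPT_AD_ENABLE   (1ULL << 6)     /* accessed/dirty flags enabled  */
#define PAGE_MASK_4K    (~0xfffULL)

static uint64_t construct_eptp(uint64_t root_hpa, int enable_ad_bits)
{
        uint64_t eptp = EPT_MT_WB | EPT_WALK_4;

        if (enable_ad_bits)
                eptp |= EPT_AD_ENABLE;
        eptp |= root_hpa & PAGE_MASK_4K;
        return eptp;
}

int main(void)
{
        printf("eptp = %#llx\n",
               (unsigned long long)construct_eptp(0x12345000ULL, 1));
        return 0;
}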
@@ -3021,11 +3185,22 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       /*
+        * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
+        * fail; use the cache instead.
+        */
+       if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) {
+               return vmx->cpl;
+       }
+
        if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
                __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
-               to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
+               vmx->cpl = __vmx_get_cpl(vcpu);
        }
-       return to_vmx(vcpu)->cpl;
+
+       return vmx->cpl;
 }
 
 
@@ -3033,7 +3208,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
        u32 ar;
 
-       if (var->unusable)
+       if (var->unusable || !var->present)
                ar = 1 << 16;
        else {
                ar = var->type & 15;
@@ -3045,8 +3220,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
                ar |= (var->db & 1) << 14;
                ar |= (var->g & 1) << 15;
        }
-       if (ar == 0) /* a 0 value means unusable */
-               ar = AR_UNUSABLE_MASK;
 
        return ar;
 }
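Not part of the patch: the access-rights packing done by vmx_segment_access_rights(), with the new unusable/!present rule, restated as a compilable sketch (the struct layout is illustrative):

#include <stdio.h>
#include <stdint.h>

struct seg {
        uint8_t type, s, dpl, present, avl, l, db, g, unusable;
};

/* Same bit layout the function writes into GUEST_xx_AR_BYTES. */
static uint32_t seg_access_rights(const struct seg *var)
{
        uint32_t ar;

        if (var->unusable || !var->present)
                return 1u << 16;        /* the "unusable" bit */

        ar  = var->type & 15;
        ar |= (var->s & 1) << 4;
        ar |= (var->dpl & 3) << 5;
        ar |= (var->present & 1) << 7;
        ar |= (var->avl & 1) << 12;
        ar |= (var->l & 1) << 13;
        ar |= (var->db & 1) << 14;
        ar |= (var->g & 1) << 15;
        return ar;
}

int main(void)
{
        struct seg cs = { .type = 11, .s = 1, .dpl = 0, .present = 1,
                          .db = 1, .g = 1 };

        printf("CS ar_bytes = %#x\n", seg_access_rights(&cs));
        return 0;
}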
@@ -3086,7 +3259,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
         * qemu binaries.
         *   IA32 arch specifies that at the time of processor reset the
         * "Accessed" bit in the AR field of segment registers is 1. And qemu
-        * is setting it to 0 in the usedland code. This causes invalid guest
+        * is setting it to 0 in the userland code. This causes invalid guest
         * state vmexit when "unrestricted guest" mode is turned on.
         *    Fix for this setup issue in cpu_reset is being pushed in the qemu
         * tree. Newer qemu binaries with that qemu fix would not need this
@@ -3097,6 +3270,44 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 
        vmcs_write32(sf->ar_bytes, ar);
        __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+
+       /*
+        * Fix segments for real mode guests on hosts that don't have
+        * "unrestricted guest" or have it disabled. This allows migration
+        * of guests from hosts with unrestricted guest (e.g. Westmere) to
+        * older hosts without it (e.g. Nehalem).
+        */
+       if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
+               switch (seg) {
+               case VCPU_SREG_CS:
+                       vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
+                       vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+                       if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
+                               vmcs_writel(GUEST_CS_BASE, 0xf0000);
+                       vmcs_write16(GUEST_CS_SELECTOR,
+                                    vmcs_readl(GUEST_CS_BASE) >> 4);
+                       break;
+               case VCPU_SREG_ES:
+                       fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+                       break;
+               case VCPU_SREG_DS:
+                       fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+                       break;
+               case VCPU_SREG_GS:
+                       fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+                       break;
+               case VCPU_SREG_FS:
+                       fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+                       break;
+               case VCPU_SREG_SS:
+                       vmcs_write16(GUEST_SS_SELECTOR,
+                                    vmcs_readl(GUEST_SS_BASE) >> 4);
+                       vmcs_write32(GUEST_SS_LIMIT, 0xffff);
+                       vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+                       break;
+               }
+       }
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -3529,8 +3740,18 @@ static void vmx_set_constant_host_state(void)
        vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
 
        vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
+#ifdef CONFIG_X86_64
+       /*
+        * Load null selectors, so we can avoid reloading them in
+        * __vmx_load_host_state(), in case userspace uses the null selectors
+        * too (the expected case).
+        */
+       vmcs_write16(HOST_DS_SELECTOR, 0);
+       vmcs_write16(HOST_ES_SELECTOR, 0);
+#else
        vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
+#endif
        vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
@@ -3589,6 +3810,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
        if (!enable_ept) {
                exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
                enable_unrestricted_guest = 0;
+               /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
+               exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
        }
        if (!enable_unrestricted_guest)
                exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
@@ -3597,6 +3820,17 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
        return exec_control;
 }
 
+static void ept_set_mmio_spte_mask(void)
+{
+       /*
+        * EPT Misconfigurations can be generated if the value of bits 2:0
+        * of an EPT paging-structure entry is 110b (write/execute).
+        * Also, magic bits (0xffull << 49) is set to quickly identify mmio
+        * spte.
+        */
+       kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
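Not part of the patch: a sketch of the MMIO-tagging idea behind ept_set_mmio_spte_mask() above. Write+execute without read (110b) forces an EPT misconfiguration, and the magic bits in 56:49 let such sptes be recognised quickly; the recognise-by-mask test below is an assumption about how the MMU side consumes the mask.

#include <stdio.h>
#include <stdint.h>

#define MMIO_SPTE_MASK  ((0xffULL << 49) | 0x6ULL)

static int spte_is_mmio(uint64_t spte)
{
        return (spte & MMIO_SPTE_MASK) == MMIO_SPTE_MASK;
}

int main(void)
{
        uint64_t mmio_spte = MMIO_SPTE_MASK | 0x1234000ULL;

        printf("mask = %#llx, tagged spte recognised: %d\n",
               (unsigned long long)MMIO_SPTE_MASK, spte_is_mmio(mmio_spte));
        return 0;
}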
@@ -3632,8 +3866,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                vmcs_write32(PLE_WINDOW, ple_window);
        }
 
-       vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
-       vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
+       vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
+       vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
 
        vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
@@ -3794,7 +4028,9 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
        vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
+       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
        vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        vmx_set_cr4(&vmx->vcpu, 0);
        vmx_set_efer(&vmx->vcpu, 0);
        vmx_fpu_activate(&vmx->vcpu);
@@ -3824,12 +4060,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               /* We can get here when nested_run_pending caused
-                * vmx_interrupt_allowed() to return false. In this case, do
-                * nothing - the interrupt will be injected later.
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+               /*
+                * We get here if vmx_interrupt_allowed() said we can't
+                * inject to L1 now because L2 must run. Ask L2 to exit
+                * right after entry, so we can inject to L1 more promptly.
                 */
+               kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                return;
+       }
 
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
@@ -3879,7 +4118,6 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
        } else
                intr |= INTR_TYPE_EXT_INTR;
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
-       vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
@@ -3911,7 +4149,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
        }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                        INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
-       vmx_clear_hlt(vcpu);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -3956,11 +4193,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
-               struct vmcs12 *vmcs12;
-               if (to_vmx(vcpu)->nested.nested_run_pending)
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (to_vmx(vcpu)->nested.nested_run_pending ||
+                   (vmcs12->idt_vectoring_info_field &
+                    VECTORING_INFO_VALID_MASK))
                        return 0;
                nested_vmx_vmexit(vcpu);
-               vmcs12 = get_vmcs12(vcpu);
                vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
                vmcs12->vm_exit_intr_info = 0;
                /* fall through to normal code, but now in L1, not L2 */
@@ -4107,8 +4345,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                /* EPT won't cause page fault directly */
-               if (enable_ept)
-                       BUG();
+               BUG_ON(enable_ept);
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
                trace_kvm_page_fault(cr2, error_code);
 
@@ -4207,7 +4444,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
        hypercall[2] = 0xc1;
 }
 
-/* called to set cr0 as approriate for a mov-to-cr0 exit. */
+/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
        if (to_vmx(vcpu)->nested.vmxon &&
@@ -4333,7 +4570,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                break;
        }
        vcpu->run->exit_reason = 0;
-       pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
+       vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
               (int)(exit_qualification >> 4) & 3, cr);
        return 0;
 }
@@ -4491,6 +4728,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+       int err;
+
+       err = kvm_rdpmc(vcpu);
+       kvm_complete_insn_gp(vcpu, err);
+
+       return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
@@ -4510,6 +4757,24 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
 
 static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
+       if (likely(fasteoi)) {
+               unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+               int access_type, offset;
+
+               access_type = exit_qualification & APIC_ACCESS_TYPE;
+               offset = exit_qualification & APIC_ACCESS_OFFSET;
+               /*
+                * A sane guest uses MOV to write EOI, and the written
+                * value does not matter. Short-circuit here to avoid
+                * heavy instruction emulation.
+                */
+               if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
+                   (offset == APIC_EOI)) {
+                       kvm_lapic_set_eoi(vcpu);
+                       skip_emulated_instruction(vcpu);
+                       return 1;
+               }
+       }
        return emulate_instruction(vcpu, 0) == EMULATE_DONE;
 }
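Not part of the patch: the fasteoi short-circuit above only decodes the APIC-access exit qualification. A standalone sketch of that decode; the mask values and the EOI register offset (0xB0) are quoted from the architectural layout as the editor recalls it, not from this file:

#include <stdio.h>
#include <stdint.h>

#define ACCESS_OFFSET_MASK      0xfffu          /* bits 11:0: offset into APIC page */
#define ACCESS_TYPE_MASK        0xf000u         /* bits 15:12: access type          */
#define TYPE_LINEAR_WRITE       (1u << 12)      /* linear write access              */
#define APIC_EOI_OFFSET         0xb0u           /* EOI register                     */

/* Returns 1 when the exit can take the fasteoi shortcut. */
static int is_eoi_write(uint64_t exit_qualification)
{
        unsigned type   = exit_qualification & ACCESS_TYPE_MASK;
        unsigned offset = exit_qualification & ACCESS_OFFSET_MASK;

        return type == TYPE_LINEAR_WRITE && offset == APIC_EOI_OFFSET;
}

int main(void)
{
        printf("EOI write -> %d, other APIC write -> %d\n",
               is_eoi_write(TYPE_LINEAR_WRITE | APIC_EOI_OFFSET),
               is_eoi_write(TYPE_LINEAR_WRITE | 0x80));
        return 0;
}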
 
@@ -4520,9 +4785,10 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
        bool has_error_code = false;
        u32 error_code = 0;
        u16 tss_selector;
-       int reason, type, idt_v;
+       int reason, type, idt_v, idt_index;
 
        idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+       idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
        type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -4560,8 +4826,9 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);
 
-       if (kvm_task_switch(vcpu, tss_selector, reason,
-                               has_error_code, error_code) == EMULATE_FAIL) {
+       if (kvm_task_switch(vcpu, tss_selector,
+                           type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
+                           has_error_code, error_code) == EMULATE_FAIL) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
@@ -4583,6 +4850,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        gpa_t gpa;
+       u32 error_code;
        int gla_validity;
 
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -4607,7 +4875,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
        trace_kvm_page_fault(gpa, exit_qualification);
-       return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
+
+       /* Was it a write fault? */
+       error_code = exit_qualification & (1U << 1);
+       /* Is the EPT page-table entry present? */
+       error_code |= (exit_qualification >> 3) & 0x1;
+
+       return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 }
 
 static u64 ept_rsvd_mask(u64 spte, int level)
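Not part of the patch: the error-code derivation added to handle_ept_violation() above, restated as a compilable sketch so the bit positions are easy to check:

#include <stdio.h>
#include <stdint.h>

/*
 * Exit-qualification bit 1 says the access was a write, bit 3 says the
 * EPT entry allowed reads (i.e. it was "present"). These land in the
 * page-fault error code as bit 1 (write) and bit 0 (present).
 */
static uint32_t ept_error_code(uint64_t exit_qualification)
{
        uint32_t error_code;

        error_code  = exit_qualification & (1u << 1);   /* write fault?   */
        error_code |= (exit_qualification >> 3) & 0x1;  /* entry present? */
        return error_code;
}

int main(void)
{
        printf("write to present page -> %#x\n", ept_error_code(0x0a));
        printf("read of missing page  -> %#x\n", ept_error_code(0x01));
        return 0;
}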
@@ -4674,11 +4948,19 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
        u64 sptes[4];
-       int nr_sptes, i;
+       int nr_sptes, i, ret;
        gpa_t gpa;
 
        gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 
+       ret = handle_mmio_page_fault_common(vcpu, gpa, true);
+       if (likely(ret == 1))
+               return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
+                                             EMULATE_DONE;
+       if (unlikely(!ret))
+               return 1;
+
+       /* It is the real ept misconfig */
        printk(KERN_ERR "EPT: Misconfiguration.\n");
        printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
 
@@ -4714,15 +4996,18 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
        int ret = 1;
        u32 cpu_exec_ctrl;
        bool intr_window_requested;
+       unsigned count = 130;
 
        cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
 
-       while (!guest_state_valid(vcpu)) {
-               if (intr_window_requested
-                   && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
+       while (!guest_state_valid(vcpu) && count-- != 0) {
+               if (intr_window_requested && vmx_interrupt_allowed(vcpu))
                        return handle_interrupt_window(&vmx->vcpu);
 
+               if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+                       return 1;
+
                err = emulate_instruction(vcpu, 0);
 
                if (err == EMULATE_DO_MMIO) {
@@ -4730,8 +5015,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        goto out;
                }
 
-               if (err != EMULATE_DONE)
+               if (err != EMULATE_DONE) {
+                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
                        return 0;
+               }
 
                if (signal_pending(current))
                        goto out;
@@ -4739,7 +5028,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        schedule();
        }
 
-       vmx->emulation_required = 0;
+       vmx->emulation_required = !guest_state_valid(vcpu);
 out:
        return ret;
 }
@@ -5415,6 +5704,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_INVD]                    = handle_invd,
        [EXIT_REASON_INVLPG]                  = handle_invlpg,
+       [EXIT_REASON_RDPMC]                   = handle_rdpmc,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
@@ -5575,8 +5865,8 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
                return 0;
 
        if (unlikely(vmx->fail)) {
-               printk(KERN_INFO "%s failed vm entry %x\n",
-                      __func__, vmcs_read32(VM_INSTRUCTION_ERROR));
+               pr_info_ratelimited("%s failed vm entry %x\n", __func__,
+                                   vmcs_read32(VM_INSTRUCTION_ERROR));
                return 1;
        }
 
@@ -5680,8 +5970,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
-       trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required && emulate_invalid_guest_state)
                return handle_invalid_guest_state(vcpu);
@@ -5911,6 +6199,24 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
 }
 
+static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
+{
+       int i, nr_msrs;
+       struct perf_guest_switch_msr *msrs;
+
+       msrs = perf_guest_get_msrs(&nr_msrs);
+
+       if (!msrs)
+               return;
+
+       for (i = 0; i < nr_msrs; i++)
+               if (msrs[i].host == msrs[i].guest)
+                       clear_atomic_switch_msr(vmx, msrs[i].msr);
+               else
+                       add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+                                       msrs[i].host);
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #define Q "q"
@@ -5922,6 +6228,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       unsigned long debugctlmsr;
 
        if (is_guest_mode(vcpu) && !vmx->nested.nested_run_pending) {
                struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -5960,6 +6267,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
+       atomic_switch_perf_msrs(vmx);
+       debugctlmsr = get_debugctlmsr();
+
        vmx->__launched = vmx->loaded_vmcs->launched;
        asm(
                /* Store host registers */
@@ -6060,6 +6370,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 #endif
              );
 
+       /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
+       if (debugctlmsr)
+               update_debugctlmsr(debugctlmsr);
+
+#ifndef CONFIG_X86_64
+       /*
+        * The sysexit path does not restore ds/es, so we must set them to
+        * a reasonable value ourselves.
+        *
+        * We can't defer this to vmx_load_host_state() since that function
+        * may be executed in interrupt context, which saves and restores segments
+        * around it, nullifying its effect.
+        */
+       loadsegment(ds, __USER_DS);
+       loadsegment(es, __USER_DS);
+#endif
+
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                                  | (1 << VCPU_EXREG_RFLAGS)
                                  | (1 << VCPU_EXREG_CPL)
@@ -6081,10 +6408,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                }
        }
 
-       asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
        vmx->loaded_vmcs->launched = 1;
 
        vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+       trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
        vmx_complete_atomic_exit(vmx);
        vmx_recover_nmi_blocking(vmx);
@@ -6167,7 +6494,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        return &vmx->vcpu;
 
 free_vmcs:
-       free_vmcs(vmx->loaded_vmcs->vmcs);
+       free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
        kfree(vmx->guest_msrs);
 uninit_vcpu:
@@ -6225,49 +6552,6 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        return ret;
 }
 
-#define _ER(x) { EXIT_REASON_##x, #x }
-
-static const struct trace_print_flags vmx_exit_reasons_str[] = {
-       _ER(EXCEPTION_NMI),
-       _ER(EXTERNAL_INTERRUPT),
-       _ER(TRIPLE_FAULT),
-       _ER(PENDING_INTERRUPT),
-       _ER(NMI_WINDOW),
-       _ER(TASK_SWITCH),
-       _ER(CPUID),
-       _ER(HLT),
-       _ER(INVLPG),
-       _ER(RDPMC),
-       _ER(RDTSC),
-       _ER(VMCALL),
-       _ER(VMCLEAR),
-       _ER(VMLAUNCH),
-       _ER(VMPTRLD),
-       _ER(VMPTRST),
-       _ER(VMREAD),
-       _ER(VMRESUME),
-       _ER(VMWRITE),
-       _ER(VMOFF),
-       _ER(VMON),
-       _ER(CR_ACCESS),
-       _ER(DR_ACCESS),
-       _ER(IO_INSTRUCTION),
-       _ER(MSR_READ),
-       _ER(MSR_WRITE),
-       _ER(MWAIT_INSTRUCTION),
-       _ER(MONITOR_INSTRUCTION),
-       _ER(PAUSE_INSTRUCTION),
-       _ER(MCE_DURING_VMENTRY),
-       _ER(TPR_BELOW_THRESHOLD),
-       _ER(APIC_ACCESS),
-       _ER(EPT_VIOLATION),
-       _ER(EPT_MISCONFIG),
-       _ER(WBINVD),
-       { -1, NULL }
-};
-
-#undef _ER
-
 static int vmx_get_lpage_level(void)
 {
        if (enable_ept && !cpu_has_vmx_ept_1g_page())
@@ -6297,6 +6581,23 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                        }
                }
        }
+
+       exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+       /* Exposing INVPCID only when PCID is exposed */
+       best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+       if (vmx_invpcid_supported() &&
+           best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+           guest_cpuid_has_pcid(vcpu)) {
+               exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
+               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                            exec_control);
+       } else {
+               exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                            exec_control);
+               if (best)
+                       best->ecx &= ~bit(X86_FEATURE_INVPCID);
+       }
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -6498,8 +6799,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        set_cr4_guest_host_mask(vmx);
 
-       vmcs_write64(TSC_OFFSET,
-               vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+               vmcs_write64(TSC_OFFSET,
+                       vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+       else
+               vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
 
        if (enable_vpid) {
                /*
@@ -6594,9 +6898,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        if (vmcs12->vm_entry_msr_load_count > 0 ||
            vmcs12->vm_exit_msr_load_count > 0 ||
            vmcs12->vm_exit_msr_store_count > 0) {
-               if (printk_ratelimit())
-                       printk(KERN_WARNING
-                         "%s: VMCS MSR_{LOAD,STORE} unsupported\n", __func__);
+               pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
+                                   __func__);
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
        }
@@ -6906,7 +7209,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 
        load_vmcs12_host_state(vcpu, vmcs12);
 
-       /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */
+       /* Update TSC_OFFSET if TSC was changed while L2 ran */
        vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */
@@ -7023,13 +7326,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .get_mt_mask = vmx_get_mt_mask,
 
        .get_exit_info = vmx_get_exit_info,
-       .exit_reasons_str = vmx_exit_reasons_str,
 
        .get_lpage_level = vmx_get_lpage_level,
 
        .cpuid_update = vmx_cpuid_update,
 
        .rdtscp_supported = vmx_rdtscp_supported,
+       .invpcid_supported = vmx_invpcid_supported,
 
        .set_supported_cpuid = vmx_set_supported_cpuid,
 
@@ -7039,6 +7342,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .write_tsc_offset = vmx_write_tsc_offset,
        .adjust_tsc_offset = vmx_adjust_tsc_offset,
        .compute_tsc_offset = vmx_compute_tsc_offset,
+       .read_l1_tsc = vmx_read_l1_tsc,
 
        .set_tdp_cr3 = vmx_set_cr3,
 
@@ -7058,23 +7362,21 @@ static int __init vmx_init(void)
        if (!vmx_io_bitmap_a)
                return -ENOMEM;
 
+       r = -ENOMEM;
+
        vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_io_bitmap_b) {
-               r = -ENOMEM;
+       if (!vmx_io_bitmap_b)
                goto out;
-       }
 
        vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_legacy) {
-               r = -ENOMEM;
+       if (!vmx_msr_bitmap_legacy)
                goto out1;
-       }
+
 
        vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
-       if (!vmx_msr_bitmap_longmode) {
-               r = -ENOMEM;
+       if (!vmx_msr_bitmap_longmode)
                goto out2;
-       }
+
 
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
@@ -7103,16 +7405,15 @@ static int __init vmx_init(void)
        vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
 
        if (enable_ept) {
-               bypass_guest_pf = 0;
-               kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-                               VMX_EPT_EXECUTABLE_MASK);
+               kvm_mmu_set_mask_ptes(0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+                       0ull, VMX_EPT_EXECUTABLE_MASK);
+               ept_set_mmio_spte_mask();
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();
 
-       if (bypass_guest_pf)
-               kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
-
        return 0;
 
 out3: