nv-tegra.nvidia Code Review - linux-3.10.git/blobdiff - arch/x86/kvm/mmu.c
KVM: Retry fault before vmentry
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea5464a518097958083156eb5b0b94f4d021c..b2c60986a7ce361f33bedb52235367b8ffb3dfc4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
  *
  */
 
+#include "irq.h"
 #include "mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/kvm_host.h>
 #include <linux/types.h>
@@ -720,7 +722,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        }
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
        pfn_t pfn;
        u64 old_spte = *sptep;
@@ -731,19 +733,20 @@ static void set_spte_track_bits(u64 *sptep, u64 new_spte)
                old_spte = __xchg_spte(sptep, new_spte);
 
        if (!is_rmap_spte(old_spte))
-               return;
+               return 0;
 
        pfn = spte_to_pfn(old_spte);
        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
                kvm_set_pfn_dirty(pfn);
+       return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-       set_spte_track_bits(sptep, new_spte);
-       rmap_remove(kvm, sptep);
+       if (set_spte_track_bits(sptep, new_spte))
+               rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
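Review note: set_spte_track_bits() now reports whether the spte it replaced was a
present rmap spte, and drop_spte() calls rmap_remove() only in that case; removing
an spte that was never mapped would otherwise hit the BUG() check in rmap_remove().
A minimal sketch of the pattern outside the kernel (stand-alone C; the names and
ENTRY_PRESENT bit are illustrative, not taken from the patch):

#include <stdbool.h>
#include <stdint.h>

#define ENTRY_PRESENT (1ULL << 0)       /* stand-in for the spte "present" bit */

/* Clear the entry and tell the caller whether it was present beforehand. */
static bool clear_entry(uint64_t *slot, uint64_t new_val)
{
        uint64_t old = *slot;

        *slot = new_val;
        return old & ENTRY_PRESENT;     /* true => reverse-map cleanup is needed */
}

static void drop_entry(uint64_t *slot)
{
        if (clear_entry(slot, 0)) {
                /* only entries that were actually mapped get unlinked here */
        }
}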
@@ -2393,7 +2396,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
                        ASSERT(!VALID_PAGE(root));
                        spin_lock(&vcpu->kvm->mmu_lock);
                        kvm_mmu_free_some_pages(vcpu);
-                       sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
+                       sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
+                                             i << 30,
                                              PT32_ROOT_LEVEL, 1, ACC_ALL,
                                              NULL);
                        root = __pa(sp->spt);
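Review note: kvm_mmu_get_page() takes a gfn as its second argument and the guest
address as its third, but the loop that sets up the four PAE page-directory roots
for direct mapping was passing the guest physical address i << 30 for both. The fix
converts the address to a frame number. A quick stand-alone check of the arithmetic
(assuming x86's 4 KiB pages, i.e. PAGE_SHIFT == 12):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12                                     /* x86: 4 KiB pages */

int main(void)
{
        for (uint64_t i = 0; i < 4; i++) {                /* the four 1 GiB root slots */
                uint64_t gpa = i << 30;                   /* guest physical address of the slot */
                uint64_t gfn = gpa >> PAGE_SHIFT;         /* frame number the callee expects */

                assert(gfn == (i << (30 - PAGE_SHIFT)));  /* matches the corrected expression */
        }
        return 0;
}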
@@ -2566,7 +2570,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-                               u32 error_code)
+                               u32 error_code, bool no_apf)
 {
        gfn_t gfn;
        int r;
@@ -2585,8 +2589,52 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                             error_code & PFERR_WRITE_MASK, gfn);
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-                               u32 error_code)
+int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
+{
+       struct kvm_arch_async_pf arch;
+       arch.gfn = gfn;
+
+       return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
+}
+
+static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+{
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
+                    kvm_event_needs_reinjection(vcpu)))
+               return false;
+
+       return kvm_x86_ops->interrupt_allowed(vcpu);
+}
+
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+                        gva_t gva, pfn_t *pfn)
+{
+       bool async;
+
+       *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async);
+
+       if (!async)
+               return false; /* *pfn has correct page already */
+
+       put_page(pfn_to_page(*pfn));
+
+       if (!no_apf && can_do_async_pf(vcpu)) {
+               trace_kvm_try_async_get_page(async, *pfn);
+               if (kvm_find_async_pf_gfn(vcpu, gfn)) {
+                       trace_kvm_async_pf_doublefault(gva, gfn);
+                       kvm_make_request(KVM_REQ_APF_HALT, vcpu);
+                       return true;
+               } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
+                       return true;
+       }
+
+       *pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+       return false;
+}
+
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+                         bool no_apf)
 {
        pfn_t pfn;
        int r;
@@ -2607,7 +2655,11 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
-       pfn = gfn_to_pfn(vcpu->kvm, gfn);
+
+       if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
+               return 0;
+
+       /* mmio */
        if (is_error_pfn(pfn))
                return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
        spin_lock(&vcpu->kvm->mmu_lock);
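Review note: with this hunk tdp_page_fault() first asks gfn_to_pfn_async() for the
page; only when the page is not immediately available does it queue an asynchronous
page fault, halt the vcpu if that gfn already has one outstanding, or fall back to
the blocking gfn_to_pfn() lookup. can_do_async_pf() additionally requires an
in-kernel irqchip, no event pending reinjection, and the guest able to take an
interrupt. A toy model of the branching in try_async_pf() (stand-alone C with the
kernel helpers reduced to booleans; names are illustrative, not from the patch):

#include <stdbool.h>

enum fault_action {
        USE_PFN,          /* page resident: continue the fault with the returned pfn */
        HALT_VCPU,        /* this gfn already has an async fault outstanding */
        APF_QUEUED,       /* async page fault queued; the guest keeps running */
        SYNC_FALLBACK     /* block in the ordinary lookup and handle the fault now */
};

static enum fault_action model_try_async_pf(bool resident, bool no_apf,
                                            bool can_do_apf, bool gfn_pending,
                                            bool setup_ok)
{
        if (resident)
                return USE_PFN;
        if (!no_apf && can_do_apf) {
                if (gfn_pending)
                        return HALT_VCPU;
                if (setup_ok)
                        return APF_QUEUED;
        }
        return SYNC_FALLBACK;
}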
@@ -3267,7 +3319,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
        int r;
        enum emulation_result er;
 
-       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+       r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
        if (r < 0)
                goto out;
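Review note: every caller visible in this diff passes no_apf = false, so the normal
guest fault path is still allowed to queue asynchronous page faults; passing true
asks the handler to resolve the fault synchronously and never set one up. Per the
commit subject, the retry of a completed async fault performed before vmentry is
presumably such a caller, but it lives outside this file.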