KVM: MMU: handle large host sptes on invlpg/resync
Marcelo Tosatti [Mon, 22 Dec 2008 20:49:30 +0000 (18:49 -0200)]
The invlpg and sync walkers lack knowledge of large host sptes,
descending to a non-existent pagetable level.

Stop at the directory level in that case.

Fixes SMP Windows XP with hugepages.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h

index d50ebac..83f11c7 100644 (file)
@@ -1007,7 +1007,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                u64 ent = sp->spt[i];
 
-               if (is_shadow_present_pte(ent)) {
+               if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
                        struct kvm_mmu_page *child;
                        child = page_header(ent & PT64_BASE_ADDR_MASK);
 
index d206401..9fd78b6 100644 (file)
@@ -472,14 +472,19 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
        struct shadow_walker *sw =
                container_of(_sw, struct shadow_walker, walker);
 
-       if (level == PT_PAGE_TABLE_LEVEL) {
+       /* FIXME: properly handle invlpg on large guest pages */
+       if (level == PT_PAGE_TABLE_LEVEL ||
+           ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
                struct kvm_mmu_page *sp = page_header(__pa(sptep));
 
                sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
                sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
-               if (is_shadow_present_pte(*sptep))
+               if (is_shadow_present_pte(*sptep)) {
                        rmap_remove(vcpu->kvm, sptep);
+                       if (is_large_pte(*sptep))
+                               --vcpu->kvm->stat.lpages;
+               }
                set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
                return 1;
        }