KVM: MMU: only update unsync page in invlpg path
Xiao Guangrong [Sat, 15 May 2010 10:53:35 +0000 (18:53 +0800)]
Only unsync pages need to be updated at invlpg time, since all other
shadow pages are write-protected

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>

arch/x86/kvm/paging_tmpl.h

index 22f1379..0671d7a 100644 (file)
@@ -461,6 +461,7 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
+       struct kvm_mmu_page *sp;
        gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
@@ -472,10 +473,13 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                level = iterator.level;
                sptep = iterator.sptep;
 
+               sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
-                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
                        int offset, shift;
 
+                       if (!sp->unsync)
+                               break;
+
                        shift = PAGE_SHIFT -
                                  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;
@@ -493,7 +497,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                        break;
                }
 
-               if (!is_shadow_present_pte(*sptep))
+               if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }