KVM: SVM: count all irq windows exit
[linux-3.10.git] / arch / x86 / kvm / mmu_audit.c
index 8becb86..715da5a 100644 (file)
@@ -4,7 +4,7 @@
  * Audit code for KVM MMU
  *
  * Copyright (C) 2006 Qumranet, Inc.
- * Copyright 2010 Red Hat, Inc. and/or its affilates.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Authors:
  *   Yaniv Kamay  <yaniv@qumranet.com>
  *
  */
 
-static const char *audit_msg;
+#include <linux/ratelimit.h>
 
-typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
+char const *audit_point_name[] = {
+       "pre page fault",
+       "post page fault",
+       "pre pte write",
+       "post pte write",
+       "pre sync",
+       "post sync"
+};
+
+#define audit_printk(kvm, fmt, args...)                \
+       printk(KERN_ERR "audit: (%s) error: "   \
+               fmt, audit_point_name[kvm->arch.audit_point], ##args)
+
+typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);
 
-static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
-                           inspect_spte_fn fn)
+static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+                           inspect_spte_fn fn, int level)
 {
        int i;
 
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
-               u64 ent = sp->spt[i];
-
-               if (is_shadow_present_pte(ent)) {
-                       if (!is_last_spte(ent, sp->role.level)) {
-                               struct kvm_mmu_page *child;
-                               child = page_header(ent & PT64_BASE_ADDR_MASK);
-                               __mmu_spte_walk(kvm, child, fn);
-                       } else
-                               fn(kvm, &sp->spt[i]);
+               u64 *ent = sp->spt;
+
+               fn(vcpu, ent + i, level);
+
+               if (is_shadow_present_pte(ent[i]) &&
+                     !is_last_spte(ent[i], level)) {
+                       struct kvm_mmu_page *child;
+
+                       child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
+                       __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
 }
@@ -47,21 +61,25 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 
        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;
-       if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+
+       if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;
+
                sp = page_header(root);
-               __mmu_spte_walk(vcpu->kvm, sp, fn);
+               __mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
                return;
        }
+
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
-                       __mmu_spte_walk(vcpu->kvm, sp, fn);
+                       __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
+
        return;
 }
 
@@ -75,114 +93,84 @@ static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
                fn(kvm, sp);
 }
 
-static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
-                               gva_t va, int level)
+static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-       u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
-       int i;
-       gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
-
-       for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
-               u64 *sptep = pt + i;
-               struct kvm_mmu_page *sp;
-               gfn_t gfn;
-               pfn_t pfn;
-               hpa_t hpa;
-
-               sp = page_header(__pa(sptep));
-
-               if (sp->unsync) {
-                       if (level != PT_PAGE_TABLE_LEVEL) {
-                               printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
-                                               audit_msg, sp, level);
-                               return;
-                       }
-
-                       if (*sptep == shadow_notrap_nonpresent_pte) {
-                               printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
-                                               audit_msg, sp);
-                               return;
-                       }
-               }
-
-               if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
-                       printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
-                                       audit_msg, sp);
-                       return;
-               }
-
-               if (!is_shadow_present_pte(*sptep) ||
-                     !is_last_spte(*sptep, level))
-                       return;
+       struct kvm_mmu_page *sp;
+       gfn_t gfn;
+       pfn_t pfn;
+       hpa_t hpa;
 
-               gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
-               pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
+       sp = page_header(__pa(sptep));
 
-               if (is_error_pfn(pfn)) {
-                       kvm_release_pfn_clean(pfn);
+       if (sp->unsync) {
+               if (level != PT_PAGE_TABLE_LEVEL) {
+                       audit_printk(vcpu->kvm, "unsync sp: %p "
+                                    "level = %d\n", sp, level);
                        return;
                }
+       }
+
+       if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
+               return;
 
-               hpa =  pfn << PAGE_SHIFT;
+       gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+       pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);
 
-               if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
-                       printk(KERN_ERR "xx audit error: (%s) levels %d"
-                                          " gva %lx pfn %llx hpa %llx ent %llxn",
-                                          audit_msg, vcpu->arch.mmu.root_level,
-                                          va, pfn, hpa, *sptep);
+       if (is_error_pfn(pfn)) {
+               kvm_release_pfn_clean(pfn);
+               return;
        }
-}
 
-static void audit_mappings(struct kvm_vcpu *vcpu)
-{
-       unsigned i;
-
-       if (vcpu->arch.mmu.root_level == 4)
-               audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
-       else
-               for (i = 0; i < 4; ++i)
-                       if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
-                               audit_mappings_page(vcpu,
-                                                   vcpu->arch.mmu.pae_root[i],
-                                                   i << 30,
-                                                   2);
+       hpa =  pfn << PAGE_SHIFT;
+       if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
+               audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
+                            "ent %llx\n", vcpu->arch.mmu.root_level, pfn,
+                            hpa, *sptep);
 }
 
-void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
+static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
 {
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;
 
-
        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
 
        if (!gfn_to_memslot(kvm, gfn)) {
-               if (!printk_ratelimit())
+               if (!__ratelimit(&ratelimit_state))
                        return;
-               printk(KERN_ERR "%s: no memslot for gfn %llx\n",
-                                audit_msg, gfn);
-               printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
-                      audit_msg, (long int)(sptep - rev_sp->spt),
-                               rev_sp->gfn);
+               audit_printk(kvm, "no memslot for gfn %llx\n", gfn);
+               audit_printk(kvm, "index %ld of sp (gfn=%llx)\n",
+                      (long int)(sptep - rev_sp->spt), rev_sp->gfn);
                dump_stack();
                return;
        }
 
        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
-               if (!printk_ratelimit())
+               if (!__ratelimit(&ratelimit_state))
                        return;
-               printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
-                                audit_msg, *sptep);
+               audit_printk(kvm, "no rmap for writable spte %llx\n",
+                            *sptep);
                dump_stack();
        }
 }
 
-void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
+static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
+{
+       if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
+               inspect_spte_has_rmap(vcpu->kvm, sptep);
+}
+
+static void audit_spte_after_sync(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-       mmu_spte_walk(vcpu, inspect_spte_has_rmap);
+       struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+       if (vcpu->kvm->arch.audit_point == AUDIT_POST_SYNC && sp->unsync)
+               audit_printk(vcpu->kvm, "meet unsync sp(%p) after sync "
+                            "root.\n", sp);
 }
 
 static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -200,7 +188,7 @@ static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
        }
 }
 
-void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
+static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
@@ -212,14 +200,13 @@ void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
 
-       spte = rmap_next(kvm, rmapp, NULL);
+       spte = rmap_next(rmapp, NULL);
        while (spte) {
                if (is_writable_pte(*spte))
-                       printk(KERN_ERR "%s: (%s) shadow page has "
-                               "writable mappings: gfn %llx role %x\n",
-                              __func__, audit_msg, sp->gfn,
-                              sp->role.word);
-               spte = rmap_next(kvm, rmapp, spte);
+                       audit_printk(kvm, "shadow page has writable "
+                                    "mappings: gfn %llx role %x\n",
+                                    sp->gfn, sp->role.word);
+               spte = rmap_next(rmapp, spte);
        }
 }
 
@@ -234,27 +221,45 @@ static void audit_all_active_sps(struct kvm *kvm)
        walk_all_active_sps(kvm, audit_sp);
 }
 
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
+static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
 {
-       audit_msg = audit_point_name[audit_point];
-       audit_all_active_sps(vcpu->kvm);
-       if (strcmp("pre pte write", audit_msg) != 0)
-               audit_mappings(vcpu);
-       audit_sptes_have_rmaps(vcpu);
+       audit_sptes_have_rmaps(vcpu, sptep, level);
+       audit_mappings(vcpu, sptep, level);
+       audit_spte_after_sync(vcpu, sptep, level);
+}
+
+static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
+{
+       mmu_spte_walk(vcpu, audit_spte);
 }
 
 static bool mmu_audit;
+static struct static_key mmu_audit_key;
 
-static void mmu_audit_enable(void)
+static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
-       int ret;
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 
-       if (mmu_audit)
+       if (!__ratelimit(&ratelimit_state))
                return;
 
-       ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       WARN_ON(ret);
+       vcpu->kvm->arch.audit_point = point;
+       audit_all_active_sps(vcpu->kvm);
+       audit_vcpu_spte(vcpu);
+}
+
+static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
+{
+       if (static_key_false((&mmu_audit_key)))
+               __kvm_mmu_audit(vcpu, point);
+}
+
+static void mmu_audit_enable(void)
+{
+       if (mmu_audit)
+               return;
 
+       static_key_slow_inc(&mmu_audit_key);
        mmu_audit = true;
 }
 
@@ -263,8 +268,7 @@ static void mmu_audit_disable(void)
        if (!mmu_audit)
                return;
 
-       unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-       tracepoint_synchronize_unregister();
+       static_key_slow_dec(&mmu_audit_key);
        mmu_audit = false;
 }