KVM: MMU: improve active sp audit
arch/x86/kvm/mmu_audit.c [linux-3.10.git]
/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

static const char *audit_msg;

typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);

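/*
 * Recursively walk the shadow page table rooted at @sp, invoking @fn on
 * every present last-level spte.  Present entries that are not last-level
 * are followed down to their child shadow pages instead of being passed
 * to @fn.
 */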
static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
                            inspect_spte_fn fn)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                u64 ent = sp->spt[i];

                if (is_shadow_present_pte(ent)) {
                        if (!is_last_spte(ent, sp->role.level)) {
                                struct kvm_mmu_page *child;

                                child = page_header(ent & PT64_BASE_ADDR_MASK);
                                __mmu_spte_walk(kvm, child, fn);
                        } else {
                                fn(kvm, &sp->spt[i]);
                        }
                }
        }
}

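/*
 * Walk every spte reachable from the vcpu's current paging root.  A
 * 4-level shadow root has a single root shadow page; otherwise the four
 * PAE root entries are walked one by one.
 */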
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
        int i;
        struct kvm_mmu_page *sp;

        if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                return;

        if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->arch.mmu.root_hpa;

                sp = page_header(root);
                __mmu_spte_walk(vcpu->kvm, sp, fn);
                return;
        }

        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];

                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        __mmu_spte_walk(vcpu->kvm, sp, fn);
                }
        }
}

typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

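/*
 * Apply @fn to every shadow page on the VM's active_mmu_pages list,
 * whether or not it is currently reachable from a root.
 */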
static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
        struct kvm_mmu_page *sp;

        list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
                fn(kvm, sp);
}

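/*
 * Audit one shadow page table page: every present last-level spte must
 * point at the hpa that its recorded gfn maps to.  Unsync pages may only
 * exist at the last level and, like direct pages, must not contain
 * notrap sptes.  Present non-last entries are audited recursively.
 */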
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                gva_t va, int level)
{
        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
        int i;
        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 *sptep = pt + i;
                struct kvm_mmu_page *sp;
                gfn_t gfn;
                pfn_t pfn;
                hpa_t hpa;

                sp = page_header(__pa(sptep));

                if (sp->unsync) {
                        if (level != PT_PAGE_TABLE_LEVEL) {
                                printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
                                                audit_msg, sp, level);
                                return;
                        }

                        if (*sptep == shadow_notrap_nonpresent_pte) {
                                printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
                                                audit_msg, sp);
                                return;
                        }
                }

                if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
                        printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
                                        audit_msg, sp);
                        return;
                }

                if (!is_shadow_present_pte(*sptep))
                        continue;

                /* Descend into the child table instead of skipping it. */
                if (!is_last_spte(*sptep, level)) {
                        audit_mappings_page(vcpu, *sptep, va, level - 1);
                        continue;
                }

                gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
                pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

                if (is_error_pfn(pfn)) {
                        kvm_release_pfn_clean(pfn);
                        continue;
                }

                hpa = pfn << PAGE_SHIFT;

                if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
                        printk(KERN_ERR "audit: (%s) error: levels %d"
                                        " gva %lx pfn %llx hpa %llx ent %llx\n",
                                        audit_msg, vcpu->arch.mmu.root_level,
                                        va, pfn, hpa, *sptep);

                /* Drop the reference gfn_to_pfn_atomic() took. */
                kvm_release_pfn_clean(pfn);
        }
}

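/*
 * Audit the gva->hpa translations reachable from the active root: the
 * single 4-level root, or each present PAE page directory (each of
 * which covers 1GB of guest address space, hence va = i << 30).
 */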
static void audit_mappings(struct kvm_vcpu *vcpu)
{
        unsigned i;

        if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL)
                audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
                        if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
                                                    vcpu->arch.mmu.pae_root[i],
                                                    i << 30,
                                                    2);
}

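/*
 * A present leaf spte must map a gfn that has a memslot, and the rmap
 * chain for that gfn must not be empty; otherwise rmap-based write
 * protection and unmapping would miss this spte.
 */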
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *rev_sp;
        gfn_t gfn;

        rev_sp = page_header(__pa(sptep));
        gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

        if (!gfn_to_memslot(kvm, gfn)) {
                if (!printk_ratelimit())
                        return;
                printk(KERN_ERR "%s: no memslot for gfn %llx\n",
                                audit_msg, gfn);
                printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
                                audit_msg, (long)(sptep - rev_sp->spt),
                                rev_sp->gfn);
                dump_stack();
                return;
        }

        rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
        if (!*rmapp) {
                if (!printk_ratelimit())
                        return;
                printk(KERN_ERR "%s: no rmap for spte %llx\n",
                                audit_msg, *sptep);
                dump_stack();
        }
}

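/* Run the rmap check above on every present leaf spte the vcpu can reach. */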
static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
        mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

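/*
 * For a last-level shadow page, run the rmap check on every spte that
 * should be reachable via rmap (is_rmap_spte() filters out the rest).
 */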
static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int i;

        if (sp->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (!is_rmap_spte(sp->spt[i]))
                        continue;

                inspect_spte_has_rmap(kvm, sp->spt + i);
        }
}

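/*
 * An indirect, synced, valid shadow page implies the guest page it
 * shadows is write-protected: no spte in that gfn's rmap chain may
 * still be writable.
 */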
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        u64 *spte;

        if (sp->role.direct || sp->unsync || sp->role.invalid)
                return;

        slot = gfn_to_memslot(kvm, sp->gfn);
        rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                if (is_writable_pte(*spte))
                        printk(KERN_ERR "%s: (%s) shadow page has "
                                "writable mappings: gfn %llx role %x\n",
                               __func__, audit_msg, sp->gfn,
                               sp->role.word);
                spte = rmap_next(kvm, rmapp, spte);
        }
}

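/*
 * Audits that only need the shadow page itself, applied to every page
 * on the active list.
 */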
static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        check_mappings_rmap(kvm, sp);
        audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
        walk_all_active_sps(kvm, audit_sp);
}

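/*
 * Probe attached to the kvm_mmu_audit tracepoint: runs the full set of
 * audits, except that the mapping audit is skipped at the "pre pte
 * write" audit point.
 */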
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
{
        audit_msg = audit_point_name[audit_point];
        audit_all_active_sps(vcpu->kvm);
        if (strcmp("pre pte write", audit_msg) != 0)
                audit_mappings(vcpu);
        audit_sptes_have_rmaps(vcpu);
}

static bool mmu_audit;

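/*
 * Auditing is driven by the kvm_mmu_audit tracepoint: enabling attaches
 * kvm_mmu_audit() as a probe, so a full audit pass runs at every audit
 * point; disabling detaches it and waits for in-flight probes to finish.
 */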
static void mmu_audit_enable(void)
{
        int ret;

        if (mmu_audit)
                return;

        ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        WARN_ON(ret);

        mmu_audit = true;
}

static void mmu_audit_disable(void)
{
        if (!mmu_audit)
                return;

        unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
        tracepoint_synchronize_unregister();
        mmu_audit = false;
}

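/*
 * module_param_cb() setter: only "0" (disable) and "1" (enable) are
 * accepted; anything else is rejected with -EINVAL.
 */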
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
        int ret;
        unsigned long enable;

        ret = kstrtoul(val, 10, &enable);
        if (ret < 0)
                return -EINVAL;

        switch (enable) {
        case 0:
                mmu_audit_disable();
                break;
        case 1:
                mmu_audit_enable();
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

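/*
 * Wire the setter up as a writable module parameter; reads report the
 * current state via the stock boolean getter.
 */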
static struct kernel_param_ops audit_param_ops = {
        .set = mmu_audit_set,
        .get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
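
/*
 * Note: this file has no #includes of its own because it is built by
 * being #included from mmu.c (it relies on symbols that are static
 * there).  Assuming the code ends up in kvm.ko, auditing can then be
 * toggled at runtime with:
 *
 *   echo 1 > /sys/module/kvm/parameters/mmu_audit
 *   echo 0 > /sys/module/kvm/parameters/mmu_audit
 */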