103d008dab8bab3b29ebddd7e8877a912a03b14b
[linux-2.6.git] / arch / x86 / kvm / mmu.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "vmx.h"
21 #include "mmu.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30
31 #include <asm/page.h>
32 #include <asm/cmpxchg.h>
33 #include <asm/io.h>
34
35 /*
36  * When setting this variable to true it enables Two-Dimensional-Paging
37  * where the hardware walks 2 page tables:
38  * 1. the guest-virtual to guest-physical
39  * 2. while doing 1. it walks guest-physical to host-physical
40  * If the hardware supports that we don't need to do shadow paging.
41  */
42 static bool tdp_enabled = false;
43
44 #undef MMU_DEBUG
45
46 #undef AUDIT
47
48 #ifdef AUDIT
49 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
50 #else
51 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
52 #endif
53
54 #ifdef MMU_DEBUG
55
56 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
57 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
58
59 #else
60
61 #define pgprintk(x...) do { } while (0)
62 #define rmap_printk(x...) do { } while (0)
63
64 #endif
65
66 #if defined(MMU_DEBUG) || defined(AUDIT)
67 static int dbg = 1;
68 #endif
69
#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
/*
 * Wrapped in do { } while (0) so that ASSERT(x); expands to exactly one
 * statement and is safe inside an unbraced if/else body (the bare
 * if (!(x)) { ... } form would pair with a following else).
 */
#define ASSERT(x)							\
	do {								\
		if (!(x)) {						\
			printk(KERN_WARNING "assertion failed %s:%d: %s\n",\
			       __FILE__, __LINE__, #x);			\
		}							\
	} while (0)
#endif
79
80 #define PT64_PT_BITS 9
81 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
82 #define PT32_PT_BITS 10
83 #define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
84
85 #define PT_WRITABLE_SHIFT 1
86
87 #define PT_PRESENT_MASK (1ULL << 0)
88 #define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
89 #define PT_USER_MASK (1ULL << 2)
90 #define PT_PWT_MASK (1ULL << 3)
91 #define PT_PCD_MASK (1ULL << 4)
92 #define PT_ACCESSED_MASK (1ULL << 5)
93 #define PT_DIRTY_MASK (1ULL << 6)
94 #define PT_PAGE_SIZE_MASK (1ULL << 7)
95 #define PT_PAT_MASK (1ULL << 7)
96 #define PT_GLOBAL_MASK (1ULL << 8)
97 #define PT64_NX_SHIFT 63
98 #define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
99
100 #define PT_PAT_SHIFT 7
101 #define PT_DIR_PAT_SHIFT 12
102 #define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
103
104 #define PT32_DIR_PSE36_SIZE 4
105 #define PT32_DIR_PSE36_SHIFT 13
106 #define PT32_DIR_PSE36_MASK \
107         (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
108
109
110 #define PT_FIRST_AVAIL_BITS_SHIFT 9
111 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
112
113 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
114
115 #define PT64_LEVEL_BITS 9
116
117 #define PT64_LEVEL_SHIFT(level) \
118                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
119
120 #define PT64_LEVEL_MASK(level) \
121                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
122
123 #define PT64_INDEX(address, level)\
124         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
125
126
127 #define PT32_LEVEL_BITS 10
128
129 #define PT32_LEVEL_SHIFT(level) \
130                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
131
132 #define PT32_LEVEL_MASK(level) \
133                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
134
135 #define PT32_INDEX(address, level)\
136         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
137
138
139 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
140 #define PT64_DIR_BASE_ADDR_MASK \
141         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
142
143 #define PT32_BASE_ADDR_MASK PAGE_MASK
144 #define PT32_DIR_BASE_ADDR_MASK \
145         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
146
147 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
148                         | PT64_NX_MASK)
149
150 #define PFERR_PRESENT_MASK (1U << 0)
151 #define PFERR_WRITE_MASK (1U << 1)
152 #define PFERR_USER_MASK (1U << 2)
153 #define PFERR_FETCH_MASK (1U << 4)
154
155 #define PT64_ROOT_LEVEL 4
156 #define PT32_ROOT_LEVEL 2
157 #define PT32E_ROOT_LEVEL 3
158
159 #define PT_DIRECTORY_LEVEL 2
160 #define PT_PAGE_TABLE_LEVEL 1
161
162 #define RMAP_EXT 4
163
164 #define ACC_EXEC_MASK    1
165 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
166 #define ACC_USER_MASK    PT_USER_MASK
167 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
168
/*
 * One node of a gfn's reverse-map chain: up to RMAP_EXT shadow pte
 * pointers, with overflow chained through ->more (see rmap_add()).
 */
struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};
173
174 static struct kmem_cache *pte_chain_cache;
175 static struct kmem_cache *rmap_desc_cache;
176 static struct kmem_cache *mmu_page_header_cache;
177
178 static u64 __read_mostly shadow_trap_nonpresent_pte;
179 static u64 __read_mostly shadow_notrap_nonpresent_pte;
180
/*
 * Record the two special "not present" shadow pte encodings chosen by the
 * vendor module: trap_pte faults to the hypervisor, notrap_pte does not.
 * All other pte values are considered present (see is_shadow_present_pte()).
 */
void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
187
/* Non-zero iff the guest has CR0.WP set (supervisor writes honor R/W bit). */
static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}
192
/* PSE-36 support is unconditionally reported to the guest here. */
static int is_cpuid_PSE36(void)
{
	return 1;
}
197
/* Non-zero iff the guest has EFER.NX enabled (NX bit valid in guest ptes). */
static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}
202
/* Architectural present bit of a (guest) pte. */
static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}
207
208 static int is_shadow_present_pte(u64 pte)
209 {
210         return pte != shadow_trap_nonpresent_pte
211                 && pte != shadow_notrap_nonpresent_pte;
212 }
213
/* Writable bit of a pte.  (Historic misspelling kept; callers use it.) */
static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
218
/* Dirty bit of a pte. */
static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}
223
/* A shadow pte participates in the rmap exactly when it is present. */
static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}
228
229 static gfn_t pse36_gfn_delta(u32 gpte)
230 {
231         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
232
233         return (gpte & PT32_DIR_PSE36_MASK) << shift;
234 }
235
/*
 * Atomically install a 64-bit shadow pte.  set_64bit() is used so that
 * a concurrent hardware walk never sees a torn 32-bit half-update; the
 * cast differs only because set_64bit's parameter type differs per arch.
 */
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
244
/*
 * Pre-fill @cache with slab objects from @base_cache so later allocations
 * (which may run in contexts where sleeping is undesirable) cannot fail.
 * Returns 0 if at least @min objects are cached, -ENOMEM otherwise.
 */
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}
260
261 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
262 {
263         while (mc->nobjs)
264                 kfree(mc->objects[--mc->nobjs]);
265 }
266
/*
 * Like mmu_topup_memory_cache() but caches whole pages (stored by their
 * kernel virtual address).  page->private is cleared because the shadow
 * page code later stores a kvm_mmu_page back-pointer there.
 */
static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}
283
284 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
285 {
286         while (mc->nobjs)
287                 free_page((unsigned long)mc->objects[--mc->nobjs]);
288 }
289
/*
 * Top up all four per-vcpu MMU caches (pte chains, rmap descriptors,
 * shadow pages, page headers).  Returns 0 on success or -ENOMEM; on
 * failure already-cached objects are kept for a later retry.
 */
static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}
310
/* Drain all four per-vcpu MMU caches (counterpart of the topup above). */
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}
318
/*
 * Pop one pre-allocated object from @mc and zero its first @size bytes.
 * BUG()s if the cache is empty - callers must have topped up beforehand.
 */
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}
329
/* Take a zeroed pte-chain node from the vcpu's pre-filled cache. */
static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}
335
/* Free a pte-chain node directly (not back into the per-vcpu cache). */
static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}
340
/* Take a zeroed rmap descriptor from the vcpu's pre-filled cache. */
static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}
346
/* Free an rmap descriptor directly (not back into the per-vcpu cache). */
static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}
351
/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	/* Index into the owning memslot's rmap array by slot-relative gfn. */
	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}
364
/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	/* Remember which gfn this slot of the shadow page maps. */
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		/* First mapping: store the spte pointer inline. */
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		/* Second mapping: promote the inline pointer to a desc. */
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		/* Chain already exists: append, growing the tail if full. */
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}
410
/*
 * Remove entry @i from @desc by moving the descriptor's last used entry
 * into its place; if the descriptor becomes empty, unlink and free it,
 * collapsing back to the inline-pointer encoding when only one descriptor
 * with one entry remains.
 */
static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	/* Find the last occupied slot (entries are packed from index 0). */
	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	/* Descriptor is now empty. */
	if (!prev_desc && !desc->more)
		/* Last descriptor, one pte left: store it inline again. */
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}
433
/*
 * Drop @spte's reverse mapping and release the host page reference it
 * held (dirty release if the spte was writable).  BUG()s if the spte is
 * not found in its gfn's rmap - that would mean the rmap is corrupt.
 */
static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	/* sp->gfns[] remembers the (unaliased) gfn this spte maps. */
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		/* Single inline pointer: must match, then clear. */
		rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		/* Walk the descriptor chain looking for the spte. */
		rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}
482
483 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
484 {
485         struct kvm_rmap_desc *desc;
486         struct kvm_rmap_desc *prev_desc;
487         u64 *prev_spte;
488         int i;
489
490         if (!*rmapp)
491                 return NULL;
492         else if (!(*rmapp & 1)) {
493                 if (!spte)
494                         return (u64 *)*rmapp;
495                 return NULL;
496         }
497         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
498         prev_desc = NULL;
499         prev_spte = NULL;
500         while (desc) {
501                 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
502                         if (prev_spte == spte)
503                                 return desc->shadow_ptes[i];
504                         prev_spte = desc->shadow_ptes[i];
505                 }
506                 desc = desc->more;
507         }
508         return NULL;
509 }
510
511 static void rmap_write_protect(struct kvm *kvm, u64 gfn)
512 {
513         unsigned long *rmapp;
514         u64 *spte;
515         int write_protected = 0;
516
517         gfn = unalias_gfn(kvm, gfn);
518         rmapp = gfn_to_rmap(kvm, gfn);
519
520         spte = rmap_next(kvm, rmapp, NULL);
521         while (spte) {
522                 BUG_ON(!spte);
523                 BUG_ON(!(*spte & PT_PRESENT_MASK));
524                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
525                 if (is_writeble_pte(*spte)) {
526                         set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
527                         write_protected = 1;
528                 }
529                 spte = rmap_next(kvm, rmapp, spte);
530         }
531         if (write_protected)
532                 kvm_flush_remote_tlbs(kvm);
533 }
534
#ifdef MMU_DEBUG
/*
 * Debug-only check: a shadow page about to be freed must contain only
 * trap-nonpresent entries.  Returns 1 if clean, 0 (after logging the
 * offender) otherwise.
 */
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (*pos != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif
550
/*
 * Free a shadow page: its spt and gfns pages plus the header itself,
 * after unlinking it from the active list.  Caller must already have
 * emptied the page (ASSERT checks this under MMU_DEBUG only).
 */
static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}
560
561 static unsigned kvm_page_table_hashfn(gfn_t gfn)
562 {
563         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
564 }
565
/*
 * Allocate a blank shadow page (header + spt page + gfns page) from the
 * vcpu's pre-filled caches - cannot fail, the caches BUG_ON emptiness.
 * spt's page->private points back at the header so page_header() works.
 */
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}
583
/*
 * Record @parent_pte as a parent of shadow page @sp.  A single parent is
 * stored inline in sp->parent_pte; on the second parent the page switches
 * to a chain of kvm_pte_chain nodes (sp->multimapped).
 */
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			/* First parent: keep it inline. */
			sp->parent_pte = parent_pte;
			return;
		}
		/* Second parent: convert inline pointer to a chain. */
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	/* Try to reuse a free slot in an existing chain node. */
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	/* All nodes full: prepend a fresh one. */
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}
620
/*
 * Remove @parent_pte from @sp's parent records, compacting the chain
 * node it came from and freeing the node if it empties; BUG()s if the
 * parent is not found (that would indicate corrupt bookkeeping).
 */
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			/* Found: shift the remaining entries down one. */
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				/* Node emptied: free it, maybe leave multimapped mode. */
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}
658
/*
 * Look up a live (non-metaphysical, non-invalid) shadow page for @gfn in
 * the hash table.  Returns NULL if none exists.
 */
static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, sp->role.word);
			return sp;
		}
	return NULL;
}
678
/*
 * Find or create the shadow page for (@gfn, role).  The role encodes
 * level, access, metaphysical-ness and - for 32-bit guests, whose page
 * tables cover less address space than the 64-bit shadow - a quadrant
 * disambiguating which part of the shadow the guest table backs.  A hash
 * hit just gains @parent_pte as an additional parent; a miss allocates a
 * fresh page, lets the mmu prefetch its ptes, and write-protects the
 * guest page so changes to it are trapped.
 */
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}
727
/*
 * Clear every pte in shadow page @sp.  Leaf pages (last level) drop
 * rmaps; higher-level pages unlink themselves as parents of the child
 * shadow pages they pointed at.  Remote TLBs are flushed afterwards.
 */
static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}
758
/* Drop one parent reference to @sp (thin wrapper kept for readability). */
static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}
763
/*
 * Invalidate every vcpu's cached last_pte_updated pointer - called after
 * zapping a page, since the pointer may now dangle into freed memory.
 */
static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}
772
/*
 * Tear down shadow page @sp: detach it from all parents (clearing each
 * parent pte), unlink all children, then either free it outright or -
 * if it is still a root for some vcpu - mark it invalid and ask remote
 * vcpus to reload their mmu so they drop the root.
 */
static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		sp->role.invalid = 1;
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
}
803
/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get dead lock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If the new limit is smaller than the number of pages currently
	 * in use, zap pages (oldest first, from the tail of the active
	 * list) until usage fits, and leave no free headroom; otherwise
	 * just grow the free-page budget by the difference.
	 */

	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
837
/*
 * Zap every non-metaphysical shadow page backed by guest page @gfn so the
 * guest may write to it without trapping.  Returns 1 if anything was
 * zapped, 0 otherwise.  Uses the _safe iterator because zapping unlinks
 * entries from the bucket being walked.
 */
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}
859
/* Zap shadow pages for @gfn until lookup finds none (re-looks-up because
 * zapping may restructure the hash bucket). */
static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}
869
/*
 * Mark, in the shadow page containing @pte, that it now maps a gfn
 * belonging to @gfn's memslot (used later to zap per-slot mappings).
 */
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}
877
/*
 * Translate guest virtual address @gva to the host page backing it,
 * using the current mmu's gva->gpa walker.  Returns NULL if the gva is
 * unmapped.  Takes mmap_sem for read around gfn_to_page, which may fault
 * in host memory.  The returned page holds a reference the caller must
 * release.
 */
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}
893
/*
 * Install or update the shadow pte at @shadow_pte so that it maps @gfn
 * to @page with the ACC_* permissions accumulated from the guest walk
 * (@pt_access for the path, @pte_access for the leaf).  @ptwrite, if
 * non-NULL, is set to 1 when a write fault hit a shadowed
 * (write-protected) guest page table.  The caller's reference on @page
 * is either handed over to the rmap or released here.
 */
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, gfn_t gfn, struct page *page)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		/* Backing pfn changed: the old rmap entry is stale. */
		if (host_pfn != page_to_pfn(page)) {
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		}
		else
			was_rmapped = 1;
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	/* Not guest-dirty yet: keep it read-only so the dirty-making
	 * write faults and can be tracked. */
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	/* NOTE(review): PT_PRESENT_MASK is already set above; redundant. */
	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		/* Kernel write to a user-shadowed gfn: drop the shadows
		 * rather than write-protect (the guest kernel may write
		 * freely). */
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		/* If @gfn is itself a shadowed page table, it must stay
		 * write-protected so writes to it are emulated. */
		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		/* New mapping: the page reference now lives in the rmap,
		 * unless the spte ended up non-rmappable. */
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		/* Existing mapping: drop the extra reference we took. */
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}
981
/*
 * cr3 switch with paging disabled: nothing to do, the single direct
 * root stays valid.
 */
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
985
/*
 * Walk the shadow (or TDP) table for guest physical address @v starting
 * at @level, building missing intermediate tables on the way, and
 * install a leaf pte for @gfn -> @page at level 1 via mmu_set_spte().
 *
 * Returns mmu_set_spte()'s pt_write result (1 if a shadowed page table
 * was written), or -ENOMEM if an intermediate table could not be
 * allocated.  Called under kvm->mmu_lock.
 */
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			   gfn_t gfn, struct page *page, int level)
{
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			/* Leaf level: install the final mapping. */
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, gfn, page);
			return pt_write;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			/* Direct tables have no guest counterpart; tag the
			 * intermediate page with a gfn derived from @v. */
			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}
1026
/*
 * Map gva @v (which equals the gpa, since the guest has paging off)
 * into the shadow tables.  Returns 1 for mmio faults (caller must
 * emulate), otherwise __direct_map()'s result.
 *
 * Lock order: slots_lock -> mmap_sem (for gfn_to_page) -> mmu_lock.
 */
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;

	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	/* Pin the backing page before taking the (non-sleeping) mmu_lock. */
	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	/* mmio */
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&vcpu->kvm->slots_lock);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, gfn, page, PT32E_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}
1055
1056
1057 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1058                                     struct kvm_mmu_page *sp)
1059 {
1060         int i;
1061
1062         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1063                 sp->spt[i] = shadow_trap_nonpresent_pte;
1064 }
1065
/*
 * Drop this vcpu's references on its shadow root pages and mark
 * root_hpa/pae_root invalid.  A root page is only zapped here once its
 * root_count reaches zero and it was already flagged role.invalid.
 */
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	/* Long mode: a single 4-level root page. */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	/* PAE: four pdptr roots hanging off the pae_root page. */
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}
1102
/*
 * Allocate (or look up) shadow root pages for the vcpu's current paging
 * mode and record them in root_hpa / pae_root.  With tdp enabled the
 * roots are metaphysical (direct maps) since there are no guest page
 * tables to shadow.  Called under kvm->mmu_lock (see kvm_mmu_load()).
 */
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int metaphysical = 0;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	/* Long mode: one 4-level root rooted at cr3's gfn. */
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			metaphysical = 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	/* PAE/32-bit: four roots, one per pdptr (or synthesized when the
	 * guest isn't using PAE). */
	metaphysical = !is_paging(vcpu);
	if (tdp_enabled)
		metaphysical = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			/* Guest pdptr not present: leave this slot empty. */
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, metaphysical,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}
1152
/* With guest paging disabled, guest virtual == guest physical. */
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}
1157
/*
 * Page fault handler for nonpaging guests: the faulting gva is already
 * a gpa, so just map the containing page directly.
 */
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
	/* Refill the allocation caches before any mapping work. */
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}
1177
/*
 * Page fault handler when two-dimensional paging is active: @gpa is a
 * guest physical address (the hardware already did the guest walk).
 * Returns 1 for mmio faults (caller must emulate), otherwise
 * __direct_map()'s result.
 */
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
				u32 error_code)
{
	struct page *page;
	int r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/* Pin the backing page before taking the non-sleeping mmu_lock. */
	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		up_read(&current->mm->mmap_sem);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 gpa >> PAGE_SHIFT, page, TDP_ROOT_LEVEL);
	spin_unlock(&vcpu->kvm->mmu_lock);
	up_read(&current->mm->mmap_sem);

	return r;
}
1207
/* Context teardown for nonpaging mode: just drop the shadow roots. */
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
1212
1213 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1214 {
1215         struct kvm_mmu *context = &vcpu->arch.mmu;
1216
1217         context->new_cr3 = nonpaging_new_cr3;
1218         context->page_fault = nonpaging_page_fault;
1219         context->gva_to_gpa = nonpaging_gva_to_gpa;
1220         context->free = nonpaging_free;
1221         context->prefetch_page = nonpaging_prefetch_page;
1222         context->root_level = 0;
1223         context->shadow_root_level = PT32E_ROOT_LEVEL;
1224         context->root_hpa = INVALID_PAGE;
1225         return 0;
1226 }
1227
/* Flush this vcpu's TLB (via the vendor hook) and count the event. */
void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}
1233
/*
 * cr3 switch with paging enabled: drop the old shadow roots; fresh
 * ones are built later via mmu_alloc_roots() (see kvm_mmu_load()).
 */
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}
1239
/*
 * Thin wrapper around kvm_inject_page_fault(); kept so the paging
 * templates have a local symbol with this signature to call.
 */
static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}
1246
/* Context teardown is identical for paging and nonpaging modes. */
static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}
1251
1252 #define PTTYPE 64
1253 #include "paging_tmpl.h"
1254 #undef PTTYPE
1255
1256 #define PTTYPE 32
1257 #include "paging_tmpl.h"
1258 #undef PTTYPE
1259
1260 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
1261 {
1262         struct kvm_mmu *context = &vcpu->arch.mmu;
1263
1264         ASSERT(is_pae(vcpu));
1265         context->new_cr3 = paging_new_cr3;
1266         context->page_fault = paging64_page_fault;
1267         context->gva_to_gpa = paging64_gva_to_gpa;
1268         context->prefetch_page = paging64_prefetch_page;
1269         context->free = paging_free;
1270         context->root_level = level;
1271         context->shadow_root_level = level;
1272         context->root_hpa = INVALID_PAGE;
1273         return 0;
1274 }
1275
/* 4-level (long mode) guest paging. */
static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}
1280
1281 static int paging32_init_context(struct kvm_vcpu *vcpu)
1282 {
1283         struct kvm_mmu *context = &vcpu->arch.mmu;
1284
1285         context->new_cr3 = paging_new_cr3;
1286         context->page_fault = paging32_page_fault;
1287         context->gva_to_gpa = paging32_gva_to_gpa;
1288         context->free = paging_free;
1289         context->prefetch_page = paging32_prefetch_page;
1290         context->root_level = PT32_ROOT_LEVEL;
1291         context->shadow_root_level = PT32E_ROOT_LEVEL;
1292         context->root_hpa = INVALID_PAGE;
1293         return 0;
1294 }
1295
/* 3-level PAE guest paging: same entry format as 64-bit. */
static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
1300
1301 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1302 {
1303         struct kvm_mmu *context = &vcpu->arch.mmu;
1304
1305         context->new_cr3 = nonpaging_new_cr3;
1306         context->page_fault = tdp_page_fault;
1307         context->free = nonpaging_free;
1308         context->prefetch_page = nonpaging_prefetch_page;
1309         context->shadow_root_level = TDP_ROOT_LEVEL;
1310         context->root_hpa = INVALID_PAGE;
1311
1312         if (!is_paging(vcpu)) {
1313                 context->gva_to_gpa = nonpaging_gva_to_gpa;
1314                 context->root_level = 0;
1315         } else if (is_long_mode(vcpu)) {
1316                 context->gva_to_gpa = paging64_gva_to_gpa;
1317                 context->root_level = PT64_ROOT_LEVEL;
1318         } else if (is_pae(vcpu)) {
1319                 context->gva_to_gpa = paging64_gva_to_gpa;
1320                 context->root_level = PT32E_ROOT_LEVEL;
1321         } else {
1322                 context->gva_to_gpa = paging32_gva_to_gpa;
1323                 context->root_level = PT32_ROOT_LEVEL;
1324         }
1325
1326         return 0;
1327 }
1328
1329 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
1330 {
1331         ASSERT(vcpu);
1332         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
1333
1334         if (!is_paging(vcpu))
1335                 return nonpaging_init_context(vcpu);
1336         else if (is_long_mode(vcpu))
1337                 return paging64_init_context(vcpu);
1338         else if (is_pae(vcpu))
1339                 return paging32E_init_context(vcpu);
1340         else
1341                 return paging32_init_context(vcpu);
1342 }
1343
1344 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1345 {
1346         if (tdp_enabled)
1347                 return init_kvm_tdp_mmu(vcpu);
1348         else
1349                 return init_kvm_softmmu(vcpu);
1350 }
1351
1352 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1353 {
1354         ASSERT(vcpu);
1355         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1356                 vcpu->arch.mmu.free(vcpu);
1357                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1358         }
1359 }
1360
/*
 * Rebuild the MMU context from scratch, e.g. after the guest changed
 * its paging mode (cr0/cr4/efer).
 */
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
1367
/*
 * Allocate shadow roots for the current mode and point the hardware at
 * them.  The memory caches are topped up first because allocations
 * under the mmu_lock spinlock must not sleep.  Returns 0 or a negative
 * error from mmu_topup_memory_caches().
 */
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);
1385
/* Release the vcpu's shadow roots (counterpart of kvm_mmu_load()). */
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
1390
/*
 * Clear one shadow pte of @sp while emulating a guest pte write.  A
 * leaf pte loses its rmap entry; a non-leaf pte detaches the child
 * shadow page from this parent.  The slot always ends up as the
 * "not present" trap pte.
 */
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}
1409
/*
 * Re-instantiate a shadow pte from the guest pte value just written.
 * Only last-level ptes are updated in place; a write to a pde leaves
 * the entry zapped (the caller already cleared it) and is merely
 * counted.  The guest level count selects the 32- vs 64-bit decoder.
 */
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}
1426
1427 static bool need_remote_flush(u64 old, u64 new)
1428 {
1429         if (!is_shadow_present_pte(old))
1430                 return false;
1431         if (!is_shadow_present_pte(new))
1432                 return true;
1433         if ((old ^ new) & PT64_BASE_ADDR_MASK)
1434                 return true;
1435         old ^= PT64_NX_MASK;
1436         new ^= PT64_NX_MASK;
1437         return (old & ~new & PT64_PERM_MASK) != 0;
1438 }
1439
/*
 * Flush after a shadow pte change: all cpus if the change may have
 * revoked access another cpu cached, otherwise just this vcpu.
 */
static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}
1447
1448 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1449 {
1450         u64 *spte = vcpu->arch.last_pte_updated;
1451
1452         return !!(spte && (*spte & PT_ACCESSED_MASK));
1453 }
1454
/*
 * Speculatively pin the host page that the guest pte being written
 * points at, before kvm_mmu_pte_write() takes mmu_lock (gfn_to_page()
 * can block, so it must happen first).  The result is stashed in
 * vcpu->arch.update_pte for update_pte() to consume;
 * kvm_mmu_pte_write() drops it if it goes unused.
 */
static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	/* Only whole-pte-sized writes can carry a complete gpte. */
	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&vcpu->kvm->slots_lock);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&vcpu->kvm->slots_lock);

	if (is_error_page(page)) {
		kvm_release_page_clean(page);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}
1501
/*
 * Emulate a guest write of @bytes at @gpa into a (potentially) shadowed
 * page table: update every shadow pte that mirrors the written guest
 * pte(s).  Pages that see misaligned writes or a write flood are zapped
 * instead of tracked, on the theory that they are no longer being used
 * as page tables.
 */
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	/* Pin the target of the new gpte before taking mmu_lock. */
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	/* Flood detection: repeated writes to the same gfn with no
	 * intervening access to the emulated pte. */
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		/* Set if the write crosses a pte boundary or is partial. */
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			/* Each shadow page covers one quarter of a 32-bit
			 * table; skip pages for the other quadrants. */
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		/* Partial write: fetch the whole gpte from guest memory. */
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	/* Drop the speculative pin if update_pte() didn't consume it. */
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}
1609
1610 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1611 {
1612         gpa_t gpa;
1613         int r;
1614
1615         down_read(&vcpu->kvm->slots_lock);
1616         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1617         up_read(&vcpu->kvm->slots_lock);
1618
1619         spin_lock(&vcpu->kvm->mmu_lock);
1620         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1621         spin_unlock(&vcpu->kvm->mmu_lock);
1622         return r;
1623 }
1624
/*
 * Recycle shadow pages until the free pool is back above
 * KVM_REFILL_PAGES, zapping from the tail of the active list.
 * Called under kvm->mmu_lock.
 */
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}
1636
/*
 * Top-level guest page fault handler.  The mode-specific handler
 * returns < 0 on error, 0 when it fixed the fault (we then return 1 so
 * the guest is resumed), and > 0 when the access must be emulated
 * (mmio, or a write to a shadowed page table).
 *
 * Return to the caller: 1 = handled / resume guest, 0 = userspace must
 * complete an mmio, < 0 = error.
 */
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	/* The emulator may need the caches the fault path drained. */
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1673
/*
 * Switch the MMU to two-dimensional paging; see the tdp_enabled
 * comment at the top of this file.  Must be called before any vcpu
 * MMU context is initialized to take effect.
 */
void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1679
/*
 * Free the vcpu's pae_root page after zapping every shadow page of the
 * VM.  NOTE(review): despite the per-vcpu argument, this walks the
 * kvm-wide active_mmu_pages list — presumably only safe on teardown
 * paths; verify against the callers.
 */
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}
1691
/*
 * Per-vcpu MMU allocations: initialize the free shadow page accounting
 * and allocate the page that backs pae_root (the four PAE pdptr
 * entries).  Returns 0 on success or -ENOMEM.
 */
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	/* A userspace-requested page budget overrides the default. */
	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}
1723
/* vcpu creation hook: set up the MMU's per-vcpu allocations. */
int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}
1731
/* vcpu setup hook: build the initial MMU context for the vcpu. */
int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}
1739
/*
 * vcpu destruction hook: tear down the MMU context, the shadow pages
 * and pae_root, and the per-vcpu memory caches.
 */
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}
1748
/*
 * Write-protect every shadow pte that maps memory from memslot @slot
 * (e.g. when dirty logging is enabled).  slot_bitmap identifies which
 * shadow pages hold translations for the slot.
 * NOTE(review): no locking or TLB flush here — presumably the caller
 * provides both; confirm at the call sites.
 */
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}
1767
1768 void kvm_mmu_zap_all(struct kvm *kvm)
1769 {
1770         struct kvm_mmu_page *sp, *node;
1771
1772         spin_lock(&kvm->mmu_lock);
1773         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
1774                 kvm_mmu_zap_page(kvm, sp);
1775         spin_unlock(&kvm->mmu_lock);
1776
1777         kvm_flush_remote_tlbs(kvm);
1778 }
1779
/*
 * Destroy the slab caches created by kvm_mmu_module_init().  This is
 * also used as that function's error path, so any of the caches may
 * not have been created yet — hence the NULL checks before each
 * kmem_cache_destroy() call.
 */
void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}
1789
1790 int kvm_mmu_module_init(void)
1791 {
1792         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1793                                             sizeof(struct kvm_pte_chain),
1794                                             0, 0, NULL);
1795         if (!pte_chain_cache)
1796                 goto nomem;
1797         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1798                                             sizeof(struct kvm_rmap_desc),
1799                                             0, 0, NULL);
1800         if (!rmap_desc_cache)
1801                 goto nomem;
1802
1803         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1804                                                   sizeof(struct kvm_mmu_page),
1805                                                   0, 0, NULL);
1806         if (!mmu_page_header_cache)
1807                 goto nomem;
1808
1809         return 0;
1810
1811 nomem:
1812         kvm_mmu_module_exit();
1813         return -ENOMEM;
1814 }
1815
/*
 * Calculate mmu pages needed for kvm.
 */
1819 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1820 {
1821         int i;
1822         unsigned int nr_mmu_pages;
1823         unsigned int  nr_pages = 0;
1824
1825         for (i = 0; i < kvm->nmemslots; i++)
1826                 nr_pages += kvm->memslots[i].npages;
1827
1828         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1829         nr_mmu_pages = max(nr_mmu_pages,
1830                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1831
1832         return nr_mmu_pages;
1833 }
1834
1835 #ifdef AUDIT
1836
1837 static const char *audit_msg;
1838
/*
 * Put @gva into x86_64 canonical form by sign-extending its top
 * implemented bit (the shift-left/arithmetic-shift-right by 16
 * replicates bit 47 into bits 48-63).  No-op on 32-bit builds.
 */
static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}
1846
/*
 * Recursively walk one level of a shadow page table and cross-check
 * each leaf spte against the translation the guest mmu reports.
 *
 * @page_pte: shadow pte whose address field points at this level's table
 * @va:       guest-virtual address covered by the table's first entry
 * @level:    page-table level being walked (1 == leaf level)
 */
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	/* Virtual-address span of one entry: 512 entries per level. */
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			/* "notrap" markers are only valid in leaf entries. */
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			/* Leaf: translate via the guest mmu and compare hpas. */
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			/* Present spte must point at the page the guest maps. */
			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			/* A notrap marker for a gva the guest maps is wrong too. */
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);

		}
	}
}
1890
1891 static void audit_mappings(struct kvm_vcpu *vcpu)
1892 {
1893         unsigned i;
1894
1895         if (vcpu->arch.mmu.root_level == 4)
1896                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
1897         else
1898                 for (i = 0; i < 4; ++i)
1899                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
1900                                 audit_mappings_page(vcpu,
1901                                                     vcpu->arch.mmu.pae_root[i],
1902                                                     i << 30,
1903                                                     2);
1904 }
1905
/*
 * Count the sptes recorded in the reverse map of every memslot page.
 *
 * rmap word encoding (as read here): zero means no mappings; low bit
 * clear means the word is itself a single spte pointer; low bit set
 * means the word (minus the tag bit) points at a kvm_rmap_desc chain,
 * each descriptor holding up to RMAP_EXT sptes.
 */
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				/* Single spte, no descriptor chain. */
				++nmaps;
				continue;
			}
			/* Strip the tag bit to recover the descriptor pointer. */
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				/* Counting stops at the first empty slot. */
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}
1937
1938 static int count_writable_mappings(struct kvm_vcpu *vcpu)
1939 {
1940         int nmaps = 0;
1941         struct kvm_mmu_page *sp;
1942         int i;
1943
1944         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
1945                 u64 *pt = sp->spt;
1946
1947                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
1948                         continue;
1949
1950                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1951                         u64 ent = pt[i];
1952
1953                         if (!(ent & PT_PRESENT_MASK))
1954                                 continue;
1955                         if (!(ent & PT_WRITABLE_MASK))
1956                                 continue;
1957                         ++nmaps;
1958                 }
1959         }
1960         return nmaps;
1961 }
1962
1963 static void audit_rmap(struct kvm_vcpu *vcpu)
1964 {
1965         int n_rmap = count_rmaps(vcpu);
1966         int n_actual = count_writable_mappings(vcpu);
1967
1968         if (n_rmap != n_actual)
1969                 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
1970                        __FUNCTION__, audit_msg, n_rmap, n_actual);
1971 }
1972
/*
 * For every shadowed guest page-table gfn, complain if its rmap is
 * non-empty — per the message below that indicates the page still has
 * writable mappings even though it is being shadowed.  Pages with
 * role.metaphysical set are skipped (they have no guest gfn to check —
 * NOTE(review): inferred from the role flag; confirm).
 */
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		/* Locate this gfn's rmap entry within its memslot. */
		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}
1994
1995 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
1996 {
1997         int olddbg = dbg;
1998
1999         dbg = 0;
2000         audit_msg = msg;
2001         audit_rmap(vcpu);
2002         audit_write_protection(vcpu);
2003         audit_mappings(vcpu);
2004         dbg = olddbg;
2005 }
2006
2007 #endif