KVM: Prepare memslot data structures for multiple hugepage sizes
arch/x86/kvm/mmu.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When set to true, this variable enables Two-Dimensional Paging, where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
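
/*
 * Worked example (editor's note, assuming the x86 default PAGE_SHIFT of
 * 12): PT64_LEVEL_SHIFT(1) == 12 and PT64_LEVEL_SHIFT(2) == 21, so
 * PT64_INDEX(addr, 2) extracts bits 21..29 of addr -- the 9-bit index of
 * the PDE that maps the 2 MB region containing addr.
 */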


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
        u64 *sptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
        u64 addr;
        hpa_t shadow_addr;
        int level;
        u64 *sptep;
        unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))
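
/*
 * Typical use (editor's illustration, not taken from this file): walk
 * the shadow page table for a guest address and inspect each spte,
 *
 *      struct kvm_shadow_walk_iterator it;
 *      for_each_shadow_entry(vcpu, addr, it)
 *              if (!is_shadow_present_pte(*it.sptep))
 *                      break;
 */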


struct kvm_unsync_walk {
        int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};

typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}
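
/*
 * E.g. rsvd_bits(52, 62) == 0x7ff0000000000000ULL, i.e. a mask with
 * bits 52..62 set (editor's note).
 */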

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
        shadow_trap_nonpresent_pte = trap_pte;
        shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
        shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
        return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
        return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;
        if (level == PT_DIRECTORY_LEVEL && is_large_pte(pte))
                return 1;
        return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
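
/*
 * Editor's note: with PSE-36, a large-page gpte carries physical address
 * bits 32..39 in its low reserved bits.  For the usual values
 * (PT32_DIR_PSE36_SHIFT == 13, PAGE_SHIFT == 12) the shift above is
 * 32 - 13 - 12 == 7, moving gpte bits 13..20 to gfn bits 20..27, which
 * correspond to physical address bits 32..39.
 */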

static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
#else
        set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_page_private(page, 0);
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
        unsigned long idx;

        idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
              (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
        return &slot->lpage_info[0][idx].write_count;
}
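
/*
 * Worked example (editor's note, assuming 4 KB base pages and 2 MB huge
 * pages, so KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) == 512): for a slot
 * with base_gfn 0x800, gfn 0xa00 gives idx = 0xa00/512 - 0x800/512
 * = 5 - 4 = 1, i.e. the second huge-page-sized region of the slot.
 */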

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
        int *write_count;

        gfn = unalias_gfn(kvm, gfn);
        write_count = slot_largepage_idx(gfn,
                                         gfn_to_memslot_unaliased(kvm, gfn));
        *write_count += 1;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
        int *write_count;

        gfn = unalias_gfn(kvm, gfn);
        write_count = slot_largepage_idx(gfn,
                                         gfn_to_memslot_unaliased(kvm, gfn));
        *write_count -= 1;
        WARN_ON(*write_count < 0);
}

static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int *largepage_idx;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (slot) {
                largepage_idx = slot_largepage_idx(gfn, slot);
                return *largepage_idx;
        }

        return 1;
}

static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
{
        struct vm_area_struct *vma;
        unsigned long addr;
        int ret = 0;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return ret;

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, addr);
        if (vma && is_vm_hugetlb_page(vma))
                ret = 1;
        up_read(&current->mm->mmap_sem);

        return ret;
}

static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        struct kvm_memory_slot *slot;

        if (has_wrprotected_page(vcpu->kvm, large_gfn))
                return 0;

        if (!host_largepage_backed(vcpu->kvm, large_gfn))
                return 0;

        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
        if (slot && slot->dirty_bitmap)
                return 0;

        return 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
{
        struct kvm_memory_slot *slot;
        unsigned long idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (!lpage)
                return &slot->rmap[gfn - slot->base_gfn];

        idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
              (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));

        return &slot->lpage_info[0][idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added or zero if
 * the spte was not added.
 *
 */
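/*
 * Editor's sketch of the encoding as an rmap chain grows:
 *
 *      no sptes        *rmapp == 0
 *      one spte        *rmapp == (unsigned long)spte
 *      many sptes      *rmapp == (unsigned long)desc | 1, where
 *                      desc->sptes[] holds up to RMAP_EXT entries and
 *                      desc->more chains further descriptors.
 */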
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
{
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
        int i, count = 0;

        if (!is_rmap_spte(*spte))
                return count;
        gfn = unalias_gfn(vcpu->kvm, gfn);
        sp = page_header(__pa(spte));
        sp->gfns[spte - sp->spt] = gfn;
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->sptes[0] = (u64 *)*rmapp;
                desc->sptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->sptes[RMAP_EXT-1] && desc->more) {
                        desc = desc->more;
                        count += RMAP_EXT;
                }
                if (desc->sptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
                        ;
                desc->sptes[i] = spte;
        }
        return count;
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
        desc->sptes[i] = desc->sptes[j];
        desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *rmapp = (unsigned long)desc->sptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
        pfn_t pfn;
        unsigned long *rmapp;
        int i;

        if (!is_rmap_spte(*spte))
                return;
        sp = page_header(__pa(spte));
        pfn = spte_to_pfn(*spte);
        if (*spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writeble_pte(*spte))
                kvm_release_pfn_dirty(pfn);
        else
                kvm_release_pfn_clean(pfn);
        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
                if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
                                if (desc->sptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        u64 *prev_spte;
        int i;

        if (!*rmapp)
                return NULL;
        else if (!(*rmapp & 1)) {
                if (!spte)
                        return (u64 *)*rmapp;
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
        prev_desc = NULL;
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
                        if (prev_spte == spte)
                                return desc->sptes[i];
                        prev_spte = desc->sptes[i];
                }
                desc = desc->more;
        }
        return NULL;
}

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        unsigned long *rmapp;
        u64 *spte;
        int write_protected = 0;

        gfn = unalias_gfn(kvm, gfn);
        rmapp = gfn_to_rmap(kvm, gfn, 0);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writeble_pte(*spte)) {
                        __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        if (write_protected) {
                pfn_t pfn;

                spte = rmap_next(kvm, rmapp, NULL);
                pfn = spte_to_pfn(*spte);
                kvm_set_pfn_dirty(pfn);
        }

        /* check for huge page mappings */
        rmapp = gfn_to_rmap(kvm, gfn, 1);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                if (is_writeble_pte(*spte)) {
                        rmap_remove(kvm, spte);
                        --kvm->stat.lpages;
                        __set_spte(spte, shadow_trap_nonpresent_pte);
                        spte = NULL;
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }

        return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
        u64 *spte;
        int need_tlb_flush = 0;

        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
                rmap_remove(kvm, spte);
                __set_spte(spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
        int i;
        int retval = 0;

        /*
         * If mmap_sem isn't taken, we can look at the memslots with only
         * the mmu_lock by skipping over the slots with userspace_addr == 0.
         */
        for (i = 0; i < kvm->nmemslots; i++) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                /* mmu_lock protects userspace_addr */
                if (!start)
                        continue;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
                        int idx = gfn_offset /
                                  KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
                        retval |= handler(kvm, &memslot->rmap[gfn_offset]);
                        retval |= handler(kvm,
                                        &memslot->lpage_info[0][idx].rmap_pde);
                }
        }

        return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
        u64 *spte;
        int young = 0;

        /* always return old for EPT */
        if (!shadow_accessed_mask)
                return 0;

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                int _young;
                u64 _spte = *spte;
                BUG_ON(!(_spte & PT_PRESENT_MASK));
                _young = _spte & PT_ACCESSED_MASK;
                if (_young) {
                        young = 1;
                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, gfn_t gfn, int lpage)
{
        unsigned long *rmapp;

        gfn = unalias_gfn(vcpu->kvm, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);

        kvm_unmap_rmapp(vcpu->kvm, rmapp);
        kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (is_shadow_present_pte(*pos)) {
                        printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ASSERT(is_empty_shadow_page(sp->spt));
        list_del(&sp->link);
        __free_page(virt_to_page(sp->spt));
        __free_page(virt_to_page(sp->gfns));
        kfree(sp);
        ++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *sp;

        sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
        sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        INIT_LIST_HEAD(&sp->oos_link);
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
        --vcpu->kvm->arch.n_free_mmu_pages;
        return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!sp->multimapped) {
                u64 *old = sp->parent_pte;

                if (!old) {
                        sp->parent_pte = parent_pte;
                        return;
                }
                sp->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&sp->parent_ptes);
                hlist_add_head(&pte_chain->link, &sp->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &sp->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!sp->multimapped) {
                BUG_ON(sp->parent_pte != parent_pte);
                sp->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                                && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&sp->parent_ptes)) {
                                        sp->multimapped = 0;
                                        sp->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}


static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                            mmu_parent_walk_fn fn)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        struct kvm_mmu_page *parent_sp;
        int i;

        if (!sp->multimapped && sp->parent_pte) {
                parent_sp = page_header(__pa(sp->parent_pte));
                fn(vcpu, parent_sp);
                mmu_parent_walk(vcpu, parent_sp, fn);
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
                        fn(vcpu, parent_sp);
                        mmu_parent_walk(vcpu, parent_sp, fn);
                }
}

static void kvm_mmu_update_unsync_bitmap(u64 *spte)
{
        unsigned int index;
        struct kvm_mmu_page *sp = page_header(__pa(spte));

        index = spte - sp->spt;
        if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
                sp->unsync_children++;
        WARN_ON(!sp->unsync_children);
}

static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!sp->parent_pte)
                return;

        if (!sp->multimapped) {
                kvm_mmu_update_unsync_bitmap(sp->parent_pte);
                return;
        }

        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
                }
}

static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        kvm_mmu_update_parents_unsync(sp);
        return 1;
}

static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
                                        struct kvm_mmu_page *sp)
{
        mmu_parent_walk(vcpu, sp, unsync_walk_fn);
        kvm_mmu_update_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
                               struct kvm_mmu_page *sp)
{
        return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
        struct mmu_page_and_offset {
                struct kvm_mmu_page *sp;
                unsigned int idx;
        } page[KVM_PAGE_ARRAY_NR];
        unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx)           \
        for (idx = find_first_bit(bitmap, 512);         \
             idx < 512;                                 \
             idx = find_next_bit(bitmap, 512, idx+1))
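
/*
 * Editor's note: 512 here is the number of entries in a shadow page
 * table page (PT64_ENT_PER_PAGE), i.e. the width of unsync_child_bitmap.
 */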

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
                         int idx)
{
        int i;

        if (sp->unsync)
                for (i=0; i < pvec->nr; i++)
                        if (pvec->page[i].sp == sp)
                                return 0;

        pvec->page[pvec->nr].sp = sp;
        pvec->page[pvec->nr].idx = idx;
        pvec->nr++;
        return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
{
        int i, ret, nr_unsync_leaf = 0;

        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                u64 ent = sp->spt[i];

                if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
                        struct kvm_mmu_page *child;
                        child = page_header(ent & PT64_BASE_ADDR_MASK);

                        if (child->unsync_children) {
                                if (mmu_pages_add(pvec, child, i))
                                        return -ENOSPC;

                                ret = __mmu_unsync_walk(child, pvec);
                                if (!ret)
                                        __clear_bit(i, sp->unsync_child_bitmap);
                                else if (ret > 0)
                                        nr_unsync_leaf += ret;
                                else
                                        return ret;
                        }

                        if (child->unsync) {
                                nr_unsync_leaf++;
                                if (mmu_pages_add(pvec, child, i))
                                        return -ENOSPC;
                        }
                }
        }

        if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
                sp->unsync_children = 0;

        return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
{
        if (!sp->unsync_children)
                return 0;

        mmu_pages_add(pvec, sp, 0);
        return __mmu_unsync_walk(sp, pvec);
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.direct
                    && !sp->role.invalid) {
                        pgprintk("%s: found role %x\n",
                                 __func__, sp->role.word);
                        return sp;
                }
        return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        WARN_ON(!sp->unsync);
        sp->unsync = 0;
        --kvm->stat.mmu_unsync;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        if (sp->role.glevels != vcpu->arch.mmu.root_level) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }

        if (rmap_write_protect(vcpu->kvm, sp->gfn))
                kvm_flush_remote_tlbs(vcpu->kvm);
        kvm_unlink_unsync_page(vcpu->kvm, sp);
        if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
                kvm_mmu_zap_page(vcpu->kvm, sp);
                return 1;
        }

        kvm_mmu_flush_tlb(vcpu);
        return 0;
}

struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
        unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)                       \
                for (i = mmu_pages_next(&pvec, &parents, -1),   \
                        sp = pvec.page[i].sp;                   \
                        i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
                        i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
                          struct mmu_page_path *parents,
                          int i)
{
        int n;

        for (n = i+1; n < pvec->nr; n++) {
                struct kvm_mmu_page *sp = pvec->page[n].sp;

                if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
                        parents->idx[0] = pvec->page[n].idx;
                        return n;
                }

                parents->parent[sp->role.level-2] = sp;
                parents->idx[sp->role.level-1] = pvec->page[n].idx;
        }

        return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
        struct kvm_mmu_page *sp;
        unsigned int level = 0;

        do {
                unsigned int idx = parents->idx[level];

                sp = parents->parent[level];
                if (!sp)
                        return;

                --sp->unsync_children;
                WARN_ON((int)sp->unsync_children < 0);
                __clear_bit(idx, sp->unsync_child_bitmap);
                level++;
        } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
                               struct mmu_page_path *parents,
                               struct kvm_mmu_pages *pvec)
{
        parents->parent[parent->role.level-1] = NULL;
        pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *parent)
{
        int i;
        struct kvm_mmu_page *sp;
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;

        kvm_mmu_pages_init(parent, &parents, &pages);
        while (mmu_unsync_walk(parent, &pages)) {
                int protected = 0;

                for_each_sp(pages, sp, parents, i)
                        protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

                if (protected)
                        kvm_flush_remote_tlbs(vcpu->kvm);

                for_each_sp(pages, sp, parents, i) {
                        kvm_sync_page(vcpu, sp);
                        mmu_pages_clear_parents(&parents);
                }
                cond_resched_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int direct,
                                             unsigned access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *tmp;

        role = vcpu->arch.mmu.base_role;
        role.level = level;
        role.direct = direct;
        role.access = access;
        if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __func__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
                if (sp->gfn == gfn) {
                        if (sp->unsync)
                                if (kvm_sync_page(vcpu, sp))
                                        continue;

                        if (sp->role.word != role.word)
                                continue;

                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                        if (sp->unsync_children) {
                                set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
                                kvm_mmu_mark_parents_unsync(vcpu, sp);
                        }
                        pgprintk("%s: found\n", __func__);
                        return sp;
                }
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!sp)
                return sp;
        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
                account_shadowed(vcpu->kvm, gfn);
        }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else
                nonpaging_prefetch_page(vcpu, sp);
        return sp;
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
                             struct kvm_vcpu *vcpu, u64 addr)
{
        iterator->addr = addr;
        iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
        iterator->level = vcpu->arch.mmu.shadow_root_level;
        if (iterator->level == PT32E_ROOT_LEVEL) {
                iterator->shadow_addr
                        = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
                --iterator->level;
                if (!iterator->shadow_addr)
                        iterator->level = 0;
        }
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
        if (iterator->level < PT_PAGE_TABLE_LEVEL)
                return false;

        if (iterator->level == PT_PAGE_TABLE_LEVEL)
                if (is_large_pte(*iterator->sptep))
                        return false;

        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
        iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
        return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
        iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
        --iterator->level;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *sp)
{
        unsigned i;
        u64 *pt;
        u64 ent;

        pt = sp->spt;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];

                if (is_shadow_present_pte(ent)) {
                        if (!is_last_spte(ent, sp->role.level)) {
                                ent &= PT64_BASE_ADDR_MASK;
                                mmu_page_remove_parent_pte(page_header(ent),
                                                           &pt[i]);
                        } else {
                                if (is_large_pte(ent))
                                        --kvm->stat.lpages;
                                rmap_remove(kvm, &pt[i]);
                        }
                }
                pt[i] = shadow_trap_nonpresent_pte;
        }
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
        mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                vcpu->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        u64 *parent_pte;

        while (sp->multimapped || sp->parent_pte) {
                if (!sp->multimapped)
                        parent_pte = sp->parent_pte;
                else {
                        struct kvm_pte_chain *chain;

                        chain = container_of(sp->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(sp, parent_pte);
                __set_spte(parent_pte, shadow_trap_nonpresent_pte);
        }
}

static int mmu_zap_unsync_children(struct kvm *kvm,
                                   struct kvm_mmu_page *parent)
{
        int i, zapped = 0;
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;

        if (parent->role.level == PT_PAGE_TABLE_LEVEL)
                return 0;

        kvm_mmu_pages_init(parent, &parents, &pages);
        while (mmu_unsync_walk(parent, &pages)) {
                struct kvm_mmu_page *sp;

                for_each_sp(pages, sp, parents, i) {
                        kvm_mmu_zap_page(kvm, sp);
                        mmu_pages_clear_parents(&parents);
                }
                zapped += pages.nr;
                kvm_mmu_pages_init(parent, &parents, &pages);
        }

        return zapped;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        int ret;
        ++kvm->stat.mmu_shadow_zapped;
        ret = mmu_zap_unsync_children(kvm, sp);
        kvm_mmu_page_unlink_children(kvm, sp);
        kvm_mmu_unlink_parents(kvm, sp);
        kvm_flush_remote_tlbs(kvm);
        if (!sp->role.invalid && !sp->role.direct)
                unaccount_shadowed(kvm, sp->gfn);
        if (sp->unsync)
                kvm_unlink_unsync_page(kvm, sp);
        if (!sp->root_count) {
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
        } else {
                sp->role.invalid = 1;
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
                kvm_reload_remote_mmus(kvm);
        }
        kvm_mmu_reset_last_pte_updated(kvm);
        return ret;
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
        int used_pages;

        used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
        used_pages = max(0, used_pages);

        /*
         * If we set the number of mmu pages to be smaller than the
         * number of active pages, we must free some mmu pages before we
         * can change the value.
         */

        if (used_pages > kvm_nr_mmu_pages) {
                while (used_pages > kvm_nr_mmu_pages) {
                        struct kvm_mmu_page *page;

                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
                        kvm_mmu_zap_page(kvm, page);
                        used_pages--;
                }
                kvm->arch.n_free_mmu_pages = 0;
        }
        else
                kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
                                         - kvm->arch.n_alloc_mmu_pages;

        kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *n;
        int r;

        pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.direct) {
                        pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
                        r = 1;
                        if (kvm_mmu_zap_page(kvm, sp))
                                n = bucket->first;
                }
        return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *sp;
        struct hlist_node *node, *nn;

        index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
                if (sp->gfn == gfn && !sp->role.direct
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
                        kvm_mmu_zap_page(kvm, sp);
                }
        }
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
        struct kvm_mmu_page *sp = page_header(__pa(pte));

        __set_bit(slot, sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
        int i;
        u64 *pt = sp->spt;

        if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
                return;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                if (pt[i] == shadow_notrap_nonpresent_pte)
                        __set_spte(&pt[i], shadow_trap_nonpresent_pte);
        }
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct page *page;

        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return NULL;

        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

        return page;
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
                         u64 start, u64 end)
{
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;
        int num_var_ranges = KVM_NR_VAR_MTRR;

        if (!mtrr_state->enabled)
                return 0xFF;

        /* Make end inclusive, instead of exclusive */
        end--;

        /* Look in fixed ranges. Just return the type as per the start address */
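        /*
         * Worked example (editor's note): start == 0x90000 lands in the
         * 16 KB-granular fixed range covering 0x80000-0xBFFFF, so
         * idx = 1 * 8 + ((0x90000 - 0x80000) >> 14) = 8 + 4 = 12.
         */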
        if (mtrr_state->have_fixed && (start < 0x100000)) {
                int idx;

                if (start < 0x80000) {
                        idx = 0;
                        idx += (start >> 16);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0xC0000) {
                        idx = 1 * 8;
                        idx += ((start - 0x80000) >> 14);
                        return mtrr_state->fixed_ranges[idx];
                } else if (start < 0x1000000) {
                        idx = 3 * 8;
                        idx += ((start - 0xC0000) >> 12);
                        return mtrr_state->fixed_ranges[idx];
                }
        }

1581         /*
1582          * Look in variable ranges
1583          * Look of multiple ranges matching this address and pick type
1584          * as per MTRR precedence
1585          */
1586         if (!(mtrr_state->enabled & 2))
1587                 return mtrr_state->def_type;
1588
1589         prev_match = 0xFF;
1590         for (i = 0; i < num_var_ranges; ++i) {
1591                 unsigned short start_state, end_state;
1592
1593                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1594                         continue;
1595
1596                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1597                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1598                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1599                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1600
1601                 start_state = ((start & mask) == (base & mask));
1602                 end_state = ((end & mask) == (base & mask));
1603                 if (start_state != end_state)
1604                         return 0xFE;
1605
1606                 if ((start & mask) != (base & mask))
1607                         continue;
1608
1609                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1610                 if (prev_match == 0xFF) {
1611                         prev_match = curr_match;
1612                         continue;
1613                 }
1614
1615                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1616                     curr_match == MTRR_TYPE_UNCACHABLE)
1617                         return MTRR_TYPE_UNCACHABLE;
1618
1619                 if ((prev_match == MTRR_TYPE_WRBACK &&
1620                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1621                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1622                      curr_match == MTRR_TYPE_WRBACK)) {
1623                         prev_match = MTRR_TYPE_WRTHROUGH;
1624                         curr_match = MTRR_TYPE_WRTHROUGH;
1625                 }
1626
1627                 if (prev_match != curr_match)
1628                         return MTRR_TYPE_UNCACHABLE;
1629         }
1630
1631         if (prev_match != 0xFF)
1632                 return prev_match;
1633
1634         return mtrr_state->def_type;
1635 }
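/*
 * Illustrative example: a variable range with base 0x80000000 and mask
 * 0xFFFFF8000000 describes 128MB starting at 2GB; an address belongs to
 * the range iff (addr & mask) == (base & mask).  When start and end fall
 * on different sides of a range boundary, 0xFE is returned and
 * kvm_get_guest_memory_type() below falls back to MTRR_TYPE_WRBACK.
 */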
1636
1637 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1638 {
1639         u8 mtrr;
1640
1641         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1642                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
1643         if (mtrr == 0xfe || mtrr == 0xff)
1644                 mtrr = MTRR_TYPE_WRBACK;
1645         return mtrr;
1646 }
1647 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1648
1649 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1650 {
1651         unsigned index;
1652         struct hlist_head *bucket;
1653         struct kvm_mmu_page *s;
1654         struct hlist_node *node, *n;
1655
1656         index = kvm_page_table_hashfn(sp->gfn);
1657         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1658         /* don't unsync if pagetable is shadowed with multiple roles */
1659         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1660                 if (s->gfn != sp->gfn || s->role.direct)
1661                         continue;
1662                 if (s->role.word != sp->role.word)
1663                         return 1;
1664         }
1665         ++vcpu->kvm->stat.mmu_unsync;
1666         sp->unsync = 1;
1667
1668         kvm_mmu_mark_parents_unsync(vcpu, sp);
1669
1670         mmu_convert_notrap(sp);
1671         return 0;
1672 }
1673
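/*
 * Returns 1 if the gfn is shadowed as a page table and must stay
 * write-protected (the caller then strips PT_WRITABLE_MASK), 0 if the
 * spte may be made writable, possibly after unsyncing the shadow page.
 */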
1674 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1675                                   bool can_unsync)
1676 {
1677         struct kvm_mmu_page *shadow;
1678
1679         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1680         if (shadow) {
1681                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1682                         return 1;
1683                 if (shadow->unsync)
1684                         return 0;
1685                 if (can_unsync && oos_shadow)
1686                         return kvm_unsync_page(vcpu, shadow);
1687                 return 1;
1688         }
1689         return 0;
1690 }
1691
1692 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1693                     unsigned pte_access, int user_fault,
1694                     int write_fault, int dirty, int largepage,
1695                     gfn_t gfn, pfn_t pfn, bool speculative,
1696                     bool can_unsync)
1697 {
1698         u64 spte;
1699         int ret = 0;
1700
1701         /*
1702          * We don't set the accessed bit, since we sometimes want to see
1703          * whether the guest actually used the pte (in order to detect
1704          * demand paging).
1705          */
1706         spte = shadow_base_present_pte | shadow_dirty_mask;
1707         if (!speculative)
1708                 spte |= shadow_accessed_mask;
1709         if (!dirty)
1710                 pte_access &= ~ACC_WRITE_MASK;
1711         if (pte_access & ACC_EXEC_MASK)
1712                 spte |= shadow_x_mask;
1713         else
1714                 spte |= shadow_nx_mask;
1715         if (pte_access & ACC_USER_MASK)
1716                 spte |= shadow_user_mask;
1717         if (largepage)
1718                 spte |= PT_PAGE_SIZE_MASK;
1719         if (tdp_enabled)
1720                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1721                         kvm_is_mmio_pfn(pfn));
1722
1723         spte |= (u64)pfn << PAGE_SHIFT;
1724
1725         if ((pte_access & ACC_WRITE_MASK)
1726             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1727
1728                 if (largepage && has_wrprotected_page(vcpu->kvm, gfn)) {
1729                         ret = 1;
1730                         spte = shadow_trap_nonpresent_pte;
1731                         goto set_pte;
1732                 }
1733
1734                 spte |= PT_WRITABLE_MASK;
1735
1736                 /*
1737                  * Optimization: for pte sync, if spte was writable the hash
1738                  * lookup is unnecessary (and expensive). Write protection
1739                  * is the responsibility of mmu_get_page / kvm_sync_page.
1740                  * Same reasoning can be applied to dirty page accounting.
1741                  */
1742                 if (!can_unsync && is_writeble_pte(*sptep))
1743                         goto set_pte;
1744
1745                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1746                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1747                                  __func__, gfn);
1748                         ret = 1;
1749                         pte_access &= ~ACC_WRITE_MASK;
1750                         if (is_writeble_pte(spte))
1751                                 spte &= ~PT_WRITABLE_MASK;
1752                 }
1753         }
1754
1755         if (pte_access & ACC_WRITE_MASK)
1756                 mark_page_dirty(vcpu->kvm, gfn);
1757
1758 set_pte:
1759         __set_spte(sptep, spte);
1760         return ret;
1761 }
1762
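/*
 * mmu_set_spte() wraps set_spte() with rmap bookkeeping: it unlinks a
 * previous mapping that pointed elsewhere (or a pte page being replaced
 * by a large pte), adds the new spte to the gfn's rmap chain, and drops
 * the pfn reference taken by gfn_to_pfn() in the callers.
 */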
1763 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1764                          unsigned pt_access, unsigned pte_access,
1765                          int user_fault, int write_fault, int dirty,
1766                          int *ptwrite, int largepage, gfn_t gfn,
1767                          pfn_t pfn, bool speculative)
1768 {
1769         int was_rmapped = 0;
1770         int was_writeble = is_writeble_pte(*sptep);
1771         int rmap_count;
1772
1773         pgprintk("%s: spte %llx access %x write_fault %d"
1774                  " user_fault %d gfn %lx\n",
1775                  __func__, *sptep, pt_access,
1776                  write_fault, user_fault, gfn);
1777
1778         if (is_rmap_spte(*sptep)) {
1779                 /*
1780                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1781                  * the parent of the now unreachable PTE.
1782                  */
1783                 if (largepage && !is_large_pte(*sptep)) {
1784                         struct kvm_mmu_page *child;
1785                         u64 pte = *sptep;
1786
1787                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1788                         mmu_page_remove_parent_pte(child, sptep);
1789                 } else if (pfn != spte_to_pfn(*sptep)) {
1790                         pgprintk("hfn old %lx new %lx\n",
1791                                  spte_to_pfn(*sptep), pfn);
1792                         rmap_remove(vcpu->kvm, sptep);
1793                 } else
1794                         was_rmapped = 1;
1795         }
1796         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1797                       dirty, largepage, gfn, pfn, speculative, true)) {
1798                 if (write_fault)
1799                         *ptwrite = 1;
1800                 kvm_x86_ops->tlb_flush(vcpu);
1801         }
1802
1803         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1804         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1805                  is_large_pte(*sptep)? "2MB" : "4kB",
1806                  is_writeble_pte(*sptep) ? "RW" : "R", gfn,
1807                  *sptep, sptep);
1808         if (!was_rmapped && is_large_pte(*sptep))
1809                 ++vcpu->kvm->stat.lpages;
1810
1811         page_header_update_slot(vcpu->kvm, sptep, gfn);
1812         if (!was_rmapped) {
1813                 rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
1814                 if (!is_rmap_spte(*sptep))
1815                         kvm_release_pfn_clean(pfn);
1816                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1817                         rmap_recycle(vcpu, gfn, largepage);
1818         } else {
1819                 if (was_writeble)
1820                         kvm_release_pfn_dirty(pfn);
1821                 else
1822                         kvm_release_pfn_clean(pfn);
1823         }
1824         if (speculative) {
1825                 vcpu->arch.last_pte_updated = sptep;
1826                 vcpu->arch.last_pte_gfn = gfn;
1827         }
1828 }
1829
1830 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1831 {
1832 }
1833
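/*
 * Walk the shadow page table for gfn, allocating shadow pages for any
 * missing intermediate levels, and install the final mapping either at
 * the 4k level or, for largepage faults, at the 2MB directory level.
 */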
1834 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1835                         int largepage, gfn_t gfn, pfn_t pfn)
1836 {
1837         struct kvm_shadow_walk_iterator iterator;
1838         struct kvm_mmu_page *sp;
1839         int pt_write = 0;
1840         gfn_t pseudo_gfn;
1841
1842         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1843                 if (iterator.level == PT_PAGE_TABLE_LEVEL
1844                     || (largepage && iterator.level == PT_DIRECTORY_LEVEL)) {
1845                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1846                                      0, write, 1, &pt_write,
1847                                      largepage, gfn, pfn, false);
1848                         ++vcpu->stat.pf_fixed;
1849                         break;
1850                 }
1851
1852                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1853                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1854                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1855                                               iterator.level - 1,
1856                                               1, ACC_ALL, iterator.sptep);
1857                         if (!sp) {
1858                                 pgprintk("nonpaging_map: ENOMEM\n");
1859                                 kvm_release_pfn_clean(pfn);
1860                                 return -ENOMEM;
1861                         }
1862
1863                         __set_spte(iterator.sptep,
1864                                    __pa(sp->spt)
1865                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
1866                                    | shadow_user_mask | shadow_x_mask);
1867                 }
1868         }
1869         return pt_write;
1870 }
1871
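/*
 * Alignment note: with 4k base pages and 2MB hugepages,
 * KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) is 512, so gfn & ~(512 - 1)
 * rounds down to the start of the 2MB-aligned block; e.g. gfn 0x12345
 * is mapped under the large pte covering gfns 0x12200-0x123ff.
 */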
1872 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1873 {
1874         int r;
1875         int largepage = 0;
1876         pfn_t pfn;
1877         unsigned long mmu_seq;
1878
1879         if (is_largepage_backed(vcpu, gfn &
1880                         ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
1881                 gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
1882                 largepage = 1;
1883         }
1884
1885         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1886         smp_rmb();
1887         pfn = gfn_to_pfn(vcpu->kvm, gfn);
1888
1889         /* mmio */
1890         if (is_error_pfn(pfn)) {
1891                 kvm_release_pfn_clean(pfn);
1892                 return 1;
1893         }
1894
1895         spin_lock(&vcpu->kvm->mmu_lock);
1896         if (mmu_notifier_retry(vcpu, mmu_seq))
1897                 goto out_unlock;
1898         kvm_mmu_free_some_pages(vcpu);
1899         r = __direct_map(vcpu, v, write, largepage, gfn, pfn);
1900         spin_unlock(&vcpu->kvm->mmu_lock);
1901
1902
1903         return r;
1904
1905 out_unlock:
1906         spin_unlock(&vcpu->kvm->mmu_lock);
1907         kvm_release_pfn_clean(pfn);
1908         return 0;
1909 }
1910
1911
1912 static void mmu_free_roots(struct kvm_vcpu *vcpu)
1913 {
1914         int i;
1915         struct kvm_mmu_page *sp;
1916
1917         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
1918                 return;
1919         spin_lock(&vcpu->kvm->mmu_lock);
1920         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1921                 hpa_t root = vcpu->arch.mmu.root_hpa;
1922
1923                 sp = page_header(root);
1924                 --sp->root_count;
1925                 if (!sp->root_count && sp->role.invalid)
1926                         kvm_mmu_zap_page(vcpu->kvm, sp);
1927                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1928                 spin_unlock(&vcpu->kvm->mmu_lock);
1929                 return;
1930         }
1931         for (i = 0; i < 4; ++i) {
1932                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1933
1934                 if (root) {
1935                         root &= PT64_BASE_ADDR_MASK;
1936                         sp = page_header(root);
1937                         --sp->root_count;
1938                         if (!sp->root_count && sp->role.invalid)
1939                                 kvm_mmu_zap_page(vcpu->kvm, sp);
1940                 }
1941                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
1942         }
1943         spin_unlock(&vcpu->kvm->mmu_lock);
1944         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
1945 }
1946
1947 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
1948 {
1949         int ret = 0;
1950
1951         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
1952                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
1953                 ret = 1;
1954         }
1955
1956         return ret;
1957 }
1958
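/*
 * Allocate shadow roots: a single root at PML4 level in long mode,
 * otherwise four pae_root entries, one per 1GB quadrant of the guest
 * address space (hence the "i << 30" address hint passed to
 * kvm_mmu_get_page()).
 */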
1959 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
1960 {
1961         int i;
1962         gfn_t root_gfn;
1963         struct kvm_mmu_page *sp;
1964         int direct = 0;
1965         u64 pdptr;
1966
1967         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
1968
1969         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1970                 hpa_t root = vcpu->arch.mmu.root_hpa;
1971
1972                 ASSERT(!VALID_PAGE(root));
1973                 if (tdp_enabled)
1974                         direct = 1;
1975                 if (mmu_check_root(vcpu, root_gfn))
1976                         return 1;
1977                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
1978                                       PT64_ROOT_LEVEL, direct,
1979                                       ACC_ALL, NULL);
1980                 root = __pa(sp->spt);
1981                 ++sp->root_count;
1982                 vcpu->arch.mmu.root_hpa = root;
1983                 return 0;
1984         }
1985         direct = !is_paging(vcpu);
1986         if (tdp_enabled)
1987                 direct = 1;
1988         for (i = 0; i < 4; ++i) {
1989                 hpa_t root = vcpu->arch.mmu.pae_root[i];
1990
1991                 ASSERT(!VALID_PAGE(root));
1992                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1993                         pdptr = kvm_pdptr_read(vcpu, i);
1994                         if (!is_present_gpte(pdptr)) {
1995                                 vcpu->arch.mmu.pae_root[i] = 0;
1996                                 continue;
1997                         }
1998                         root_gfn = pdptr >> PAGE_SHIFT;
1999                 } else if (vcpu->arch.mmu.root_level == 0)
2000                         root_gfn = 0;
2001                 if (mmu_check_root(vcpu, root_gfn))
2002                         return 1;
2003                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2004                                       PT32_ROOT_LEVEL, direct,
2005                                       ACC_ALL, NULL);
2006                 root = __pa(sp->spt);
2007                 ++sp->root_count;
2008                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2009         }
2010         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2011         return 0;
2012 }
2013
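/*
 * Walk the children of the active roots and resync any unsync shadow
 * pages so their sptes match the guest page tables again; callers must
 * hold mmu_lock, as kvm_mmu_sync_roots() below does.
 */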
2014 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2015 {
2016         int i;
2017         struct kvm_mmu_page *sp;
2018
2019         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2020                 return;
2021         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2022                 hpa_t root = vcpu->arch.mmu.root_hpa;
2023                 sp = page_header(root);
2024                 mmu_sync_children(vcpu, sp);
2025                 return;
2026         }
2027         for (i = 0; i < 4; ++i) {
2028                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2029
2030                 if (root && VALID_PAGE(root)) {
2031                         root &= PT64_BASE_ADDR_MASK;
2032                         sp = page_header(root);
2033                         mmu_sync_children(vcpu, sp);
2034                 }
2035         }
2036 }
2037
2038 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2039 {
2040         spin_lock(&vcpu->kvm->mmu_lock);
2041         mmu_sync_roots(vcpu);
2042         spin_unlock(&vcpu->kvm->mmu_lock);
2043 }
2044
2045 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2046 {
2047         return vaddr;
2048 }
2049
2050 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2051                                 u32 error_code)
2052 {
2053         gfn_t gfn;
2054         int r;
2055
2056         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2057         r = mmu_topup_memory_caches(vcpu);
2058         if (r)
2059                 return r;
2060
2061         ASSERT(vcpu);
2062         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2063
2064         gfn = gva >> PAGE_SHIFT;
2065
2066         return nonpaging_map(vcpu, gva & PAGE_MASK,
2067                              error_code & PFERR_WRITE_MASK, gfn);
2068 }
2069
2070 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2071                                 u32 error_code)
2072 {
2073         pfn_t pfn;
2074         int r;
2075         int largepage = 0;
2076         gfn_t gfn = gpa >> PAGE_SHIFT;
2077         unsigned long mmu_seq;
2078
2079         ASSERT(vcpu);
2080         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2081
2082         r = mmu_topup_memory_caches(vcpu);
2083         if (r)
2084                 return r;
2085
2086         if (is_largepage_backed(vcpu, gfn &
2087                         ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
2088                 gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
2089                 largepage = 1;
2090         }
2091         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2092         smp_rmb();
2093         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2094         if (is_error_pfn(pfn)) {
2095                 kvm_release_pfn_clean(pfn);
2096                 return 1;
2097         }
2098         spin_lock(&vcpu->kvm->mmu_lock);
2099         if (mmu_notifier_retry(vcpu, mmu_seq))
2100                 goto out_unlock;
2101         kvm_mmu_free_some_pages(vcpu);
2102         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2103                          largepage, gfn, pfn);
2104         spin_unlock(&vcpu->kvm->mmu_lock);
2105
2106         return r;
2107
2108 out_unlock:
2109         spin_unlock(&vcpu->kvm->mmu_lock);
2110         kvm_release_pfn_clean(pfn);
2111         return 0;
2112 }
2113
2114 static void nonpaging_free(struct kvm_vcpu *vcpu)
2115 {
2116         mmu_free_roots(vcpu);
2117 }
2118
2119 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2120 {
2121         struct kvm_mmu *context = &vcpu->arch.mmu;
2122
2123         context->new_cr3 = nonpaging_new_cr3;
2124         context->page_fault = nonpaging_page_fault;
2125         context->gva_to_gpa = nonpaging_gva_to_gpa;
2126         context->free = nonpaging_free;
2127         context->prefetch_page = nonpaging_prefetch_page;
2128         context->sync_page = nonpaging_sync_page;
2129         context->invlpg = nonpaging_invlpg;
2130         context->root_level = 0;
2131         context->shadow_root_level = PT32E_ROOT_LEVEL;
2132         context->root_hpa = INVALID_PAGE;
2133         return 0;
2134 }
2135
2136 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2137 {
2138         ++vcpu->stat.tlb_flush;
2139         kvm_x86_ops->tlb_flush(vcpu);
2140 }
2141
2142 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2143 {
2144         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2145         mmu_free_roots(vcpu);
2146 }
2147
2148 static void inject_page_fault(struct kvm_vcpu *vcpu,
2149                               u64 addr,
2150                               u32 err_code)
2151 {
2152         kvm_inject_page_fault(vcpu, addr, err_code);
2153 }
2154
2155 static void paging_free(struct kvm_vcpu *vcpu)
2156 {
2157         nonpaging_free(vcpu);
2158 }
2159
2160 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2161 {
2162         int bit7;
2163
2164         bit7 = (gpte >> 7) & 1;
2165         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2166 }
2167
2168 #define PTTYPE 64
2169 #include "paging_tmpl.h"
2170 #undef PTTYPE
2171
2172 #define PTTYPE 32
2173 #include "paging_tmpl.h"
2174 #undef PTTYPE
2175
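/*
 * rsvd_bits_mask is indexed as [pse bit][level - 1]: index [1][n] holds
 * the reserved-bit mask for a level n+1 entry whose PS bit (bit 7) is
 * set, matching the bit7 lookup in is_rsvd_bits_set() above.
 */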
2176 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2177 {
2178         struct kvm_mmu *context = &vcpu->arch.mmu;
2179         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2180         u64 exb_bit_rsvd = 0;
2181
2182         if (!is_nx(vcpu))
2183                 exb_bit_rsvd = rsvd_bits(63, 63);
2184         switch (level) {
2185         case PT32_ROOT_LEVEL:
2186                 /* no rsvd bits for 2 level 4K page table entries */
2187                 context->rsvd_bits_mask[0][1] = 0;
2188                 context->rsvd_bits_mask[0][0] = 0;
2189                 if (is_cpuid_PSE36())
2190                         /* 36-bit PSE 4MB page */
2191                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2192                 else
2193                         /* 32-bit PSE 4MB page */
2194                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2195                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2196                 break;
2197         case PT32E_ROOT_LEVEL:
2198                 context->rsvd_bits_mask[0][2] =
2199                         rsvd_bits(maxphyaddr, 63) |
2200                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2201                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2202                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2203                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2204                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2205                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2206                         rsvd_bits(maxphyaddr, 62) |
2207                         rsvd_bits(13, 20);              /* large page */
2208                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2209                 break;
2210         case PT64_ROOT_LEVEL:
2211                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2212                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2213                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2214                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2215                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2216                         rsvd_bits(maxphyaddr, 51);
2217                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2218                         rsvd_bits(maxphyaddr, 51);
2219                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2220                 context->rsvd_bits_mask[1][2] = context->rsvd_bits_mask[0][2];
2221                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2222                         rsvd_bits(maxphyaddr, 51) |
2223                         rsvd_bits(13, 20);              /* large page */
2224                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2225                 break;
2226         }
2227 }
2228
2229 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2230 {
2231         struct kvm_mmu *context = &vcpu->arch.mmu;
2232
2233         ASSERT(is_pae(vcpu));
2234         context->new_cr3 = paging_new_cr3;
2235         context->page_fault = paging64_page_fault;
2236         context->gva_to_gpa = paging64_gva_to_gpa;
2237         context->prefetch_page = paging64_prefetch_page;
2238         context->sync_page = paging64_sync_page;
2239         context->invlpg = paging64_invlpg;
2240         context->free = paging_free;
2241         context->root_level = level;
2242         context->shadow_root_level = level;
2243         context->root_hpa = INVALID_PAGE;
2244         return 0;
2245 }
2246
2247 static int paging64_init_context(struct kvm_vcpu *vcpu)
2248 {
2249         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2250         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2251 }
2252
2253 static int paging32_init_context(struct kvm_vcpu *vcpu)
2254 {
2255         struct kvm_mmu *context = &vcpu->arch.mmu;
2256
2257         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2258         context->new_cr3 = paging_new_cr3;
2259         context->page_fault = paging32_page_fault;
2260         context->gva_to_gpa = paging32_gva_to_gpa;
2261         context->free = paging_free;
2262         context->prefetch_page = paging32_prefetch_page;
2263         context->sync_page = paging32_sync_page;
2264         context->invlpg = paging32_invlpg;
2265         context->root_level = PT32_ROOT_LEVEL;
2266         context->shadow_root_level = PT32E_ROOT_LEVEL;
2267         context->root_hpa = INVALID_PAGE;
2268         return 0;
2269 }
2270
2271 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2272 {
2273         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2274         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2275 }
2276
2277 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2278 {
2279         struct kvm_mmu *context = &vcpu->arch.mmu;
2280
2281         context->new_cr3 = nonpaging_new_cr3;
2282         context->page_fault = tdp_page_fault;
2283         context->free = nonpaging_free;
2284         context->prefetch_page = nonpaging_prefetch_page;
2285         context->sync_page = nonpaging_sync_page;
2286         context->invlpg = nonpaging_invlpg;
2287         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2288         context->root_hpa = INVALID_PAGE;
2289
2290         if (!is_paging(vcpu)) {
2291                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2292                 context->root_level = 0;
2293         } else if (is_long_mode(vcpu)) {
2294                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2295                 context->gva_to_gpa = paging64_gva_to_gpa;
2296                 context->root_level = PT64_ROOT_LEVEL;
2297         } else if (is_pae(vcpu)) {
2298                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2299                 context->gva_to_gpa = paging64_gva_to_gpa;
2300                 context->root_level = PT32E_ROOT_LEVEL;
2301         } else {
2302                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2303                 context->gva_to_gpa = paging32_gva_to_gpa;
2304                 context->root_level = PT32_ROOT_LEVEL;
2305         }
2306
2307         return 0;
2308 }
2309
2310 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2311 {
2312         int r;
2313
2314         ASSERT(vcpu);
2315         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2316
2317         if (!is_paging(vcpu))
2318                 r = nonpaging_init_context(vcpu);
2319         else if (is_long_mode(vcpu))
2320                 r = paging64_init_context(vcpu);
2321         else if (is_pae(vcpu))
2322                 r = paging32E_init_context(vcpu);
2323         else
2324                 r = paging32_init_context(vcpu);
2325
2326         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2327
2328         return r;
2329 }
2330
2331 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2332 {
2333         vcpu->arch.update_pte.pfn = bad_pfn;
2334
2335         if (tdp_enabled)
2336                 return init_kvm_tdp_mmu(vcpu);
2337         else
2338                 return init_kvm_softmmu(vcpu);
2339 }
2340
2341 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2342 {
2343         ASSERT(vcpu);
2344         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2345                 vcpu->arch.mmu.free(vcpu);
2346                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2347         }
2348 }
2349
2350 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2351 {
2352         destroy_kvm_mmu(vcpu);
2353         return init_kvm_mmu(vcpu);
2354 }
2355 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2356
2357 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2358 {
2359         int r;
2360
2361         r = mmu_topup_memory_caches(vcpu);
2362         if (r)
2363                 goto out;
2364         spin_lock(&vcpu->kvm->mmu_lock);
2365         kvm_mmu_free_some_pages(vcpu);
2366         r = mmu_alloc_roots(vcpu);
2367         mmu_sync_roots(vcpu);
2368         spin_unlock(&vcpu->kvm->mmu_lock);
2369         if (r)
2370                 goto out;
2371         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2372         kvm_mmu_flush_tlb(vcpu);
2373 out:
2374         return r;
2375 }
2376 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2377
2378 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2379 {
2380         mmu_free_roots(vcpu);
2381 }
2382
2383 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2384                                   struct kvm_mmu_page *sp,
2385                                   u64 *spte)
2386 {
2387         u64 pte;
2388         struct kvm_mmu_page *child;
2389
2390         pte = *spte;
2391         if (is_shadow_present_pte(pte)) {
2392                 if (is_last_spte(pte, sp->role.level))
2393                         rmap_remove(vcpu->kvm, spte);
2394                 else {
2395                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2396                         mmu_page_remove_parent_pte(child, spte);
2397                 }
2398         }
2399         __set_spte(spte, shadow_trap_nonpresent_pte);
2400         if (is_large_pte(pte))
2401                 --vcpu->kvm->stat.lpages;
2402 }
2403
2404 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2405                                   struct kvm_mmu_page *sp,
2406                                   u64 *spte,
2407                                   const void *new)
2408 {
2409         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2410                 if (!vcpu->arch.update_pte.largepage ||
2411                     sp->role.glevels == PT32_ROOT_LEVEL) {
2412                         ++vcpu->kvm->stat.mmu_pde_zapped;
2413                         return;
2414                 }
2415         }
2416
2417         ++vcpu->kvm->stat.mmu_pte_updated;
2418         if (sp->role.glevels == PT32_ROOT_LEVEL)
2419                 paging32_update_pte(vcpu, sp, spte, new);
2420         else
2421                 paging64_update_pte(vcpu, sp, spte, new);
2422 }
2423
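/*
 * A remote TLB flush is needed only when rights are reduced or the
 * target frame changes; granting rights (adding W, clearing NX) at
 * worst causes a spurious fault that is fixed up locally.
 */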
2424 static bool need_remote_flush(u64 old, u64 new)
2425 {
2426         if (!is_shadow_present_pte(old))
2427                 return false;
2428         if (!is_shadow_present_pte(new))
2429                 return true;
2430         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2431                 return true;
2432         old ^= PT64_NX_MASK;
2433         new ^= PT64_NX_MASK;
2434         return (old & ~new & PT64_PERM_MASK) != 0;
2435 }
2436
2437 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2438 {
2439         if (need_remote_flush(old, new))
2440                 kvm_flush_remote_tlbs(vcpu->kvm);
2441         else
2442                 kvm_mmu_flush_tlb(vcpu);
2443 }
2444
2445 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2446 {
2447         u64 *spte = vcpu->arch.last_pte_updated;
2448
2449         return !!(spte && (*spte & shadow_accessed_mask));
2450 }
2451
2452 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2453                                           const u8 *new, int bytes)
2454 {
2455         gfn_t gfn;
2456         int r;
2457         u64 gpte = 0;
2458         pfn_t pfn;
2459
2460         vcpu->arch.update_pte.largepage = 0;
2461
2462         if (bytes != 4 && bytes != 8)
2463                 return;
2464
2465         /*
2466          * Assume that the pte write is on a page table of the same type
2467          * as the current vcpu paging mode.  This is nearly always true
2468          * (might be false while changing modes).  Note it is verified later
2469          * by update_pte().
2470          */
2471         if (is_pae(vcpu)) {
2472                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2473                 if ((bytes == 4) && (gpa % 4 == 0)) {
2474                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2475                         if (r)
2476                                 return;
2477                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2478                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2479                         memcpy((void *)&gpte, new, 8);
2480                 }
2481         } else {
2482                 if ((bytes == 4) && (gpa % 4 == 0))
2483                         memcpy((void *)&gpte, new, 4);
2484         }
2485         if (!is_present_gpte(gpte))
2486                 return;
2487         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2488
2489         if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
2490                 gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
2491                 vcpu->arch.update_pte.largepage = 1;
2492         }
2493         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2494         smp_rmb();
2495         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2496
2497         if (is_error_pfn(pfn)) {
2498                 kvm_release_pfn_clean(pfn);
2499                 return;
2500         }
2501         vcpu->arch.update_pte.gfn = gfn;
2502         vcpu->arch.update_pte.pfn = pfn;
2503 }
2504
2505 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2506 {
2507         u64 *spte = vcpu->arch.last_pte_updated;
2508
2509         if (spte
2510             && vcpu->arch.last_pte_gfn == gfn
2511             && shadow_accessed_mask
2512             && !(*spte & shadow_accessed_mask)
2513             && is_shadow_present_pte(*spte))
2514                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2515 }
2516
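/*
 * Called for emulated writes that hit a shadowed page table.  The
 * flooding heuristic: three or more consecutive guest writes to the same
 * gfn, with the previously updated spte never accessed in between,
 * suggest the page is no longer used as a page table, so the matching
 * shadow pages are zapped rather than updated in place.
 */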
2517 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2518                        const u8 *new, int bytes,
2519                        bool guest_initiated)
2520 {
2521         gfn_t gfn = gpa >> PAGE_SHIFT;
2522         struct kvm_mmu_page *sp;
2523         struct hlist_node *node, *n;
2524         struct hlist_head *bucket;
2525         unsigned index;
2526         u64 entry, gentry;
2527         u64 *spte;
2528         unsigned offset = offset_in_page(gpa);
2529         unsigned pte_size;
2530         unsigned page_offset;
2531         unsigned misaligned;
2532         unsigned quadrant;
2533         int level;
2534         int flooded = 0;
2535         int npte;
2536         int r;
2537
2538         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2539         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2540         spin_lock(&vcpu->kvm->mmu_lock);
2541         kvm_mmu_access_page(vcpu, gfn);
2542         kvm_mmu_free_some_pages(vcpu);
2543         ++vcpu->kvm->stat.mmu_pte_write;
2544         kvm_mmu_audit(vcpu, "pre pte write");
2545         if (guest_initiated) {
2546                 if (gfn == vcpu->arch.last_pt_write_gfn
2547                     && !last_updated_pte_accessed(vcpu)) {
2548                         ++vcpu->arch.last_pt_write_count;
2549                         if (vcpu->arch.last_pt_write_count >= 3)
2550                                 flooded = 1;
2551                 } else {
2552                         vcpu->arch.last_pt_write_gfn = gfn;
2553                         vcpu->arch.last_pt_write_count = 1;
2554                         vcpu->arch.last_pte_updated = NULL;
2555                 }
2556         }
2557         index = kvm_page_table_hashfn(gfn);
2558         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2559         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2560                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2561                         continue;
2562                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
2563                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2564                 misaligned |= bytes < 4;
2565                 if (misaligned || flooded) {
2566                         /*
2567                          * Misaligned accesses are too much trouble to fix
2568                          * up; also, they usually indicate a page is not used
2569                          * as a page table.
2570                          *
2571                          * If we're seeing too many writes to a page,
2572                          * it may no longer be a page table, or we may be
2573                          * forking, in which case it is better to unmap the
2574                          * page.
2575                          */
2576                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2577                                  gpa, bytes, sp->role.word);
2578                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2579                                 n = bucket->first;
2580                         ++vcpu->kvm->stat.mmu_flooded;
2581                         continue;
2582                 }
2583                 page_offset = offset;
2584                 level = sp->role.level;
2585                 npte = 1;
2586                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2587                         page_offset <<= 1;      /* 32->64 */
2588                         /*
2589                          * A 32-bit pde maps 4MB while the shadow pdes map
2590                          * only 2MB.  So we need to double the offset again
2591                          * and zap two pdes instead of one.
2592                          */
2593                         if (level == PT32_ROOT_LEVEL) {
2594                                 page_offset &= ~7; /* kill rounding error */
2595                                 page_offset <<= 1;
2596                                 npte = 2;
2597                         }
2598                         quadrant = page_offset >> PAGE_SHIFT;
2599                         page_offset &= ~PAGE_MASK;
2600                         if (quadrant != sp->role.quadrant)
2601                                 continue;
2602                 }
2603                 spte = &sp->spt[page_offset / sizeof(*spte)];
2604                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2605                         gentry = 0;
2606                         r = kvm_read_guest_atomic(vcpu->kvm,
2607                                                   gpa & ~(u64)(pte_size - 1),
2608                                                   &gentry, pte_size);
2609                         new = (const void *)&gentry;
2610                         if (r < 0)
2611                                 new = NULL;
2612                 }
2613                 while (npte--) {
2614                         entry = *spte;
2615                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2616                         if (new)
2617                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2618                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2619                         ++spte;
2620                 }
2621         }
2622         kvm_mmu_audit(vcpu, "post pte write");
2623         spin_unlock(&vcpu->kvm->mmu_lock);
2624         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2625                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2626                 vcpu->arch.update_pte.pfn = bad_pfn;
2627         }
2628 }
2629
2630 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2631 {
2632         gpa_t gpa;
2633         int r;
2634
2635         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2636
2637         spin_lock(&vcpu->kvm->mmu_lock);
2638         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2639         spin_unlock(&vcpu->kvm->mmu_lock);
2640         return r;
2641 }
2642 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2643
2644 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2645 {
2646         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
2647                 struct kvm_mmu_page *sp;
2648
2649                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2650                                   struct kvm_mmu_page, link);
2651                 kvm_mmu_zap_page(vcpu->kvm, sp);
2652                 ++vcpu->kvm->stat.mmu_recycled;
2653         }
2654 }
2655
2656 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2657 {
2658         int r;
2659         enum emulation_result er;
2660
2661         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2662         if (r < 0)
2663                 goto out;
2664
2665         if (!r) {
2666                 r = 1;
2667                 goto out;
2668         }
2669
2670         r = mmu_topup_memory_caches(vcpu);
2671         if (r)
2672                 goto out;
2673
2674         er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
2675
2676         switch (er) {
2677         case EMULATE_DONE:
2678                 return 1;
2679         case EMULATE_DO_MMIO:
2680                 ++vcpu->stat.mmio_exits;
2681                 return 0;
2682         case EMULATE_FAIL:
2683                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2684                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2685                 return 0;
2686         default:
2687                 BUG();
2688         }
2689 out:
2690         return r;
2691 }
2692 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2693
2694 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2695 {
2696         vcpu->arch.mmu.invlpg(vcpu, gva);
2697         kvm_mmu_flush_tlb(vcpu);
2698         ++vcpu->stat.invlpg;
2699 }
2700 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2701
2702 void kvm_enable_tdp(void)
2703 {
2704         tdp_enabled = true;
2705 }
2706 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2707
2708 void kvm_disable_tdp(void)
2709 {
2710         tdp_enabled = false;
2711 }
2712 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2713
2714 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2715 {
2716         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2717 }
2718
2719 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2720 {
2721         struct page *page;
2722         int i;
2723
2724         ASSERT(vcpu);
2725
2726         if (vcpu->kvm->arch.n_requested_mmu_pages)
2727                 vcpu->kvm->arch.n_free_mmu_pages =
2728                                         vcpu->kvm->arch.n_requested_mmu_pages;
2729         else
2730                 vcpu->kvm->arch.n_free_mmu_pages =
2731                                         vcpu->kvm->arch.n_alloc_mmu_pages;
2732         /*
2733          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2734          * Therefore we need to allocate shadow page tables in the first
2735          * 4GB of memory, which happens to fit the DMA32 zone.
2736          */
2737         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2738         if (!page)
2739                 goto error_1;
2740         vcpu->arch.mmu.pae_root = page_address(page);
2741         for (i = 0; i < 4; ++i)
2742                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2743
2744         return 0;
2745
2746 error_1:
2747         free_mmu_pages(vcpu);
2748         return -ENOMEM;
2749 }
2750
2751 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2752 {
2753         ASSERT(vcpu);
2754         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2755
2756         return alloc_mmu_pages(vcpu);
2757 }
2758
2759 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2760 {
2761         ASSERT(vcpu);
2762         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2763
2764         return init_kvm_mmu(vcpu);
2765 }
2766
2767 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2768 {
2769         ASSERT(vcpu);
2770
2771         destroy_kvm_mmu(vcpu);
2772         free_mmu_pages(vcpu);
2773         mmu_free_memory_caches(vcpu);
2774 }
2775
2776 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2777 {
2778         struct kvm_mmu_page *sp;
2779
2780         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2781                 int i;
2782                 u64 *pt;
2783
2784                 if (!test_bit(slot, sp->slot_bitmap))
2785                         continue;
2786
2787                 pt = sp->spt;
2788                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2789                         /* avoid RMW */
2790                         if (pt[i] & PT_WRITABLE_MASK)
2791                                 pt[i] &= ~PT_WRITABLE_MASK;
2792         }
2793         kvm_flush_remote_tlbs(kvm);
2794 }
2795
2796 void kvm_mmu_zap_all(struct kvm *kvm)
2797 {
2798         struct kvm_mmu_page *sp, *node;
2799
2800         spin_lock(&kvm->mmu_lock);
2801         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2802                 if (kvm_mmu_zap_page(kvm, sp))
2803                         node = container_of(kvm->arch.active_mmu_pages.next,
2804                                             struct kvm_mmu_page, link);
2805         spin_unlock(&kvm->mmu_lock);
2806
2807         kvm_flush_remote_tlbs(kvm);
2808 }
2809
2810 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2811 {
2812         struct kvm_mmu_page *page;
2813
2814         page = container_of(kvm->arch.active_mmu_pages.prev,
2815                             struct kvm_mmu_page, link);
2816         kvm_mmu_zap_page(kvm, page);
2817 }
2818
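/*
 * Shrinker callback: under memory pressure, pick one VM, zap a single
 * shadow page from it, and rotate that VM to the tail of vm_list so
 * repeated invocations spread the cost across guests.  The return value
 * reports how many allocated shadow pages remain in total.
 */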
2819 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2820 {
2821         struct kvm *kvm;
2822         struct kvm *kvm_freed = NULL;
2823         int cache_count = 0;
2824
2825         spin_lock(&kvm_lock);
2826
2827         list_for_each_entry(kvm, &vm_list, vm_list) {
2828                 int npages;
2829
2830                 if (!down_read_trylock(&kvm->slots_lock))
2831                         continue;
2832                 spin_lock(&kvm->mmu_lock);
2833                 npages = kvm->arch.n_alloc_mmu_pages -
2834                          kvm->arch.n_free_mmu_pages;
2835                 cache_count += npages;
2836                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2837                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2838                         cache_count--;
2839                         kvm_freed = kvm;
2840                 }
2841                 nr_to_scan--;
2842
2843                 spin_unlock(&kvm->mmu_lock);
2844                 up_read(&kvm->slots_lock);
2845         }
2846         if (kvm_freed)
2847                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2848
2849         spin_unlock(&kvm_lock);
2850
2851         return cache_count;
2852 }
2853
2854 static struct shrinker mmu_shrinker = {
2855         .shrink = mmu_shrink,
2856         .seeks = DEFAULT_SEEKS * 10,
2857 };
2858
2859 static void mmu_destroy_caches(void)
2860 {
2861         if (pte_chain_cache)
2862                 kmem_cache_destroy(pte_chain_cache);
2863         if (rmap_desc_cache)
2864                 kmem_cache_destroy(rmap_desc_cache);
2865         if (mmu_page_header_cache)
2866                 kmem_cache_destroy(mmu_page_header_cache);
2867 }
2868
2869 void kvm_mmu_module_exit(void)
2870 {
2871         mmu_destroy_caches();
2872         unregister_shrinker(&mmu_shrinker);
2873 }
2874
2875 int kvm_mmu_module_init(void)
2876 {
2877         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2878                                             sizeof(struct kvm_pte_chain),
2879                                             0, 0, NULL);
2880         if (!pte_chain_cache)
2881                 goto nomem;
2882         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2883                                             sizeof(struct kvm_rmap_desc),
2884                                             0, 0, NULL);
2885         if (!rmap_desc_cache)
2886                 goto nomem;
2887
2888         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
2889                                                   sizeof(struct kvm_mmu_page),
2890                                                   0, 0, NULL);
2891         if (!mmu_page_header_cache)
2892                 goto nomem;
2893
2894         register_shrinker(&mmu_shrinker);
2895
2896         return 0;
2897
2898 nomem:
2899         mmu_destroy_caches();
2900         return -ENOMEM;
2901 }
2902
2903 /*
2904  * Calculate mmu pages needed for kvm.
2905  */
2906 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
2907 {
2908         int i;
2909         unsigned int nr_mmu_pages;
2910         unsigned int  nr_pages = 0;
2911
2912         for (i = 0; i < kvm->nmemslots; i++)
2913                 nr_pages += kvm->memslots[i].npages;
2914
2915         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2916         nr_mmu_pages = max(nr_mmu_pages,
2917                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2918
2919         return nr_mmu_pages;
2920 }
2921
2922 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2923                                 unsigned len)
2924 {
2925         if (len > buffer->len)
2926                 return NULL;
2927         return buffer->ptr;
2928 }
2929
2930 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
2931                                 unsigned len)
2932 {
2933         void *ret;
2934
2935         ret = pv_mmu_peek_buffer(buffer, len);
2936         if (!ret)
2937                 return ret;
2938         buffer->ptr += len;
2939         buffer->len -= len;
2940         buffer->processed += len;
2941         return ret;
2942 }
2943
2944 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
2945                              gpa_t addr, gpa_t value)
2946 {
2947         int bytes = 8;
2948         int r;
2949
2950         if (!is_long_mode(vcpu) && !is_pae(vcpu))
2951                 bytes = 4;
2952
2953         r = mmu_topup_memory_caches(vcpu);
2954         if (r)
2955                 return r;
2956
2957         if (!emulator_write_phys(vcpu, addr, &value, bytes))
2958                 return -EFAULT;
2959
2960         return 1;
2961 }
2962
2963 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2964 {
2965         kvm_set_cr3(vcpu, vcpu->arch.cr3);
2966         return 1;
2967 }
2968
2969 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
2970 {
2971         spin_lock(&vcpu->kvm->mmu_lock);
2972         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
2973         spin_unlock(&vcpu->kvm->mmu_lock);
2974         return 1;
2975 }
2976
2977 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
2978                              struct kvm_pv_mmu_op_buffer *buffer)
2979 {
2980         struct kvm_mmu_op_header *header;
2981
2982         header = pv_mmu_peek_buffer(buffer, sizeof *header);
2983         if (!header)
2984                 return 0;
2985         switch (header->op) {
2986         case KVM_MMU_OP_WRITE_PTE: {
2987                 struct kvm_mmu_op_write_pte *wpte;
2988
2989                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
2990                 if (!wpte)
2991                         return 0;
2992                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
2993                                         wpte->pte_val);
2994         }
2995         case KVM_MMU_OP_FLUSH_TLB: {
2996                 struct kvm_mmu_op_flush_tlb *ftlb;
2997
2998                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
2999                 if (!ftlb)
3000                         return 0;
3001                 return kvm_pv_mmu_flush_tlb(vcpu);
3002         }
3003         case KVM_MMU_OP_RELEASE_PT: {
3004                 struct kvm_mmu_op_release_pt *rpt;
3005
3006                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3007                 if (!rpt)
3008                         return 0;
3009                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3010         }
3011         default: return 0;
3012         }
3013 }
3014
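/*
 * Entry point for the paravirt mmu-op batch: the guest supplies a buffer
 * of variable-length op records, copied in and consumed one record at a
 * time by kvm_pv_mmu_op_one() until the buffer is exhausted or an op
 * fails; *ret reports how many bytes were processed.
 */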
3015 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3016                   gpa_t addr, unsigned long *ret)
3017 {
3018         int r;
3019         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3020
3021         buffer->ptr = buffer->buf;
3022         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3023         buffer->processed = 0;
3024
3025         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3026         if (r)
3027                 goto out;
3028
3029         while (buffer->len) {
3030                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3031                 if (r < 0)
3032                         goto out;
3033                 if (r == 0)
3034                         break;
3035         }
3036
3037         r = 1;
3038 out:
3039         *ret = buffer->processed;
3040         return r;
3041 }
3042
3043 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3044 {
3045         struct kvm_shadow_walk_iterator iterator;
3046         int nr_sptes = 0;
3047
3048         spin_lock(&vcpu->kvm->mmu_lock);
3049         for_each_shadow_entry(vcpu, addr, iterator) {
3050                 sptes[iterator.level-1] = *iterator.sptep;
3051                 nr_sptes++;
3052                 if (!is_shadow_present_pte(*iterator.sptep))
3053                         break;
3054         }
3055         spin_unlock(&vcpu->kvm->mmu_lock);
3056
3057         return nr_sptes;
3058 }
3059 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3060
3061 #ifdef AUDIT
3062
3063 static const char *audit_msg;
3064
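/*
 * Sign-extend bit 47 so the audit code compares proper 48-bit canonical
 * virtual addresses on x86_64.
 */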
3065 static gva_t canonicalize(gva_t gva)
3066 {
3067 #ifdef CONFIG_X86_64
3068         gva = (long long)(gva << 16) >> 16;
3069 #endif
3070         return gva;
3071 }
3072
3073
3074 typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3075                                  u64 *sptep);
3076
3077 static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3078                             inspect_spte_fn fn)
3079 {
3080         int i;
3081
3082         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3083                 u64 ent = sp->spt[i];
3084
3085                 if (is_shadow_present_pte(ent)) {
3086                         if (!is_last_spte(ent, sp->role.level)) {
3087                                 struct kvm_mmu_page *child;
3088                                 child = page_header(ent & PT64_BASE_ADDR_MASK);
3089                                 __mmu_spte_walk(kvm, child, fn);
3090                         } else
3091                                 fn(kvm, sp, &sp->spt[i]);
3092                 }
3093         }
3094 }
3095
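/*
 * Walk all leaf sptes reachable from the active root: either a single
 * 64-bit root, or each of the four PAE page-directory roots.
 */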
3096 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3097 {
3098         int i;
3099         struct kvm_mmu_page *sp;
3100
3101         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3102                 return;
3103         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3104                 hpa_t root = vcpu->arch.mmu.root_hpa;
3105                 sp = page_header(root);
3106                 __mmu_spte_walk(vcpu->kvm, sp, fn);
3107                 return;
3108         }
3109         for (i = 0; i < 4; ++i) {
3110                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3111
3112                 if (root && VALID_PAGE(root)) {
3113                         root &= PT64_BASE_ADDR_MASK;
3114                         sp = page_header(root);
3115                         __mmu_spte_walk(vcpu->kvm, sp, fn);
3116                 }
3117         }
3119 }
3120
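/*
 * Recursively check one shadow page: for every leaf spte, translate the
 * corresponding gva through the guest page tables and report sptes that
 * point at the wrong host page, as well as notrap nonpresent sptes that
 * shadow gvas the guest does map.
 */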
3121 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3122                                 gva_t va, int level)
3123 {
3124         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3125         int i;
3126         gva_t va_delta = 1ul << (PAGE_SHIFT + PT64_LEVEL_BITS * (level - 1));
3127
3128         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3129                 u64 ent = pt[i];
3130
3131                 if (ent == shadow_trap_nonpresent_pte)
3132                         continue;
3133
3134                 va = canonicalize(va);
3135                 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3136                         audit_mappings_page(vcpu, ent, va, level - 1);
3137                 else {
3138                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3139                         gfn_t gfn = gpa >> PAGE_SHIFT;
3140                         pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3141                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3142
3143                         if (is_error_pfn(pfn)) {
3144                                 kvm_release_pfn_clean(pfn);
3145                                 continue;
3146                         }
3147
3148                         if (is_shadow_present_pte(ent)
3149                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3150                                 printk(KERN_ERR "audit error: (%s) levels %d"
3151                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3152                                        audit_msg, vcpu->arch.mmu.root_level,
3153                                        va, gpa, hpa, ent,
3154                                        is_shadow_present_pte(ent));
3155                         else if (ent == shadow_notrap_nonpresent_pte
3156                                  && !is_error_hpa(hpa))
3157                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3158                                        " valid guest gva %lx\n", audit_msg, va);
3159                         kvm_release_pfn_clean(pfn);
3161                 }
3162         }
3163 }
3164
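/* Audit the whole active address space from the 64-bit or PAE roots. */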
3165 static void audit_mappings(struct kvm_vcpu *vcpu)
3166 {
3167         unsigned i;
3168
3169         if (vcpu->arch.mmu.root_level == 4)
3170                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3171         else
3172                 for (i = 0; i < 4; ++i)
3173                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3174                                 audit_mappings_page(vcpu,
3175                                                     vcpu->arch.mmu.pae_root[i],
3176                                                     i << 30,
3177                                                     2);
3178 }
3179
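/*
 * Count every spte reachable from the memslot rmaps.  Bit 0 of an rmap
 * slot distinguishes a lone spte pointer (clear) from a pointer to a
 * chain of kvm_rmap_desc blocks holding RMAP_EXT sptes each (set).
 */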
3180 static int count_rmaps(struct kvm_vcpu *vcpu)
3181 {
3182         int nmaps = 0;
3183         int i, j, k;
3184
3185         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3186                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3187                 struct kvm_rmap_desc *d;
3188
3189                 for (j = 0; j < m->npages; ++j) {
3190                         unsigned long *rmapp = &m->rmap[j];
3191
3192                         if (!*rmapp)
3193                                 continue;
3194                         if (!(*rmapp & 1)) {
3195                                 ++nmaps;
3196                                 continue;
3197                         }
3198                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3199                         while (d) {
3200                                 for (k = 0; k < RMAP_EXT; ++k)
3201                                         if (d->sptes[k])
3202                                                 ++nmaps;
3203                                         else
3204                                                 break;
3205                                 d = d->more;
3206                         }
3207                 }
3208         }
3209         return nmaps;
3210 }
3211
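/*
 * Walker callback: every writable spte should map a gfn that belongs to
 * a memslot and whose rmap is non-empty; complain (rate-limited) with a
 * backtrace when either check fails.
 */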
3212 static void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp,
                                  u64 *sptep)
3213 {
3214         unsigned long *rmapp;
3215         struct kvm_mmu_page *rev_sp;
3216         gfn_t gfn;
3217
3218         if (*sptep & PT_WRITABLE_MASK) {
3219                 rev_sp = page_header(__pa(sptep));
3220                 gfn = rev_sp->gfns[sptep - rev_sp->spt];
3221
3222                 if (!gfn_to_memslot(kvm, gfn)) {
3223                         if (!printk_ratelimit())
3224                                 return;
3225                         printk(KERN_ERR "%s: no memslot for gfn %llx\n",
3226                                          audit_msg, gfn);
3227                         printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
3228                                         audit_msg, sptep - rev_sp->spt,
3229                                         rev_sp->gfn);
3230                         dump_stack();
3231                         return;
3232                 }
3233
3234                 rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
3235                                     is_large_pte(*sptep));
3236                 if (!*rmapp) {
3237                         if (!printk_ratelimit())
3238                                 return;
3239                         printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3240                                          audit_msg, *sptep);
3241                         dump_stack();
3242                 }
3243         }
3245 }
3246
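/* Apply the writable-spte rmap check to the whole shadow tree. */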
3247 static void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3248 {
3249         mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3250 }
3251
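/*
 * Same rmap check, but driven from the list of active last-level shadow
 * pages instead of a walk from the root.
 */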
3252 static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3253 {
3254         struct kvm_mmu_page *sp;
3255         int i;
3256
3257         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3258                 u64 *pt = sp->spt;
3259
3260                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3261                         continue;
3262
3263                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3264                         u64 ent = pt[i];
3265
3266                         if (!(ent & PT_PRESENT_MASK))
3267                                 continue;
3268                         if (!(ent & PT_WRITABLE_MASK))
3269                                 continue;
3270                         inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3271                 }
3272         }
3274 }
3275
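/*
 * Rmap audits; count_rmaps()'s result is unused, but the traversal
 * itself exercises every rmap chain.
 */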
3276 static void audit_rmap(struct kvm_vcpu *vcpu)
3277 {
3278         check_writable_mappings_rmap(vcpu);
3279         count_rmaps(vcpu);
3280 }
3281
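/*
 * Any synced, indirect shadow page shadows a guest page table, so its
 * gfn must be write-protected; report sptes that still map it writable.
 */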
3282 static void audit_write_protection(struct kvm_vcpu *vcpu)
3283 {
3284         struct kvm_mmu_page *sp;
3285         struct kvm_memory_slot *slot;
3286         unsigned long *rmapp;
3287         u64 *spte;
3288         gfn_t gfn;
3289
3290         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3291                 if (sp->role.direct)
3292                         continue;
3293                 if (sp->unsync)
3294                         continue;
3295
3296                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3297                 slot = gfn_to_memslot_unaliased(vcpu->kvm, gfn);
3298                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3299
3300                 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3301                 while (spte) {
3302                         if (*spte & PT_WRITABLE_MASK)
3303                                 printk(KERN_ERR "%s: (%s) shadow page has "
3304                                        "writable mappings: gfn %llx role %x\n",
3305                                        __func__, audit_msg, sp->gfn,
3306                                        sp->role.word);
3307                         spte = rmap_next(vcpu->kvm, rmapp, spte);
3308                 }
3309         }
3310 }
3311
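/*
 * Run all audits, tagged with @msg for the report.  Debug printing is
 * suppressed while auditing.  The mapping audit is skipped at the
 * "pre pte write" point, where shadow and guest tables may legitimately
 * be out of sync for the pte about to be written.
 */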
3312 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3313 {
3314         int olddbg = dbg;
3315
3316         dbg = 0;
3317         audit_msg = msg;
3318         audit_rmap(vcpu);
3319         audit_write_protection(vcpu);
3320         if (strcmp("pre pte write", audit_msg) != 0)
3321                 audit_mappings(vcpu);
3322         audit_writable_sptes_have_rmaps(vcpu);
3323         dbg = olddbg;
3324 }
3325
3326 #endif