1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * MMU support
8  *
9  * Copyright (C) 2006 Qumranet, Inc.
10  *
11  * Authors:
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *   Avi Kivity   <avi@qumranet.com>
14  *
15  * This work is licensed under the terms of the GNU GPL, version 2.  See
16  * the COPYING file in the top-level directory.
17  *
18  */
19
20 #include "mmu.h"
21 #include "kvm_cache_regs.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/types.h>
25 #include <linux/string.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/module.h>
29 #include <linux/swap.h>
30 #include <linux/hugetlb.h>
31 #include <linux/compiler.h>
32
33 #include <asm/page.h>
34 #include <asm/cmpxchg.h>
35 #include <asm/io.h>
36 #include <asm/vmx.h>
37
38 /*
39  * Setting this variable to true enables Two-Dimensional Paging, where
40  * the hardware walks two page tables:
41  * 1. the guest-virtual to guest-physical translation
42  * 2. while doing 1., the guest-physical to host-physical translation
43  * If the hardware supports this, we don't need to do shadow paging.
44  */
45 bool tdp_enabled = false;
46
47 #undef MMU_DEBUG
48
49 #undef AUDIT
50
51 #ifdef AUDIT
52 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
53 #else
54 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
55 #endif
56
57 #ifdef MMU_DEBUG
58
59 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
60 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
61
62 #else
63
64 #define pgprintk(x...) do { } while (0)
65 #define rmap_printk(x...) do { } while (0)
66
67 #endif
68
69 #if defined(MMU_DEBUG) || defined(AUDIT)
70 static int dbg = 0;
71 module_param(dbg, bool, 0644);
72 #endif
73
74 static int oos_shadow = 1;
75 module_param(oos_shadow, bool, 0644);
76
77 #ifndef MMU_DEBUG
78 #define ASSERT(x) do { } while (0)
79 #else
80 #define ASSERT(x)                                                       \
81         if (!(x)) {                                                     \
82                 printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
83                        __FILE__, __LINE__, #x);                         \
84         }
85 #endif
86
87 #define PT_FIRST_AVAIL_BITS_SHIFT 9
88 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
89
90 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
91
92 #define PT64_LEVEL_BITS 9
93
94 #define PT64_LEVEL_SHIFT(level) \
95                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
96
97 #define PT64_LEVEL_MASK(level) \
98                 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
99
100 #define PT64_INDEX(address, level)\
101         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
102
103
104 #define PT32_LEVEL_BITS 10
105
106 #define PT32_LEVEL_SHIFT(level) \
107                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
108
109 #define PT32_LEVEL_MASK(level) \
110                 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
111 #define PT32_LVL_OFFSET_MASK(level) \
112         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
113                                                 * PT32_LEVEL_BITS))) - 1))
114
115 #define PT32_INDEX(address, level)\
116         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
117
118
119 #define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
120 #define PT64_DIR_BASE_ADDR_MASK \
121         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
122 #define PT64_LVL_ADDR_MASK(level) \
123         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
124                                                 * PT64_LEVEL_BITS))) - 1))
125 #define PT64_LVL_OFFSET_MASK(level) \
126         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
127                                                 * PT64_LEVEL_BITS))) - 1))
128
129 #define PT32_BASE_ADDR_MASK PAGE_MASK
130 #define PT32_DIR_BASE_ADDR_MASK \
131         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
132 #define PT32_LVL_ADDR_MASK(level) \
133         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
134                                             * PT32_LEVEL_BITS))) - 1))
135
136 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
137                         | PT64_NX_MASK)
138
139 #define PFERR_PRESENT_MASK (1U << 0)
140 #define PFERR_WRITE_MASK (1U << 1)
141 #define PFERR_USER_MASK (1U << 2)
142 #define PFERR_RSVD_MASK (1U << 3)
143 #define PFERR_FETCH_MASK (1U << 4)
144
145 #define PT_PDPE_LEVEL 3
146 #define PT_DIRECTORY_LEVEL 2
147 #define PT_PAGE_TABLE_LEVEL 1
148
149 #define RMAP_EXT 4
150
151 #define ACC_EXEC_MASK    1
152 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
153 #define ACC_USER_MASK    PT_USER_MASK
154 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
155
156 #define CREATE_TRACE_POINTS
157 #include "mmutrace.h"
158
159 #define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
160
161 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
162
163 struct kvm_rmap_desc {
164         u64 *sptes[RMAP_EXT];
165         struct kvm_rmap_desc *more;
166 };
167
168 struct kvm_shadow_walk_iterator {
169         u64 addr;
170         hpa_t shadow_addr;
171         int level;
172         u64 *sptep;
173         unsigned index;
174 };
175
176 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
177         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
178              shadow_walk_okay(&(_walker));                      \
179              shadow_walk_next(&(_walker)))
180
181
182 struct kvm_unsync_walk {
183         int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
184 };
185
186 typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
187
188 static struct kmem_cache *pte_chain_cache;
189 static struct kmem_cache *rmap_desc_cache;
190 static struct kmem_cache *mmu_page_header_cache;
191
192 static u64 __read_mostly shadow_trap_nonpresent_pte;
193 static u64 __read_mostly shadow_notrap_nonpresent_pte;
194 static u64 __read_mostly shadow_base_present_pte;
195 static u64 __read_mostly shadow_nx_mask;
196 static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
197 static u64 __read_mostly shadow_user_mask;
198 static u64 __read_mostly shadow_accessed_mask;
199 static u64 __read_mostly shadow_dirty_mask;
200
201 static inline u64 rsvd_bits(int s, int e)
202 {
203         return ((1ULL << (e - s + 1)) - 1) << s;
204 }
205
206 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
207 {
208         shadow_trap_nonpresent_pte = trap_pte;
209         shadow_notrap_nonpresent_pte = notrap_pte;
210 }
211 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
212
213 void kvm_mmu_set_base_ptes(u64 base_pte)
214 {
215         shadow_base_present_pte = base_pte;
216 }
217 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
218
219 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
220                 u64 dirty_mask, u64 nx_mask, u64 x_mask)
221 {
222         shadow_user_mask = user_mask;
223         shadow_accessed_mask = accessed_mask;
224         shadow_dirty_mask = dirty_mask;
225         shadow_nx_mask = nx_mask;
226         shadow_x_mask = x_mask;
227 }
228 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
229
230 static int is_write_protection(struct kvm_vcpu *vcpu)
231 {
232         return vcpu->arch.cr0 & X86_CR0_WP;
233 }
234
235 static int is_cpuid_PSE36(void)
236 {
237         return 1;
238 }
239
240 static int is_nx(struct kvm_vcpu *vcpu)
241 {
242         return vcpu->arch.shadow_efer & EFER_NX;
243 }
244
245 static int is_shadow_present_pte(u64 pte)
246 {
247         return pte != shadow_trap_nonpresent_pte
248                 && pte != shadow_notrap_nonpresent_pte;
249 }
250
251 static int is_large_pte(u64 pte)
252 {
253         return pte & PT_PAGE_SIZE_MASK;
254 }
255
256 static int is_writeble_pte(unsigned long pte)
257 {
258         return pte & PT_WRITABLE_MASK;
259 }
260
261 static int is_dirty_gpte(unsigned long pte)
262 {
263         return pte & PT_DIRTY_MASK;
264 }
265
266 static int is_rmap_spte(u64 pte)
267 {
268         return is_shadow_present_pte(pte);
269 }
270
271 static int is_last_spte(u64 pte, int level)
272 {
273         if (level == PT_PAGE_TABLE_LEVEL)
274                 return 1;
275         if (is_large_pte(pte))
276                 return 1;
277         return 0;
278 }
279
280 static pfn_t spte_to_pfn(u64 pte)
281 {
282         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
283 }
284
285 static gfn_t pse36_gfn_delta(u32 gpte)
286 {
287         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
288
289         return (gpte & PT32_DIR_PSE36_MASK) << shift;
290 }
291
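/*
 * Update an spte with a single atomic 64-bit store, so that hardware and
 * other CPUs walking the shadow page tables never observe a torn entry.
 * set_64bit() takes different pointer types on 32-bit and 64-bit builds,
 * hence the two casts below.
 */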
292 static void __set_spte(u64 *sptep, u64 spte)
293 {
294 #ifdef CONFIG_X86_64
295         set_64bit((unsigned long *)sptep, spte);
296 #else
297         set_64bit((unsigned long long *)sptep, spte);
298 #endif
299 }
300
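/*
 * Pre-fill a fixed-size per-vcpu object cache from a kmem_cache, so that
 * later MMU code can take objects via mmu_memory_cache_alloc() without
 * calling into the allocator (typically while holding mmu_lock, where
 * sleeping allocations are not an option).
 */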
301 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
302                                   struct kmem_cache *base_cache, int min)
303 {
304         void *obj;
305
306         if (cache->nobjs >= min)
307                 return 0;
308         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
309                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
310                 if (!obj)
311                         return -ENOMEM;
312                 cache->objects[cache->nobjs++] = obj;
313         }
314         return 0;
315 }
316
317 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
318 {
319         while (mc->nobjs)
320                 kfree(mc->objects[--mc->nobjs]);
321 }
322
323 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
324                                        int min)
325 {
326         struct page *page;
327
328         if (cache->nobjs >= min)
329                 return 0;
330         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
331                 page = alloc_page(GFP_KERNEL);
332                 if (!page)
333                         return -ENOMEM;
334                 set_page_private(page, 0);
335                 cache->objects[cache->nobjs++] = page_address(page);
336         }
337         return 0;
338 }
339
340 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
341 {
342         while (mc->nobjs)
343                 free_page((unsigned long)mc->objects[--mc->nobjs]);
344 }
345
346 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
347 {
348         int r;
349
350         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
351                                    pte_chain_cache, 4);
352         if (r)
353                 goto out;
354         r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
355                                    rmap_desc_cache, 4);
356         if (r)
357                 goto out;
358         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
359         if (r)
360                 goto out;
361         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
362                                    mmu_page_header_cache, 4);
363 out:
364         return r;
365 }
366
367 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
368 {
369         mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
370         mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
371         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
372         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
373 }
374
375 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
376                                     size_t size)
377 {
378         void *p;
379
380         BUG_ON(!mc->nobjs);
381         p = mc->objects[--mc->nobjs];
382         return p;
383 }
384
385 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
386 {
387         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
388                                       sizeof(struct kvm_pte_chain));
389 }
390
391 static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
392 {
393         kfree(pc);
394 }
395
396 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
397 {
398         return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
399                                       sizeof(struct kvm_rmap_desc));
400 }
401
402 static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
403 {
404         kfree(rd);
405 }
406
407 /*
408  * Return the pointer to the largepage write count for a given
409  * gfn, handling slots that are not large page aligned.
410  */
411 static int *slot_largepage_idx(gfn_t gfn,
412                                struct kvm_memory_slot *slot,
413                                int level)
414 {
415         unsigned long idx;
416
417         idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
418               (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
419         return &slot->lpage_info[level - 2][idx].write_count;
420 }
421
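/*
 * Record that gfn is covered by a shadowed page table: bump the write
 * count for every large-page region containing it, which keeps
 * has_wrprotected_page() true and so prevents mapping that region with
 * a huge page.
 */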
422 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
423 {
424         struct kvm_memory_slot *slot;
425         int *write_count;
426         int i;
427
428         gfn = unalias_gfn(kvm, gfn);
429
430         slot = gfn_to_memslot_unaliased(kvm, gfn);
431         for (i = PT_DIRECTORY_LEVEL;
432              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
433                 write_count   = slot_largepage_idx(gfn, slot, i);
434                 *write_count += 1;
435         }
436 }
437
438 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
439 {
440         struct kvm_memory_slot *slot;
441         int *write_count;
442         int i;
443
444         gfn = unalias_gfn(kvm, gfn);
445         for (i = PT_DIRECTORY_LEVEL;
446              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
447                 slot          = gfn_to_memslot_unaliased(kvm, gfn);
448                 write_count   = slot_largepage_idx(gfn, slot, i);
449                 *write_count -= 1;
450                 WARN_ON(*write_count < 0);
451         }
452 }
453
454 static int has_wrprotected_page(struct kvm *kvm,
455                                 gfn_t gfn,
456                                 int level)
457 {
458         struct kvm_memory_slot *slot;
459         int *largepage_idx;
460
461         gfn = unalias_gfn(kvm, gfn);
462         slot = gfn_to_memslot_unaliased(kvm, gfn);
463         if (slot) {
464                 largepage_idx = slot_largepage_idx(gfn, slot, level);
465                 return *largepage_idx;
466         }
467
468         return 1;
469 }
470
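/*
 * Return the largest mapping level supported by the host VMA backing
 * gfn, i.e. the biggest KVM page size the userspace mapping could
 * accommodate.
 */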
471 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
472 {
473         unsigned long page_size = PAGE_SIZE;
474         struct vm_area_struct *vma;
475         unsigned long addr;
476         int i, ret = 0;
477
478         addr = gfn_to_hva(kvm, gfn);
479         if (kvm_is_error_hva(addr))
480                 return PT_PAGE_TABLE_LEVEL;
481
482         down_read(&current->mm->mmap_sem);
483         vma = find_vma(current->mm, addr);
484         if (!vma)
485                 goto out;
486
487         page_size = vma_kernel_pagesize(vma);
488
489 out:
490         up_read(&current->mm->mmap_sem);
491
492         for (i = PT_PAGE_TABLE_LEVEL;
493              i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
494                 if (page_size >= KVM_HPAGE_SIZE(i))
495                         ret = i;
496                 else
497                         break;
498         }
499
500         return ret;
501 }
502
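/*
 * Pick the mapping level for large_gfn: slots with dirty logging enabled
 * are always mapped with 4k pages; otherwise the level is capped by the
 * host mapping and backed off while any candidate level still contains a
 * write-protected (shadowed) page.
 */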
503 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
504 {
505         struct kvm_memory_slot *slot;
506         int host_level;
507         int level = PT_PAGE_TABLE_LEVEL;
508
509         slot = gfn_to_memslot(vcpu->kvm, large_gfn);
510         if (slot && slot->dirty_bitmap)
511                 return PT_PAGE_TABLE_LEVEL;
512
513         host_level = host_mapping_level(vcpu->kvm, large_gfn);
514
515         if (host_level == PT_PAGE_TABLE_LEVEL)
516                 return host_level;
517
518         for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level)
519                 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
520                         break;
521
522         return level - 1;
523 }
524
525 /*
526  * Take gfn and return the reverse mapping to it.
527  * Note: gfn must be unaliased before this function gets called
528  */
529
530 static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
531 {
532         struct kvm_memory_slot *slot;
533         unsigned long idx;
534
535         slot = gfn_to_memslot(kvm, gfn);
536         if (likely(level == PT_PAGE_TABLE_LEVEL))
537                 return &slot->rmap[gfn - slot->base_gfn];
538
539         idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
540                 (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
541
542         return &slot->lpage_info[level - 2][idx].rmap_pde;
543 }
544
545 /*
546  * Reverse mapping data structures:
547  *
548  * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
549  * that points to page_address(page).
550  *
551  * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
552  * containing more mappings.
553  *
554  * Returns the number of rmap entries before the spte was added or zero if
555  * the spte was not added.
556  *
557  */
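/*
 * For example (a sketch of the encoding above): with a single mapping,
 * *rmapp holds the spte pointer directly; once a second spte is added,
 * *rmapp = (unsigned long)desc | 1, with desc->sptes[0] and
 * desc->sptes[1] holding the two sptes and desc->more chaining further
 * descriptors when all RMAP_EXT slots are in use.
 */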
558 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
559 {
560         struct kvm_mmu_page *sp;
561         struct kvm_rmap_desc *desc;
562         unsigned long *rmapp;
563         int i, count = 0;
564
565         if (!is_rmap_spte(*spte))
566                 return count;
567         gfn = unalias_gfn(vcpu->kvm, gfn);
568         sp = page_header(__pa(spte));
569         sp->gfns[spte - sp->spt] = gfn;
570         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
571         if (!*rmapp) {
572                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
573                 *rmapp = (unsigned long)spte;
574         } else if (!(*rmapp & 1)) {
575                 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
576                 desc = mmu_alloc_rmap_desc(vcpu);
577                 desc->sptes[0] = (u64 *)*rmapp;
578                 desc->sptes[1] = spte;
579                 *rmapp = (unsigned long)desc | 1;
580         } else {
581                 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
582                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
583                 while (desc->sptes[RMAP_EXT-1] && desc->more) {
584                         desc = desc->more;
585                         count += RMAP_EXT;
586                 }
587                 if (desc->sptes[RMAP_EXT-1]) {
588                         desc->more = mmu_alloc_rmap_desc(vcpu);
589                         desc = desc->more;
590                 }
591                 for (i = 0; desc->sptes[i]; ++i)
592                         ;
593                 desc->sptes[i] = spte;
594         }
595         return count;
596 }
597
598 static void rmap_desc_remove_entry(unsigned long *rmapp,
599                                    struct kvm_rmap_desc *desc,
600                                    int i,
601                                    struct kvm_rmap_desc *prev_desc)
602 {
603         int j;
604
605         for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
606                 ;
607         desc->sptes[i] = desc->sptes[j];
608         desc->sptes[j] = NULL;
609         if (j != 0)
610                 return;
611         if (!prev_desc && !desc->more)
612                 *rmapp = (unsigned long)desc->sptes[0];
613         else
614                 if (prev_desc)
615                         prev_desc->more = desc->more;
616                 else
617                         *rmapp = (unsigned long)desc->more | 1;
618         mmu_free_rmap_desc(desc);
619 }
620
621 static void rmap_remove(struct kvm *kvm, u64 *spte)
622 {
623         struct kvm_rmap_desc *desc;
624         struct kvm_rmap_desc *prev_desc;
625         struct kvm_mmu_page *sp;
626         pfn_t pfn;
627         unsigned long *rmapp;
628         int i;
629
630         if (!is_rmap_spte(*spte))
631                 return;
632         sp = page_header(__pa(spte));
633         pfn = spte_to_pfn(*spte);
634         if (*spte & shadow_accessed_mask)
635                 kvm_set_pfn_accessed(pfn);
636         if (is_writeble_pte(*spte))
637                 kvm_set_pfn_dirty(pfn);
638         rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
639         if (!*rmapp) {
640                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
641                 BUG();
642         } else if (!(*rmapp & 1)) {
643                 rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
644                 if ((u64 *)*rmapp != spte) {
645                         printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
646                                spte, *spte);
647                         BUG();
648                 }
649                 *rmapp = 0;
650         } else {
651                 rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
652                 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
653                 prev_desc = NULL;
654                 while (desc) {
655                         for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
656                                 if (desc->sptes[i] == spte) {
657                                         rmap_desc_remove_entry(rmapp,
658                                                                desc, i,
659                                                                prev_desc);
660                                         return;
661                                 }
662                         prev_desc = desc;
663                         desc = desc->more;
664                 }
665                 BUG();
666         }
667 }
668
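/*
 * Iterate over an rmap chain: pass spte == NULL to get the first spte,
 * or the previously returned spte to get the next one; returns NULL when
 * the chain is exhausted.
 */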
669 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
670 {
671         struct kvm_rmap_desc *desc;
672         struct kvm_rmap_desc *prev_desc;
673         u64 *prev_spte;
674         int i;
675
676         if (!*rmapp)
677                 return NULL;
678         else if (!(*rmapp & 1)) {
679                 if (!spte)
680                         return (u64 *)*rmapp;
681                 return NULL;
682         }
683         desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
684         prev_desc = NULL;
685         prev_spte = NULL;
686         while (desc) {
687                 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
688                         if (prev_spte == spte)
689                                 return desc->sptes[i];
690                         prev_spte = desc->sptes[i];
691                 }
692                 desc = desc->more;
693         }
694         return NULL;
695 }
696
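/*
 * Strip write access from every spte mapping gfn.  4k sptes are simply
 * made read-only; writable huge-page sptes are removed outright.  Returns
 * nonzero if anything changed, so the caller knows a TLB flush is needed.
 */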
697 static int rmap_write_protect(struct kvm *kvm, u64 gfn)
698 {
699         unsigned long *rmapp;
700         u64 *spte;
701         int i, write_protected = 0;
702
703         gfn = unalias_gfn(kvm, gfn);
704         rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
705
706         spte = rmap_next(kvm, rmapp, NULL);
707         while (spte) {
708                 BUG_ON(!spte);
709                 BUG_ON(!(*spte & PT_PRESENT_MASK));
710                 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
711                 if (is_writeble_pte(*spte)) {
712                         __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
713                         write_protected = 1;
714                 }
715                 spte = rmap_next(kvm, rmapp, spte);
716         }
717         if (write_protected) {
718                 pfn_t pfn;
719
720                 spte = rmap_next(kvm, rmapp, NULL);
721                 pfn = spte_to_pfn(*spte);
722                 kvm_set_pfn_dirty(pfn);
723         }
724
725         /* check for huge page mappings */
726         for (i = PT_DIRECTORY_LEVEL;
727              i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
728                 rmapp = gfn_to_rmap(kvm, gfn, i);
729                 spte = rmap_next(kvm, rmapp, NULL);
730                 while (spte) {
731                         BUG_ON(!spte);
732                         BUG_ON(!(*spte & PT_PRESENT_MASK));
733                         BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
734                         pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
735                         if (is_writeble_pte(*spte)) {
736                                 rmap_remove(kvm, spte);
737                                 --kvm->stat.lpages;
738                                 __set_spte(spte, shadow_trap_nonpresent_pte);
739                                 spte = NULL;
740                                 write_protected = 1;
741                         }
742                         spte = rmap_next(kvm, rmapp, spte);
743                 }
744         }
745
746         return write_protected;
747 }
748
749 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
750                            unsigned long data)
751 {
752         u64 *spte;
753         int need_tlb_flush = 0;
754
755         while ((spte = rmap_next(kvm, rmapp, NULL))) {
756                 BUG_ON(!(*spte & PT_PRESENT_MASK));
757                 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
758                 rmap_remove(kvm, spte);
759                 __set_spte(spte, shadow_trap_nonpresent_pte);
760                 need_tlb_flush = 1;
761         }
762         return need_tlb_flush;
763 }
764
765 static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
766                              unsigned long data)
767 {
768         int need_flush = 0;
769         u64 *spte, new_spte;
770         pte_t *ptep = (pte_t *)data;
771         pfn_t new_pfn;
772
773         WARN_ON(pte_huge(*ptep));
774         new_pfn = pte_pfn(*ptep);
775         spte = rmap_next(kvm, rmapp, NULL);
776         while (spte) {
777                 BUG_ON(!is_shadow_present_pte(*spte));
778                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
779                 need_flush = 1;
780                 if (pte_write(*ptep)) {
781                         rmap_remove(kvm, spte);
782                         __set_spte(spte, shadow_trap_nonpresent_pte);
783                         spte = rmap_next(kvm, rmapp, NULL);
784                 } else {
785                         new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
786                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
787
788                         new_spte &= ~PT_WRITABLE_MASK;
789                         new_spte &= ~SPTE_HOST_WRITEABLE;
790                         if (is_writeble_pte(*spte))
791                                 kvm_set_pfn_dirty(spte_to_pfn(*spte));
792                         __set_spte(spte, new_spte);
793                         spte = rmap_next(kvm, rmapp, spte);
794                 }
795         }
796         if (need_flush)
797                 kvm_flush_remote_tlbs(kvm);
798
799         return 0;
800 }
801
802 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
803                           unsigned long data,
804                           int (*handler)(struct kvm *kvm, unsigned long *rmapp,
805                                          unsigned long data))
806 {
807         int i, j;
808         int retval = 0;
809
810         /*
811          * If mmap_sem isn't taken, we can look at the memslots with only
812          * the mmu_lock held, by skipping over the slots with userspace_addr == 0.
813          */
814         for (i = 0; i < kvm->nmemslots; i++) {
815                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
816                 unsigned long start = memslot->userspace_addr;
817                 unsigned long end;
818
819                 /* mmu_lock protects userspace_addr */
820                 if (!start)
821                         continue;
822
823                 end = start + (memslot->npages << PAGE_SHIFT);
824                 if (hva >= start && hva < end) {
825                         gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
826
827                         retval |= handler(kvm, &memslot->rmap[gfn_offset],
828                                           data);
829
830                         for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
831                                 int idx = gfn_offset;
832                                 idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
833                                 retval |= handler(kvm,
834                                         &memslot->lpage_info[j][idx].rmap_pde,
835                                         data);
836                         }
837                 }
838         }
839
840         return retval;
841 }
842
843 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
844 {
845         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
846 }
847
848 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
849 {
850         kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
851 }
852
853 static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
854                          unsigned long data)
855 {
856         u64 *spte;
857         int young = 0;
858
859         /* always return old for EPT */
860         if (!shadow_accessed_mask)
861                 return 0;
862
863         spte = rmap_next(kvm, rmapp, NULL);
864         while (spte) {
865                 int _young;
866                 u64 _spte = *spte;
867                 BUG_ON(!(_spte & PT_PRESENT_MASK));
868                 _young = _spte & PT_ACCESSED_MASK;
869                 if (_young) {
870                         young = 1;
871                         clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
872                 }
873                 spte = rmap_next(kvm, rmapp, spte);
874         }
875         return young;
876 }
877
878 #define RMAP_RECYCLE_THRESHOLD 1000
879
880 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
881 {
882         unsigned long *rmapp;
883         struct kvm_mmu_page *sp;
884
885         sp = page_header(__pa(spte));
886
887         gfn = unalias_gfn(vcpu->kvm, gfn);
888         rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
889
890         kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
891         kvm_flush_remote_tlbs(vcpu->kvm);
892 }
893
894 int kvm_age_hva(struct kvm *kvm, unsigned long hva)
895 {
896         return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
897 }
898
899 #ifdef MMU_DEBUG
900 static int is_empty_shadow_page(u64 *spt)
901 {
902         u64 *pos;
903         u64 *end;
904
905         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
906                 if (is_shadow_present_pte(*pos)) {
907                         printk(KERN_ERR "%s: %p %llx\n", __func__,
908                                pos, *pos);
909                         return 0;
910                 }
911         return 1;
912 }
913 #endif
914
915 static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
916 {
917         ASSERT(is_empty_shadow_page(sp->spt));
918         list_del(&sp->link);
919         __free_page(virt_to_page(sp->spt));
920         __free_page(virt_to_page(sp->gfns));
921         kfree(sp);
922         ++kvm->arch.n_free_mmu_pages;
923 }
924
925 static unsigned kvm_page_table_hashfn(gfn_t gfn)
926 {
927         return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
928 }
929
930 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
931                                                u64 *parent_pte)
932 {
933         struct kvm_mmu_page *sp;
934
935         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
936         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
937         sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
938         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
939         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
940         INIT_LIST_HEAD(&sp->oos_link);
941         bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
942         sp->multimapped = 0;
943         sp->parent_pte = parent_pte;
944         --vcpu->kvm->arch.n_free_mmu_pages;
945         return sp;
946 }
947
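/*
 * Remember that parent_pte points at this shadow page.  A single parent
 * is kept inline in sp->parent_pte; once a page has several parents
 * (sp->multimapped), they are tracked in a list of kvm_pte_chain
 * structures hanging off sp->parent_ptes.
 */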
948 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
949                                     struct kvm_mmu_page *sp, u64 *parent_pte)
950 {
951         struct kvm_pte_chain *pte_chain;
952         struct hlist_node *node;
953         int i;
954
955         if (!parent_pte)
956                 return;
957         if (!sp->multimapped) {
958                 u64 *old = sp->parent_pte;
959
960                 if (!old) {
961                         sp->parent_pte = parent_pte;
962                         return;
963                 }
964                 sp->multimapped = 1;
965                 pte_chain = mmu_alloc_pte_chain(vcpu);
966                 INIT_HLIST_HEAD(&sp->parent_ptes);
967                 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
968                 pte_chain->parent_ptes[0] = old;
969         }
970         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
971                 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
972                         continue;
973                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
974                         if (!pte_chain->parent_ptes[i]) {
975                                 pte_chain->parent_ptes[i] = parent_pte;
976                                 return;
977                         }
978         }
979         pte_chain = mmu_alloc_pte_chain(vcpu);
980         BUG_ON(!pte_chain);
981         hlist_add_head(&pte_chain->link, &sp->parent_ptes);
982         pte_chain->parent_ptes[0] = parent_pte;
983 }
984
985 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
986                                        u64 *parent_pte)
987 {
988         struct kvm_pte_chain *pte_chain;
989         struct hlist_node *node;
990         int i;
991
992         if (!sp->multimapped) {
993                 BUG_ON(sp->parent_pte != parent_pte);
994                 sp->parent_pte = NULL;
995                 return;
996         }
997         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
998                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
999                         if (!pte_chain->parent_ptes[i])
1000                                 break;
1001                         if (pte_chain->parent_ptes[i] != parent_pte)
1002                                 continue;
1003                         while (i + 1 < NR_PTE_CHAIN_ENTRIES
1004                                 && pte_chain->parent_ptes[i + 1]) {
1005                                 pte_chain->parent_ptes[i]
1006                                         = pte_chain->parent_ptes[i + 1];
1007                                 ++i;
1008                         }
1009                         pte_chain->parent_ptes[i] = NULL;
1010                         if (i == 0) {
1011                                 hlist_del(&pte_chain->link);
1012                                 mmu_free_pte_chain(pte_chain);
1013                                 if (hlist_empty(&sp->parent_ptes)) {
1014                                         sp->multimapped = 0;
1015                                         sp->parent_pte = NULL;
1016                                 }
1017                         }
1018                         return;
1019                 }
1020         BUG();
1021 }
1022
1023
1024 static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1025                             mmu_parent_walk_fn fn)
1026 {
1027         struct kvm_pte_chain *pte_chain;
1028         struct hlist_node *node;
1029         struct kvm_mmu_page *parent_sp;
1030         int i;
1031
1032         if (!sp->multimapped && sp->parent_pte) {
1033                 parent_sp = page_header(__pa(sp->parent_pte));
1034                 fn(vcpu, parent_sp);
1035                 mmu_parent_walk(vcpu, parent_sp, fn);
1036                 return;
1037         }
1038         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1039                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1040                         if (!pte_chain->parent_ptes[i])
1041                                 break;
1042                         parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
1043                         fn(vcpu, parent_sp);
1044                         mmu_parent_walk(vcpu, parent_sp, fn);
1045                 }
1046 }
1047
1048 static void kvm_mmu_update_unsync_bitmap(u64 *spte)
1049 {
1050         unsigned int index;
1051         struct kvm_mmu_page *sp = page_header(__pa(spte));
1052
1053         index = spte - sp->spt;
1054         if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
1055                 sp->unsync_children++;
1056         WARN_ON(!sp->unsync_children);
1057 }
1058
1059 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
1060 {
1061         struct kvm_pte_chain *pte_chain;
1062         struct hlist_node *node;
1063         int i;
1064
1065         if (!sp->parent_pte)
1066                 return;
1067
1068         if (!sp->multimapped) {
1069                 kvm_mmu_update_unsync_bitmap(sp->parent_pte);
1070                 return;
1071         }
1072
1073         hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1074                 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1075                         if (!pte_chain->parent_ptes[i])
1076                                 break;
1077                         kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
1078                 }
1079 }
1080
1081 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1082 {
1083         kvm_mmu_update_parents_unsync(sp);
1084         return 1;
1085 }
1086
1087 static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
1088                                         struct kvm_mmu_page *sp)
1089 {
1090         mmu_parent_walk(vcpu, sp, unsync_walk_fn);
1091         kvm_mmu_update_parents_unsync(sp);
1092 }
1093
1094 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1095                                     struct kvm_mmu_page *sp)
1096 {
1097         int i;
1098
1099         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1100                 sp->spt[i] = shadow_trap_nonpresent_pte;
1101 }
1102
1103 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1104                                struct kvm_mmu_page *sp)
1105 {
1106         return 1;
1107 }
1108
1109 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1110 {
1111 }
1112
1113 #define KVM_PAGE_ARRAY_NR 16
1114
1115 struct kvm_mmu_pages {
1116         struct mmu_page_and_offset {
1117                 struct kvm_mmu_page *sp;
1118                 unsigned int idx;
1119         } page[KVM_PAGE_ARRAY_NR];
1120         unsigned int nr;
1121 };
1122
1123 #define for_each_unsync_children(bitmap, idx)           \
1124         for (idx = find_first_bit(bitmap, 512);         \
1125              idx < 512;                                 \
1126              idx = find_next_bit(bitmap, 512, idx+1))
1127
1128 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1129                          int idx)
1130 {
1131         int i;
1132
1133         if (sp->unsync)
1134                 for (i=0; i < pvec->nr; i++)
1135                         if (pvec->page[i].sp == sp)
1136                                 return 0;
1137
1138         pvec->page[pvec->nr].sp = sp;
1139         pvec->page[pvec->nr].idx = idx;
1140         pvec->nr++;
1141         return (pvec->nr == KVM_PAGE_ARRAY_NR);
1142 }
1143
1144 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1145                            struct kvm_mmu_pages *pvec)
1146 {
1147         int i, ret, nr_unsync_leaf = 0;
1148
1149         for_each_unsync_children(sp->unsync_child_bitmap, i) {
1150                 u64 ent = sp->spt[i];
1151
1152                 if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
1153                         struct kvm_mmu_page *child;
1154                         child = page_header(ent & PT64_BASE_ADDR_MASK);
1155
1156                         if (child->unsync_children) {
1157                                 if (mmu_pages_add(pvec, child, i))
1158                                         return -ENOSPC;
1159
1160                                 ret = __mmu_unsync_walk(child, pvec);
1161                                 if (!ret)
1162                                         __clear_bit(i, sp->unsync_child_bitmap);
1163                                 else if (ret > 0)
1164                                         nr_unsync_leaf += ret;
1165                                 else
1166                                         return ret;
1167                         }
1168
1169                         if (child->unsync) {
1170                                 nr_unsync_leaf++;
1171                                 if (mmu_pages_add(pvec, child, i))
1172                                         return -ENOSPC;
1173                         }
1174                 }
1175         }
1176
1177         if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
1178                 sp->unsync_children = 0;
1179
1180         return nr_unsync_leaf;
1181 }
1182
1183 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1184                            struct kvm_mmu_pages *pvec)
1185 {
1186         if (!sp->unsync_children)
1187                 return 0;
1188
1189         mmu_pages_add(pvec, sp, 0);
1190         return __mmu_unsync_walk(sp, pvec);
1191 }
1192
1193 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
1194 {
1195         unsigned index;
1196         struct hlist_head *bucket;
1197         struct kvm_mmu_page *sp;
1198         struct hlist_node *node;
1199
1200         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1201         index = kvm_page_table_hashfn(gfn);
1202         bucket = &kvm->arch.mmu_page_hash[index];
1203         hlist_for_each_entry(sp, node, bucket, hash_link)
1204                 if (sp->gfn == gfn && !sp->role.direct
1205                     && !sp->role.invalid) {
1206                         pgprintk("%s: found role %x\n",
1207                                  __func__, sp->role.word);
1208                         return sp;
1209                 }
1210         return NULL;
1211 }
1212
1213 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1214 {
1215         WARN_ON(!sp->unsync);
1216         sp->unsync = 0;
1217         --kvm->stat.mmu_unsync;
1218 }
1219
1220 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);
1221
1222 static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1223 {
1224         if (sp->role.glevels != vcpu->arch.mmu.root_level) {
1225                 kvm_mmu_zap_page(vcpu->kvm, sp);
1226                 return 1;
1227         }
1228
1229         trace_kvm_mmu_sync_page(sp);
1230         if (rmap_write_protect(vcpu->kvm, sp->gfn))
1231                 kvm_flush_remote_tlbs(vcpu->kvm);
1232         kvm_unlink_unsync_page(vcpu->kvm, sp);
1233         if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
1234                 kvm_mmu_zap_page(vcpu->kvm, sp);
1235                 return 1;
1236         }
1237
1238         kvm_mmu_flush_tlb(vcpu);
1239         return 0;
1240 }
1241
1242 struct mmu_page_path {
1243         struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1244         unsigned int idx[PT64_ROOT_LEVEL-1];
1245 };
1246
1247 #define for_each_sp(pvec, sp, parents, i)                       \
1248                 for (i = mmu_pages_next(&pvec, &parents, -1),   \
1249                         sp = pvec.page[i].sp;                   \
1250                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
1251                         i = mmu_pages_next(&pvec, &parents, i))
1252
1253 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1254                           struct mmu_page_path *parents,
1255                           int i)
1256 {
1257         int n;
1258
1259         for (n = i+1; n < pvec->nr; n++) {
1260                 struct kvm_mmu_page *sp = pvec->page[n].sp;
1261
1262                 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1263                         parents->idx[0] = pvec->page[n].idx;
1264                         return n;
1265                 }
1266
1267                 parents->parent[sp->role.level-2] = sp;
1268                 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1269         }
1270
1271         return n;
1272 }
1273
1274 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
1275 {
1276         struct kvm_mmu_page *sp;
1277         unsigned int level = 0;
1278
1279         do {
1280                 unsigned int idx = parents->idx[level];
1281
1282                 sp = parents->parent[level];
1283                 if (!sp)
1284                         return;
1285
1286                 --sp->unsync_children;
1287                 WARN_ON((int)sp->unsync_children < 0);
1288                 __clear_bit(idx, sp->unsync_child_bitmap);
1289                 level++;
1290         } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
1291 }
1292
1293 static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1294                                struct mmu_page_path *parents,
1295                                struct kvm_mmu_pages *pvec)
1296 {
1297         parents->parent[parent->role.level-1] = NULL;
1298         pvec->nr = 0;
1299 }
1300
1301 static void mmu_sync_children(struct kvm_vcpu *vcpu,
1302                               struct kvm_mmu_page *parent)
1303 {
1304         int i;
1305         struct kvm_mmu_page *sp;
1306         struct mmu_page_path parents;
1307         struct kvm_mmu_pages pages;
1308
1309         kvm_mmu_pages_init(parent, &parents, &pages);
1310         while (mmu_unsync_walk(parent, &pages)) {
1311                 int protected = 0;
1312
1313                 for_each_sp(pages, sp, parents, i)
1314                         protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1315
1316                 if (protected)
1317                         kvm_flush_remote_tlbs(vcpu->kvm);
1318
1319                 for_each_sp(pages, sp, parents, i) {
1320                         kvm_sync_page(vcpu, sp);
1321                         mmu_pages_clear_parents(&parents);
1322                 }
1323                 cond_resched_lock(&vcpu->kvm->mmu_lock);
1324                 kvm_mmu_pages_init(parent, &parents, &pages);
1325         }
1326 }
1327
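/*
 * Find a shadow page for (gfn, role) in the hash table, syncing unsync
 * pages along the way, or allocate a fresh one on a miss.  Newly created
 * indirect pages get their gfn write-protected and accounted as
 * shadowed.
 */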
1328 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1329                                              gfn_t gfn,
1330                                              gva_t gaddr,
1331                                              unsigned level,
1332                                              int direct,
1333                                              unsigned access,
1334                                              u64 *parent_pte)
1335 {
1336         union kvm_mmu_page_role role;
1337         unsigned index;
1338         unsigned quadrant;
1339         struct hlist_head *bucket;
1340         struct kvm_mmu_page *sp;
1341         struct hlist_node *node, *tmp;
1342
1343         role = vcpu->arch.mmu.base_role;
1344         role.level = level;
1345         role.direct = direct;
1346         role.access = access;
1347         if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
1348                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1349                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1350                 role.quadrant = quadrant;
1351         }
1352         index = kvm_page_table_hashfn(gfn);
1353         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1354         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
1355                 if (sp->gfn == gfn) {
1356                         if (sp->unsync)
1357                                 if (kvm_sync_page(vcpu, sp))
1358                                         continue;
1359
1360                         if (sp->role.word != role.word)
1361                                 continue;
1362
1363                         mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1364                         if (sp->unsync_children) {
1365                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
1366                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
1367                         }
1368                         trace_kvm_mmu_get_page(sp, false);
1369                         return sp;
1370                 }
1371         ++vcpu->kvm->stat.mmu_cache_miss;
1372         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
1373         if (!sp)
1374                 return sp;
1375         sp->gfn = gfn;
1376         sp->role = role;
1377         hlist_add_head(&sp->hash_link, bucket);
1378         if (!direct) {
1379                 if (rmap_write_protect(vcpu->kvm, gfn))
1380                         kvm_flush_remote_tlbs(vcpu->kvm);
1381                 account_shadowed(vcpu->kvm, gfn);
1382         }
1383         if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1384                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1385         else
1386                 nonpaging_prefetch_page(vcpu, sp);
1387         trace_kvm_mmu_get_page(sp, true);
1388         return sp;
1389 }
1390
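/*
 * Shadow page-table walker: start at the shadow root (or the relevant
 * pae_root entry when the shadow root is PAE) and descend one level per
 * shadow_walk_next(), exposing the current sptep and index at each step.
 */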
1391 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1392                              struct kvm_vcpu *vcpu, u64 addr)
1393 {
1394         iterator->addr = addr;
1395         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1396         iterator->level = vcpu->arch.mmu.shadow_root_level;
1397         if (iterator->level == PT32E_ROOT_LEVEL) {
1398                 iterator->shadow_addr
1399                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1400                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1401                 --iterator->level;
1402                 if (!iterator->shadow_addr)
1403                         iterator->level = 0;
1404         }
1405 }
1406
1407 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1408 {
1409         if (iterator->level < PT_PAGE_TABLE_LEVEL)
1410                 return false;
1411
1412         if (iterator->level == PT_PAGE_TABLE_LEVEL)
1413                 if (is_large_pte(*iterator->sptep))
1414                         return false;
1415
1416         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1417         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1418         return true;
1419 }
1420
1421 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1422 {
1423         iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1424         --iterator->level;
1425 }
1426
1427 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
1428                                          struct kvm_mmu_page *sp)
1429 {
1430         unsigned i;
1431         u64 *pt;
1432         u64 ent;
1433
1434         pt = sp->spt;
1435
1436         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1437                 ent = pt[i];
1438
1439                 if (is_shadow_present_pte(ent)) {
1440                         if (!is_last_spte(ent, sp->role.level)) {
1441                                 ent &= PT64_BASE_ADDR_MASK;
1442                                 mmu_page_remove_parent_pte(page_header(ent),
1443                                                            &pt[i]);
1444                         } else {
1445                                 if (is_large_pte(ent))
1446                                         --kvm->stat.lpages;
1447                                 rmap_remove(kvm, &pt[i]);
1448                         }
1449                 }
1450                 pt[i] = shadow_trap_nonpresent_pte;
1451         }
1452 }
1453
1454 static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1455 {
1456         mmu_page_remove_parent_pte(sp, parent_pte);
1457 }
1458
1459 static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1460 {
1461         int i;
1462         struct kvm_vcpu *vcpu;
1463
1464         kvm_for_each_vcpu(i, vcpu, kvm)
1465                 vcpu->arch.last_pte_updated = NULL;
1466 }
1467
1468 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1469 {
1470         u64 *parent_pte;
1471
1472         while (sp->multimapped || sp->parent_pte) {
1473                 if (!sp->multimapped)
1474                         parent_pte = sp->parent_pte;
1475                 else {
1476                         struct kvm_pte_chain *chain;
1477
1478                         chain = container_of(sp->parent_ptes.first,
1479                                              struct kvm_pte_chain, link);
1480                         parent_pte = chain->parent_ptes[0];
1481                 }
1482                 BUG_ON(!parent_pte);
1483                 kvm_mmu_put_page(sp, parent_pte);
1484                 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
1485         }
1486 }
1487
1488 static int mmu_zap_unsync_children(struct kvm *kvm,
1489                                    struct kvm_mmu_page *parent)
1490 {
1491         int i, zapped = 0;
1492         struct mmu_page_path parents;
1493         struct kvm_mmu_pages pages;
1494
1495         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
1496                 return 0;
1497
1498         kvm_mmu_pages_init(parent, &parents, &pages);
1499         while (mmu_unsync_walk(parent, &pages)) {
1500                 struct kvm_mmu_page *sp;
1501
1502                 for_each_sp(pages, sp, parents, i) {
1503                         kvm_mmu_zap_page(kvm, sp);
1504                         mmu_pages_clear_parents(&parents);
1505                 }
1506                 zapped += pages.nr;
1507                 kvm_mmu_pages_init(parent, &parents, &pages);
1508         }
1509
1510         return zapped;
1511 }
1512
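/*
 * Tear down a shadow page: zap any unsync children, drop its sptes and
 * parent links, and either free it immediately or, if it is still in use
 * as a root, mark it invalid and ask the vcpus to reload their MMUs.
 */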
1513 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1514 {
1515         int ret;
1516
1517         trace_kvm_mmu_zap_page(sp);
1518         ++kvm->stat.mmu_shadow_zapped;
1519         ret = mmu_zap_unsync_children(kvm, sp);
1520         kvm_mmu_page_unlink_children(kvm, sp);
1521         kvm_mmu_unlink_parents(kvm, sp);
1522         kvm_flush_remote_tlbs(kvm);
1523         if (!sp->role.invalid && !sp->role.direct)
1524                 unaccount_shadowed(kvm, sp->gfn);
1525         if (sp->unsync)
1526                 kvm_unlink_unsync_page(kvm, sp);
1527         if (!sp->root_count) {
1528                 hlist_del(&sp->hash_link);
1529                 kvm_mmu_free_page(kvm, sp);
1530         } else {
1531                 sp->role.invalid = 1;
1532                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
1533                 kvm_reload_remote_mmus(kvm);
1534         }
1535         kvm_mmu_reset_last_pte_updated(kvm);
1536         return ret;
1537 }
1538
1539 /*
1540  * Changing the number of mmu pages allocated to the vm
1541  * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock
1542  */
1543 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1544 {
1545         int used_pages;
1546
1547         used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
1548         used_pages = max(0, used_pages);
1549
1550         /*
1551          * If we set the number of mmu pages to be smaller than the
1552          * number of active pages, we must free some mmu pages before we
1553          * change the value
1554          */
1555
1556         if (used_pages > kvm_nr_mmu_pages) {
1557                 while (used_pages > kvm_nr_mmu_pages) {
1558                         struct kvm_mmu_page *page;
1559
1560                         page = container_of(kvm->arch.active_mmu_pages.prev,
1561                                             struct kvm_mmu_page, link);
1562                         kvm_mmu_zap_page(kvm, page);
1563                         used_pages--;
1564                 }
1565                 kvm->arch.n_free_mmu_pages = 0;
1566         }
1567         else
1568                 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1569                                          - kvm->arch.n_alloc_mmu_pages;
1570
1571         kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
1572 }
1573
1574 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
1575 {
1576         unsigned index;
1577         struct hlist_head *bucket;
1578         struct kvm_mmu_page *sp;
1579         struct hlist_node *node, *n;
1580         int r;
1581
1582         pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1583         r = 0;
1584         index = kvm_page_table_hashfn(gfn);
1585         bucket = &kvm->arch.mmu_page_hash[index];
1586         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
1587                 if (sp->gfn == gfn && !sp->role.direct) {
1588                         pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1589                                  sp->role.word);
1590                         r = 1;
1591                         if (kvm_mmu_zap_page(kvm, sp))
1592                                 n = bucket->first;
1593                 }
1594         return r;
1595 }
1596
1597 static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
1598 {
1599         unsigned index;
1600         struct hlist_head *bucket;
1601         struct kvm_mmu_page *sp;
1602         struct hlist_node *node, *nn;
1603
1604         index = kvm_page_table_hashfn(gfn);
1605         bucket = &kvm->arch.mmu_page_hash[index];
1606         hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
1607                 if (sp->gfn == gfn && !sp->role.direct
1608                     && !sp->role.invalid) {
1609                         pgprintk("%s: zap %lx %x\n",
1610                                  __func__, gfn, sp->role.word);
1611                         kvm_mmu_zap_page(kvm, sp);
1612                 }
1613         }
1614 }
1615
1616 static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
1617 {
1618         int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
1619         struct kvm_mmu_page *sp = page_header(__pa(pte));
1620
1621         __set_bit(slot, sp->slot_bitmap);
1622 }
1623
1624 static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1625 {
1626         int i;
1627         u64 *pt = sp->spt;
1628
1629         if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1630                 return;
1631
1632         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1633                 if (pt[i] == shadow_notrap_nonpresent_pte)
1634                         __set_spte(&pt[i], shadow_trap_nonpresent_pte);
1635         }
1636 }
1637
1638 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
1639 {
1640         struct page *page;
1641
1642         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
1643
1644         if (gpa == UNMAPPED_GVA)
1645                 return NULL;
1646
1647         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1648
1649         return page;
1650 }
1651
1652 /*
1653  * The function is based on mtrr_type_lookup() in
1654  * arch/x86/kernel/cpu/mtrr/generic.c
1655  */
1656 static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1657                          u64 start, u64 end)
1658 {
1659         int i;
1660         u64 base, mask;
1661         u8 prev_match, curr_match;
1662         int num_var_ranges = KVM_NR_VAR_MTRR;
1663
1664         if (!mtrr_state->enabled)
1665                 return 0xFF;
1666
1667         /* Make end inclusive, instead of exclusive */
1668         end--;
1669
1670         /* Look in fixed ranges. Just return the type as per start */
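        /*
         * The fixed-range MSRs cover the first 1MB: 8 x 64KB entries below
         * 0x80000, 16 x 16KB entries up to 0xC0000, and 64 x 4KB entries up
         * to 0x100000.  For example, start = 0xA4000 falls in the second
         * group: idx = 8 + ((0xA4000 - 0x80000) >> 14) = 8 + 9 = 17.
         */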
1671         if (mtrr_state->have_fixed && (start < 0x100000)) {
1672                 int idx;
1673
1674                 if (start < 0x80000) {
1675                         idx = 0;
1676                         idx += (start >> 16);
1677                         return mtrr_state->fixed_ranges[idx];
1678                 } else if (start < 0xC0000) {
1679                         idx = 1 * 8;
1680                         idx += ((start - 0x80000) >> 14);
1681                         return mtrr_state->fixed_ranges[idx];
1682                 } else if (start < 0x1000000) {
1683                         idx = 3 * 8;
1684                         idx += ((start - 0xC0000) >> 12);
1685                         return mtrr_state->fixed_ranges[idx];
1686                 }
1687         }
1688
1689         /*
1690          * Look in variable ranges
1691          * Look for multiple ranges matching this address and pick the type
1692          * as per MTRR precedence
1693          */
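        /*
         * Precedence, as implemented below: if any matching range is
         * uncachable, the result is uncachable; a WB + WT mix resolves to
         * WT; any other disagreement between matching ranges is treated as
         * uncachable.
         */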
1694         if (!(mtrr_state->enabled & 2))
1695                 return mtrr_state->def_type;
1696
1697         prev_match = 0xFF;
1698         for (i = 0; i < num_var_ranges; ++i) {
1699                 unsigned short start_state, end_state;
1700
1701                 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1702                         continue;
1703
1704                 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1705                        (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1706                 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1707                        (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1708
1709                 start_state = ((start & mask) == (base & mask));
1710                 end_state = ((end & mask) == (base & mask));
1711                 if (start_state != end_state)
1712                         return 0xFE;
1713
1714                 if ((start & mask) != (base & mask))
1715                         continue;
1716
1717                 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1718                 if (prev_match == 0xFF) {
1719                         prev_match = curr_match;
1720                         continue;
1721                 }
1722
1723                 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1724                     curr_match == MTRR_TYPE_UNCACHABLE)
1725                         return MTRR_TYPE_UNCACHABLE;
1726
1727                 if ((prev_match == MTRR_TYPE_WRBACK &&
1728                      curr_match == MTRR_TYPE_WRTHROUGH) ||
1729                     (prev_match == MTRR_TYPE_WRTHROUGH &&
1730                      curr_match == MTRR_TYPE_WRBACK)) {
1731                         prev_match = MTRR_TYPE_WRTHROUGH;
1732                         curr_match = MTRR_TYPE_WRTHROUGH;
1733                 }
1734
1735                 if (prev_match != curr_match)
1736                         return MTRR_TYPE_UNCACHABLE;
1737         }
1738
1739         if (prev_match != 0xFF)
1740                 return prev_match;
1741
1742         return mtrr_state->def_type;
1743 }
1744
1745 u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
1746 {
1747         u8 mtrr;
1748
1749         mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1750                              (gfn << PAGE_SHIFT) + PAGE_SIZE);
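        /*
         * get_mtrr_type() returns 0xFE when the start and end of the page
         * straddle a variable-range boundary and 0xFF when MTRRs are
         * disabled; either way, default to write-back.
         */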
1751         if (mtrr == 0xfe || mtrr == 0xff)
1752                 mtrr = MTRR_TYPE_WRBACK;
1753         return mtrr;
1754 }
1755 EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
1756
1757 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1758 {
1759         unsigned index;
1760         struct hlist_head *bucket;
1761         struct kvm_mmu_page *s;
1762         struct hlist_node *node, *n;
1763
1764         trace_kvm_mmu_unsync_page(sp);
1765         index = kvm_page_table_hashfn(sp->gfn);
1766         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
1767         /* don't unsync if pagetable is shadowed with multiple roles */
1768         hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
1769                 if (s->gfn != sp->gfn || s->role.direct)
1770                         continue;
1771                 if (s->role.word != sp->role.word)
1772                         return 1;
1773         }
1774         ++vcpu->kvm->stat.mmu_unsync;
1775         sp->unsync = 1;
1776
1777         kvm_mmu_mark_parents_unsync(vcpu, sp);
1778
1779         mmu_convert_notrap(sp);
1780         return 0;
1781 }
1782
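/*
 * Decide whether a gfn about to be mapped writable must be write-protected
 * instead.  Returns 1 if the gfn is shadowed as a page table and cannot (or
 * may not) be left out of sync; returns 0 if the write may go through,
 * possibly after marking the existing shadow page unsync.
 */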
1783 static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1784                                   bool can_unsync)
1785 {
1786         struct kvm_mmu_page *shadow;
1787
1788         shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
1789         if (shadow) {
1790                 if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
1791                         return 1;
1792                 if (shadow->unsync)
1793                         return 0;
1794                 if (can_unsync && oos_shadow)
1795                         return kvm_unsync_page(vcpu, shadow);
1796                 return 1;
1797         }
1798         return 0;
1799 }
1800
1801 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1802                     unsigned pte_access, int user_fault,
1803                     int write_fault, int dirty, int level,
1804                     gfn_t gfn, pfn_t pfn, bool speculative,
1805                     bool can_unsync, bool reset_host_protection)
1806 {
1807         u64 spte;
1808         int ret = 0;
1809
1810         /*
1811          * We don't set the accessed bit, since we sometimes want to see
1812          * whether the guest actually used the pte (in order to detect
1813          * demand paging).
1814          */
1815         spte = shadow_base_present_pte | shadow_dirty_mask;
1816         if (!speculative)
1817                 spte |= shadow_accessed_mask;
1818         if (!dirty)
1819                 pte_access &= ~ACC_WRITE_MASK;
1820         if (pte_access & ACC_EXEC_MASK)
1821                 spte |= shadow_x_mask;
1822         else
1823                 spte |= shadow_nx_mask;
1824         if (pte_access & ACC_USER_MASK)
1825                 spte |= shadow_user_mask;
1826         if (level > PT_PAGE_TABLE_LEVEL)
1827                 spte |= PT_PAGE_SIZE_MASK;
1828         if (tdp_enabled)
1829                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1830                         kvm_is_mmio_pfn(pfn));
1831
1832         if (reset_host_protection)
1833                 spte |= SPTE_HOST_WRITEABLE;
1834
1835         spte |= (u64)pfn << PAGE_SHIFT;
1836
1837         if ((pte_access & ACC_WRITE_MASK)
1838             || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1839
1840                 if (level > PT_PAGE_TABLE_LEVEL &&
1841                     has_wrprotected_page(vcpu->kvm, gfn, level)) {
1842                         ret = 1;
1843                         spte = shadow_trap_nonpresent_pte;
1844                         goto set_pte;
1845                 }
1846
1847                 spte |= PT_WRITABLE_MASK;
1848
1849                 /*
1850                  * Optimization: for pte sync, if spte was writable the hash
1851                  * lookup is unnecessary (and expensive). Write protection
1852          * is the responsibility of mmu_get_page / kvm_sync_page.
1853                  * Same reasoning can be applied to dirty page accounting.
1854                  */
1855                 if (!can_unsync && is_writeble_pte(*sptep))
1856                         goto set_pte;
1857
1858                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1859                         pgprintk("%s: found shadow page for %lx, marking ro\n",
1860                                  __func__, gfn);
1861                         ret = 1;
1862                         pte_access &= ~ACC_WRITE_MASK;
1863                         if (is_writeble_pte(spte))
1864                                 spte &= ~PT_WRITABLE_MASK;
1865                 }
1866         }
1867
1868         if (pte_access & ACC_WRITE_MASK)
1869                 mark_page_dirty(vcpu->kvm, gfn);
1870
1871 set_pte:
1872         __set_spte(sptep, spte);
1873         return ret;
1874 }
1875
1876 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1877                          unsigned pt_access, unsigned pte_access,
1878                          int user_fault, int write_fault, int dirty,
1879                          int *ptwrite, int level, gfn_t gfn,
1880                          pfn_t pfn, bool speculative,
1881                          bool reset_host_protection)
1882 {
1883         int was_rmapped = 0;
1884         int was_writeble = is_writeble_pte(*sptep);
1885         int rmap_count;
1886
1887         pgprintk("%s: spte %llx access %x write_fault %d"
1888                  " user_fault %d gfn %lx\n",
1889                  __func__, *sptep, pt_access,
1890                  write_fault, user_fault, gfn);
1891
1892         if (is_rmap_spte(*sptep)) {
1893                 /*
1894                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1895                  * the parent of the now unreachable PTE.
1896                  */
1897                 if (level > PT_PAGE_TABLE_LEVEL &&
1898                     !is_large_pte(*sptep)) {
1899                         struct kvm_mmu_page *child;
1900                         u64 pte = *sptep;
1901
1902                         child = page_header(pte & PT64_BASE_ADDR_MASK);
1903                         mmu_page_remove_parent_pte(child, sptep);
1904                 } else if (pfn != spte_to_pfn(*sptep)) {
1905                         pgprintk("hfn old %lx new %lx\n",
1906                                  spte_to_pfn(*sptep), pfn);
1907                         rmap_remove(vcpu->kvm, sptep);
1908                 } else
1909                         was_rmapped = 1;
1910         }
1911
1912         if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1913                       dirty, level, gfn, pfn, speculative, true,
1914                       reset_host_protection)) {
1915                 if (write_fault)
1916                         *ptwrite = 1;
1917                 kvm_x86_ops->tlb_flush(vcpu);
1918         }
1919
1920         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1921         pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
1922                  is_large_pte(*sptep)? "2MB" : "4kB",
1923                  *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
1924                  *sptep, sptep);
1925         if (!was_rmapped && is_large_pte(*sptep))
1926                 ++vcpu->kvm->stat.lpages;
1927
1928         page_header_update_slot(vcpu->kvm, sptep, gfn);
1929         if (!was_rmapped) {
1930                 rmap_count = rmap_add(vcpu, sptep, gfn);
1931                 kvm_release_pfn_clean(pfn);
1932                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
1933                         rmap_recycle(vcpu, sptep, gfn);
1934         } else {
1935                 if (was_writeble)
1936                         kvm_release_pfn_dirty(pfn);
1937                 else
1938                         kvm_release_pfn_clean(pfn);
1939         }
1940         if (speculative) {
1941                 vcpu->arch.last_pte_updated = sptep;
1942                 vcpu->arch.last_pte_gfn = gfn;
1943         }
1944 }
1945
1946 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1947 {
1948 }
1949
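/*
 * Walk the shadow page table for a direct mapping of gfn, allocating
 * intermediate shadow pages as needed, and install the final spte at the
 * requested level.  Returns the pt_write result from mmu_set_spte(), or
 * -ENOMEM if a shadow page could not be allocated.
 */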
1950 static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
1951                         int level, gfn_t gfn, pfn_t pfn)
1952 {
1953         struct kvm_shadow_walk_iterator iterator;
1954         struct kvm_mmu_page *sp;
1955         int pt_write = 0;
1956         gfn_t pseudo_gfn;
1957
1958         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
1959                 if (iterator.level == level) {
1960                         mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
1961                                      0, write, 1, &pt_write,
1962                                      level, gfn, pfn, false, true);
1963                         ++vcpu->stat.pf_fixed;
1964                         break;
1965                 }
1966
1967                 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
1968                         pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
1969                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
1970                                               iterator.level - 1,
1971                                               1, ACC_ALL, iterator.sptep);
1972                         if (!sp) {
1973                                 pgprintk("nonpaging_map: ENOMEM\n");
1974                                 kvm_release_pfn_clean(pfn);
1975                                 return -ENOMEM;
1976                         }
1977
1978                         __set_spte(iterator.sptep,
1979                                    __pa(sp->spt)
1980                                    | PT_PRESENT_MASK | PT_WRITABLE_MASK
1981                                    | shadow_user_mask | shadow_x_mask);
1982                 }
1983         }
1984         return pt_write;
1985 }
1986
1987 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1988 {
1989         int r;
1990         int level;
1991         pfn_t pfn;
1992         unsigned long mmu_seq;
1993
1994         level = mapping_level(vcpu, gfn);
1995
1996         /*
1997          * This path builds a PAE page table, so we can map 2MB pages at
1998          * most.  Therefore check if the level is larger than that.
1999          */
2000         if (level > PT_DIRECTORY_LEVEL)
2001                 level = PT_DIRECTORY_LEVEL;
2002
2003         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2004
2005         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2006         smp_rmb();
2007         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2008
2009         /* mmio */
2010         if (is_error_pfn(pfn)) {
2011                 kvm_release_pfn_clean(pfn);
2012                 return 1;
2013         }
2014
2015         spin_lock(&vcpu->kvm->mmu_lock);
2016         if (mmu_notifier_retry(vcpu, mmu_seq))
2017                 goto out_unlock;
2018         kvm_mmu_free_some_pages(vcpu);
2019         r = __direct_map(vcpu, v, write, level, gfn, pfn);
2020         spin_unlock(&vcpu->kvm->mmu_lock);
2021
2022
2023         return r;
2024
2025 out_unlock:
2026         spin_unlock(&vcpu->kvm->mmu_lock);
2027         kvm_release_pfn_clean(pfn);
2028         return 0;
2029 }
2030
2031
2032 static void mmu_free_roots(struct kvm_vcpu *vcpu)
2033 {
2034         int i;
2035         struct kvm_mmu_page *sp;
2036
2037         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2038                 return;
2039         spin_lock(&vcpu->kvm->mmu_lock);
2040         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2041                 hpa_t root = vcpu->arch.mmu.root_hpa;
2042
2043                 sp = page_header(root);
2044                 --sp->root_count;
2045                 if (!sp->root_count && sp->role.invalid)
2046                         kvm_mmu_zap_page(vcpu->kvm, sp);
2047                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2048                 spin_unlock(&vcpu->kvm->mmu_lock);
2049                 return;
2050         }
2051         for (i = 0; i < 4; ++i) {
2052                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2053
2054                 if (root) {
2055                         root &= PT64_BASE_ADDR_MASK;
2056                         sp = page_header(root);
2057                         --sp->root_count;
2058                         if (!sp->root_count && sp->role.invalid)
2059                                 kvm_mmu_zap_page(vcpu->kvm, sp);
2060                 }
2061                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2062         }
2063         spin_unlock(&vcpu->kvm->mmu_lock);
2064         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2065 }
2066
2067 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2068 {
2069         int ret = 0;
2070
2071         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
2072                 set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2073                 ret = 1;
2074         }
2075
2076         return ret;
2077 }
2078
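/*
 * Allocate the shadow root(s): a single root page when the shadow paging
 * level is PT64_ROOT_LEVEL, otherwise four PAE roots, one per PDPTE.
 * Returns 1 (after requesting a triple fault) if a root gfn is not backed
 * by a visible memslot.
 */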
2079 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
2080 {
2081         int i;
2082         gfn_t root_gfn;
2083         struct kvm_mmu_page *sp;
2084         int direct = 0;
2085         u64 pdptr;
2086
2087         root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
2088
2089         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2090                 hpa_t root = vcpu->arch.mmu.root_hpa;
2091
2092                 ASSERT(!VALID_PAGE(root));
2093                 if (tdp_enabled)
2094                         direct = 1;
2095                 if (mmu_check_root(vcpu, root_gfn))
2096                         return 1;
2097                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
2098                                       PT64_ROOT_LEVEL, direct,
2099                                       ACC_ALL, NULL);
2100                 root = __pa(sp->spt);
2101                 ++sp->root_count;
2102                 vcpu->arch.mmu.root_hpa = root;
2103                 return 0;
2104         }
2105         direct = !is_paging(vcpu);
2106         if (tdp_enabled)
2107                 direct = 1;
2108         for (i = 0; i < 4; ++i) {
2109                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2110
2111                 ASSERT(!VALID_PAGE(root));
2112                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
2113                         pdptr = kvm_pdptr_read(vcpu, i);
2114                         if (!is_present_gpte(pdptr)) {
2115                                 vcpu->arch.mmu.pae_root[i] = 0;
2116                                 continue;
2117                         }
2118                         root_gfn = pdptr >> PAGE_SHIFT;
2119                 } else if (vcpu->arch.mmu.root_level == 0)
2120                         root_gfn = 0;
2121                 if (mmu_check_root(vcpu, root_gfn))
2122                         return 1;
2123                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
2124                                       PT32_ROOT_LEVEL, direct,
2125                                       ACC_ALL, NULL);
2126                 root = __pa(sp->spt);
2127                 ++sp->root_count;
2128                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
2129         }
2130         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
2131         return 0;
2132 }
2133
2134 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2135 {
2136         int i;
2137         struct kvm_mmu_page *sp;
2138
2139         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2140                 return;
2141         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2142                 hpa_t root = vcpu->arch.mmu.root_hpa;
2143                 sp = page_header(root);
2144                 mmu_sync_children(vcpu, sp);
2145                 return;
2146         }
2147         for (i = 0; i < 4; ++i) {
2148                 hpa_t root = vcpu->arch.mmu.pae_root[i];
2149
2150                 if (root && VALID_PAGE(root)) {
2151                         root &= PT64_BASE_ADDR_MASK;
2152                         sp = page_header(root);
2153                         mmu_sync_children(vcpu, sp);
2154                 }
2155         }
2156 }
2157
2158 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2159 {
2160         spin_lock(&vcpu->kvm->mmu_lock);
2161         mmu_sync_roots(vcpu);
2162         spin_unlock(&vcpu->kvm->mmu_lock);
2163 }
2164
2165 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
2166 {
2167         return vaddr;
2168 }
2169
2170 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
2171                                 u32 error_code)
2172 {
2173         gfn_t gfn;
2174         int r;
2175
2176         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
2177         r = mmu_topup_memory_caches(vcpu);
2178         if (r)
2179                 return r;
2180
2181         ASSERT(vcpu);
2182         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2183
2184         gfn = gva >> PAGE_SHIFT;
2185
2186         return nonpaging_map(vcpu, gva & PAGE_MASK,
2187                              error_code & PFERR_WRITE_MASK, gfn);
2188 }
2189
2190 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2191                                 u32 error_code)
2192 {
2193         pfn_t pfn;
2194         int r;
2195         int level;
2196         gfn_t gfn = gpa >> PAGE_SHIFT;
2197         unsigned long mmu_seq;
2198
2199         ASSERT(vcpu);
2200         ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2201
2202         r = mmu_topup_memory_caches(vcpu);
2203         if (r)
2204                 return r;
2205
2206         level = mapping_level(vcpu, gfn);
2207
2208         gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2209
2210         mmu_seq = vcpu->kvm->mmu_notifier_seq;
2211         smp_rmb();
2212         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2213         if (is_error_pfn(pfn)) {
2214                 kvm_release_pfn_clean(pfn);
2215                 return 1;
2216         }
2217         spin_lock(&vcpu->kvm->mmu_lock);
2218         if (mmu_notifier_retry(vcpu, mmu_seq))
2219                 goto out_unlock;
2220         kvm_mmu_free_some_pages(vcpu);
2221         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
2222                          level, gfn, pfn);
2223         spin_unlock(&vcpu->kvm->mmu_lock);
2224
2225         return r;
2226
2227 out_unlock:
2228         spin_unlock(&vcpu->kvm->mmu_lock);
2229         kvm_release_pfn_clean(pfn);
2230         return 0;
2231 }
2232
2233 static void nonpaging_free(struct kvm_vcpu *vcpu)
2234 {
2235         mmu_free_roots(vcpu);
2236 }
2237
2238 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2239 {
2240         struct kvm_mmu *context = &vcpu->arch.mmu;
2241
2242         context->new_cr3 = nonpaging_new_cr3;
2243         context->page_fault = nonpaging_page_fault;
2244         context->gva_to_gpa = nonpaging_gva_to_gpa;
2245         context->free = nonpaging_free;
2246         context->prefetch_page = nonpaging_prefetch_page;
2247         context->sync_page = nonpaging_sync_page;
2248         context->invlpg = nonpaging_invlpg;
2249         context->root_level = 0;
2250         context->shadow_root_level = PT32E_ROOT_LEVEL;
2251         context->root_hpa = INVALID_PAGE;
2252         return 0;
2253 }
2254
2255 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
2256 {
2257         ++vcpu->stat.tlb_flush;
2258         kvm_x86_ops->tlb_flush(vcpu);
2259 }
2260
2261 static void paging_new_cr3(struct kvm_vcpu *vcpu)
2262 {
2263         pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
2264         mmu_free_roots(vcpu);
2265 }
2266
2267 static void inject_page_fault(struct kvm_vcpu *vcpu,
2268                               u64 addr,
2269                               u32 err_code)
2270 {
2271         kvm_inject_page_fault(vcpu, addr, err_code);
2272 }
2273
2274 static void paging_free(struct kvm_vcpu *vcpu)
2275 {
2276         nonpaging_free(vcpu);
2277 }
2278
2279 static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2280 {
2281         int bit7;
2282
2283         bit7 = (gpte >> 7) & 1;
2284         return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2285 }
2286
2287 #define PTTYPE 64
2288 #include "paging_tmpl.h"
2289 #undef PTTYPE
2290
2291 #define PTTYPE 32
2292 #include "paging_tmpl.h"
2293 #undef PTTYPE
2294
2295 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2296 {
2297         struct kvm_mmu *context = &vcpu->arch.mmu;
2298         int maxphyaddr = cpuid_maxphyaddr(vcpu);
2299         u64 exb_bit_rsvd = 0;
2300
2301         if (!is_nx(vcpu))
2302                 exb_bit_rsvd = rsvd_bits(63, 63);
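        /*
         * rsvd_bits_mask[x][y] is indexed by bit 7 of the guest pte (the PS
         * bit for directory entries, see is_rsvd_bits_set()) and by
         * level - 1, so [1][n] holds the reserved-bit mask for large-page
         * entries at that level.
         */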
2303         switch (level) {
2304         case PT32_ROOT_LEVEL:
2305                 /* no rsvd bits for 2 level 4K page table entries */
2306                 context->rsvd_bits_mask[0][1] = 0;
2307                 context->rsvd_bits_mask[0][0] = 0;
2308                 if (is_cpuid_PSE36())
2309                         /* 36bits PSE 4MB page */
2310                         context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2311                 else
2312                         /* 32 bits PSE 4MB page */
2313                         context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
2314                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2315                 break;
2316         case PT32E_ROOT_LEVEL:
2317                 context->rsvd_bits_mask[0][2] =
2318                         rsvd_bits(maxphyaddr, 63) |
2319                         rsvd_bits(7, 8) | rsvd_bits(1, 2);      /* PDPTE */
2320                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2321                         rsvd_bits(maxphyaddr, 62);      /* PDE */
2322                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2323                         rsvd_bits(maxphyaddr, 62);      /* PTE */
2324                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2325                         rsvd_bits(maxphyaddr, 62) |
2326                         rsvd_bits(13, 20);              /* large page */
2327                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2328                 break;
2329         case PT64_ROOT_LEVEL:
2330                 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2331                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2332                 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2333                         rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2334                 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
2335                         rsvd_bits(maxphyaddr, 51);
2336                 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2337                         rsvd_bits(maxphyaddr, 51);
2338                 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
2339                 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2340                         rsvd_bits(maxphyaddr, 51) |
2341                         rsvd_bits(13, 29);
2342                 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2343                         rsvd_bits(maxphyaddr, 51) |
2344                         rsvd_bits(13, 20);              /* large page */
2345                 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2346                 break;
2347         }
2348 }
2349
2350 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
2351 {
2352         struct kvm_mmu *context = &vcpu->arch.mmu;
2353
2354         ASSERT(is_pae(vcpu));
2355         context->new_cr3 = paging_new_cr3;
2356         context->page_fault = paging64_page_fault;
2357         context->gva_to_gpa = paging64_gva_to_gpa;
2358         context->prefetch_page = paging64_prefetch_page;
2359         context->sync_page = paging64_sync_page;
2360         context->invlpg = paging64_invlpg;
2361         context->free = paging_free;
2362         context->root_level = level;
2363         context->shadow_root_level = level;
2364         context->root_hpa = INVALID_PAGE;
2365         return 0;
2366 }
2367
2368 static int paging64_init_context(struct kvm_vcpu *vcpu)
2369 {
2370         reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2371         return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2372 }
2373
2374 static int paging32_init_context(struct kvm_vcpu *vcpu)
2375 {
2376         struct kvm_mmu *context = &vcpu->arch.mmu;
2377
2378         reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2379         context->new_cr3 = paging_new_cr3;
2380         context->page_fault = paging32_page_fault;
2381         context->gva_to_gpa = paging32_gva_to_gpa;
2382         context->free = paging_free;
2383         context->prefetch_page = paging32_prefetch_page;
2384         context->sync_page = paging32_sync_page;
2385         context->invlpg = paging32_invlpg;
2386         context->root_level = PT32_ROOT_LEVEL;
2387         context->shadow_root_level = PT32E_ROOT_LEVEL;
2388         context->root_hpa = INVALID_PAGE;
2389         return 0;
2390 }
2391
2392 static int paging32E_init_context(struct kvm_vcpu *vcpu)
2393 {
2394         reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2395         return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
2396 }
2397
2398 static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2399 {
2400         struct kvm_mmu *context = &vcpu->arch.mmu;
2401
2402         context->new_cr3 = nonpaging_new_cr3;
2403         context->page_fault = tdp_page_fault;
2404         context->free = nonpaging_free;
2405         context->prefetch_page = nonpaging_prefetch_page;
2406         context->sync_page = nonpaging_sync_page;
2407         context->invlpg = nonpaging_invlpg;
2408         context->shadow_root_level = kvm_x86_ops->get_tdp_level();
2409         context->root_hpa = INVALID_PAGE;
2410
2411         if (!is_paging(vcpu)) {
2412                 context->gva_to_gpa = nonpaging_gva_to_gpa;
2413                 context->root_level = 0;
2414         } else if (is_long_mode(vcpu)) {
2415                 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
2416                 context->gva_to_gpa = paging64_gva_to_gpa;
2417                 context->root_level = PT64_ROOT_LEVEL;
2418         } else if (is_pae(vcpu)) {
2419                 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
2420                 context->gva_to_gpa = paging64_gva_to_gpa;
2421                 context->root_level = PT32E_ROOT_LEVEL;
2422         } else {
2423                 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
2424                 context->gva_to_gpa = paging32_gva_to_gpa;
2425                 context->root_level = PT32_ROOT_LEVEL;
2426         }
2427
2428         return 0;
2429 }
2430
2431 static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
2432 {
2433         int r;
2434
2435         ASSERT(vcpu);
2436         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2437
2438         if (!is_paging(vcpu))
2439                 r = nonpaging_init_context(vcpu);
2440         else if (is_long_mode(vcpu))
2441                 r = paging64_init_context(vcpu);
2442         else if (is_pae(vcpu))
2443                 r = paging32E_init_context(vcpu);
2444         else
2445                 r = paging32_init_context(vcpu);
2446
2447         vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;
2448
2449         return r;
2450 }
2451
2452 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2453 {
2454         vcpu->arch.update_pte.pfn = bad_pfn;
2455
2456         if (tdp_enabled)
2457                 return init_kvm_tdp_mmu(vcpu);
2458         else
2459                 return init_kvm_softmmu(vcpu);
2460 }
2461
2462 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2463 {
2464         ASSERT(vcpu);
2465         if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
2466                 vcpu->arch.mmu.free(vcpu);
2467                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
2468         }
2469 }
2470
2471 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
2472 {
2473         destroy_kvm_mmu(vcpu);
2474         return init_kvm_mmu(vcpu);
2475 }
2476 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
2477
2478 int kvm_mmu_load(struct kvm_vcpu *vcpu)
2479 {
2480         int r;
2481
2482         r = mmu_topup_memory_caches(vcpu);
2483         if (r)
2484                 goto out;
2485         spin_lock(&vcpu->kvm->mmu_lock);
2486         kvm_mmu_free_some_pages(vcpu);
2487         r = mmu_alloc_roots(vcpu);
2488         mmu_sync_roots(vcpu);
2489         spin_unlock(&vcpu->kvm->mmu_lock);
2490         if (r)
2491                 goto out;
2492         /* set_cr3() should ensure TLB has been flushed */
2493         kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
2494 out:
2495         return r;
2496 }
2497 EXPORT_SYMBOL_GPL(kvm_mmu_load);
2498
2499 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2500 {
2501         mmu_free_roots(vcpu);
2502 }
2503
2504 static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
2505                                   struct kvm_mmu_page *sp,
2506                                   u64 *spte)
2507 {
2508         u64 pte;
2509         struct kvm_mmu_page *child;
2510
2511         pte = *spte;
2512         if (is_shadow_present_pte(pte)) {
2513                 if (is_last_spte(pte, sp->role.level))
2514                         rmap_remove(vcpu->kvm, spte);
2515                 else {
2516                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2517                         mmu_page_remove_parent_pte(child, spte);
2518                 }
2519         }
2520         __set_spte(spte, shadow_trap_nonpresent_pte);
2521         if (is_large_pte(pte))
2522                 --vcpu->kvm->stat.lpages;
2523 }
2524
2525 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
2526                                   struct kvm_mmu_page *sp,
2527                                   u64 *spte,
2528                                   const void *new)
2529 {
2530         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
2531                 ++vcpu->kvm->stat.mmu_pde_zapped;
2532                 return;
2533         }
2534
2535         ++vcpu->kvm->stat.mmu_pte_updated;
2536         if (sp->role.glevels == PT32_ROOT_LEVEL)
2537                 paging32_update_pte(vcpu, sp, spte, new);
2538         else
2539                 paging64_update_pte(vcpu, sp, spte, new);
2540 }
2541
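/*
 * A remote TLB flush is only needed when an existing shadow-present spte
 * changes in a way other vcpus could have cached: it disappears, points to
 * a different frame, or loses a permission bit.  Making a previously
 * non-present spte present never requires flushing remote TLBs.
 */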
2542 static bool need_remote_flush(u64 old, u64 new)
2543 {
2544         if (!is_shadow_present_pte(old))
2545                 return false;
2546         if (!is_shadow_present_pte(new))
2547                 return true;
2548         if ((old ^ new) & PT64_BASE_ADDR_MASK)
2549                 return true;
2550         old ^= PT64_NX_MASK;
2551         new ^= PT64_NX_MASK;
2552         return (old & ~new & PT64_PERM_MASK) != 0;
2553 }
2554
2555 static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
2556 {
2557         if (need_remote_flush(old, new))
2558                 kvm_flush_remote_tlbs(vcpu->kvm);
2559         else
2560                 kvm_mmu_flush_tlb(vcpu);
2561 }
2562
2563 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2564 {
2565         u64 *spte = vcpu->arch.last_pte_updated;
2566
2567         return !!(spte && (*spte & shadow_accessed_mask));
2568 }
2569
2570 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2571                                           const u8 *new, int bytes)
2572 {
2573         gfn_t gfn;
2574         int r;
2575         u64 gpte = 0;
2576         pfn_t pfn;
2577
2578         if (bytes != 4 && bytes != 8)
2579                 return;
2580
2581         /*
2582          * Assume that the pte write is on a page table of the same type
2583          * as the current vcpu paging mode.  This is nearly always true
2584          * (might be false while changing modes).  Note it is verified later
2585          * by update_pte().
2586          */
2587         if (is_pae(vcpu)) {
2588                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
2589                 if ((bytes == 4) && (gpa % 4 == 0)) {
2590                         r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
2591                         if (r)
2592                                 return;
2593                         memcpy((void *)&gpte + (gpa % 8), new, 4);
2594                 } else if ((bytes == 8) && (gpa % 8 == 0)) {
2595                         memcpy((void *)&gpte, new, 8);
2596                 }
2597         } else {
2598                 if ((bytes == 4) && (gpa % 4 == 0))
2599                         memcpy((void *)&gpte, new, 4);
2600         }
2601         if (!is_present_gpte(gpte))
2602                 return;
2603         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
2604
2605         vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
2606         smp_rmb();
2607         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2608
2609         if (is_error_pfn(pfn)) {
2610                 kvm_release_pfn_clean(pfn);
2611                 return;
2612         }
2613         vcpu->arch.update_pte.gfn = gfn;
2614         vcpu->arch.update_pte.pfn = pfn;
2615 }
2616
2617 static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2618 {
2619         u64 *spte = vcpu->arch.last_pte_updated;
2620
2621         if (spte
2622             && vcpu->arch.last_pte_gfn == gfn
2623             && shadow_accessed_mask
2624             && !(*spte & shadow_accessed_mask)
2625             && is_shadow_present_pte(*spte))
2626                 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2627 }
2628
2629 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2630                        const u8 *new, int bytes,
2631                        bool guest_initiated)
2632 {
2633         gfn_t gfn = gpa >> PAGE_SHIFT;
2634         struct kvm_mmu_page *sp;
2635         struct hlist_node *node, *n;
2636         struct hlist_head *bucket;
2637         unsigned index;
2638         u64 entry, gentry;
2639         u64 *spte;
2640         unsigned offset = offset_in_page(gpa);
2641         unsigned pte_size;
2642         unsigned page_offset;
2643         unsigned misaligned;
2644         unsigned quadrant;
2645         int level;
2646         int flooded = 0;
2647         int npte;
2648         int r;
2649
2650         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
2651         mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
2652         spin_lock(&vcpu->kvm->mmu_lock);
2653         kvm_mmu_access_page(vcpu, gfn);
2654         kvm_mmu_free_some_pages(vcpu);
2655         ++vcpu->kvm->stat.mmu_pte_write;
2656         kvm_mmu_audit(vcpu, "pre pte write");
2657         if (guest_initiated) {
2658                 if (gfn == vcpu->arch.last_pt_write_gfn
2659                     && !last_updated_pte_accessed(vcpu)) {
2660                         ++vcpu->arch.last_pt_write_count;
2661                         if (vcpu->arch.last_pt_write_count >= 3)
2662                                 flooded = 1;
2663                 } else {
2664                         vcpu->arch.last_pt_write_gfn = gfn;
2665                         vcpu->arch.last_pt_write_count = 1;
2666                         vcpu->arch.last_pte_updated = NULL;
2667                 }
2668         }
2669         index = kvm_page_table_hashfn(gfn);
2670         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
2671         hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
2672                 if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
2673                         continue;
2674                 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
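                /*
                 * The write is "misaligned" if it does not stay within a
                 * single guest pte: the XOR of the first and last byte
                 * offsets has a bit set above the pte-size alignment, or
                 * the write is shorter than a 32-bit pte.
                 */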
2675                 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
2676                 misaligned |= bytes < 4;
2677                 if (misaligned || flooded) {
2678                         /*
2679                          * Misaligned accesses are too much trouble to fix
2680                          * up; also, they usually indicate a page is not used
2681                          * as a page table.
2682                          *
2683                          * If we're seeing too many writes to a page,
2684                          * it may no longer be a page table, or we may be
2685                          * forking, in which case it is better to unmap the
2686                          * page.
2687                          */
2688                         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
2689                                  gpa, bytes, sp->role.word);
2690                         if (kvm_mmu_zap_page(vcpu->kvm, sp))
2691                                 n = bucket->first;
2692                         ++vcpu->kvm->stat.mmu_flooded;
2693                         continue;
2694                 }
2695                 page_offset = offset;
2696                 level = sp->role.level;
2697                 npte = 1;
2698                 if (sp->role.glevels == PT32_ROOT_LEVEL) {
2699                         page_offset <<= 1;      /* 32->64 */
2700                         /*
2701                          * A 32-bit pde maps 4MB while the shadow pdes map
2702                          * only 2MB.  So we need to double the offset again
2703                          * and zap two pdes instead of one.
2704                          */
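                        /*
                         * Illustrative example: a guest write to byte offset
                         * 0x400 of its page directory (pde 256, the 4MB
                         * region starting at 1GB) becomes page_offset 0x1000
                         * after the two shifts, i.e. quadrant 1, and the
                         * first two sptes of that quadrant's shadow page are
                         * zapped.
                         */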
2705                         if (level == PT32_ROOT_LEVEL) {
2706                                 page_offset &= ~7; /* kill rounding error */
2707                                 page_offset <<= 1;
2708                                 npte = 2;
2709                         }
2710                         quadrant = page_offset >> PAGE_SHIFT;
2711                         page_offset &= ~PAGE_MASK;
2712                         if (quadrant != sp->role.quadrant)
2713                                 continue;
2714                 }
2715                 spte = &sp->spt[page_offset / sizeof(*spte)];
2716                 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
2717                         gentry = 0;
2718                         r = kvm_read_guest_atomic(vcpu->kvm,
2719                                                   gpa & ~(u64)(pte_size - 1),
2720                                                   &gentry, pte_size);
2721                         new = (const void *)&gentry;
2722                         if (r < 0)
2723                                 new = NULL;
2724                 }
2725                 while (npte--) {
2726                         entry = *spte;
2727                         mmu_pte_write_zap_pte(vcpu, sp, spte);
2728                         if (new)
2729                                 mmu_pte_write_new_pte(vcpu, sp, spte, new);
2730                         mmu_pte_write_flush_tlb(vcpu, entry, *spte);
2731                         ++spte;
2732                 }
2733         }
2734         kvm_mmu_audit(vcpu, "post pte write");
2735         spin_unlock(&vcpu->kvm->mmu_lock);
2736         if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2737                 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2738                 vcpu->arch.update_pte.pfn = bad_pfn;
2739         }
2740 }
2741
2742 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2743 {
2744         gpa_t gpa;
2745         int r;
2746
2747         if (tdp_enabled)
2748                 return 0;
2749
2750         gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
2751
2752         spin_lock(&vcpu->kvm->mmu_lock);
2753         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
2754         spin_unlock(&vcpu->kvm->mmu_lock);
2755         return r;
2756 }
2757 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
2758
2759 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
2760 {
2761         while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
2762                !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
2763                 struct kvm_mmu_page *sp;
2764
2765                 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
2766                                   struct kvm_mmu_page, link);
2767                 kvm_mmu_zap_page(vcpu->kvm, sp);
2768                 ++vcpu->kvm->stat.mmu_recycled;
2769         }
2770 }
2771
2772 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2773 {
2774         int r;
2775         enum emulation_result er;
2776
2777         r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
2778         if (r < 0)
2779                 goto out;
2780
2781         if (!r) {
2782                 r = 1;
2783                 goto out;
2784         }
2785
2786         r = mmu_topup_memory_caches(vcpu);
2787         if (r)
2788                 goto out;
2789
2790         er = emulate_instruction(vcpu, cr2, error_code, 0);
2791
2792         switch (er) {
2793         case EMULATE_DONE:
2794                 return 1;
2795         case EMULATE_DO_MMIO:
2796                 ++vcpu->stat.mmio_exits;
2797                 return 0;
2798         case EMULATE_FAIL:
2799                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
2800                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
2801                 vcpu->run->internal.ndata = 0;
2802                 return 0;
2803         default:
2804                 BUG();
2805         }
2806 out:
2807         return r;
2808 }
2809 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2810
2811 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2812 {
2813         vcpu->arch.mmu.invlpg(vcpu, gva);
2814         kvm_mmu_flush_tlb(vcpu);
2815         ++vcpu->stat.invlpg;
2816 }
2817 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
2818
2819 void kvm_enable_tdp(void)
2820 {
2821         tdp_enabled = true;
2822 }
2823 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
2824
2825 void kvm_disable_tdp(void)
2826 {
2827         tdp_enabled = false;
2828 }
2829 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
2830
2831 static void free_mmu_pages(struct kvm_vcpu *vcpu)
2832 {
2833         free_page((unsigned long)vcpu->arch.mmu.pae_root);
2834 }
2835
2836 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
2837 {
2838         struct page *page;
2839         int i;
2840
2841         ASSERT(vcpu);
2842
2843         /*
2844          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
2845          * Therefore we need to allocate shadow page tables in the first
2846          * 4GB of memory, which happens to fit the DMA32 zone.
2847          */
2848         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
2849         if (!page)
2850                 goto error_1;
2851         vcpu->arch.mmu.pae_root = page_address(page);
2852         for (i = 0; i < 4; ++i)
2853                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
2854
2855         return 0;
2856
2857 error_1:
2858         free_mmu_pages(vcpu);
2859         return -ENOMEM;
2860 }
2861
2862 int kvm_mmu_create(struct kvm_vcpu *vcpu)
2863 {
2864         ASSERT(vcpu);
2865         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2866
2867         return alloc_mmu_pages(vcpu);
2868 }
2869
2870 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
2871 {
2872         ASSERT(vcpu);
2873         ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2874
2875         return init_kvm_mmu(vcpu);
2876 }
2877
2878 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
2879 {
2880         ASSERT(vcpu);
2881
2882         destroy_kvm_mmu(vcpu);
2883         free_mmu_pages(vcpu);
2884         mmu_free_memory_caches(vcpu);
2885 }
2886
2887 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
2888 {
2889         struct kvm_mmu_page *sp;
2890
2891         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
2892                 int i;
2893                 u64 *pt;
2894
2895                 if (!test_bit(slot, sp->slot_bitmap))
2896                         continue;
2897
2898                 pt = sp->spt;
2899                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2900                         /* avoid RMW */
2901                         if (pt[i] & PT_WRITABLE_MASK)
2902                                 pt[i] &= ~PT_WRITABLE_MASK;
2903         }
2904         kvm_flush_remote_tlbs(kvm);
2905 }
2906
2907 void kvm_mmu_zap_all(struct kvm *kvm)
2908 {
2909         struct kvm_mmu_page *sp, *node;
2910
2911         spin_lock(&kvm->mmu_lock);
2912         list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
2913                 if (kvm_mmu_zap_page(kvm, sp))
2914                         node = container_of(kvm->arch.active_mmu_pages.next,
2915                                             struct kvm_mmu_page, link);
2916         spin_unlock(&kvm->mmu_lock);
2917
2918         kvm_flush_remote_tlbs(kvm);
2919 }
2920
2921 static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
2922 {
2923         struct kvm_mmu_page *page;
2924
2925         page = container_of(kvm->arch.active_mmu_pages.prev,
2926                             struct kvm_mmu_page, link);
2927         kvm_mmu_zap_page(kvm, page);
2928 }
2929
2930 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
2931 {
2932         struct kvm *kvm;
2933         struct kvm *kvm_freed = NULL;
2934         int cache_count = 0;
2935
2936         spin_lock(&kvm_lock);
2937
2938         list_for_each_entry(kvm, &vm_list, vm_list) {
2939                 int npages;
2940
2941                 if (!down_read_trylock(&kvm->slots_lock))
2942                         continue;
2943                 spin_lock(&kvm->mmu_lock);
2944                 npages = kvm->arch.n_alloc_mmu_pages -
2945                          kvm->arch.n_free_mmu_pages;
2946                 cache_count += npages;
2947                 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
2948                         kvm_mmu_remove_one_alloc_mmu_page(kvm);
2949                         cache_count--;
2950                         kvm_freed = kvm;
2951                 }
2952                 nr_to_scan--;
2953
2954                 spin_unlock(&kvm->mmu_lock);
2955                 up_read(&kvm->slots_lock);
2956         }
2957         if (kvm_freed)
2958                 list_move_tail(&kvm_freed->vm_list, &vm_list);
2959
2960         spin_unlock(&kvm_lock);
2961
2962         return cache_count;
2963 }
2964
2965 static struct shrinker mmu_shrinker = {
2966         .shrink = mmu_shrink,
2967         .seeks = DEFAULT_SEEKS * 10,
2968 };
2969
2970 static void mmu_destroy_caches(void)
2971 {
2972         if (pte_chain_cache)
2973                 kmem_cache_destroy(pte_chain_cache);
2974         if (rmap_desc_cache)
2975                 kmem_cache_destroy(rmap_desc_cache);
2976         if (mmu_page_header_cache)
2977                 kmem_cache_destroy(mmu_page_header_cache);
2978 }
2979
2980 void kvm_mmu_module_exit(void)
2981 {
2982         mmu_destroy_caches();
2983         unregister_shrinker(&mmu_shrinker);
2984 }
2985
2986 int kvm_mmu_module_init(void)
2987 {
2988         pte_chain_cache = kmem_cache_create("kvm_pte_chain",
2989                                             sizeof(struct kvm_pte_chain),
2990                                             0, 0, NULL);
2991         if (!pte_chain_cache)
2992                 goto nomem;
2993         rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
2994                                             sizeof(struct kvm_rmap_desc),
2995                                             0, 0, NULL);
2996         if (!rmap_desc_cache)
2997                 goto nomem;
2998
2999         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3000                                                   sizeof(struct kvm_mmu_page),
3001                                                   0, 0, NULL);
3002         if (!mmu_page_header_cache)
3003                 goto nomem;
3004
3005         register_shrinker(&mmu_shrinker);
3006
3007         return 0;
3008
3009 nomem:
3010         mmu_destroy_caches();
3011         return -ENOMEM;
3012 }
3013
3014 /*
3015  * Calculate the number of mmu pages needed for kvm.
3016  */
3017 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3018 {
3019         int i;
3020         unsigned int nr_mmu_pages;
3021         unsigned int  nr_pages = 0;
3022
3023         for (i = 0; i < kvm->nmemslots; i++)
3024                 nr_pages += kvm->memslots[i].npages;
3025
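        /*
         * For example, with KVM_PERMILLE_MMU_PAGES at its usual value of 20
         * (2%), a guest with 1,000,000 memory pages (~4GB) is allowed
         * 20,000 shadow pages; small guests are rounded up to
         * KVM_MIN_ALLOC_MMU_PAGES.
         */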
3026         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3027         nr_mmu_pages = max(nr_mmu_pages,
3028                         (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3029
3030         return nr_mmu_pages;
3031 }
3032
3033 static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3034                                 unsigned len)
3035 {
3036         if (len > buffer->len)
3037                 return NULL;
3038         return buffer->ptr;
3039 }
3040
3041 static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3042                                 unsigned len)
3043 {
3044         void *ret;
3045
3046         ret = pv_mmu_peek_buffer(buffer, len);
3047         if (!ret)
3048                 return ret;
3049         buffer->ptr += len;
3050         buffer->len -= len;
3051         buffer->processed += len;
3052         return ret;
3053 }
3054
3055 static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3056                              gpa_t addr, gpa_t value)
3057 {
3058         int bytes = 8;
3059         int r;
3060
3061         if (!is_long_mode(vcpu) && !is_pae(vcpu))
3062                 bytes = 4;
3063
3064         r = mmu_topup_memory_caches(vcpu);
3065         if (r)
3066                 return r;
3067
3068         if (!emulator_write_phys(vcpu, addr, &value, bytes))
3069                 return -EFAULT;
3070
3071         return 1;
3072 }
3073
3074 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3075 {
3076         kvm_set_cr3(vcpu, vcpu->arch.cr3);
3077         return 1;
3078 }
3079
3080 static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3081 {
3082         spin_lock(&vcpu->kvm->mmu_lock);
3083         mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3084         spin_unlock(&vcpu->kvm->mmu_lock);
3085         return 1;
3086 }
3087
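/*
 * Decode and execute a single mmu op from the buffer.  The header is
 * only peeked, so a truncated op leaves the buffer untouched and ends
 * the batch; a complete op is consumed in full (the per-op structures
 * embed the header).  Returns 1 on success, 0 to stop processing, or a
 * negative errno on failure.
 */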
3088 static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3089                              struct kvm_pv_mmu_op_buffer *buffer)
3090 {
3091         struct kvm_mmu_op_header *header;
3092
3093         header = pv_mmu_peek_buffer(buffer, sizeof *header);
3094         if (!header)
3095                 return 0;
3096         switch (header->op) {
3097         case KVM_MMU_OP_WRITE_PTE: {
3098                 struct kvm_mmu_op_write_pte *wpte;
3099
3100                 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3101                 if (!wpte)
3102                         return 0;
3103                 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3104                                         wpte->pte_val);
3105         }
3106         case KVM_MMU_OP_FLUSH_TLB: {
3107                 struct kvm_mmu_op_flush_tlb *ftlb;
3108
3109                 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3110                 if (!ftlb)
3111                         return 0;
3112                 return kvm_pv_mmu_flush_tlb(vcpu);
3113         }
3114         case KVM_MMU_OP_RELEASE_PT: {
3115                 struct kvm_mmu_op_release_pt *rpt;
3116
3117                 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3118                 if (!rpt)
3119                         return 0;
3120                 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3121         }
3122         default: return 0;
3123         }
3124 }
3125
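/*
 * Entry point for the KVM_HC_MMU_OP hypercall: copy up to one buffer's
 * worth of ops from guest memory at @addr and apply them one by one.
 * *ret reports how many bytes were actually processed; the return value
 * is 1 on success or a negative errno.
 */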
3126 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3127                   gpa_t addr, unsigned long *ret)
3128 {
3129         int r;
3130         struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
3131
3132         buffer->ptr = buffer->buf;
3133         buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3134         buffer->processed = 0;
3135
3136         r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
3137         if (r)
3138                 goto out;
3139
3140         while (buffer->len) {
3141                 r = kvm_pv_mmu_op_one(vcpu, buffer);
3142                 if (r < 0)
3143                         goto out;
3144                 if (r == 0)
3145                         break;
3146         }
3147
3148         r = 1;
3149 out:
3150         *ret = buffer->processed;
3151         return r;
3152 }
3153
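/*
 * Snapshot the shadow walk for @addr: sptes[level - 1] receives the
 * entry seen at each level, stopping after the first non-present entry.
 * Returns the number of entries captured; slots for levels that were
 * not reached are left untouched.  Useful for diagnostics such as EPT
 * misconfiguration reporting.
 */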
3154 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3155 {
3156         struct kvm_shadow_walk_iterator iterator;
3157         int nr_sptes = 0;
3158
3159         spin_lock(&vcpu->kvm->mmu_lock);
3160         for_each_shadow_entry(vcpu, addr, iterator) {
3161                 sptes[iterator.level-1] = *iterator.sptep;
3162                 nr_sptes++;
3163                 if (!is_shadow_present_pte(*iterator.sptep))
3164                         break;
3165         }
3166         spin_unlock(&vcpu->kvm->mmu_lock);
3167
3168         return nr_sptes;
3169 }
3170 EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3171
3172 #ifdef AUDIT
3173
3174 static const char *audit_msg;
3175
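/* Sign-extend bit 47 so the address is in x86-64 canonical form. */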
3176 static gva_t canonicalize(gva_t gva)
3177 {
3178 #ifdef CONFIG_X86_64
3179         gva = (long long)(gva << 16) >> 16;
3180 #endif
3181         return gva;
3182 }
3183
3184
3185 typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
3186                                  u64 *sptep);
3187
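/*
 * Recursively visit a shadow page table, invoking @fn on every present
 * leaf spte.  mmu_spte_walk() below starts the walk from the single
 * 64-bit root or from each valid PAE root as appropriate.
 */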
3188 static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3189                             inspect_spte_fn fn)
3190 {
3191         int i;
3192
3193         for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3194                 u64 ent = sp->spt[i];
3195
3196                 if (is_shadow_present_pte(ent)) {
3197                         if (!is_last_spte(ent, sp->role.level)) {
3198                                 struct kvm_mmu_page *child;
3199                                 child = page_header(ent & PT64_BASE_ADDR_MASK);
3200                                 __mmu_spte_walk(kvm, child, fn);
3201                         } else
3202                                 fn(kvm, sp, &sp->spt[i]);
3203                 }
3204         }
3205 }
3206
3207 static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3208 {
3209         int i;
3210         struct kvm_mmu_page *sp;
3211
3212         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3213                 return;
3214         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3215                 hpa_t root = vcpu->arch.mmu.root_hpa;
3216                 sp = page_header(root);
3217                 __mmu_spte_walk(vcpu->kvm, sp, fn);
3218                 return;
3219         }
3220         for (i = 0; i < 4; ++i) {
3221                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3222
3223                 if (root && VALID_PAGE(root)) {
3224                         root &= PT64_BASE_ADDR_MASK;
3225                         sp = page_header(root);
3226                         __mmu_spte_walk(vcpu->kvm, sp, fn);
3227                 }
3228         }
3229         return;
3230 }
3231
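/*
 * Audit one shadow page table page: for every leaf entry, translate the
 * gva through the guest's own page tables and complain if a present
 * spte maps a different host frame, or if a "notrap" non-present spte
 * shadows a gva the guest can translate successfully.
 */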
3232 static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3233                                 gva_t va, int level)
3234 {
3235         u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3236         int i;
3237         gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3238
3239         for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3240                 u64 ent = pt[i];
3241
3242                 if (ent == shadow_trap_nonpresent_pte)
3243                         continue;
3244
3245                 va = canonicalize(va);
3246                 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3247                         audit_mappings_page(vcpu, ent, va, level - 1);
3248                 else {
3249                         gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
3250                         gfn_t gfn = gpa >> PAGE_SHIFT;
3251                         pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3252                         hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
3253
3254                         if (is_error_pfn(pfn)) {
3255                                 kvm_release_pfn_clean(pfn);
3256                                 continue;
3257                         }
3258
3259                         if (is_shadow_present_pte(ent)
3260                             && (ent & PT64_BASE_ADDR_MASK) != hpa)
3261                                 printk(KERN_ERR "audit error: (%s) levels %d"
3262                                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
3263                                        audit_msg, vcpu->arch.mmu.root_level,
3264                                        va, gpa, hpa, ent,
3265                                        is_shadow_present_pte(ent));
3266                         else if (ent == shadow_notrap_nonpresent_pte
3267                                  && !is_error_hpa(hpa))
3268                                 printk(KERN_ERR "audit: (%s) notrap shadow,"
3269                                        " valid guest gva %lx\n", audit_msg, va);
3270                         kvm_release_pfn_clean(pfn);
3271
3272                 }
3273         }
3274 }
3275
3276 static void audit_mappings(struct kvm_vcpu *vcpu)
3277 {
3278         unsigned i;
3279
3280         if (vcpu->arch.mmu.root_level == 4)
3281                 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
3282         else
3283                 for (i = 0; i < 4; ++i)
3284                         if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
3285                                 audit_mappings_page(vcpu,
3286                                                     vcpu->arch.mmu.pae_root[i],
3287                                                     i << 30,
3288                                                     2);
3289 }
3290
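/*
 * Count the sptes reachable from all rmap heads.  A head with the low
 * bit clear is a single spte pointer; with the low bit set it points
 * at a chain of kvm_rmap_desc blocks holding RMAP_EXT sptes each.
 */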
3291 static int count_rmaps(struct kvm_vcpu *vcpu)
3292 {
3293         int nmaps = 0;
3294         int i, j, k;
3295
3296         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
3297                 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
3298                 struct kvm_rmap_desc *d;
3299
3300                 for (j = 0; j < m->npages; ++j) {
3301                         unsigned long *rmapp = &m->rmap[j];
3302
3303                         if (!*rmapp)
3304                                 continue;
3305                         if (!(*rmapp & 1)) {
3306                                 ++nmaps;
3307                                 continue;
3308                         }
3309                         d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
3310                         while (d) {
3311                                 for (k = 0; k < RMAP_EXT; ++k)
3312                                         if (d->sptes[k])
3313                                                 ++nmaps;
3314                                         else
3315                                                 break;
3316                                 d = d->more;
3317                         }
3318                 }
3319         }
3320         return nmaps;
3321 }
3322
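/*
 * Every writable spte is expected to be reachable from the rmap of the
 * gfn it maps; otherwise write protection could not find and clear it.
 * Complain (ratelimited) if the memslot or the rmap entry is missing.
 */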
3323 void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
3324 {
3325         unsigned long *rmapp;
3326         struct kvm_mmu_page *rev_sp;
3327         gfn_t gfn;
3328
3329         if (*sptep & PT_WRITABLE_MASK) {
3330                 rev_sp = page_header(__pa(sptep));
3331                 gfn = rev_sp->gfns[sptep - rev_sp->spt];
3332
3333                 if (!gfn_to_memslot(kvm, gfn)) {
3334                         if (!printk_ratelimit())
3335                                 return;
3336                         printk(KERN_ERR "%s: no memslot for gfn %ld\n",
3337                                          audit_msg, gfn);
3338                         printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
3339                                         audit_msg, sptep - rev_sp->spt,
3340                                         rev_sp->gfn);
3341                         dump_stack();
3342                         return;
3343                 }
3344
3345                 rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
3346                                     is_large_pte(*sptep));
3347                 if (!*rmapp) {
3348                         if (!printk_ratelimit())
3349                                 return;
3350                         printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3351                                          audit_msg, *sptep);
3352                         dump_stack();
3353                 }
3354         }
3355
3356 }
3357
3358 void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3359 {
3360         mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3361 }
3362
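/*
 * The reverse check: walk every last-level shadow page and verify each
 * present, writable entry through inspect_spte_has_rmap().
 */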
3363 static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
3364 {
3365         struct kvm_mmu_page *sp;
3366         int i;
3367
3368         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3369                 u64 *pt = sp->spt;
3370
3371                 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
3372                         continue;
3373
3374                 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3375                         u64 ent = pt[i];
3376
3377                         if (!(ent & PT_PRESENT_MASK))
3378                                 continue;
3379                         if (!(ent & PT_WRITABLE_MASK))
3380                                 continue;
3381                         inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
3382                 }
3383         }
3384         return;
3385 }
3386
3387 static void audit_rmap(struct kvm_vcpu *vcpu)
3388 {
3389         check_writable_mappings_rmap(vcpu);
3390         count_rmaps(vcpu);
3391 }
3392
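/*
 * Guest page-table frames that are shadowed (non-direct) and in sync
 * must be write-protected; report any writable spte still mapping them.
 */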
3393 static void audit_write_protection(struct kvm_vcpu *vcpu)
3394 {
3395         struct kvm_mmu_page *sp;
3396         struct kvm_memory_slot *slot;
3397         unsigned long *rmapp;
3398         u64 *spte;
3399         gfn_t gfn;
3400
3401         list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
3402                 if (sp->role.direct)
3403                         continue;
3404                 if (sp->unsync)
3405                         continue;
3406
3407                 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
3408                 slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
3409                 rmapp = &slot->rmap[gfn - slot->base_gfn];
3410
3411                 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3412                 while (spte) {
3413                         if (*spte & PT_WRITABLE_MASK)
3414                                 printk(KERN_ERR "%s: (%s) shadow page has "
3415                                 "writable mappings: gfn %lx role %x\n",
3416                                __func__, audit_msg, sp->gfn,
3417                                sp->role.word);
3418                         spte = rmap_next(vcpu->kvm, rmapp, spte);
3419                 }
3420         }
3421 }
3422
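/*
 * Top-level audit hook.  dbg is cleared for the duration so pgprintk()
 * tracing does not flood the log while auditing.  audit_mappings() is
 * skipped for the "pre pte write" hook, where guest and shadow tables
 * are expected to be transiently out of sync.
 */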
3423 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3424 {
3425         int olddbg = dbg;
3426
3427         dbg = 0;
3428         audit_msg = msg;
3429         audit_rmap(vcpu);
3430         audit_write_protection(vcpu);
3431         if (strcmp("pre pte write", audit_msg) != 0)
3432                 audit_mappings(vcpu);
3433         audit_writable_sptes_have_rmaps(vcpu);
3434         dbg = olddbg;
3435 }
3436
3437 #endif