KVM: Replace is_hwpoison_address with __get_user_pages
virt/kvm/kvm_main.c (linux-2.6.git)
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "iodev.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/percpu.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46 #include <linux/compat.h>
47 #include <linux/srcu.h>
48 #include <linux/hugetlb.h>
49 #include <linux/slab.h>
50
51 #include <asm/processor.h>
52 #include <asm/io.h>
53 #include <asm/uaccess.h>
54 #include <asm/pgtable.h>
55 #include <asm-generic/bitops/le.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/kvm.h>
62
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
65
66 /*
67  * Ordering of locks:
68  *
69  *              kvm->lock --> kvm->slots_lock --> kvm->irq_lock
70  */
71
72 DEFINE_SPINLOCK(kvm_lock);
73 LIST_HEAD(vm_list);
74
75 static cpumask_var_t cpus_hardware_enabled;
76 static int kvm_usage_count = 0;
77 static atomic_t hardware_enable_failed;
78
79 struct kmem_cache *kvm_vcpu_cache;
80 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81
82 static __read_mostly struct preempt_ops kvm_preempt_ops;
83
84 struct dentry *kvm_debugfs_dir;
85
86 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
87                            unsigned long arg);
88 static int hardware_enable_all(void);
89 static void hardware_disable_all(void);
90
91 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
92
93 bool kvm_rebooting;
94 EXPORT_SYMBOL_GPL(kvm_rebooting);
95
96 static bool largepages_enabled = true;
97
98 static struct page *hwpoison_page;
99 static pfn_t hwpoison_pfn;
100
101 static struct page *fault_page;
102 static pfn_t fault_pfn;
103
104 inline int kvm_is_mmio_pfn(pfn_t pfn)
105 {
106         if (pfn_valid(pfn)) {
107                 int reserved;
108                 struct page *tail = pfn_to_page(pfn);
109                 struct page *head = compound_trans_head(tail);
110                 reserved = PageReserved(head);
111                 if (head != tail) {
112                         /*
113                          * "head" is not a dangling pointer
114                          * (compound_trans_head takes care of that)
115                          * but the hugepage may have been split
116                          * from under us (and we may not hold a
117                          * reference count on the head page, so it
118                          * can be reused before we run PageReserved),
119                          * so we have to check PageTail before
120                          * returning what we just read.
121                          */
122                         smp_rmb();
123                         if (PageTail(tail))
124                                 return reserved;
125                 }
126                 return PageReserved(tail);
127         }
128
129         return true;
130 }
131
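/*
 * Illustrative sketch: callers gate struct-page operations on
 * kvm_is_mmio_pfn(), because MMIO/reserved pfns have no refcounted
 * struct page behind them.  A minimal example, mirroring
 * kvm_release_pfn_clean() further down:
 *
 *	if (!kvm_is_mmio_pfn(pfn))
 *		put_page(pfn_to_page(pfn));
 */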
132 /*
133  * Switches to specified vcpu, until a matching vcpu_put()
134  */
135 void vcpu_load(struct kvm_vcpu *vcpu)
136 {
137         int cpu;
138
139         mutex_lock(&vcpu->mutex);
140         cpu = get_cpu();
141         preempt_notifier_register(&vcpu->preempt_notifier);
142         kvm_arch_vcpu_load(vcpu, cpu);
143         put_cpu();
144 }
145
146 void vcpu_put(struct kvm_vcpu *vcpu)
147 {
148         preempt_disable();
149         kvm_arch_vcpu_put(vcpu);
150         preempt_notifier_unregister(&vcpu->preempt_notifier);
151         preempt_enable();
152         mutex_unlock(&vcpu->mutex);
153 }
154
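/*
 * Illustrative sketch: any access to per-vcpu architecture state is
 * bracketed by vcpu_load()/vcpu_put(), as kvm_vcpu_ioctl() does later
 * in this file:
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
 *	vcpu_put(vcpu);
 */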
155 static void ack_flush(void *_completed)
156 {
157 }
158
159 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
160 {
161         int i, cpu, me;
162         cpumask_var_t cpus;
163         bool called = true;
164         struct kvm_vcpu *vcpu;
165
166         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
167
168         me = get_cpu();
169         kvm_for_each_vcpu(i, vcpu, kvm) {
170                 kvm_make_request(req, vcpu);
171                 cpu = vcpu->cpu;
172
173                 /* Set ->requests bit before we read ->mode */
174                 smp_mb();
175
176                 if (cpus != NULL && cpu != -1 && cpu != me &&
177                       kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
178                         cpumask_set_cpu(cpu, cpus);
179         }
180         if (unlikely(cpus == NULL))
181                 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
182         else if (!cpumask_empty(cpus))
183                 smp_call_function_many(cpus, ack_flush, NULL, 1);
184         else
185                 called = false;
186         put_cpu();
187         free_cpumask_var(cpus);
188         return called;
189 }
190
191 void kvm_flush_remote_tlbs(struct kvm *kvm)
192 {
193         int dirty_count = kvm->tlbs_dirty;
194
195         smp_mb();
196         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
197                 ++kvm->stat.remote_tlb_flush;
198         cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
199 }
200
201 void kvm_reload_remote_mmus(struct kvm *kvm)
202 {
203         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
204 }
205
206 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
207 {
208         struct page *page;
209         int r;
210
211         mutex_init(&vcpu->mutex);
212         vcpu->cpu = -1;
213         vcpu->kvm = kvm;
214         vcpu->vcpu_id = id;
215         init_waitqueue_head(&vcpu->wq);
216         kvm_async_pf_vcpu_init(vcpu);
217
218         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
219         if (!page) {
220                 r = -ENOMEM;
221                 goto fail;
222         }
223         vcpu->run = page_address(page);
224
225         r = kvm_arch_vcpu_init(vcpu);
226         if (r < 0)
227                 goto fail_free_run;
228         return 0;
229
230 fail_free_run:
231         free_page((unsigned long)vcpu->run);
232 fail:
233         return r;
234 }
235 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
236
237 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
238 {
239         kvm_arch_vcpu_uninit(vcpu);
240         free_page((unsigned long)vcpu->run);
241 }
242 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
243
244 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
245 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
246 {
247         return container_of(mn, struct kvm, mmu_notifier);
248 }
249
250 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
251                                              struct mm_struct *mm,
252                                              unsigned long address)
253 {
254         struct kvm *kvm = mmu_notifier_to_kvm(mn);
255         int need_tlb_flush, idx;
256
257         /*
258          * When ->invalidate_page runs, the linux pte has been zapped
259          * already but the page is still allocated until
260          * ->invalidate_page returns. So if we increase the sequence
261          * here the kvm page fault will notice if the spte can't be
262          * established because the page is going to be freed. If
263          * instead the kvm page fault establishes the spte before
264          * ->invalidate_page runs, kvm_unmap_hva will release it
265          * before returning.
266          *
267          * The sequence increase only needs to be seen at spin_unlock
268          * time, and not at spin_lock time.
269          *
270          * Increasing the sequence after the spin_unlock would be
271          * unsafe because the kvm page fault could then establish the
272          * pte after kvm_unmap_hva returned, without noticing the page
273          * is going to be freed.
274          */
275         idx = srcu_read_lock(&kvm->srcu);
276         spin_lock(&kvm->mmu_lock);
277         kvm->mmu_notifier_seq++;
278         need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
279         spin_unlock(&kvm->mmu_lock);
280         srcu_read_unlock(&kvm->srcu, idx);
281
282         /* we have to flush the TLB before the pages can be freed */
283         if (need_tlb_flush)
284                 kvm_flush_remote_tlbs(kvm);
285
286 }
287
288 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
289                                         struct mm_struct *mm,
290                                         unsigned long address,
291                                         pte_t pte)
292 {
293         struct kvm *kvm = mmu_notifier_to_kvm(mn);
294         int idx;
295
296         idx = srcu_read_lock(&kvm->srcu);
297         spin_lock(&kvm->mmu_lock);
298         kvm->mmu_notifier_seq++;
299         kvm_set_spte_hva(kvm, address, pte);
300         spin_unlock(&kvm->mmu_lock);
301         srcu_read_unlock(&kvm->srcu, idx);
302 }
303
304 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
305                                                     struct mm_struct *mm,
306                                                     unsigned long start,
307                                                     unsigned long end)
308 {
309         struct kvm *kvm = mmu_notifier_to_kvm(mn);
310         int need_tlb_flush = 0, idx;
311
312         idx = srcu_read_lock(&kvm->srcu);
313         spin_lock(&kvm->mmu_lock);
314         /*
315          * The count increase must become visible at unlock time as no
316          * spte can be established without taking the mmu_lock and
317          * count is also read inside the mmu_lock critical section.
318          */
319         kvm->mmu_notifier_count++;
320         for (; start < end; start += PAGE_SIZE)
321                 need_tlb_flush |= kvm_unmap_hva(kvm, start);
322         need_tlb_flush |= kvm->tlbs_dirty;
323         spin_unlock(&kvm->mmu_lock);
324         srcu_read_unlock(&kvm->srcu, idx);
325
326         /* we have to flush the TLB before the pages can be freed */
327         if (need_tlb_flush)
328                 kvm_flush_remote_tlbs(kvm);
329 }
330
331 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
332                                                   struct mm_struct *mm,
333                                                   unsigned long start,
334                                                   unsigned long end)
335 {
336         struct kvm *kvm = mmu_notifier_to_kvm(mn);
337
338         spin_lock(&kvm->mmu_lock);
339         /*
340          * This sequence increase will notify the kvm page fault that
341          * the page that is going to be mapped in the spte could have
342          * been freed.
343          */
344         kvm->mmu_notifier_seq++;
345         /*
346          * The above sequence increase must be visible before the
347          * below count decrease but both values are read by the kvm
348          * page fault under mmu_lock spinlock so we don't need to add
349          * an smp_wmb() here in between the two.
350          */
351         kvm->mmu_notifier_count--;
352         spin_unlock(&kvm->mmu_lock);
353
354         BUG_ON(kvm->mmu_notifier_count < 0);
355 }
356
357 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
358                                               struct mm_struct *mm,
359                                               unsigned long address)
360 {
361         struct kvm *kvm = mmu_notifier_to_kvm(mn);
362         int young, idx;
363
364         idx = srcu_read_lock(&kvm->srcu);
365         spin_lock(&kvm->mmu_lock);
366         young = kvm_age_hva(kvm, address);
367         spin_unlock(&kvm->mmu_lock);
368         srcu_read_unlock(&kvm->srcu, idx);
369
370         if (young)
371                 kvm_flush_remote_tlbs(kvm);
372
373         return young;
374 }
375
376 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
377                                        struct mm_struct *mm,
378                                        unsigned long address)
379 {
380         struct kvm *kvm = mmu_notifier_to_kvm(mn);
381         int young, idx;
382
383         idx = srcu_read_lock(&kvm->srcu);
384         spin_lock(&kvm->mmu_lock);
385         young = kvm_test_age_hva(kvm, address);
386         spin_unlock(&kvm->mmu_lock);
387         srcu_read_unlock(&kvm->srcu, idx);
388
389         return young;
390 }
391
392 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
393                                      struct mm_struct *mm)
394 {
395         struct kvm *kvm = mmu_notifier_to_kvm(mn);
396         int idx;
397
398         idx = srcu_read_lock(&kvm->srcu);
399         kvm_arch_flush_shadow(kvm);
400         srcu_read_unlock(&kvm->srcu, idx);
401 }
402
403 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
404         .invalidate_page        = kvm_mmu_notifier_invalidate_page,
405         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
406         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
407         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
408         .test_young             = kvm_mmu_notifier_test_young,
409         .change_pte             = kvm_mmu_notifier_change_pte,
410         .release                = kvm_mmu_notifier_release,
411 };
412
413 static int kvm_init_mmu_notifier(struct kvm *kvm)
414 {
415         kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
416         return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
417 }
418
419 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
420
421 static int kvm_init_mmu_notifier(struct kvm *kvm)
422 {
423         return 0;
424 }
425
426 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
427
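/*
 * Illustrative sketch of how the page fault path consumes
 * mmu_notifier_seq and mmu_notifier_count (the real check is
 * mmu_notifier_retry() in kvm_host.h); this is only a rough outline of
 * the pattern:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	(retry: the hva range changed)
 *	... install the spte ...
 *	spin_unlock(&kvm->mmu_lock);
 */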
428 static struct kvm *kvm_create_vm(void)
429 {
430         int r, i;
431         struct kvm *kvm = kvm_arch_alloc_vm();
432
433         if (!kvm)
434                 return ERR_PTR(-ENOMEM);
435
436         r = kvm_arch_init_vm(kvm);
437         if (r)
438                 goto out_err_nodisable;
439
440         r = hardware_enable_all();
441         if (r)
442                 goto out_err_nodisable;
443
444 #ifdef CONFIG_HAVE_KVM_IRQCHIP
445         INIT_HLIST_HEAD(&kvm->mask_notifier_list);
446         INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
447 #endif
448
449         r = -ENOMEM;
450         kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
451         if (!kvm->memslots)
452                 goto out_err_nosrcu;
453         if (init_srcu_struct(&kvm->srcu))
454                 goto out_err_nosrcu;
455         for (i = 0; i < KVM_NR_BUSES; i++) {
456                 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
457                                         GFP_KERNEL);
458                 if (!kvm->buses[i])
459                         goto out_err;
460         }
461
462         r = kvm_init_mmu_notifier(kvm);
463         if (r)
464                 goto out_err;
465
466         kvm->mm = current->mm;
467         atomic_inc(&kvm->mm->mm_count);
468         spin_lock_init(&kvm->mmu_lock);
469         kvm_eventfd_init(kvm);
470         mutex_init(&kvm->lock);
471         mutex_init(&kvm->irq_lock);
472         mutex_init(&kvm->slots_lock);
473         atomic_set(&kvm->users_count, 1);
474         spin_lock(&kvm_lock);
475         list_add(&kvm->vm_list, &vm_list);
476         spin_unlock(&kvm_lock);
477
478         return kvm;
479
480 out_err:
481         cleanup_srcu_struct(&kvm->srcu);
482 out_err_nosrcu:
483         hardware_disable_all();
484 out_err_nodisable:
485         for (i = 0; i < KVM_NR_BUSES; i++)
486                 kfree(kvm->buses[i]);
487         kfree(kvm->memslots);
488         kvm_arch_free_vm(kvm);
489         return ERR_PTR(r);
490 }
491
492 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
493 {
494         if (!memslot->dirty_bitmap)
495                 return;
496
497         if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
498                 vfree(memslot->dirty_bitmap_head);
499         else
500                 kfree(memslot->dirty_bitmap_head);
501
502         memslot->dirty_bitmap = NULL;
503         memslot->dirty_bitmap_head = NULL;
504 }
505
506 /*
507  * Free any memory in @free but not in @dont.
508  */
509 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
510                                   struct kvm_memory_slot *dont)
511 {
512         int i;
513
514         if (!dont || free->rmap != dont->rmap)
515                 vfree(free->rmap);
516
517         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
518                 kvm_destroy_dirty_bitmap(free);
519
520
521         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
522                 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
523                         vfree(free->lpage_info[i]);
524                         free->lpage_info[i] = NULL;
525                 }
526         }
527
528         free->npages = 0;
529         free->rmap = NULL;
530 }
531
532 void kvm_free_physmem(struct kvm *kvm)
533 {
534         int i;
535         struct kvm_memslots *slots = kvm->memslots;
536
537         for (i = 0; i < slots->nmemslots; ++i)
538                 kvm_free_physmem_slot(&slots->memslots[i], NULL);
539
540         kfree(kvm->memslots);
541 }
542
543 static void kvm_destroy_vm(struct kvm *kvm)
544 {
545         int i;
546         struct mm_struct *mm = kvm->mm;
547
548         kvm_arch_sync_events(kvm);
549         spin_lock(&kvm_lock);
550         list_del(&kvm->vm_list);
551         spin_unlock(&kvm_lock);
552         kvm_free_irq_routing(kvm);
553         for (i = 0; i < KVM_NR_BUSES; i++)
554                 kvm_io_bus_destroy(kvm->buses[i]);
555         kvm_coalesced_mmio_free(kvm);
556 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
557         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
558 #else
559         kvm_arch_flush_shadow(kvm);
560 #endif
561         kvm_arch_destroy_vm(kvm);
562         kvm_free_physmem(kvm);
563         cleanup_srcu_struct(&kvm->srcu);
564         kvm_arch_free_vm(kvm);
565         hardware_disable_all();
566         mmdrop(mm);
567 }
568
569 void kvm_get_kvm(struct kvm *kvm)
570 {
571         atomic_inc(&kvm->users_count);
572 }
573 EXPORT_SYMBOL_GPL(kvm_get_kvm);
574
575 void kvm_put_kvm(struct kvm *kvm)
576 {
577         if (atomic_dec_and_test(&kvm->users_count))
578                 kvm_destroy_vm(kvm);
579 }
580 EXPORT_SYMBOL_GPL(kvm_put_kvm);
581
582
583 static int kvm_vm_release(struct inode *inode, struct file *filp)
584 {
585         struct kvm *kvm = filp->private_data;
586
587         kvm_irqfd_release(kvm);
588
589         kvm_put_kvm(kvm);
590         return 0;
591 }
592
593 #ifndef CONFIG_S390
594 /*
595  * Allocation size is twice as large as the actual dirty bitmap size.
596  * This makes it possible to do double buffering: see x86's
597  * kvm_vm_ioctl_get_dirty_log().
598  */
599 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
600 {
601         unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
602
603         if (dirty_bytes > PAGE_SIZE)
604                 memslot->dirty_bitmap = vzalloc(dirty_bytes);
605         else
606                 memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
607
608         if (!memslot->dirty_bitmap)
609                 return -ENOMEM;
610
611         memslot->dirty_bitmap_head = memslot->dirty_bitmap;
612         return 0;
613 }
614 #endif /* !CONFIG_S390 */
615
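/*
 * Illustrative sketch of the double buffering this enables: the slot
 * keeps dirty_bitmap_head pointing at the start of the double-sized
 * allocation, so a consumer can locate "the other half" roughly like
 * x86's kvm_vm_ioctl_get_dirty_log() does:
 *
 *	unsigned long *other = memslot->dirty_bitmap_head;
 *
 *	if (memslot->dirty_bitmap == other)
 *		other += kvm_dirty_bitmap_bytes(memslot) / sizeof(long);
 */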
616 /*
617  * Allocate some memory and give it an address in the guest physical address
618  * space.
619  *
620  * Discontiguous memory is allowed, mostly for framebuffers.
621  *
622  * Must be called holding kvm->slots_lock for write.
623  */
624 int __kvm_set_memory_region(struct kvm *kvm,
625                             struct kvm_userspace_memory_region *mem,
626                             int user_alloc)
627 {
628         int r;
629         gfn_t base_gfn;
630         unsigned long npages;
631         unsigned long i;
632         struct kvm_memory_slot *memslot;
633         struct kvm_memory_slot old, new;
634         struct kvm_memslots *slots, *old_memslots;
635
636         r = -EINVAL;
637         /* General sanity checks */
638         if (mem->memory_size & (PAGE_SIZE - 1))
639                 goto out;
640         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
641                 goto out;
642         if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
643                 goto out;
644         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
645                 goto out;
646         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
647                 goto out;
648
649         memslot = &kvm->memslots->memslots[mem->slot];
650         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
651         npages = mem->memory_size >> PAGE_SHIFT;
652
653         r = -EINVAL;
654         if (npages > KVM_MEM_MAX_NR_PAGES)
655                 goto out;
656
657         if (!npages)
658                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
659
660         new = old = *memslot;
661
662         new.id = mem->slot;
663         new.base_gfn = base_gfn;
664         new.npages = npages;
665         new.flags = mem->flags;
666
667         /* Disallow changing a memory slot's size. */
668         r = -EINVAL;
669         if (npages && old.npages && npages != old.npages)
670                 goto out_free;
671
672         /* Check for overlaps */
673         r = -EEXIST;
674         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
675                 struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
676
677                 if (s == memslot || !s->npages)
678                         continue;
679                 if (!((base_gfn + npages <= s->base_gfn) ||
680                       (base_gfn >= s->base_gfn + s->npages)))
681                         goto out_free;
682         }
683
684         /* Free page dirty bitmap if unneeded */
685         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
686                 new.dirty_bitmap = NULL;
687
688         r = -ENOMEM;
689
690         /* Allocate if a slot is being created */
691 #ifndef CONFIG_S390
692         if (npages && !new.rmap) {
693                 new.rmap = vzalloc(npages * sizeof(*new.rmap));
694
695                 if (!new.rmap)
696                         goto out_free;
697
698                 new.user_alloc = user_alloc;
699                 new.userspace_addr = mem->userspace_addr;
700         }
701         if (!npages)
702                 goto skip_lpage;
703
704         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
705                 unsigned long ugfn;
706                 unsigned long j;
707                 int lpages;
708                 int level = i + 2;
709
710                 /* Avoid unused variable warning if no large pages */
711                 (void)level;
712
713                 if (new.lpage_info[i])
714                         continue;
715
716                 lpages = 1 + ((base_gfn + npages - 1)
717                              >> KVM_HPAGE_GFN_SHIFT(level));
718                 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
719
720                 new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
721
722                 if (!new.lpage_info[i])
723                         goto out_free;
724
725                 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
726                         new.lpage_info[i][0].write_count = 1;
727                 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
728                         new.lpage_info[i][lpages - 1].write_count = 1;
729                 ugfn = new.userspace_addr >> PAGE_SHIFT;
730                 /*
731                  * If the gfn and userspace address are not aligned wrt each
732                  * other, or if explicitly asked to, disable large page
733                  * support for this slot
734                  */
735                 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
736                     !largepages_enabled)
737                         for (j = 0; j < lpages; ++j)
738                                 new.lpage_info[i][j].write_count = 1;
739         }
740
741 skip_lpage:
742
743         /* Allocate page dirty bitmap if needed */
744         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
745                 if (kvm_create_dirty_bitmap(&new) < 0)
746                         goto out_free;
747                 /* destroy any largepage mappings for dirty tracking */
748         }
749 #else  /* not defined CONFIG_S390 */
750         new.user_alloc = user_alloc;
751         if (user_alloc)
752                 new.userspace_addr = mem->userspace_addr;
753 #endif /* not defined CONFIG_S390 */
754
755         if (!npages) {
756                 r = -ENOMEM;
757                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
758                 if (!slots)
759                         goto out_free;
760                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
761                 if (mem->slot >= slots->nmemslots)
762                         slots->nmemslots = mem->slot + 1;
763                 slots->generation++;
764                 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
765
766                 old_memslots = kvm->memslots;
767                 rcu_assign_pointer(kvm->memslots, slots);
768                 synchronize_srcu_expedited(&kvm->srcu);
769                 /* From this point no new shadow pages pointing to a deleted
770                  * memslot will be created.
771                  *
772                  * validation of sp->gfn happens in:
773                  *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
774                  *      - kvm_is_visible_gfn (mmu_check_roots)
775                  */
776                 kvm_arch_flush_shadow(kvm);
777                 kfree(old_memslots);
778         }
779
780         r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
781         if (r)
782                 goto out_free;
783
784         /* map the pages in iommu page table */
785         if (npages) {
786                 r = kvm_iommu_map_pages(kvm, &new);
787                 if (r)
788                         goto out_free;
789         }
790
791         r = -ENOMEM;
792         slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
793         if (!slots)
794                 goto out_free;
795         memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
796         if (mem->slot >= slots->nmemslots)
797                 slots->nmemslots = mem->slot + 1;
798         slots->generation++;
799
800         /* actual memory is freed via old in kvm_free_physmem_slot below */
801         if (!npages) {
802                 new.rmap = NULL;
803                 new.dirty_bitmap = NULL;
804                 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
805                         new.lpage_info[i] = NULL;
806         }
807
808         slots->memslots[mem->slot] = new;
809         old_memslots = kvm->memslots;
810         rcu_assign_pointer(kvm->memslots, slots);
811         synchronize_srcu_expedited(&kvm->srcu);
812
813         kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
814
815         kvm_free_physmem_slot(&old, &new);
816         kfree(old_memslots);
817
818         return 0;
819
820 out_free:
821         kvm_free_physmem_slot(&new, &old);
822 out:
823         return r;
824
825 }
826 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
827
828 int kvm_set_memory_region(struct kvm *kvm,
829                           struct kvm_userspace_memory_region *mem,
830                           int user_alloc)
831 {
832         int r;
833
834         mutex_lock(&kvm->slots_lock);
835         r = __kvm_set_memory_region(kvm, mem, user_alloc);
836         mutex_unlock(&kvm->slots_lock);
837         return r;
838 }
839 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
840
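/*
 * Illustrative userspace sketch: this path is reached through the
 * KVM_SET_USER_MEMORY_REGION ioctl on a VM fd ("backing" and "vm_fd"
 * are hypothetical names):
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
 *		.guest_phys_addr = 0x100000,
 *		.memory_size     = 0x200000,
 *		.userspace_addr  = (unsigned long)backing,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */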
841 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
842                                    struct
843                                    kvm_userspace_memory_region *mem,
844                                    int user_alloc)
845 {
846         if (mem->slot >= KVM_MEMORY_SLOTS)
847                 return -EINVAL;
848         return kvm_set_memory_region(kvm, mem, user_alloc);
849 }
850
851 int kvm_get_dirty_log(struct kvm *kvm,
852                         struct kvm_dirty_log *log, int *is_dirty)
853 {
854         struct kvm_memory_slot *memslot;
855         int r, i;
856         unsigned long n;
857         unsigned long any = 0;
858
859         r = -EINVAL;
860         if (log->slot >= KVM_MEMORY_SLOTS)
861                 goto out;
862
863         memslot = &kvm->memslots->memslots[log->slot];
864         r = -ENOENT;
865         if (!memslot->dirty_bitmap)
866                 goto out;
867
868         n = kvm_dirty_bitmap_bytes(memslot);
869
870         for (i = 0; !any && i < n/sizeof(long); ++i)
871                 any = memslot->dirty_bitmap[i];
872
873         r = -EFAULT;
874         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
875                 goto out;
876
877         if (any)
878                 *is_dirty = 1;
879
880         r = 0;
881 out:
882         return r;
883 }
884
885 void kvm_disable_largepages(void)
886 {
887         largepages_enabled = false;
888 }
889 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
890
891 int is_error_page(struct page *page)
892 {
893         return page == bad_page || page == hwpoison_page || page == fault_page;
894 }
895 EXPORT_SYMBOL_GPL(is_error_page);
896
897 int is_error_pfn(pfn_t pfn)
898 {
899         return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
900 }
901 EXPORT_SYMBOL_GPL(is_error_pfn);
902
903 int is_hwpoison_pfn(pfn_t pfn)
904 {
905         return pfn == hwpoison_pfn;
906 }
907 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
908
909 int is_fault_pfn(pfn_t pfn)
910 {
911         return pfn == fault_pfn;
912 }
913 EXPORT_SYMBOL_GPL(is_fault_pfn);
914
915 static inline unsigned long bad_hva(void)
916 {
917         return PAGE_OFFSET;
918 }
919
920 int kvm_is_error_hva(unsigned long addr)
921 {
922         return addr == bad_hva();
923 }
924 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
925
926 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
927                                                 gfn_t gfn)
928 {
929         int i;
930
931         for (i = 0; i < slots->nmemslots; ++i) {
932                 struct kvm_memory_slot *memslot = &slots->memslots[i];
933
934                 if (gfn >= memslot->base_gfn
935                     && gfn < memslot->base_gfn + memslot->npages)
936                         return memslot;
937         }
938         return NULL;
939 }
940
941 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
942 {
943         return __gfn_to_memslot(kvm_memslots(kvm), gfn);
944 }
945 EXPORT_SYMBOL_GPL(gfn_to_memslot);
946
947 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
948 {
949         int i;
950         struct kvm_memslots *slots = kvm_memslots(kvm);
951
952         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
953                 struct kvm_memory_slot *memslot = &slots->memslots[i];
954
955                 if (memslot->flags & KVM_MEMSLOT_INVALID)
956                         continue;
957
958                 if (gfn >= memslot->base_gfn
959                     && gfn < memslot->base_gfn + memslot->npages)
960                         return 1;
961         }
962         return 0;
963 }
964 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
965
966 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
967 {
968         struct vm_area_struct *vma;
969         unsigned long addr, size;
970
971         size = PAGE_SIZE;
972
973         addr = gfn_to_hva(kvm, gfn);
974         if (kvm_is_error_hva(addr))
975                 return PAGE_SIZE;
976
977         down_read(&current->mm->mmap_sem);
978         vma = find_vma(current->mm, addr);
979         if (!vma)
980                 goto out;
981
982         size = vma_kernel_pagesize(vma);
983
984 out:
985         up_read(&current->mm->mmap_sem);
986
987         return size;
988 }
989
990 int memslot_id(struct kvm *kvm, gfn_t gfn)
991 {
992         int i;
993         struct kvm_memslots *slots = kvm_memslots(kvm);
994         struct kvm_memory_slot *memslot = NULL;
995
996         for (i = 0; i < slots->nmemslots; ++i) {
997                 memslot = &slots->memslots[i];
998
999                 if (gfn >= memslot->base_gfn
1000                     && gfn < memslot->base_gfn + memslot->npages)
1001                         break;
1002         }
1003
1004         return memslot - slots->memslots;
1005 }
1006
1007 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1008                                      gfn_t *nr_pages)
1009 {
1010         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1011                 return bad_hva();
1012
1013         if (nr_pages)
1014                 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1015
1016         return gfn_to_hva_memslot(slot, gfn);
1017 }
1018
1019 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1020 {
1021         return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
1022 }
1023 EXPORT_SYMBOL_GPL(gfn_to_hva);
1024
1025 static pfn_t get_fault_pfn(void)
1026 {
1027         get_page(fault_page);
1028         return fault_pfn;
1029 }
1030
1031 static inline int check_user_page_hwpoison(unsigned long addr)
1032 {
1033         int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
1034
1035         rc = __get_user_pages(current, current->mm, addr, 1,
1036                               flags, NULL, NULL, NULL);
1037         return rc == -EHWPOISON;
1038 }
1039
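/*
 * This helper is the replacement for the old is_hwpoison_address()
 * check mentioned in the patch title: with FOLL_HWPOISON,
 * __get_user_pages() returns -EHWPOISON for a poisoned page instead of
 * a plain failure.  The caller must hold mmap_sem for read, as
 * hva_to_pfn() below does, roughly:
 *
 *	down_read(&current->mm->mmap_sem);
 *	if (check_user_page_hwpoison(addr)) {
 *		up_read(&current->mm->mmap_sem);
 *		get_page(hwpoison_page);
 *		return page_to_pfn(hwpoison_page);
 *	}
 */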
1040 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
1041                         bool *async, bool write_fault, bool *writable)
1042 {
1043         struct page *page[1];
1044         int npages = 0;
1045         pfn_t pfn;
1046
1047         /* we can do it either atomically or asynchronously, not both */
1048         BUG_ON(atomic && async);
1049
1050         BUG_ON(!write_fault && !writable);
1051
1052         if (writable)
1053                 *writable = true;
1054
1055         if (atomic || async)
1056                 npages = __get_user_pages_fast(addr, 1, 1, page);
1057
1058         if (unlikely(npages != 1) && !atomic) {
1059                 might_sleep();
1060
1061                 if (writable)
1062                         *writable = write_fault;
1063
1064                 npages = get_user_pages_fast(addr, 1, write_fault, page);
1065
1066                 /* map read fault as writable if possible */
1067                 if (unlikely(!write_fault) && npages == 1) {
1068                         struct page *wpage[1];
1069
1070                         npages = __get_user_pages_fast(addr, 1, 1, wpage);
1071                         if (npages == 1) {
1072                                 *writable = true;
1073                                 put_page(page[0]);
1074                                 page[0] = wpage[0];
1075                         }
1076                         npages = 1;
1077                 }
1078         }
1079
1080         if (unlikely(npages != 1)) {
1081                 struct vm_area_struct *vma;
1082
1083                 if (atomic)
1084                         return get_fault_pfn();
1085
1086                 down_read(&current->mm->mmap_sem);
1087                 if (check_user_page_hwpoison(addr)) {
1088                         up_read(&current->mm->mmap_sem);
1089                         get_page(hwpoison_page);
1090                         return page_to_pfn(hwpoison_page);
1091                 }
1092
1093                 vma = find_vma_intersection(current->mm, addr, addr+1);
1094
1095                 if (vma == NULL)
1096                         pfn = get_fault_pfn();
1097                 else if ((vma->vm_flags & VM_PFNMAP)) {
1098                         pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
1099                                 vma->vm_pgoff;
1100                         BUG_ON(!kvm_is_mmio_pfn(pfn));
1101                 } else {
1102                         if (async && (vma->vm_flags & VM_WRITE))
1103                                 *async = true;
1104                         pfn = get_fault_pfn();
1105                 }
1106                 up_read(&current->mm->mmap_sem);
1107         } else
1108                 pfn = page_to_pfn(page[0]);
1109
1110         return pfn;
1111 }
1112
1113 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
1114 {
1115         return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
1116 }
1117 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
1118
1119 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
1120                           bool write_fault, bool *writable)
1121 {
1122         unsigned long addr;
1123
1124         if (async)
1125                 *async = false;
1126
1127         addr = gfn_to_hva(kvm, gfn);
1128         if (kvm_is_error_hva(addr)) {
1129                 get_page(bad_page);
1130                 return page_to_pfn(bad_page);
1131         }
1132
1133         return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
1134 }
1135
1136 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1137 {
1138         return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
1139 }
1140 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1141
1142 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
1143                        bool write_fault, bool *writable)
1144 {
1145         return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
1146 }
1147 EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1148
1149 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1150 {
1151         return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
1152 }
1153 EXPORT_SYMBOL_GPL(gfn_to_pfn);
1154
1155 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1156                       bool *writable)
1157 {
1158         return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
1159 }
1160 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1161
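/*
 * Illustrative sketch: a read fault can still learn whether the host
 * mapping ended up writable, so the caller may create a writable spte
 * opportunistically ("writable" and "gfn" are the caller's variables):
 *
 *	bool writable;
 *	pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, false, &writable);
 *
 *	if (is_error_pfn(pfn))
 *		return -EFAULT;
 *	... map the pfn, write-enable only if "writable" ...
 *	kvm_release_pfn_clean(pfn);
 */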
1162 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
1163                          struct kvm_memory_slot *slot, gfn_t gfn)
1164 {
1165         unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1166         return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
1167 }
1168
1169 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1170                                                                   int nr_pages)
1171 {
1172         unsigned long addr;
1173         gfn_t entry;
1174
1175         addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1176         if (kvm_is_error_hva(addr))
1177                 return -1;
1178
1179         if (entry < nr_pages)
1180                 return 0;
1181
1182         return __get_user_pages_fast(addr, nr_pages, 1, pages);
1183 }
1184 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1185
1186 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1187 {
1188         pfn_t pfn;
1189
1190         pfn = gfn_to_pfn(kvm, gfn);
1191         if (!kvm_is_mmio_pfn(pfn))
1192                 return pfn_to_page(pfn);
1193
1194         WARN_ON(kvm_is_mmio_pfn(pfn));
1195
1196         get_page(bad_page);
1197         return bad_page;
1198 }
1199
1200 EXPORT_SYMBOL_GPL(gfn_to_page);
1201
1202 void kvm_release_page_clean(struct page *page)
1203 {
1204         kvm_release_pfn_clean(page_to_pfn(page));
1205 }
1206 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1207
1208 void kvm_release_pfn_clean(pfn_t pfn)
1209 {
1210         if (!kvm_is_mmio_pfn(pfn))
1211                 put_page(pfn_to_page(pfn));
1212 }
1213 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1214
1215 void kvm_release_page_dirty(struct page *page)
1216 {
1217         kvm_release_pfn_dirty(page_to_pfn(page));
1218 }
1219 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1220
1221 void kvm_release_pfn_dirty(pfn_t pfn)
1222 {
1223         kvm_set_pfn_dirty(pfn);
1224         kvm_release_pfn_clean(pfn);
1225 }
1226 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1227
1228 void kvm_set_page_dirty(struct page *page)
1229 {
1230         kvm_set_pfn_dirty(page_to_pfn(page));
1231 }
1232 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1233
1234 void kvm_set_pfn_dirty(pfn_t pfn)
1235 {
1236         if (!kvm_is_mmio_pfn(pfn)) {
1237                 struct page *page = pfn_to_page(pfn);
1238                 if (!PageReserved(page))
1239                         SetPageDirty(page);
1240         }
1241 }
1242 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1243
1244 void kvm_set_pfn_accessed(pfn_t pfn)
1245 {
1246         if (!kvm_is_mmio_pfn(pfn))
1247                 mark_page_accessed(pfn_to_page(pfn));
1248 }
1249 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1250
1251 void kvm_get_pfn(pfn_t pfn)
1252 {
1253         if (!kvm_is_mmio_pfn(pfn))
1254                 get_page(pfn_to_page(pfn));
1255 }
1256 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1257
1258 static int next_segment(unsigned long len, int offset)
1259 {
1260         if (len > PAGE_SIZE - offset)
1261                 return PAGE_SIZE - offset;
1262         else
1263                 return len;
1264 }
1265
1266 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1267                         int len)
1268 {
1269         int r;
1270         unsigned long addr;
1271
1272         addr = gfn_to_hva(kvm, gfn);
1273         if (kvm_is_error_hva(addr))
1274                 return -EFAULT;
1275         r = copy_from_user(data, (void __user *)addr + offset, len);
1276         if (r)
1277                 return -EFAULT;
1278         return 0;
1279 }
1280 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1281
1282 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1283 {
1284         gfn_t gfn = gpa >> PAGE_SHIFT;
1285         int seg;
1286         int offset = offset_in_page(gpa);
1287         int ret;
1288
1289         while ((seg = next_segment(len, offset)) != 0) {
1290                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1291                 if (ret < 0)
1292                         return ret;
1293                 offset = 0;
1294                 len -= seg;
1295                 data += seg;
1296                 ++gfn;
1297         }
1298         return 0;
1299 }
1300 EXPORT_SYMBOL_GPL(kvm_read_guest);
1301
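/*
 * Illustrative sketch: kvm_read_guest() hides the page-crossing loop,
 * so a guest structure can be pulled in with one call (the struct and
 * gpa are hypothetical):
 *
 *	struct guest_desc desc;
 *
 *	if (kvm_read_guest(kvm, desc_gpa, &desc, sizeof(desc)))
 *		return -EFAULT;
 */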
1302 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1303                           unsigned long len)
1304 {
1305         int r;
1306         unsigned long addr;
1307         gfn_t gfn = gpa >> PAGE_SHIFT;
1308         int offset = offset_in_page(gpa);
1309
1310         addr = gfn_to_hva(kvm, gfn);
1311         if (kvm_is_error_hva(addr))
1312                 return -EFAULT;
1313         pagefault_disable();
1314         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1315         pagefault_enable();
1316         if (r)
1317                 return -EFAULT;
1318         return 0;
1319 }
1320 EXPORT_SYMBOL(kvm_read_guest_atomic);
1321
1322 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1323                          int offset, int len)
1324 {
1325         int r;
1326         unsigned long addr;
1327
1328         addr = gfn_to_hva(kvm, gfn);
1329         if (kvm_is_error_hva(addr))
1330                 return -EFAULT;
1331         r = copy_to_user((void __user *)addr + offset, data, len);
1332         if (r)
1333                 return -EFAULT;
1334         mark_page_dirty(kvm, gfn);
1335         return 0;
1336 }
1337 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1338
1339 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1340                     unsigned long len)
1341 {
1342         gfn_t gfn = gpa >> PAGE_SHIFT;
1343         int seg;
1344         int offset = offset_in_page(gpa);
1345         int ret;
1346
1347         while ((seg = next_segment(len, offset)) != 0) {
1348                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1349                 if (ret < 0)
1350                         return ret;
1351                 offset = 0;
1352                 len -= seg;
1353                 data += seg;
1354                 ++gfn;
1355         }
1356         return 0;
1357 }
1358
1359 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1360                               gpa_t gpa)
1361 {
1362         struct kvm_memslots *slots = kvm_memslots(kvm);
1363         int offset = offset_in_page(gpa);
1364         gfn_t gfn = gpa >> PAGE_SHIFT;
1365
1366         ghc->gpa = gpa;
1367         ghc->generation = slots->generation;
1368         ghc->memslot = __gfn_to_memslot(slots, gfn);
1369         ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1370         if (!kvm_is_error_hva(ghc->hva))
1371                 ghc->hva += offset;
1372         else
1373                 return -EFAULT;
1374
1375         return 0;
1376 }
1377 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1378
1379 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1380                            void *data, unsigned long len)
1381 {
1382         struct kvm_memslots *slots = kvm_memslots(kvm);
1383         int r;
1384
1385         if (slots->generation != ghc->generation)
1386                 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1387
1388         if (kvm_is_error_hva(ghc->hva))
1389                 return -EFAULT;
1390
1391         r = copy_to_user((void __user *)ghc->hva, data, len);
1392         if (r)
1393                 return -EFAULT;
1394         mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1395
1396         return 0;
1397 }
1398 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
1399
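/*
 * Illustrative sketch: the cache suits a value rewritten frequently at
 * a fixed gpa; initialize once, then write through the cache ("gpa" and
 * "val" are hypothetical):
 *
 *	struct gfn_to_hva_cache ghc;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *		return -EFAULT;
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */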
1400 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1401 {
1402         return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
1403                                     offset, len);
1404 }
1405 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1406
1407 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1408 {
1409         gfn_t gfn = gpa >> PAGE_SHIFT;
1410         int seg;
1411         int offset = offset_in_page(gpa);
1412         int ret;
1413
1414         while ((seg = next_segment(len, offset)) != 0) {
1415                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1416                 if (ret < 0)
1417                         return ret;
1418                 offset = 0;
1419                 len -= seg;
1420                 ++gfn;
1421         }
1422         return 0;
1423 }
1424 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1425
1426 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1427                              gfn_t gfn)
1428 {
1429         if (memslot && memslot->dirty_bitmap) {
1430                 unsigned long rel_gfn = gfn - memslot->base_gfn;
1431
1432                 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
1433         }
1434 }
1435
1436 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1437 {
1438         struct kvm_memory_slot *memslot;
1439
1440         memslot = gfn_to_memslot(kvm, gfn);
1441         mark_page_dirty_in_slot(kvm, memslot, gfn);
1442 }
1443
1444 /*
1445  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1446  */
1447 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1448 {
1449         DEFINE_WAIT(wait);
1450
1451         for (;;) {
1452                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1453
1454                 if (kvm_arch_vcpu_runnable(vcpu)) {
1455                         kvm_make_request(KVM_REQ_UNHALT, vcpu);
1456                         break;
1457                 }
1458                 if (kvm_cpu_has_pending_timer(vcpu))
1459                         break;
1460                 if (signal_pending(current))
1461                         break;
1462
1463                 schedule();
1464         }
1465
1466         finish_wait(&vcpu->wq, &wait);
1467 }
1468
1469 void kvm_resched(struct kvm_vcpu *vcpu)
1470 {
1471         if (!need_resched())
1472                 return;
1473         cond_resched();
1474 }
1475 EXPORT_SYMBOL_GPL(kvm_resched);
1476
1477 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
1478 {
1479         ktime_t expires;
1480         DEFINE_WAIT(wait);
1481
1482         prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1483
1484         /* Sleep for 100 us, and hope lock-holder got scheduled */
1485         expires = ktime_add_ns(ktime_get(), 100000UL);
1486         schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1487
1488         finish_wait(&vcpu->wq, &wait);
1489 }
1490 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1491
1492 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1493 {
1494         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1495         struct page *page;
1496
1497         if (vmf->pgoff == 0)
1498                 page = virt_to_page(vcpu->run);
1499 #ifdef CONFIG_X86
1500         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1501                 page = virt_to_page(vcpu->arch.pio_data);
1502 #endif
1503 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1504         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1505                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1506 #endif
1507         else
1508                 return VM_FAULT_SIGBUS;
1509         get_page(page);
1510         vmf->page = page;
1511         return 0;
1512 }
1513
1514 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1515         .fault = kvm_vcpu_fault,
1516 };
1517
1518 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1519 {
1520         vma->vm_ops = &kvm_vcpu_vm_ops;
1521         return 0;
1522 }
1523
1524 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1525 {
1526         struct kvm_vcpu *vcpu = filp->private_data;
1527
1528         kvm_put_kvm(vcpu->kvm);
1529         return 0;
1530 }
1531
1532 static struct file_operations kvm_vcpu_fops = {
1533         .release        = kvm_vcpu_release,
1534         .unlocked_ioctl = kvm_vcpu_ioctl,
1535         .compat_ioctl   = kvm_vcpu_ioctl,
1536         .mmap           = kvm_vcpu_mmap,
1537         .llseek         = noop_llseek,
1538 };
1539
1540 /*
1541  * Allocates an inode for the vcpu.
1542  */
1543 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1544 {
1545         return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
1546 }
1547
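/*
 * Illustrative userspace sketch: the fd returned below comes from the
 * KVM_CREATE_VCPU ioctl, and the kvm_run area is mmap()ed through it
 * (vm_fd/kvm_fd are the VM and /dev/kvm descriptors):
 *
 *	vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *	size    = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run     = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, vcpu_fd, 0);
 */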
1548 /*
1549  * Creates some virtual cpus.  Good luck creating more than one.
1550  */
1551 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1552 {
1553         int r;
1554         struct kvm_vcpu *vcpu, *v;
1555
1556         vcpu = kvm_arch_vcpu_create(kvm, id);
1557         if (IS_ERR(vcpu))
1558                 return PTR_ERR(vcpu);
1559
1560         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1561
1562         r = kvm_arch_vcpu_setup(vcpu);
1563         if (r)
1564                 return r;
1565
1566         mutex_lock(&kvm->lock);
1567         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1568                 r = -EINVAL;
1569                 goto vcpu_destroy;
1570         }
1571
1572         kvm_for_each_vcpu(r, v, kvm)
1573                 if (v->vcpu_id == id) {
1574                         r = -EEXIST;
1575                         goto vcpu_destroy;
1576                 }
1577
1578         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1579
1580         /* Now it's all set up, let userspace reach it */
1581         kvm_get_kvm(kvm);
1582         r = create_vcpu_fd(vcpu);
1583         if (r < 0) {
1584                 kvm_put_kvm(kvm);
1585                 goto vcpu_destroy;
1586         }
1587
1588         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1589         smp_wmb();
1590         atomic_inc(&kvm->online_vcpus);
1591
1592 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1593         if (kvm->bsp_vcpu_id == id)
1594                 kvm->bsp_vcpu = vcpu;
1595 #endif
1596         mutex_unlock(&kvm->lock);
1597         return r;
1598
1599 vcpu_destroy:
1600         mutex_unlock(&kvm->lock);
1601         kvm_arch_vcpu_destroy(vcpu);
1602         return r;
1603 }
1604
1605 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1606 {
1607         if (sigset) {
1608                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1609                 vcpu->sigset_active = 1;
1610                 vcpu->sigset = *sigset;
1611         } else
1612                 vcpu->sigset_active = 0;
1613         return 0;
1614 }
1615
1616 static long kvm_vcpu_ioctl(struct file *filp,
1617                            unsigned int ioctl, unsigned long arg)
1618 {
1619         struct kvm_vcpu *vcpu = filp->private_data;
1620         void __user *argp = (void __user *)arg;
1621         int r;
1622         struct kvm_fpu *fpu = NULL;
1623         struct kvm_sregs *kvm_sregs = NULL;
1624
1625         if (vcpu->kvm->mm != current->mm)
1626                 return -EIO;
1627
1628 #if defined(CONFIG_S390) || defined(CONFIG_PPC)
1629         /*
1630          * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1631          * so vcpu_load() (which takes vcpu->mutex) would break them.
1632          */
1633         if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1634                 return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1635 #endif
1636
1637
1638         vcpu_load(vcpu);
1639         switch (ioctl) {
1640         case KVM_RUN:
1641                 r = -EINVAL;
1642                 if (arg)
1643                         goto out;
1644                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1645                 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
1646                 break;
1647         case KVM_GET_REGS: {
1648                 struct kvm_regs *kvm_regs;
1649
1650                 r = -ENOMEM;
1651                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1652                 if (!kvm_regs)
1653                         goto out;
1654                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1655                 if (r)
1656                         goto out_free1;
1657                 r = -EFAULT;
1658                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1659                         goto out_free1;
1660                 r = 0;
1661 out_free1:
1662                 kfree(kvm_regs);
1663                 break;
1664         }
1665         case KVM_SET_REGS: {
1666                 struct kvm_regs *kvm_regs;
1667
1668                 r = -ENOMEM;
1669                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1670                 if (!kvm_regs)
1671                         goto out;
1672                 r = -EFAULT;
1673                 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1674                         goto out_free2;
1675                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1676                 if (r)
1677                         goto out_free2;
1678                 r = 0;
1679 out_free2:
1680                 kfree(kvm_regs);
1681                 break;
1682         }
1683         case KVM_GET_SREGS: {
1684                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1685                 r = -ENOMEM;
1686                 if (!kvm_sregs)
1687                         goto out;
1688                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1689                 if (r)
1690                         goto out;
1691                 r = -EFAULT;
1692                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1693                         goto out;
1694                 r = 0;
1695                 break;
1696         }
1697         case KVM_SET_SREGS: {
1698                 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1699                 r = -ENOMEM;
1700                 if (!kvm_sregs)
1701                         goto out;
1702                 r = -EFAULT;
1703                 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1704                         goto out;
1705                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1706                 if (r)
1707                         goto out;
1708                 r = 0;
1709                 break;
1710         }
1711         case KVM_GET_MP_STATE: {
1712                 struct kvm_mp_state mp_state;
1713
1714                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1715                 if (r)
1716                         goto out;
1717                 r = -EFAULT;
1718                 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1719                         goto out;
1720                 r = 0;
1721                 break;
1722         }
1723         case KVM_SET_MP_STATE: {
1724                 struct kvm_mp_state mp_state;
1725
1726                 r = -EFAULT;
1727                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1728                         goto out;
1729                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1730                 if (r)
1731                         goto out;
1732                 r = 0;
1733                 break;
1734         }
1735         case KVM_TRANSLATE: {
1736                 struct kvm_translation tr;
1737
1738                 r = -EFAULT;
1739                 if (copy_from_user(&tr, argp, sizeof tr))
1740                         goto out;
1741                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
1742                 if (r)
1743                         goto out;
1744                 r = -EFAULT;
1745                 if (copy_to_user(argp, &tr, sizeof tr))
1746                         goto out;
1747                 r = 0;
1748                 break;
1749         }
1750         case KVM_SET_GUEST_DEBUG: {
1751                 struct kvm_guest_debug dbg;
1752
1753                 r = -EFAULT;
1754                 if (copy_from_user(&dbg, argp, sizeof dbg))
1755                         goto out;
1756                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
1757                 if (r)
1758                         goto out;
1759                 r = 0;
1760                 break;
1761         }
1762         case KVM_SET_SIGNAL_MASK: {
1763                 struct kvm_signal_mask __user *sigmask_arg = argp;
1764                 struct kvm_signal_mask kvm_sigmask;
1765                 sigset_t sigset, *p;
1766
1767                 p = NULL;
1768                 if (argp) {
1769                         r = -EFAULT;
1770                         if (copy_from_user(&kvm_sigmask, argp,
1771                                            sizeof kvm_sigmask))
1772                                 goto out;
1773                         r = -EINVAL;
1774                         if (kvm_sigmask.len != sizeof sigset)
1775                                 goto out;
1776                         r = -EFAULT;
1777                         if (copy_from_user(&sigset, sigmask_arg->sigset,
1778                                            sizeof sigset))
1779                                 goto out;
1780                         p = &sigset;
1781                 }
1782                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1783                 break;
1784         }
1785         case KVM_GET_FPU: {
1786                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1787                 r = -ENOMEM;
1788                 if (!fpu)
1789                         goto out;
1790                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1791                 if (r)
1792                         goto out;
1793                 r = -EFAULT;
1794                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1795                         goto out;
1796                 r = 0;
1797                 break;
1798         }
1799         case KVM_SET_FPU: {
1800                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1801                 r = -ENOMEM;
1802                 if (!fpu)
1803                         goto out;
1804                 r = -EFAULT;
1805                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1806                         goto out;
1807                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1808                 if (r)
1809                         goto out;
1810                 r = 0;
1811                 break;
1812         }
1813         default:
1814                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1815         }
1816 out:
1817         vcpu_put(vcpu);
1818         kfree(fpu);
1819         kfree(kvm_sregs);
1820         return r;
1821 }
1822
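/*
 * Dispatcher for VM-wide ioctls issued on a VM file descriptor: vcpu
 * creation, memory slot setup, dirty logging, coalesced MMIO, irqfd and
 * ioeventfd wiring.  Unknown commands fall through to the architecture
 * handler and, failing that, to the assigned-device handler.
 */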
1823 static long kvm_vm_ioctl(struct file *filp,
1824                            unsigned int ioctl, unsigned long arg)
1825 {
1826         struct kvm *kvm = filp->private_data;
1827         void __user *argp = (void __user *)arg;
1828         int r;
1829
1830         if (kvm->mm != current->mm)
1831                 return -EIO;
1832         switch (ioctl) {
1833         case KVM_CREATE_VCPU:
1834                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1835                 if (r < 0)
1836                         goto out;
1837                 break;
1838         case KVM_SET_USER_MEMORY_REGION: {
1839                 struct kvm_userspace_memory_region kvm_userspace_mem;
1840
1841                 r = -EFAULT;
1842                 if (copy_from_user(&kvm_userspace_mem, argp,
1843                                                 sizeof kvm_userspace_mem))
1844                         goto out;
1845
1846                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
1847                 if (r)
1848                         goto out;
1849                 break;
1850         }
1851         case KVM_GET_DIRTY_LOG: {
1852                 struct kvm_dirty_log log;
1853
1854                 r = -EFAULT;
1855                 if (copy_from_user(&log, argp, sizeof log))
1856                         goto out;
1857                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1858                 if (r)
1859                         goto out;
1860                 break;
1861         }
1862 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1863         case KVM_REGISTER_COALESCED_MMIO: {
1864                 struct kvm_coalesced_mmio_zone zone;
1865                 r = -EFAULT;
1866                 if (copy_from_user(&zone, argp, sizeof zone))
1867                         goto out;
1868                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1869                 if (r)
1870                         goto out;
1871                 r = 0;
1872                 break;
1873         }
1874         case KVM_UNREGISTER_COALESCED_MMIO: {
1875                 struct kvm_coalesced_mmio_zone zone;
1876                 r = -EFAULT;
1877                 if (copy_from_user(&zone, argp, sizeof zone))
1878                         goto out;
1879                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1880                 if (r)
1881                         goto out;
1882                 r = 0;
1883                 break;
1884         }
1885 #endif
1886         case KVM_IRQFD: {
1887                 struct kvm_irqfd data;
1888
1889                 r = -EFAULT;
1890                 if (copy_from_user(&data, argp, sizeof data))
1891                         goto out;
1892                 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
1893                 break;
1894         }
1895         case KVM_IOEVENTFD: {
1896                 struct kvm_ioeventfd data;
1897
1898                 r = -EFAULT;
1899                 if (copy_from_user(&data, argp, sizeof data))
1900                         goto out;
1901                 r = kvm_ioeventfd(kvm, &data);
1902                 break;
1903         }
1904 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1905         case KVM_SET_BOOT_CPU_ID:
1906                 r = 0;
1907                 mutex_lock(&kvm->lock);
1908                 if (atomic_read(&kvm->online_vcpus) != 0)
1909                         r = -EBUSY;
1910                 else
1911                         kvm->bsp_vcpu_id = arg;
1912                 mutex_unlock(&kvm->lock);
1913                 break;
1914 #endif
1915         default:
1916                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1917                 if (r == -ENOTTY)
1918                         r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
1919         }
1920 out:
1921         return r;
1922 }
1923
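/*
 * 32-bit compat path: a 32-bit userspace passes the dirty bitmap pointer as
 * a compat_uptr_t, so KVM_GET_DIRTY_LOG is translated into the native
 * struct kvm_dirty_log here; all other ioctls are forwarded to
 * kvm_vm_ioctl() unchanged.
 */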
1924 #ifdef CONFIG_COMPAT
1925 struct compat_kvm_dirty_log {
1926         __u32 slot;
1927         __u32 padding1;
1928         union {
1929                 compat_uptr_t dirty_bitmap; /* one bit per page */
1930                 __u64 padding2;
1931         };
1932 };
1933
1934 static long kvm_vm_compat_ioctl(struct file *filp,
1935                            unsigned int ioctl, unsigned long arg)
1936 {
1937         struct kvm *kvm = filp->private_data;
1938         int r;
1939
1940         if (kvm->mm != current->mm)
1941                 return -EIO;
1942         switch (ioctl) {
1943         case KVM_GET_DIRTY_LOG: {
1944                 struct compat_kvm_dirty_log compat_log;
1945                 struct kvm_dirty_log log;
1946
1947                 r = -EFAULT;
1948                 if (copy_from_user(&compat_log, (void __user *)arg,
1949                                    sizeof(compat_log)))
1950                         goto out;
1951                 log.slot         = compat_log.slot;
1952                 log.padding1     = compat_log.padding1;
1953                 log.padding2     = compat_log.padding2;
1954                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
1955
1956                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1957                 if (r)
1958                         goto out;
1959                 break;
1960         }
1961         default:
1962                 r = kvm_vm_ioctl(filp, ioctl, arg);
1963         }
1964
1965 out:
1966         return r;
1967 }
1968 #endif
1969
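/*
 * Fault handler backing mmap() of a VM file descriptor: the page offset is
 * interpreted as a guest frame number, translated to a host virtual address
 * and pinned with get_user_pages().  Illustrative userspace sketch (vm_fd
 * and gfn are placeholders):
 *
 *	mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	     vm_fd, gfn << PAGE_SHIFT);
 */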
1970 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1971 {
1972         struct page *page[1];
1973         unsigned long addr;
1974         int npages;
1975         gfn_t gfn = vmf->pgoff;
1976         struct kvm *kvm = vma->vm_file->private_data;
1977
1978         addr = gfn_to_hva(kvm, gfn);
1979         if (kvm_is_error_hva(addr))
1980                 return VM_FAULT_SIGBUS;
1981
1982         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1983                                 NULL);
1984         if (unlikely(npages != 1))
1985                 return VM_FAULT_SIGBUS;
1986
1987         vmf->page = page[0];
1988         return 0;
1989 }
1990
1991 static const struct vm_operations_struct kvm_vm_vm_ops = {
1992         .fault = kvm_vm_fault,
1993 };
1994
1995 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1996 {
1997         vma->vm_ops = &kvm_vm_vm_ops;
1998         return 0;
1999 }
2000
2001 static struct file_operations kvm_vm_fops = {
2002         .release        = kvm_vm_release,
2003         .unlocked_ioctl = kvm_vm_ioctl,
2004 #ifdef CONFIG_COMPAT
2005         .compat_ioctl   = kvm_vm_compat_ioctl,
2006 #endif
2007         .mmap           = kvm_vm_mmap,
2008         .llseek         = noop_llseek,
2009 };
2010
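/*
 * KVM_CREATE_VM: build a struct kvm, initialise the coalesced MMIO ring
 * when configured, and hand userspace an anonymous-inode file descriptor
 * backed by kvm_vm_fops.
 */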
2011 static int kvm_dev_ioctl_create_vm(void)
2012 {
2013         int r;
2014         struct kvm *kvm;
2015
2016         kvm = kvm_create_vm();
2017         if (IS_ERR(kvm))
2018                 return PTR_ERR(kvm);
2019 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2020         r = kvm_coalesced_mmio_init(kvm);
2021         if (r < 0) {
2022                 kvm_put_kvm(kvm);
2023                 return r;
2024         }
2025 #endif
2026         r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
2027         if (r < 0)
2028                 kvm_put_kvm(kvm);
2029
2030         return r;
2031 }
2032
2033 static long kvm_dev_ioctl_check_extension_generic(long arg)
2034 {
2035         switch (arg) {
2036         case KVM_CAP_USER_MEMORY:
2037         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2038         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2039 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2040         case KVM_CAP_SET_BOOT_CPU_ID:
2041 #endif
2042         case KVM_CAP_INTERNAL_ERROR_DATA:
2043                 return 1;
2044 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2045         case KVM_CAP_IRQ_ROUTING:
2046                 return KVM_MAX_IRQ_ROUTES;
2047 #endif
2048         default:
2049                 break;
2050         }
2051         return kvm_dev_ioctl_check_extension(arg);
2052 }
2053
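/*
 * ioctls on /dev/kvm itself.  A minimal userspace sequence (sketch, error
 * handling omitted) looks like:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int version = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 */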
2054 static long kvm_dev_ioctl(struct file *filp,
2055                           unsigned int ioctl, unsigned long arg)
2056 {
2057         long r = -EINVAL;
2058
2059         switch (ioctl) {
2060         case KVM_GET_API_VERSION:
2061                 r = -EINVAL;
2062                 if (arg)
2063                         goto out;
2064                 r = KVM_API_VERSION;
2065                 break;
2066         case KVM_CREATE_VM:
2067                 r = -EINVAL;
2068                 if (arg)
2069                         goto out;
2070                 r = kvm_dev_ioctl_create_vm();
2071                 break;
2072         case KVM_CHECK_EXTENSION:
2073                 r = kvm_dev_ioctl_check_extension_generic(arg);
2074                 break;
2075         case KVM_GET_VCPU_MMAP_SIZE:
2076                 r = -EINVAL;
2077                 if (arg)
2078                         goto out;
2079                 r = PAGE_SIZE;     /* struct kvm_run */
2080 #ifdef CONFIG_X86
2081                 r += PAGE_SIZE;    /* pio data page */
2082 #endif
2083 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2084                 r += PAGE_SIZE;    /* coalesced mmio ring page */
2085 #endif
2086                 break;
2087         case KVM_TRACE_ENABLE:
2088         case KVM_TRACE_PAUSE:
2089         case KVM_TRACE_DISABLE:
2090                 r = -EOPNOTSUPP;
2091                 break;
2092         default:
2093                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
2094         }
2095 out:
2096         return r;
2097 }
2098
2099 static struct file_operations kvm_chardev_ops = {
2100         .unlocked_ioctl = kvm_dev_ioctl,
2101         .compat_ioctl   = kvm_dev_ioctl,
2102         .llseek         = noop_llseek,
2103 };
2104
2105 static struct miscdevice kvm_dev = {
2106         .minor = KVM_MINOR,
2107         .name  = "kvm",
2108         .fops  = &kvm_chardev_ops,
2109 };
2110
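/*
 * Enable virtualization on the CPU this runs on; callers reach the right
 * CPU via IPIs or the CPU hotplug notifier.  cpus_hardware_enabled records
 * which CPUs are already enabled so the operation stays idempotent.
 */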
2111 static void hardware_enable_nolock(void *junk)
2112 {
2113         int cpu = raw_smp_processor_id();
2114         int r;
2115
2116         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2117                 return;
2118
2119         cpumask_set_cpu(cpu, cpus_hardware_enabled);
2120
2121         r = kvm_arch_hardware_enable(NULL);
2122
2123         if (r) {
2124                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2125                 atomic_inc(&hardware_enable_failed);
2126                 printk(KERN_INFO "kvm: enabling virtualization on "
2127                                  "CPU%d failed\n", cpu);
2128         }
2129 }
2130
2131 static void hardware_enable(void *junk)
2132 {
2133         spin_lock(&kvm_lock);
2134         hardware_enable_nolock(junk);
2135         spin_unlock(&kvm_lock);
2136 }
2137
2138 static void hardware_disable_nolock(void *junk)
2139 {
2140         int cpu = raw_smp_processor_id();
2141
2142         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2143                 return;
2144         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2145         kvm_arch_hardware_disable(NULL);
2146 }
2147
2148 static void hardware_disable(void *junk)
2149 {
2150         spin_lock(&kvm_lock);
2151         hardware_disable_nolock(junk);
2152         spin_unlock(&kvm_lock);
2153 }
2154
2155 static void hardware_disable_all_nolock(void)
2156 {
2157         BUG_ON(!kvm_usage_count);
2158
2159         kvm_usage_count--;
2160         if (!kvm_usage_count)
2161                 on_each_cpu(hardware_disable_nolock, NULL, 1);
2162 }
2163
2164 static void hardware_disable_all(void)
2165 {
2166         spin_lock(&kvm_lock);
2167         hardware_disable_all_nolock();
2168         spin_unlock(&kvm_lock);
2169 }
2170
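/*
 * Hardware virtualization is reference counted through kvm_usage_count:
 * creating the first VM enables it on every online CPU, destroying the
 * last VM disables it again.
 */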
2171 static int hardware_enable_all(void)
2172 {
2173         int r = 0;
2174
2175         spin_lock(&kvm_lock);
2176
2177         kvm_usage_count++;
2178         if (kvm_usage_count == 1) {
2179                 atomic_set(&hardware_enable_failed, 0);
2180                 on_each_cpu(hardware_enable_nolock, NULL, 1);
2181
2182                 if (atomic_read(&hardware_enable_failed)) {
2183                         hardware_disable_all_nolock();
2184                         r = -EBUSY;
2185                 }
2186         }
2187
2188         spin_unlock(&kvm_lock);
2189
2190         return r;
2191 }
2192
2193 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2194                            void *v)
2195 {
2196         int cpu = (long)v;
2197
2198         if (!kvm_usage_count)
2199                 return NOTIFY_OK;
2200
2201         val &= ~CPU_TASKS_FROZEN;
2202         switch (val) {
2203         case CPU_DYING:
2204                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2205                        cpu);
2206                 hardware_disable(NULL);
2207                 break;
2208         case CPU_STARTING:
2209                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2210                        cpu);
2211                 hardware_enable(NULL);
2212                 break;
2213         }
2214         return NOTIFY_OK;
2215 }
2216
2217
2218 asmlinkage void kvm_spurious_fault(void)
2219 {
2220         /* Fault while not rebooting.  We want the trace. */
2221         BUG();
2222 }
2223 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
2224
2225 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2226                       void *v)
2227 {
2228         /*
2229          * Some BIOSes (well, at least mine) hang on reboot if the CPU is
2230          * still in VMX root mode.
2231          *
2232          * Intel TXT also requires VMX to be off on all CPUs at shutdown.
2233          */
2234         printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2235         kvm_rebooting = true;
2236         on_each_cpu(hardware_disable_nolock, NULL, 1);
2237         return NOTIFY_OK;
2238 }
2239
2240 static struct notifier_block kvm_reboot_notifier = {
2241         .notifier_call = kvm_reboot,
2242         .priority = 0,
2243 };
2244
2245 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2246 {
2247         int i;
2248
2249         for (i = 0; i < bus->dev_count; i++) {
2250                 struct kvm_io_device *pos = bus->devs[i];
2251
2252                 kvm_iodevice_destructor(pos);
2253         }
2254         kfree(bus);
2255 }
2256
2257 /* kvm_io_bus_write - called under kvm->slots_lock */
2258 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2259                      int len, const void *val)
2260 {
2261         int i;
2262         struct kvm_io_bus *bus;
2263
2264         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2265         for (i = 0; i < bus->dev_count; i++)
2266                 if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
2267                         return 0;
2268         return -EOPNOTSUPP;
2269 }
2270
2271 /* kvm_io_bus_read - called under kvm->slots_lock */
2272 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2273                     int len, void *val)
2274 {
2275         int i;
2276         struct kvm_io_bus *bus;
2277
2278         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2279         for (i = 0; i < bus->dev_count; i++)
2280                 if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
2281                         return 0;
2282         return -EOPNOTSUPP;
2283 }
2284
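/*
 * Bus updates follow a copy/publish scheme: the current kvm_io_bus is
 * duplicated, modified, published with rcu_assign_pointer() and the old
 * copy is freed only after synchronize_srcu_expedited(), so SRCU readers
 * never see a half-updated device array.  Hedged usage sketch (the device
 * variable is hypothetical):
 *
 *	mutex_lock(&kvm->slots_lock);
 *	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &mydev->iodev);
 *	mutex_unlock(&kvm->slots_lock);
 */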
2285 /* Caller must hold slots_lock. */
2286 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2287                             struct kvm_io_device *dev)
2288 {
2289         struct kvm_io_bus *new_bus, *bus;
2290
2291         bus = kvm->buses[bus_idx];
2292         if (bus->dev_count >= NR_IOBUS_DEVS)
2293                 return -ENOSPC;
2294
2295         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2296         if (!new_bus)
2297                 return -ENOMEM;
2298         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2299         new_bus->devs[new_bus->dev_count++] = dev;
2300         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2301         synchronize_srcu_expedited(&kvm->srcu);
2302         kfree(bus);
2303
2304         return 0;
2305 }
2306
2307 /* Caller must hold slots_lock. */
2308 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2309                               struct kvm_io_device *dev)
2310 {
2311         int i, r;
2312         struct kvm_io_bus *new_bus, *bus;
2313
2314         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2315         if (!new_bus)
2316                 return -ENOMEM;
2317
2318         bus = kvm->buses[bus_idx];
2319         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2320
2321         r = -ENOENT;
2322         for (i = 0; i < new_bus->dev_count; i++)
2323                 if (new_bus->devs[i] == dev) {
2324                         r = 0;
2325                         new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
2326                         break;
2327                 }
2328
2329         if (r) {
2330                 kfree(new_bus);
2331                 return r;
2332         }
2333
2334         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2335         synchronize_srcu_expedited(&kvm->srcu);
2336         kfree(bus);
2337         return r;
2338 }
2339
2340 static struct notifier_block kvm_cpu_notifier = {
2341         .notifier_call = kvm_cpu_hotplug,
2342 };
2343
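/*
 * debugfs statistics: each debugfs_entries item names a u32 counter at a
 * fixed offset inside struct kvm or struct kvm_vcpu; the getters below sum
 * that counter over all VMs (and, for vcpu stats, over all vcpus).  With
 * debugfs mounted in the usual place the files appear under
 * /sys/kernel/debug/kvm/.
 */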
2344 static int vm_stat_get(void *_offset, u64 *val)
2345 {
2346         unsigned offset = (long)_offset;
2347         struct kvm *kvm;
2348
2349         *val = 0;
2350         spin_lock(&kvm_lock);
2351         list_for_each_entry(kvm, &vm_list, vm_list)
2352                 *val += *(u32 *)((void *)kvm + offset);
2353         spin_unlock(&kvm_lock);
2354         return 0;
2355 }
2356
2357 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2358
2359 static int vcpu_stat_get(void *_offset, u64 *val)
2360 {
2361         unsigned offset = (long)_offset;
2362         struct kvm *kvm;
2363         struct kvm_vcpu *vcpu;
2364         int i;
2365
2366         *val = 0;
2367         spin_lock(&kvm_lock);
2368         list_for_each_entry(kvm, &vm_list, vm_list)
2369                 kvm_for_each_vcpu(i, vcpu, kvm)
2370                         *val += *(u32 *)((void *)vcpu + offset);
2371
2372         spin_unlock(&kvm_lock);
2373         return 0;
2374 }
2375
2376 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2377
2378 static const struct file_operations *stat_fops[] = {
2379         [KVM_STAT_VCPU] = &vcpu_stat_fops,
2380         [KVM_STAT_VM]   = &vm_stat_fops,
2381 };
2382
2383 static void kvm_init_debug(void)
2384 {
2385         struct kvm_stats_debugfs_item *p;
2386
2387         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2388         for (p = debugfs_entries; p->name; ++p)
2389                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2390                                                 (void *)(long)p->offset,
2391                                                 stat_fops[p->kind]);
2392 }
2393
2394 static void kvm_exit_debug(void)
2395 {
2396         struct kvm_stats_debugfs_item *p;
2397
2398         for (p = debugfs_entries; p->name; ++p)
2399                 debugfs_remove(p->dentry);
2400         debugfs_remove(kvm_debugfs_dir);
2401 }
2402
2403 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2404 {
2405         if (kvm_usage_count)
2406                 hardware_disable_nolock(NULL);
2407         return 0;
2408 }
2409
2410 static int kvm_resume(struct sys_device *dev)
2411 {
2412         if (kvm_usage_count) {
2413                 WARN_ON(spin_is_locked(&kvm_lock));
2414                 hardware_enable_nolock(NULL);
2415         }
2416         return 0;
2417 }
2418
2419 static struct sysdev_class kvm_sysdev_class = {
2420         .name = "kvm",
2421         .suspend = kvm_suspend,
2422         .resume = kvm_resume,
2423 };
2424
2425 static struct sys_device kvm_sysdev = {
2426         .id = 0,
2427         .cls = &kvm_sysdev_class,
2428 };
2429
2430 struct page *bad_page;
2431 pfn_t bad_pfn;
2432
2433 static inline
2434 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2435 {
2436         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2437 }
2438
2439 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2440 {
2441         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2442
2443         kvm_arch_vcpu_load(vcpu, cpu);
2444 }
2445
2446 static void kvm_sched_out(struct preempt_notifier *pn,
2447                           struct task_struct *next)
2448 {
2449         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2450
2451         kvm_arch_vcpu_put(vcpu);
2452 }
2453
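/*
 * Module-wide initialisation, roughly in order: architecture init, the
 * special bad/hwpoison/fault pages, hardware setup plus a compatibility
 * check on every online CPU, the CPU-hotplug/reboot/sysdev hooks, the vcpu
 * kmem cache, async page fault support, and finally the /dev/kvm misc
 * device.  Each failure label unwinds the steps taken before it.
 */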
2454 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2455                   struct module *module)
2456 {
2457         int r;
2458         int cpu;
2459
2460         r = kvm_arch_init(opaque);
2461         if (r)
2462                 goto out_fail;
2463
2464         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2465
2466         if (bad_page == NULL) {
2467                 r = -ENOMEM;
2468                 goto out;
2469         }
2470
2471         bad_pfn = page_to_pfn(bad_page);
2472
2473         hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2474
2475         if (hwpoison_page == NULL) {
2476                 r = -ENOMEM;
2477                 goto out_free_0;
2478         }
2479
2480         hwpoison_pfn = page_to_pfn(hwpoison_page);
2481
2482         fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2483
2484         if (fault_page == NULL) {
2485                 r = -ENOMEM;
2486                 goto out_free_0;
2487         }
2488
2489         fault_pfn = page_to_pfn(fault_page);
2490
2491         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2492                 r = -ENOMEM;
2493                 goto out_free_0;
2494         }
2495
2496         r = kvm_arch_hardware_setup();
2497         if (r < 0)
2498                 goto out_free_0a;
2499
2500         for_each_online_cpu(cpu) {
2501                 smp_call_function_single(cpu,
2502                                 kvm_arch_check_processor_compat,
2503                                 &r, 1);
2504                 if (r < 0)
2505                         goto out_free_1;
2506         }
2507
2508         r = register_cpu_notifier(&kvm_cpu_notifier);
2509         if (r)
2510                 goto out_free_2;
2511         register_reboot_notifier(&kvm_reboot_notifier);
2512
2513         r = sysdev_class_register(&kvm_sysdev_class);
2514         if (r)
2515                 goto out_free_3;
2516
2517         r = sysdev_register(&kvm_sysdev);
2518         if (r)
2519                 goto out_free_4;
2520
2521         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2522         if (!vcpu_align)
2523                 vcpu_align = __alignof__(struct kvm_vcpu);
2524         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
2525                                            0, NULL);
2526         if (!kvm_vcpu_cache) {
2527                 r = -ENOMEM;
2528                 goto out_free_5;
2529         }
2530
2531         r = kvm_async_pf_init();
2532         if (r)
2533                 goto out_free;
2534
2535         kvm_chardev_ops.owner = module;
2536         kvm_vm_fops.owner = module;
2537         kvm_vcpu_fops.owner = module;
2538
2539         r = misc_register(&kvm_dev);
2540         if (r) {
2541                 printk(KERN_ERR "kvm: misc device register failed\n");
2542                 goto out_unreg;
2543         }
2544
2545         kvm_preempt_ops.sched_in = kvm_sched_in;
2546         kvm_preempt_ops.sched_out = kvm_sched_out;
2547
2548         kvm_init_debug();
2549
2550         return 0;
2551
2552 out_unreg:
2553         kvm_async_pf_deinit();
2554 out_free:
2555         kmem_cache_destroy(kvm_vcpu_cache);
2556 out_free_5:
2557         sysdev_unregister(&kvm_sysdev);
2558 out_free_4:
2559         sysdev_class_unregister(&kvm_sysdev_class);
2560 out_free_3:
2561         unregister_reboot_notifier(&kvm_reboot_notifier);
2562         unregister_cpu_notifier(&kvm_cpu_notifier);
2563 out_free_2:
2564 out_free_1:
2565         kvm_arch_hardware_unsetup();
2566 out_free_0a:
2567         free_cpumask_var(cpus_hardware_enabled);
2568 out_free_0:
2569         if (fault_page)
2570                 __free_page(fault_page);
2571         if (hwpoison_page)
2572                 __free_page(hwpoison_page);
2573         __free_page(bad_page);
2574 out:
2575         kvm_arch_exit();
2576 out_fail:
2577         return r;
2578 }
2579 EXPORT_SYMBOL_GPL(kvm_init);
2580
2581 void kvm_exit(void)
2582 {
2583         kvm_exit_debug();
2584         misc_deregister(&kvm_dev);
2585         kmem_cache_destroy(kvm_vcpu_cache);
2586         kvm_async_pf_deinit();
2587         sysdev_unregister(&kvm_sysdev);
2588         sysdev_class_unregister(&kvm_sysdev_class);
2589         unregister_reboot_notifier(&kvm_reboot_notifier);
2590         unregister_cpu_notifier(&kvm_cpu_notifier);
2591         on_each_cpu(hardware_disable_nolock, NULL, 1);
2592         kvm_arch_hardware_unsetup();
2593         kvm_arch_exit();
2594         free_cpumask_var(cpus_hardware_enabled);
             __free_page(fault_page);
2595         __free_page(hwpoison_page);
2596         __free_page(bad_page);
2597 }
2598 EXPORT_SYMBOL_GPL(kvm_exit);