4856a7dcbd7fb8742a11c15e4e4d59deb0808f1e
[linux-3.10.git] / virt / kvm / kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "iodev.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/percpu.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46 #include <linux/compat.h>
47 #include <linux/srcu.h>
48 #include <linux/hugetlb.h>
49 #include <linux/slab.h>
50
51 #include <asm/processor.h>
52 #include <asm/io.h>
53 #include <asm/uaccess.h>
54 #include <asm/pgtable.h>
55 #include <asm-generic/bitops/le.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/kvm.h>
62
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
65
66 /*
67  * Ordering of locks:
68  *
69  *              kvm->lock --> kvm->slots_lock --> kvm->irq_lock
70  */
71
/* Protects vm_list and the hardware enable/disable bookkeeping below. */
DEFINE_SPINLOCK(kvm_lock);
/* All VMs on this host, linked through kvm->vm_list. */
LIST_HEAD(vm_list);

/* CPUs on which hardware virtualization is currently enabled. */
static cpumask_var_t cpus_hardware_enabled;
/* Number of live VMs; hardware is enabled while this is non-zero. */
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

/* Set when the host is rebooting so guest entry can bail out gracefully. */
bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

static bool largepages_enabled = true;

/* Sentinel page/pfn pair reported for hardware-poisoned guest memory. */
static struct page *hwpoison_page;
static pfn_t hwpoison_pfn;

/* Sentinel page/pfn pair reported when resolving a host address fails. */
static struct page *fault_page;
static pfn_t fault_pfn;
103
/*
 * Return true if @pfn should be treated as MMIO: either it has no valid
 * struct page at all, or its page is marked reserved.  Transparent huge
 * pages require the careful head/tail re-check below.
 */
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn)) {
                int reserved;
                struct page *tail = pfn_to_page(pfn);
                struct page *head = compound_trans_head(tail);
                reserved = PageReserved(head);
                if (head != tail) {
                        /*
                         * "head" is not a dangling pointer
                         * (compound_trans_head takes care of that)
                         * but the hugepage may have been splitted
                         * from under us (and we may not hold a
                         * reference count on the head page so it can
                         * be reused before we run PageReferenced), so
                         * we've to check PageTail before returning
                         * what we just read.
                         */
                        smp_rmb();
                        if (PageTail(tail))
                                return reserved;
                }
                /* Not (or no longer) a compound page: trust the tail page. */
                return PageReserved(tail);
        }

        /* No struct page backs this pfn: it must be MMIO. */
        return true;
}
131
132 /*
133  * Switches to specified vcpu, until a matching vcpu_put()
134  */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        /* Serialize users of this vcpu until the matching vcpu_put(). */
        mutex_lock(&vcpu->mutex);
        /* Pin the cpu so the preempt notifier and arch load agree on it. */
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}
145
/* Undo vcpu_load(): unload arch state and release the vcpu mutex. */
void vcpu_put(struct kvm_vcpu *vcpu)
{
        /* Keep arch unload and notifier unregister on one cpu. */
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}
154
/*
 * Empty IPI handler: the interrupt itself is what kicks the target cpu
 * out of guest mode; there is no work to do here.
 */
static void ack_flush(void *_completed)
{
}
158
/*
 * Set request bit @req on every vcpu and IPI the cpus currently running
 * in guest mode so they notice it.  Returns true if at least one cpu was
 * (or may have been) interrupted.
 */
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        int i, cpu, me;
        cpumask_var_t cpus;
        bool called = true;
        struct kvm_vcpu *vcpu;

        /* Allocation may fail; the NULL case falls back to all cpus below. */
        zalloc_cpumask_var(&cpus, GFP_ATOMIC);

        me = get_cpu();
        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_make_request(req, vcpu);
                cpu = vcpu->cpu;

                /* Set ->requests bit before we read ->mode */
                smp_mb();

                if (cpus != NULL && cpu != -1 && cpu != me &&
                      kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
                        cpumask_set_cpu(cpu, cpus);
        }
        if (unlikely(cpus == NULL))
                /* No mask available: conservatively IPI every online cpu. */
                smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
        else if (!cpumask_empty(cpus))
                smp_call_function_many(cpus, ack_flush, NULL, 1);
        else
                called = false;
        put_cpu();
        free_cpumask_var(cpus);
        return called;
}
190
/* Request a TLB flush on all vcpus and account the remote flush. */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int dirty_count = kvm->tlbs_dirty;

        smp_mb();
        if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.remote_tlb_flush;
        /* Only clear tlbs_dirty if nobody dirtied it since we sampled it. */
        cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
200
/* Ask every vcpu to reload its MMU state. */
void kvm_reload_remote_mmus(struct kvm *kvm)
{
        make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
205
/*
 * Initialize a vcpu: common fields, the shared kvm_run page, then the
 * arch-specific part.  Returns 0 on success or a negative errno.
 */
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);

        /* vcpu->run is a whole zeroed page shared with userspace via mmap. */
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);
236
/* Tear down what kvm_vcpu_init() set up: arch state, then the run page. */
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
243
244 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
/* Map an embedded mmu_notifier back to its owning struct kvm. */
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}
249
/*
 * mmu_notifier callback: a single host page is being invalidated.  Bump
 * mmu_notifier_seq and unmap the corresponding sptes so the guest cannot
 * keep using a page the kernel is about to free.
 */
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush, idx;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only need to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);

}
287
/*
 * mmu_notifier callback: the host pte at @address changed to @pte.
 * Update the shadow pte in place under mmu_lock; the seq bump lets a
 * concurrent kvm page fault notice the change.
 */
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        kvm_set_spte_hva(kvm, address, pte);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);
}
303
/*
 * mmu_notifier callback: start of a host range invalidation.  Raise
 * mmu_notifier_count to block new sptes for the range, unmap existing
 * ones page by page, and flush remote TLBs if anything was zapped.
 */
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        need_tlb_flush |= kvm->tlbs_dirty;
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        /* we've to flush the tlb before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}
330
/*
 * mmu_notifier callback: end of a host range invalidation.  Bump the
 * sequence, then drop mmu_notifier_count so page faults may establish
 * sptes again.
 */
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * a smb_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        /* range_start/range_end must balance; going negative is a bug. */
        BUG_ON(kvm->mmu_notifier_count < 0);
}
356
/*
 * mmu_notifier callback: test-and-clear the young/accessed state of the
 * sptes for @address.  Flushes remote TLBs when an accessed bit was
 * cleared so the information stays accurate.
 */
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}
375
/*
 * mmu_notifier callback: non-destructive young check for @address —
 * like clear_flush_young but without clearing, hence no TLB flush.
 */
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young, idx;

        idx = srcu_read_lock(&kvm->srcu);
        spin_lock(&kvm->mmu_lock);
        young = kvm_test_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, idx);

        return young;
}
391
/*
 * mmu_notifier callback: the address space is going away; drop all
 * shadow page tables now.
 */
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_arch_flush_shadow(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
}
402
/* Callback table registered with the core mm in kvm_init_mmu_notifier(). */
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .test_young             = kvm_mmu_notifier_test_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};
412
/* Hook this VM's notifier into the current task's mm.  0 or -errno. */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}
418
419 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
420
/* No mmu-notifier support on this config: nothing to register. */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        return 0;
}
425
426 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
427
/*
 * Allocate and initialize a new VM.  Returns the kvm pointer or an
 * ERR_PTR.  The error labels unwind in reverse order of setup: srcu is
 * only cleaned up after init_srcu_struct succeeded, hardware is only
 * disabled after hardware_enable_all succeeded.
 */
static struct kvm *kvm_create_vm(void)
{
        int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        r = kvm_arch_init_vm(kvm);
        if (r)
                goto out_err_nodisable;

        r = hardware_enable_all();
        if (r)
                goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
        INIT_HLIST_HEAD(&kvm->mask_notifier_list);
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

        r = -ENOMEM;
        kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!kvm->memslots)
                goto out_err_nosrcu;
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_nosrcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                                        GFP_KERNEL);
                if (!kvm->buses[i])
                        goto out_err;
        }

        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;

        /* Pin the creator's mm; dropped again in kvm_destroy_vm(). */
        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);

        return kvm;

out_err:
        cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
        hardware_disable_all();
out_err_nodisable:
        /* kfree(NULL) is a no-op, so partially filled arrays are fine. */
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm->buses[i]);
        kfree(kvm->memslots);
        kvm_arch_free_vm(kvm);
        return ERR_PTR(r);
}
491
/*
 * Free a memslot's dirty bitmap.  The free routine must mirror the
 * allocation in kvm_create_dirty_bitmap(): vmalloc'd if the doubled
 * bitmap exceeded a page, kmalloc'd otherwise.  dirty_bitmap_head is
 * the true allocation base (dirty_bitmap may point into it).
 */
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        if (!memslot->dirty_bitmap)
                return;

        if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
                vfree(memslot->dirty_bitmap_head);
        else
                kfree(memslot->dirty_bitmap_head);

        memslot->dirty_bitmap = NULL;
        memslot->dirty_bitmap_head = NULL;
}
505
506 /*
507  * Free any memory in @free but not in @dont.
508  */
/*
 * Free any memory in @free but not in @dont.  Passing @dont == NULL
 * frees everything the slot owns (rmap, dirty bitmap, lpage_info).
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        int i;

        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                kvm_destroy_dirty_bitmap(free);


        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
                        vfree(free->lpage_info[i]);
                        free->lpage_info[i] = NULL;
                }
        }

        free->npages = 0;
        free->rmap = NULL;
}
531
532 void kvm_free_physmem(struct kvm *kvm)
533 {
534         int i;
535         struct kvm_memslots *slots = kvm->memslots;
536
537         for (i = 0; i < slots->nmemslots; ++i)
538                 kvm_free_physmem_slot(&slots->memslots[i], NULL);
539
540         kfree(kvm->memslots);
541 }
542
/*
 * Final VM teardown, called when the last reference is dropped.  The
 * ordering matters: sync events, unlink from vm_list, tear down irq
 * routing and io buses, drop shadow pages, then free arch state and
 * memory, and finally release hardware and the pinned mm.
 */
static void kvm_destroy_vm(struct kvm *kvm)
{
        int i;
        struct mm_struct *mm = kvm->mm;

        kvm_arch_sync_events(kvm);
        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++)
                kvm_io_bus_destroy(kvm->buses[i]);
        kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        /* Unregistering the notifier also flushes shadow via ->release. */
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
        kvm_arch_flush_shadow(kvm);
#endif
        kvm_arch_destroy_vm(kvm);
        kvm_free_physmem(kvm);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
        hardware_disable_all();
        /* Balances the atomic_inc of mm_count in kvm_create_vm(). */
        mmdrop(mm);
}
568
/* Take a reference on @kvm; release with kvm_put_kvm(). */
void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);
574
/* Drop a reference on @kvm; the last put destroys the VM. */
void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
581
582
/*
 * File release hook for the VM fd: shut down irqfds, then drop the fd's
 * reference (which may be the last one).
 */
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_irqfd_release(kvm);

        kvm_put_kvm(kvm);
        return 0;
}
592
593 #ifndef CONFIG_S390
594 /*
595  * Allocation size is twice as large as the actual dirty bitmap size.
596  * This makes it possible to do double buffering: see x86's
597  * kvm_vm_ioctl_get_dirty_log().
598  */
/*
 * Allocate the (doubled, see comment above) dirty bitmap for a slot.
 * Large bitmaps come from vmalloc, small ones from kmalloc; the free
 * side in kvm_destroy_dirty_bitmap() uses the same size threshold.
 */
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);

        if (dirty_bytes > PAGE_SIZE)
                memslot->dirty_bitmap = vzalloc(dirty_bytes);
        else
                memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);

        if (!memslot->dirty_bitmap)
                return -ENOMEM;

        /* Remember the allocation base; dirty_bitmap may be swapped later. */
        memslot->dirty_bitmap_head = memslot->dirty_bitmap;
        return 0;
}
614 #endif /* !CONFIG_S390 */
615
616 /*
617  * Allocate some memory and give it an address in the guest physical address
618  * space.
619  *
620  * Discontiguous memory is allowed, mostly for framebuffers.
621  *
622  * Must be called holding mmap_sem for write.
623  */
/*
 * Create, modify or delete a memory slot.  Caller holds slots_lock
 * (see kvm_set_memory_region).  Readers access kvm->memslots under
 * SRCU; updates are published with rcu_assign_pointer followed by
 * synchronize_srcu_expedited before the old copy is freed.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;
        struct kvm_memslots *slots, *old_memslots;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        /* Reject wrap-around of guest_phys_addr + memory_size. */
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        r = -EINVAL;
        if (npages > KVM_MEM_MAX_NR_PAGES)
                goto out;

        /* npages == 0 means deletion; dirty logging is meaningless then. */
        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.id = mem->slot;
        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots->memslots[i];

                if (s == memslot || !s->npages)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vzalloc(npages * sizeof(*new.rmap));

                if (!new.rmap)
                        goto out_free;

                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }
        if (!npages)
                goto skip_lpage;

        /* Per-level large-page bookkeeping (one array per hugepage size). */
        for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
                unsigned long ugfn;
                unsigned long j;
                int lpages;
                int level = i + 2;

                /* Avoid unused variable warning if no large pages */
                (void)level;

                if (new.lpage_info[i])
                        continue;

                lpages = 1 + ((base_gfn + npages - 1)
                             >> KVM_HPAGE_GFN_SHIFT(level));
                lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);

                new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));

                if (!new.lpage_info[i])
                        goto out_free;

                /* Partial hugepages at either end can never be mapped large. */
                if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][0].write_count = 1;
                if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
                        new.lpage_info[i][lpages - 1].write_count = 1;
                ugfn = new.userspace_addr >> PAGE_SHIFT;
                /*
                 * If the gfn and userspace address are not aligned wrt each
                 * other, or if explicitly asked to, disable large page
                 * support for this slot
                 */
                if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
                    !largepages_enabled)
                        for (j = 0; j < lpages; ++j)
                                new.lpage_info[i][j].write_count = 1;
        }

skip_lpage:

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                if (kvm_create_dirty_bitmap(&new) < 0)
                        goto out_free;
                /* destroy any largepage mappings for dirty tracking */
        }
#else  /* not defined CONFIG_S390 */
        new.user_alloc = user_alloc;
        if (user_alloc)
                new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */

        /*
         * Deletion: first publish a copy with the slot marked INVALID so
         * no new shadow pages can reference it, then flush the old ones.
         */
        if (!npages) {
                r = -ENOMEM;
                slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
                if (!slots)
                        goto out_free;
                memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
                if (mem->slot >= slots->nmemslots)
                        slots->nmemslots = mem->slot + 1;
                slots->generation++;
                slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;

                old_memslots = kvm->memslots;
                rcu_assign_pointer(kvm->memslots, slots);
                synchronize_srcu_expedited(&kvm->srcu);
                /* From this point no new shadow pages pointing to a deleted
                 * memslot will be created.
                 *
                 * validation of sp->gfn happens in:
                 *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
                 *      - kvm_is_visible_gfn (mmu_check_roots)
                 */
                kvm_arch_flush_shadow(kvm);
                kfree(old_memslots);
        }

        r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
        if (r)
                goto out_free;

        /* map the pages in iommu page table */
        if (npages) {
                r = kvm_iommu_map_pages(kvm, &new);
                if (r)
                        goto out_free;
        }

        /* Publish the final memslots copy containing the new slot. */
        r = -ENOMEM;
        slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
        if (!slots)
                goto out_free;
        memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
        if (mem->slot >= slots->nmemslots)
                slots->nmemslots = mem->slot + 1;
        slots->generation++;

        /* actual memory is freed via old in kvm_free_physmem_slot below */
        if (!npages) {
                new.rmap = NULL;
                new.dirty_bitmap = NULL;
                for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
                        new.lpage_info[i] = NULL;
        }

        slots->memslots[mem->slot] = new;
        old_memslots = kvm->memslots;
        rcu_assign_pointer(kvm->memslots, slots);
        synchronize_srcu_expedited(&kvm->srcu);

        kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);

        kvm_free_physmem_slot(&old, &new);
        kfree(old_memslots);

        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;

}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
827
828 int kvm_set_memory_region(struct kvm *kvm,
829                           struct kvm_userspace_memory_region *mem,
830                           int user_alloc)
831 {
832         int r;
833
834         mutex_lock(&kvm->slots_lock);
835         r = __kvm_set_memory_region(kvm, mem, user_alloc);
836         mutex_unlock(&kvm->slots_lock);
837         return r;
838 }
839 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
840
841 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
842                                    struct
843                                    kvm_userspace_memory_region *mem,
844                                    int user_alloc)
845 {
846         if (mem->slot >= KVM_MEMORY_SLOTS)
847                 return -EINVAL;
848         return kvm_set_memory_region(kvm, mem, user_alloc);
849 }
850
/*
 * Copy a slot's dirty bitmap to userspace and report (via *is_dirty)
 * whether any bit was set.  Returns 0, or -EINVAL for a bad slot,
 * -ENOENT if dirty logging is off, -EFAULT on copy failure.
 */
int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        unsigned long n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = kvm_dirty_bitmap_bytes(memslot);

        /* Stop scanning as soon as one non-zero word is found. */
        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}
884
/* Globally disable large-page mappings for all future slot setups. */
void kvm_disable_largepages(void)
{
        largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);
890
891 int is_error_page(struct page *page)
892 {
893         return page == bad_page || page == hwpoison_page || page == fault_page;
894 }
895 EXPORT_SYMBOL_GPL(is_error_page);
896
897 int is_error_pfn(pfn_t pfn)
898 {
899         return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
900 }
901 EXPORT_SYMBOL_GPL(is_error_pfn);
902
903 int is_hwpoison_pfn(pfn_t pfn)
904 {
905         return pfn == hwpoison_pfn;
906 }
907 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
908
909 int is_fault_pfn(pfn_t pfn)
910 {
911         return pfn == fault_pfn;
912 }
913 EXPORT_SYMBOL_GPL(is_fault_pfn);
914
/* Sentinel host virtual address used to signal translation failure. */
static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}
919
/* True if @addr is the bad_hva() sentinel. */
int kvm_is_error_hva(unsigned long addr)
{
        unsigned long sentinel = bad_hva();

        return addr == sentinel;
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);
925
926 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
927                                                 gfn_t gfn)
928 {
929         int i;
930
931         for (i = 0; i < slots->nmemslots; ++i) {
932                 struct kvm_memory_slot *memslot = &slots->memslots[i];
933
934                 if (gfn >= memslot->base_gfn
935                     && gfn < memslot->base_gfn + memslot->npages)
936                         return memslot;
937         }
938         return NULL;
939 }
940
/*
 * Find the slot covering @gfn in the VM's current memslots copy (the
 * pointer published via rcu_assign_pointer in __kvm_set_memory_region).
 */
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
946
947 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
948 {
949         int i;
950         struct kvm_memslots *slots = kvm_memslots(kvm);
951
952         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
953                 struct kvm_memory_slot *memslot = &slots->memslots[i];
954
955                 if (memslot->flags & KVM_MEMSLOT_INVALID)
956                         continue;
957
958                 if (gfn >= memslot->base_gfn
959                     && gfn < memslot->base_gfn + memslot->npages)
960                         return 1;
961         }
962         return 0;
963 }
964 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
965
/*
 * Return the size, in bytes, of the host page backing guest frame @gfn.
 * Falls back to PAGE_SIZE when the gfn has no valid hva or no VMA is
 * found at the translated address.
 */
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
	struct vm_area_struct *vma;
	unsigned long addr, size;

	size = PAGE_SIZE;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return PAGE_SIZE;

	/* mmap_sem (read) protects the VMA lookup and pagesize query. */
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	return size;
}
989
/*
 * Return the index of the memslot containing @gfn.
 *
 * NOTE(review): when no slot matches, this returns the index of the last
 * slot scanned, and when nmemslots == 0 the subtraction operates on a
 * NULL pointer -- callers are presumably expected to pass only gfns
 * backed by a valid slot; confirm against call sites.
 */
int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	int i;
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot = NULL;

	for (i = 0; i < slots->nmemslots; ++i) {
		memslot = &slots->memslots[i];

		if (gfn >= memslot->base_gfn
		    && gfn < memslot->base_gfn + memslot->npages)
			break;
	}

	return memslot - slots->memslots;
}
1006
1007 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
1008                                      gfn_t *nr_pages)
1009 {
1010         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1011                 return bad_hva();
1012
1013         if (nr_pages)
1014                 *nr_pages = slot->npages - (gfn - slot->base_gfn);
1015
1016         return gfn_to_hva_memslot(slot, gfn);
1017 }
1018
/* Translate @gfn to an hva; returns bad_hva() when no valid slot covers it. */
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
1024
/* Take a reference on fault_page and return the fault sentinel pfn. */
static pfn_t get_fault_pfn(void)
{
	get_page(fault_page);
	return fault_pfn;
}
1030
/*
 * Translate a host virtual address to a host pfn.
 *
 * @atomic:      caller cannot sleep; on failure return the fault sentinel
 *               instead of faulting the page in.
 * @async:       when non-NULL the caller tolerates an asynchronous fault;
 *               *async is set when the page could be faulted in later.
 * @write_fault: require a writable mapping.
 * @writable:    when non-NULL, reports whether the returned pfn was
 *               obtained with write access.  Must be non-NULL whenever
 *               write_fault is false (enforced by BUG_ON below).
 */
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			bool *async, bool write_fault, bool *writable)
{
	struct page *page[1];
	int npages = 0;
	pfn_t pfn;

	/* we can do it either atomically or asynchronously, not both */
	BUG_ON(atomic && async);

	BUG_ON(!write_fault && !writable);

	if (writable)
		*writable = true;

	/* Fast path: never sleeps, only succeeds for present pages. */
	if (atomic || async)
		npages = __get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1) && !atomic) {
		/* Slow path: may fault the page in and sleep. */
		might_sleep();

		if (writable)
			*writable = write_fault;

		npages = get_user_pages_fast(addr, 1, write_fault, page);

		/* map read fault as writable if possible */
		if (unlikely(!write_fault) && npages == 1) {
			struct page *wpage[1];

			npages = __get_user_pages_fast(addr, 1, 1, wpage);
			if (npages == 1) {
				/* writable != NULL here per BUG_ON above */
				*writable = true;
				put_page(page[0]);
				page[0] = wpage[0];
			}
			/* keep the read-only page if the retry failed */
			npages = 1;
		}
	}

	if (unlikely(npages != 1)) {
		struct vm_area_struct *vma;

		if (atomic)
			return get_fault_pfn();

		down_read(&current->mm->mmap_sem);
		if (is_hwpoison_address(addr)) {
			up_read(&current->mm->mmap_sem);
			get_page(hwpoison_page);
			return page_to_pfn(hwpoison_page);
		}

		vma = find_vma_intersection(current->mm, addr, addr+1);

		if (vma == NULL)
			pfn = get_fault_pfn();
		else if ((vma->vm_flags & VM_PFNMAP)) {
			/* Raw pfn mapping: derive the pfn arithmetically. */
			pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
				vma->vm_pgoff;
			BUG_ON(!kvm_is_mmio_pfn(pfn));
		} else {
			/* Page absent: let the caller retry asynchronously. */
			if (async && (vma->vm_flags & VM_WRITE))
				*async = true;
			pfn = get_fault_pfn();
		}
		up_read(&current->mm->mmap_sem);
	} else
		pfn = page_to_pfn(page[0]);

	return pfn;
}
1103
/* Atomic (non-sleeping) hva->pfn translation requiring write access. */
pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
{
	return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
1109
/*
 * Translate @gfn to a host pfn.  On an invalid hva a reference to
 * bad_page is taken and its pfn returned; otherwise translation is
 * delegated to hva_to_pfn() (see its header for the flag semantics).
 */
static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
			  bool write_fault, bool *writable)
{
	unsigned long addr;

	if (async)
		*async = false;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr)) {
		get_page(bad_page);
		return page_to_pfn(bad_page);
	}

	return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
}
1126
/* Atomic gfn->pfn translation requiring write access. */
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1132
/* gfn->pfn translation that may report an async-faultable page via *async. */
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1139
/* Sleeping gfn->pfn translation requiring write access. */
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
1145
/* gfn->pfn translation with explicit write intent; *writable reports access. */
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable)
{
	return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1152
/*
 * gfn->pfn translation within a known @slot, bypassing the slot lookup.
 * Note: unlike __gfn_to_pfn(), the hva is not validated here.
 */
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
			 struct kvm_memory_slot *slot, gfn_t gfn)
{
	unsigned long addr = gfn_to_hva_memslot(slot, gfn);
	return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
}
1159
1160 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1161                                                                   int nr_pages)
1162 {
1163         unsigned long addr;
1164         gfn_t entry;
1165
1166         addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1167         if (kvm_is_error_hva(addr))
1168                 return -1;
1169
1170         if (entry < nr_pages)
1171                 return 0;
1172
1173         return __get_user_pages_fast(addr, nr_pages, 1, pages);
1174 }
1175 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1176
1177 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1178 {
1179         pfn_t pfn;
1180
1181         pfn = gfn_to_pfn(kvm, gfn);
1182         if (!kvm_is_mmio_pfn(pfn))
1183                 return pfn_to_page(pfn);
1184
1185         WARN_ON(kvm_is_mmio_pfn(pfn));
1186
1187         get_page(bad_page);
1188         return bad_page;
1189 }
1190
1191 EXPORT_SYMBOL_GPL(gfn_to_page);
1192
/* Drop a reference on @page without marking it dirty. */
void kvm_release_page_clean(struct page *page)
{
	kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1198
/* Drop a reference on @pfn's page; MMIO pfns have no page and are ignored. */
void kvm_release_pfn_clean(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1205
/* Mark @page dirty, then drop a reference on it. */
void kvm_release_page_dirty(struct page *page)
{
	kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1211
/* Mark @pfn's page dirty, then drop a reference on it. */
void kvm_release_pfn_dirty(pfn_t pfn)
{
	kvm_set_pfn_dirty(pfn);
	kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1218
/* Mark @page dirty (no reference count change). */
void kvm_set_page_dirty(struct page *page)
{
	kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1224
1225 void kvm_set_pfn_dirty(pfn_t pfn)
1226 {
1227         if (!kvm_is_mmio_pfn(pfn)) {
1228                 struct page *page = pfn_to_page(pfn);
1229                 if (!PageReserved(page))
1230                         SetPageDirty(page);
1231         }
1232 }
1233 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1234
/* Record an access to @pfn's page for page reclaim; MMIO pfns are ignored. */
void kvm_set_pfn_accessed(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1241
/* Take a reference on @pfn's page; MMIO pfns have no page and are ignored. */
void kvm_get_pfn(pfn_t pfn)
{
	if (!kvm_is_mmio_pfn(pfn))
		get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
1248
1249 static int next_segment(unsigned long len, int offset)
1250 {
1251         if (len > PAGE_SIZE - offset)
1252                 return PAGE_SIZE - offset;
1253         else
1254                 return len;
1255 }
1256
1257 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1258                         int len)
1259 {
1260         int r;
1261         unsigned long addr;
1262
1263         addr = gfn_to_hva(kvm, gfn);
1264         if (kvm_is_error_hva(addr))
1265                 return -EFAULT;
1266         r = copy_from_user(data, (void __user *)addr + offset, len);
1267         if (r)
1268                 return -EFAULT;
1269         return 0;
1270 }
1271 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1272
1273 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1274 {
1275         gfn_t gfn = gpa >> PAGE_SHIFT;
1276         int seg;
1277         int offset = offset_in_page(gpa);
1278         int ret;
1279
1280         while ((seg = next_segment(len, offset)) != 0) {
1281                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1282                 if (ret < 0)
1283                         return ret;
1284                 offset = 0;
1285                 len -= seg;
1286                 data += seg;
1287                 ++gfn;
1288         }
1289         return 0;
1290 }
1291 EXPORT_SYMBOL_GPL(kvm_read_guest);
1292
/*
 * Atomic (non-faulting) read of @len bytes of guest memory at @gpa into
 * @data.  Pagefaults are disabled around the copy, so absent pages make
 * the copy fail with -EFAULT rather than sleep.  The caller must not
 * cross a page boundary.
 */
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len)
{
	int r;
	unsigned long addr;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int offset = offset_in_page(gpa);

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return -EFAULT;
	pagefault_disable();
	r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
	pagefault_enable();
	if (r)
		return -EFAULT;
	return 0;
}
/* NOTE(review): exported non-GPL, unlike the neighbouring accessors -- confirm intent. */
EXPORT_SYMBOL(kvm_read_guest_atomic);
1312
1313 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1314                          int offset, int len)
1315 {
1316         int r;
1317         unsigned long addr;
1318
1319         addr = gfn_to_hva(kvm, gfn);
1320         if (kvm_is_error_hva(addr))
1321                 return -EFAULT;
1322         r = copy_to_user((void __user *)addr + offset, data, len);
1323         if (r)
1324                 return -EFAULT;
1325         mark_page_dirty(kvm, gfn);
1326         return 0;
1327 }
1328 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1329
/*
 * Copy @len bytes from @data into guest memory starting at @gpa,
 * splitting the copy at page boundaries; each written frame is marked
 * dirty by kvm_write_guest_page().  Returns 0 on success or a negative
 * errno from the per-page copy.
 *
 * NOTE(review): not exported, unlike kvm_read_guest -- confirm whether
 * modules are expected to need it.
 */
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	int seg;
	int offset = offset_in_page(gpa);
	int ret;

	while ((seg = next_segment(len, offset)) != 0) {
		ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
		if (ret < 0)
			return ret;
		offset = 0;
		len -= seg;
		data += seg;
		++gfn;
	}
	return 0;
}
1349
1350 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1351                               gpa_t gpa)
1352 {
1353         struct kvm_memslots *slots = kvm_memslots(kvm);
1354         int offset = offset_in_page(gpa);
1355         gfn_t gfn = gpa >> PAGE_SHIFT;
1356
1357         ghc->gpa = gpa;
1358         ghc->generation = slots->generation;
1359         ghc->memslot = __gfn_to_memslot(slots, gfn);
1360         ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1361         if (!kvm_is_error_hva(ghc->hva))
1362                 ghc->hva += offset;
1363         else
1364                 return -EFAULT;
1365
1366         return 0;
1367 }
1368 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1369
/*
 * Write @len bytes to guest memory through the translation cached in
 * @ghc, re-initializing the cache first if the memslot generation has
 * changed since kvm_gfn_to_hva_cache_init().
 *
 * NOTE(review): only the frame at ghc->gpa is marked dirty below; a
 * write whose length spans into following pages would leave those pages
 * unmarked in the dirty log -- presumably callers keep writes within
 * one page; confirm.
 */
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len)
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	int r;

	if (slots->generation != ghc->generation)
		kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);

	/* Covers both a stale cache that failed to refresh and a bad gpa. */
	if (kvm_is_error_hva(ghc->hva))
		return -EFAULT;

	r = copy_to_user((void __user *)ghc->hva, data, len);
	if (r)
		return -EFAULT;
	mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
1390
/* Zero @len bytes of guest frame @gfn starting at @offset. */
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
	return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
				    offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1397
1398 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1399 {
1400         gfn_t gfn = gpa >> PAGE_SHIFT;
1401         int seg;
1402         int offset = offset_in_page(gpa);
1403         int ret;
1404
1405         while ((seg = next_segment(len, offset)) != 0) {
1406                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1407                 if (ret < 0)
1408                         return ret;
1409                 offset = 0;
1410                 len -= seg;
1411                 ++gfn;
1412         }
1413         return 0;
1414 }
1415 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1416
/*
 * Set the dirty-log bit for @gfn in @memslot's bitmap.  A NULL slot or a
 * slot without dirty logging enabled (no bitmap) is a no-op.
 */
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn)
{
	if (memslot && memslot->dirty_bitmap) {
		/* Bitmap is indexed relative to the slot base. */
		unsigned long rel_gfn = gfn - memslot->base_gfn;

		generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
	}
}
1426
/* Look up @gfn's memslot and set its dirty-log bit (no-op if unlogged). */
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	memslot = gfn_to_memslot(kvm, gfn);
	mark_page_dirty_in_slot(kvm, memslot, gfn);
}
1434
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 *
 * Sleep on the vcpu wait queue until the vcpu becomes runnable, a timer
 * expires, or a signal is pending.  prepare_to_wait() is called before
 * each condition check so a concurrent wakeup is never lost.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

		if (kvm_arch_vcpu_runnable(vcpu)) {
			/* Tell the arch code to leave the halted state. */
			kvm_make_request(KVM_REQ_UNHALT, vcpu);
			break;
		}
		if (kvm_cpu_has_pending_timer(vcpu))
			break;
		if (signal_pending(current))
			break;

		schedule();
	}

	finish_wait(&vcpu->wq, &wait);
}
1459
/* Yield the CPU if the scheduler has flagged this task for rescheduling. */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (need_resched())
		cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
1467
/*
 * Called when the guest is detected spinning on a lock: back off briefly
 * so the (possibly preempted) lock holder can run.
 */
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
{
	ktime_t expires;
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

	/* Sleep for 100 us, and hope lock-holder got scheduled */
	expires = ktime_add_ns(ktime_get(), 100000UL);
	schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);

	finish_wait(&vcpu->wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1482
/*
 * Page-fault handler for the vcpu fd mmap: maps well-known page offsets
 * to the kernel pages shared with userspace (the kvm_run structure,
 * the x86 PIO data page, and the coalesced MMIO ring).
 */
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvm_vcpu *vcpu = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff == 0)
		page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
	else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
		page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
	else
		return VM_FAULT_SIGBUS;
	/* The fault machinery drops this reference when the page is unmapped. */
	get_page(page);
	vmf->page = page;
	return 0;
}
1504
/* VMA operations for mmap of a vcpu fd; faults resolved by kvm_vcpu_fault(). */
static const struct vm_operations_struct kvm_vcpu_vm_ops = {
	.fault = kvm_vcpu_fault,
};
1508
/* mmap handler for the vcpu fd: pages are populated lazily via faults. */
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
1514
/*
 * Release handler for the vcpu fd: drops the VM reference taken in
 * kvm_vm_ioctl_create_vcpu() before the fd was installed.
 */
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
	struct kvm_vcpu *vcpu = filp->private_data;

	kvm_put_kvm(vcpu->kvm);
	return 0;
}
1522
/* File operations backing the anonymous vcpu fd. */
static struct file_operations kvm_vcpu_fops = {
	.release        = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl   = kvm_vcpu_ioctl,
	.mmap           = kvm_vcpu_mmap,
	.llseek		= noop_llseek,
};
1530
/*
 * Allocates an inode for the vcpu.
 *
 * Returns a new fd referring to @vcpu (or a negative errno).  The fd
 * itself holds no VM reference; the caller takes one (kvm_get_kvm)
 * before calling, matching the kvm_put_kvm in kvm_vcpu_release().
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
	return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
1538
1539 /*
1540  * Creates some virtual cpus.  Good luck creating more than one.
1541  */
1542 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1543 {
1544         int r;
1545         struct kvm_vcpu *vcpu, *v;
1546
1547         vcpu = kvm_arch_vcpu_create(kvm, id);
1548         if (IS_ERR(vcpu))
1549                 return PTR_ERR(vcpu);
1550
1551         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1552
1553         r = kvm_arch_vcpu_setup(vcpu);
1554         if (r)
1555                 return r;
1556
1557         mutex_lock(&kvm->lock);
1558         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1559                 r = -EINVAL;
1560                 goto vcpu_destroy;
1561         }
1562
1563         kvm_for_each_vcpu(r, v, kvm)
1564                 if (v->vcpu_id == id) {
1565                         r = -EEXIST;
1566                         goto vcpu_destroy;
1567                 }
1568
1569         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1570
1571         /* Now it's all set up, let userspace reach it */
1572         kvm_get_kvm(kvm);
1573         r = create_vcpu_fd(vcpu);
1574         if (r < 0) {
1575                 kvm_put_kvm(kvm);
1576                 goto vcpu_destroy;
1577         }
1578
1579         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1580         smp_wmb();
1581         atomic_inc(&kvm->online_vcpus);
1582
1583 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1584         if (kvm->bsp_vcpu_id == id)
1585                 kvm->bsp_vcpu = vcpu;
1586 #endif
1587         mutex_unlock(&kvm->lock);
1588         return r;
1589
1590 vcpu_destroy:
1591         mutex_unlock(&kvm->lock);
1592         kvm_arch_vcpu_destroy(vcpu);
1593         return r;
1594 }
1595
1596 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1597 {
1598         if (sigset) {
1599                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1600                 vcpu->sigset_active = 1;
1601                 vcpu->sigset = *sigset;
1602         } else
1603                 vcpu->sigset_active = 0;
1604         return 0;
1605 }
1606
/*
 * Dispatcher for the per-vcpu ioctls (KVM_RUN, register and state
 * accessors, signal mask, FPU state).  The vcpu is loaded for the
 * duration of the call; unknown ioctls fall through to the arch code.
 * The kvm_sregs/fpu buffers are freed on every exit path at "out:".
 */
static long kvm_vcpu_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;
	struct kvm_fpu *fpu = NULL;
	struct kvm_sregs *kvm_sregs = NULL;

	/* Only the process that created the VM may issue vcpu ioctls. */
	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/*
	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
	 * so vcpu_load() would break it.
	 */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif


	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_RUN:
		r = -EINVAL;
		if (arg)
			goto out;
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
		break;
	case KVM_GET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
		if (r)
			goto out_free1;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
			goto out_free1;
		r = 0;
out_free1:
		kfree(kvm_regs);
		break;
	}
	case KVM_SET_REGS: {
		struct kvm_regs *kvm_regs;

		r = -ENOMEM;
		kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
		if (!kvm_regs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
			goto out_free2;
		r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
		if (r)
			goto out_free2;
		r = 0;
out_free2:
		kfree(kvm_regs);
		break;
	}
	case KVM_GET_SREGS: {
		/* Freed at "out:" below. */
		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SREGS: {
		/* Freed at "out:" below. */
		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
		r = -ENOMEM;
		if (!kvm_sregs)
			goto out;
		r = -EFAULT;
		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_GET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &mp_state, sizeof mp_state))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_MP_STATE: {
		struct kvm_mp_state mp_state;

		r = -EFAULT;
		if (copy_from_user(&mp_state, argp, sizeof mp_state))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_TRANSLATE: {
		struct kvm_translation tr;

		r = -EFAULT;
		if (copy_from_user(&tr, argp, sizeof tr))
			goto out;
		r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &tr, sizeof tr))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_GUEST_DEBUG: {
		struct kvm_guest_debug dbg;

		r = -EFAULT;
		if (copy_from_user(&dbg, argp, sizeof dbg))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_SIGNAL_MASK: {
		struct kvm_signal_mask __user *sigmask_arg = argp;
		struct kvm_signal_mask kvm_sigmask;
		sigset_t sigset, *p;

		/* NULL argp clears the mask. */
		p = NULL;
		if (argp) {
			r = -EFAULT;
			if (copy_from_user(&kvm_sigmask, argp,
					   sizeof kvm_sigmask))
				goto out;
			r = -EINVAL;
			if (kvm_sigmask.len != sizeof sigset)
				goto out;
			r = -EFAULT;
			if (copy_from_user(&sigset, sigmask_arg->sigset,
					   sizeof sigset))
				goto out;
			p = &sigset;
		}
		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
		break;
	}
	case KVM_GET_FPU: {
		/* Freed at "out:" below. */
		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_FPU: {
		/* Freed at "out:" below. */
		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
		r = -ENOMEM;
		if (!fpu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
			goto out;
		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
	}
out:
	vcpu_put(vcpu);
	kfree(fpu);
	kfree(kvm_sregs);
	return r;
}
1813
/*
 * Dispatcher for the per-VM ioctls (vcpu creation, memory regions,
 * dirty log, coalesced MMIO, irqfd/ioeventfd).  Unknown ioctls fall
 * through to the arch code and then to the assigned-device handler.
 */
static long kvm_vm_ioctl(struct file *filp,
			   unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	/* Only the process that created the VM may issue VM ioctls. */
	if (kvm->mm != current->mm)
		return -EIO;
	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_USER_MEMORY_REGION: {
		struct kvm_userspace_memory_region kvm_userspace_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_userspace_mem, argp,
						sizeof kvm_userspace_mem))
			goto out;

		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
		if (r)
			goto out;
		break;
	}
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	case KVM_REGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
	case KVM_UNREGISTER_COALESCED_MMIO: {
		struct kvm_coalesced_mmio_zone zone;
		r = -EFAULT;
		if (copy_from_user(&zone, argp, sizeof zone))
			goto out;
		r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
		if (r)
			goto out;
		r = 0;
		break;
	}
#endif
	case KVM_IRQFD: {
		struct kvm_irqfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
		break;
	}
	case KVM_IOEVENTFD: {
		struct kvm_ioeventfd data;

		r = -EFAULT;
		if (copy_from_user(&data, argp, sizeof data))
			goto out;
		r = kvm_ioeventfd(kvm, &data);
		break;
	}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	case KVM_SET_BOOT_CPU_ID:
		r = 0;
		mutex_lock(&kvm->lock);
		/* The BSP id may only be chosen before any vcpu exists. */
		if (atomic_read(&kvm->online_vcpus) != 0)
			r = -EBUSY;
		else
			kvm->bsp_vcpu_id = arg;
		mutex_unlock(&kvm->lock);
		break;
#endif
	default:
		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
		if (r == -ENOTTY)
			r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
	}
out:
	return r;
}
1914
1915 #ifdef CONFIG_COMPAT
/*
 * 32-bit userspace layout of struct kvm_dirty_log.  dirty_bitmap is a
 * user pointer, so its width differs between 32-bit userspace and a
 * 64-bit kernel; the union with a __u64 keeps this struct the same
 * overall size as the native one.
 */
struct compat_kvm_dirty_log {
	__u32 slot;
	__u32 padding1;
	union {
		compat_uptr_t dirty_bitmap; /* one bit per page */
		__u64 padding2;
	};
};
1924
1925 static long kvm_vm_compat_ioctl(struct file *filp,
1926                            unsigned int ioctl, unsigned long arg)
1927 {
1928         struct kvm *kvm = filp->private_data;
1929         int r;
1930
1931         if (kvm->mm != current->mm)
1932                 return -EIO;
1933         switch (ioctl) {
1934         case KVM_GET_DIRTY_LOG: {
1935                 struct compat_kvm_dirty_log compat_log;
1936                 struct kvm_dirty_log log;
1937
1938                 r = -EFAULT;
1939                 if (copy_from_user(&compat_log, (void __user *)arg,
1940                                    sizeof(compat_log)))
1941                         goto out;
1942                 log.slot         = compat_log.slot;
1943                 log.padding1     = compat_log.padding1;
1944                 log.padding2     = compat_log.padding2;
1945                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
1946
1947                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1948                 if (r)
1949                         goto out;
1950                 break;
1951         }
1952         default:
1953                 r = kvm_vm_ioctl(filp, ioctl, arg);
1954         }
1955
1956 out:
1957         return r;
1958 }
1959 #endif
1960
1961 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1962 {
1963         struct page *page[1];
1964         unsigned long addr;
1965         int npages;
1966         gfn_t gfn = vmf->pgoff;
1967         struct kvm *kvm = vma->vm_file->private_data;
1968
1969         addr = gfn_to_hva(kvm, gfn);
1970         if (kvm_is_error_hva(addr))
1971                 return VM_FAULT_SIGBUS;
1972
1973         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1974                                 NULL);
1975         if (unlikely(npages != 1))
1976                 return VM_FAULT_SIGBUS;
1977
1978         vmf->page = page[0];
1979         return 0;
1980 }
1981
/* VM-fd mmap support: page faults are resolved by kvm_vm_fault(). */
static const struct vm_operations_struct kvm_vm_vm_ops = {
	.fault = kvm_vm_fault,
};
1985
/*
 * mmap() handler for VM fds: install kvm_vm_vm_ops so pages are
 * populated lazily through the fault handler.
 */
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
1991
/*
 * File operations for VM fds returned by KVM_CREATE_VM.
 * Not const: .owner is patched at runtime in kvm_init().
 */
static struct file_operations kvm_vm_fops = {
	.release        = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = kvm_vm_compat_ioctl,
#endif
	.mmap           = kvm_vm_mmap,
	.llseek         = noop_llseek,
};
2001
2002 static int kvm_dev_ioctl_create_vm(void)
2003 {
2004         int r;
2005         struct kvm *kvm;
2006
2007         kvm = kvm_create_vm();
2008         if (IS_ERR(kvm))
2009                 return PTR_ERR(kvm);
2010 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2011         r = kvm_coalesced_mmio_init(kvm);
2012         if (r < 0) {
2013                 kvm_put_kvm(kvm);
2014                 return r;
2015         }
2016 #endif
2017         r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
2018         if (r < 0)
2019                 kvm_put_kvm(kvm);
2020
2021         return r;
2022 }
2023
2024 static long kvm_dev_ioctl_check_extension_generic(long arg)
2025 {
2026         switch (arg) {
2027         case KVM_CAP_USER_MEMORY:
2028         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2029         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2030 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2031         case KVM_CAP_SET_BOOT_CPU_ID:
2032 #endif
2033         case KVM_CAP_INTERNAL_ERROR_DATA:
2034                 return 1;
2035 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2036         case KVM_CAP_IRQ_ROUTING:
2037                 return KVM_MAX_IRQ_ROUTES;
2038 #endif
2039         default:
2040                 break;
2041         }
2042         return kvm_dev_ioctl_check_extension(arg);
2043 }
2044
2045 static long kvm_dev_ioctl(struct file *filp,
2046                           unsigned int ioctl, unsigned long arg)
2047 {
2048         long r = -EINVAL;
2049
2050         switch (ioctl) {
2051         case KVM_GET_API_VERSION:
2052                 r = -EINVAL;
2053                 if (arg)
2054                         goto out;
2055                 r = KVM_API_VERSION;
2056                 break;
2057         case KVM_CREATE_VM:
2058                 r = -EINVAL;
2059                 if (arg)
2060                         goto out;
2061                 r = kvm_dev_ioctl_create_vm();
2062                 break;
2063         case KVM_CHECK_EXTENSION:
2064                 r = kvm_dev_ioctl_check_extension_generic(arg);
2065                 break;
2066         case KVM_GET_VCPU_MMAP_SIZE:
2067                 r = -EINVAL;
2068                 if (arg)
2069                         goto out;
2070                 r = PAGE_SIZE;     /* struct kvm_run */
2071 #ifdef CONFIG_X86
2072                 r += PAGE_SIZE;    /* pio data page */
2073 #endif
2074 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2075                 r += PAGE_SIZE;    /* coalesced mmio ring page */
2076 #endif
2077                 break;
2078         case KVM_TRACE_ENABLE:
2079         case KVM_TRACE_PAUSE:
2080         case KVM_TRACE_DISABLE:
2081                 r = -EOPNOTSUPP;
2082                 break;
2083         default:
2084                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
2085         }
2086 out:
2087         return r;
2088 }
2089
/*
 * File operations for /dev/kvm.  The same handler serves native and
 * compat ioctls.  Not const: .owner is patched at runtime in kvm_init().
 */
static struct file_operations kvm_chardev_ops = {
	.unlocked_ioctl = kvm_dev_ioctl,
	.compat_ioctl   = kvm_dev_ioctl,
	.llseek         = noop_llseek,
};
2095
2096 static struct miscdevice kvm_dev = {
2097         KVM_MINOR,
2098         "kvm",
2099         &kvm_chardev_ops,
2100 };
2101
2102 static void hardware_enable_nolock(void *junk)
2103 {
2104         int cpu = raw_smp_processor_id();
2105         int r;
2106
2107         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2108                 return;
2109
2110         cpumask_set_cpu(cpu, cpus_hardware_enabled);
2111
2112         r = kvm_arch_hardware_enable(NULL);
2113
2114         if (r) {
2115                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2116                 atomic_inc(&hardware_enable_failed);
2117                 printk(KERN_INFO "kvm: enabling virtualization on "
2118                                  "CPU%d failed\n", cpu);
2119         }
2120 }
2121
/* Locked wrapper: enable virtualization on this CPU under kvm_lock. */
static void hardware_enable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_enable_nolock(junk);
	spin_unlock(&kvm_lock);
}
2128
/*
 * Disable hardware virtualization on the current CPU.  Runs with
 * preemption off (IPI, hotplug or reboot context).
 */
static void hardware_disable_nolock(void *junk)
{
	int cpu = raw_smp_processor_id();

	/* No-op unless this CPU was previously enabled. */
	if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
		return;
	cpumask_clear_cpu(cpu, cpus_hardware_enabled);
	kvm_arch_hardware_disable(NULL);
}
2138
/* Locked wrapper: disable virtualization on this CPU under kvm_lock. */
static void hardware_disable(void *junk)
{
	spin_lock(&kvm_lock);
	hardware_disable_nolock(junk);
	spin_unlock(&kvm_lock);
}
2145
/*
 * Drop one VM usage reference; when the last VM goes away, turn
 * hardware virtualization off on every CPU.  Caller holds kvm_lock.
 */
static void hardware_disable_all_nolock(void)
{
	BUG_ON(!kvm_usage_count);

	kvm_usage_count--;
	if (!kvm_usage_count)
		on_each_cpu(hardware_disable_nolock, NULL, 1);
}
2154
/* Locked wrapper around hardware_disable_all_nolock(). */
static void hardware_disable_all(void)
{
	spin_lock(&kvm_lock);
	hardware_disable_all_nolock();
	spin_unlock(&kvm_lock);
}
2161
2162 static int hardware_enable_all(void)
2163 {
2164         int r = 0;
2165
2166         spin_lock(&kvm_lock);
2167
2168         kvm_usage_count++;
2169         if (kvm_usage_count == 1) {
2170                 atomic_set(&hardware_enable_failed, 0);
2171                 on_each_cpu(hardware_enable_nolock, NULL, 1);
2172
2173                 if (atomic_read(&hardware_enable_failed)) {
2174                         hardware_disable_all_nolock();
2175                         r = -EBUSY;
2176                 }
2177         }
2178
2179         spin_unlock(&kvm_lock);
2180
2181         return r;
2182 }
2183
2184 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2185                            void *v)
2186 {
2187         int cpu = (long)v;
2188
2189         if (!kvm_usage_count)
2190                 return NOTIFY_OK;
2191
2192         val &= ~CPU_TASKS_FROZEN;
2193         switch (val) {
2194         case CPU_DYING:
2195                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2196                        cpu);
2197                 hardware_disable(NULL);
2198                 break;
2199         case CPU_STARTING:
2200                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2201                        cpu);
2202                 hardware_enable(NULL);
2203                 break;
2204         }
2205         return NOTIFY_OK;
2206 }
2207
2208
/*
 * Called from arch fixup code when a virtualization instruction faults
 * outside of an intentional reboot-time disable; crash loudly so the
 * backtrace points at the offender.
 */
asmlinkage void kvm_spurious_fault(void)
{
	/* Fault while not rebooting.  We want the trace. */
	BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
2215
/* Reboot notifier: force virtualization off on every CPU. */
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
		      void *v)
{
	/*
	 * Some (well, at least mine) BIOSes hang on reboot if
	 * in vmx root mode.
	 *
	 * And Intel TXT required VMX off for all cpu when system shutdown.
	 */
	printk(KERN_INFO "kvm: exiting hardware virtualization\n");
	/* Tell the spurious-fault path that faults are now expected. */
	kvm_rebooting = true;
	on_each_cpu(hardware_disable_nolock, NULL, 1);
	return NOTIFY_OK;
}
2230
/* Registered in kvm_init(); runs kvm_reboot() on system reboot. */
static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
2235
2236 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2237 {
2238         int i;
2239
2240         for (i = 0; i < bus->dev_count; i++) {
2241                 struct kvm_io_device *pos = bus->devs[i];
2242
2243                 kvm_iodevice_destructor(pos);
2244         }
2245         kfree(bus);
2246 }
2247
2248 /* kvm_io_bus_write - called under kvm->slots_lock */
2249 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2250                      int len, const void *val)
2251 {
2252         int i;
2253         struct kvm_io_bus *bus;
2254
2255         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2256         for (i = 0; i < bus->dev_count; i++)
2257                 if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
2258                         return 0;
2259         return -EOPNOTSUPP;
2260 }
2261
2262 /* kvm_io_bus_read - called under kvm->slots_lock */
2263 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2264                     int len, void *val)
2265 {
2266         int i;
2267         struct kvm_io_bus *bus;
2268
2269         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2270         for (i = 0; i < bus->dev_count; i++)
2271                 if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
2272                         return 0;
2273         return -EOPNOTSUPP;
2274 }
2275
2276 /* Caller must hold slots_lock. */
2277 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2278                             struct kvm_io_device *dev)
2279 {
2280         struct kvm_io_bus *new_bus, *bus;
2281
2282         bus = kvm->buses[bus_idx];
2283         if (bus->dev_count > NR_IOBUS_DEVS-1)
2284                 return -ENOSPC;
2285
2286         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2287         if (!new_bus)
2288                 return -ENOMEM;
2289         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2290         new_bus->devs[new_bus->dev_count++] = dev;
2291         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2292         synchronize_srcu_expedited(&kvm->srcu);
2293         kfree(bus);
2294
2295         return 0;
2296 }
2297
2298 /* Caller must hold slots_lock. */
2299 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2300                               struct kvm_io_device *dev)
2301 {
2302         int i, r;
2303         struct kvm_io_bus *new_bus, *bus;
2304
2305         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2306         if (!new_bus)
2307                 return -ENOMEM;
2308
2309         bus = kvm->buses[bus_idx];
2310         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2311
2312         r = -ENOENT;
2313         for (i = 0; i < new_bus->dev_count; i++)
2314                 if (new_bus->devs[i] == dev) {
2315                         r = 0;
2316                         new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
2317                         break;
2318                 }
2319
2320         if (r) {
2321                 kfree(new_bus);
2322                 return r;
2323         }
2324
2325         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2326         synchronize_srcu_expedited(&kvm->srcu);
2327         kfree(bus);
2328         return r;
2329 }
2330
/* Registered in kvm_init(); runs kvm_cpu_hotplug() on CPU transitions. */
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
};
2334
2335 static int vm_stat_get(void *_offset, u64 *val)
2336 {
2337         unsigned offset = (long)_offset;
2338         struct kvm *kvm;
2339
2340         *val = 0;
2341         spin_lock(&kvm_lock);
2342         list_for_each_entry(kvm, &vm_list, vm_list)
2343                 *val += *(u32 *)((void *)kvm + offset);
2344         spin_unlock(&kvm_lock);
2345         return 0;
2346 }
2347
2348 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2349
2350 static int vcpu_stat_get(void *_offset, u64 *val)
2351 {
2352         unsigned offset = (long)_offset;
2353         struct kvm *kvm;
2354         struct kvm_vcpu *vcpu;
2355         int i;
2356
2357         *val = 0;
2358         spin_lock(&kvm_lock);
2359         list_for_each_entry(kvm, &vm_list, vm_list)
2360                 kvm_for_each_vcpu(i, vcpu, kvm)
2361                         *val += *(u32 *)((void *)vcpu + offset);
2362
2363         spin_unlock(&kvm_lock);
2364         return 0;
2365 }
2366
2367 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2368
/* Debugfs file ops selected by statistic kind (per-vcpu vs per-VM). */
static const struct file_operations *stat_fops[] = {
	[KVM_STAT_VCPU] = &vcpu_stat_fops,
	[KVM_STAT_VM]   = &vm_stat_fops,
};
2373
2374 static void kvm_init_debug(void)
2375 {
2376         struct kvm_stats_debugfs_item *p;
2377
2378         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2379         for (p = debugfs_entries; p->name; ++p)
2380                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2381                                                 (void *)(long)p->offset,
2382                                                 stat_fops[p->kind]);
2383 }
2384
2385 static void kvm_exit_debug(void)
2386 {
2387         struct kvm_stats_debugfs_item *p;
2388
2389         for (p = debugfs_entries; p->name; ++p)
2390                 debugfs_remove(p->dentry);
2391         debugfs_remove(kvm_debugfs_dir);
2392 }
2393
/* System suspend: turn virtualization off if any VM is live. */
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	if (kvm_usage_count)
		hardware_disable_nolock(NULL);
	return 0;
}
2400
/* System resume: re-enable virtualization if any VM is live. */
static int kvm_resume(struct sys_device *dev)
{
	if (kvm_usage_count) {
		/* Resume is single-threaded, so the lock must be free. */
		WARN_ON(spin_is_locked(&kvm_lock));
		hardware_enable_nolock(NULL);
	}
	return 0;
}
2409
/* Sysdev hooks so system suspend/resume toggles virtualization. */
static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};

static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
2420
/* Module-lifetime sentinel page/pfn, allocated in kvm_init(). */
struct page *bad_page;
pfn_t bad_pfn;
2423
/* The preempt notifier is embedded in the vcpu; recover its container. */
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
2429
/* Preemption notifier: the vcpu task was just scheduled onto 'cpu'. */
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_load(vcpu, cpu);
}
2436
/* Preemption notifier: the vcpu task is being scheduled away. */
static void kvm_sched_out(struct preempt_notifier *pn,
			  struct task_struct *next)
{
	struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

	kvm_arch_vcpu_put(vcpu);
}
2444
/*
 * kvm_init - module-wide KVM initialization, called by the arch module.
 * @opaque:     arch-private data forwarded to kvm_arch_init()
 * @vcpu_size:  size of the arch's vcpu structure for the slab cache
 * @vcpu_align: required vcpu alignment (0 means natural alignment)
 * @module:     arch module that will own the file operations
 *
 * Returns 0 on success or a negative errno.  On failure, everything
 * set up so far is unwound through the goto chain at the bottom; the
 * labels must stay paired with the setup steps above them.
 */
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
		  struct module *module)
{
	int r;
	int cpu;

	r = kvm_arch_init(opaque);
	if (r)
		goto out_fail;

	/* Module-lifetime sentinel pages (freed in the error path here
	 * and on module unload). */
	bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (bad_page == NULL) {
		r = -ENOMEM;
		goto out;
	}

	bad_pfn = page_to_pfn(bad_page);

	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (hwpoison_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	hwpoison_pfn = page_to_pfn(hwpoison_page);

	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (fault_page == NULL) {
		r = -ENOMEM;
		goto out_free_0;
	}

	fault_pfn = page_to_pfn(fault_page);

	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
		r = -ENOMEM;
		goto out_free_0;
	}

	r = kvm_arch_hardware_setup();
	if (r < 0)
		goto out_free_0a;

	/* Every online CPU must support virtualization; 'r' is written
	 * by the remote call. */
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_arch_check_processor_compat,
				&r, 1);
		if (r < 0)
			goto out_free_1;
	}

	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_2;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_3;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_4;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	if (!vcpu_align)
		vcpu_align = __alignof__(struct kvm_vcpu);
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
					   0, NULL);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_5;
	}

	r = kvm_async_pf_init();
	if (r)
		goto out_free;

	/* Pin the arch module for as long as any of our fds are open. */
	kvm_chardev_ops.owner = module;
	kvm_vm_fops.owner = module;
	kvm_vcpu_fops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk(KERN_ERR "kvm: misc device register failed\n");
		goto out_unreg;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_init_debug();

	return 0;

	/* Unwind in reverse order of the setup steps above. */
out_unreg:
	kvm_async_pf_deinit();
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_5:
	sysdev_unregister(&kvm_sysdev);
out_free_4:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_3:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
	kvm_arch_hardware_unsetup();
out_free_0a:
	free_cpumask_var(cpus_hardware_enabled);
out_free_0:
	/* Guards needed: __free_page() cannot take NULL, and these may
	 * not have been allocated yet. */
	if (fault_page)
		__free_page(fault_page);
	if (hwpoison_page)
		__free_page(hwpoison_page);
	__free_page(bad_page);
out:
	kvm_arch_exit();
out_fail:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
2571
2572 void kvm_exit(void)
2573 {
2574         kvm_exit_debug();
2575         misc_deregister(&kvm_dev);
2576         kmem_cache_destroy(kvm_vcpu_cache);
2577         kvm_async_pf_deinit();
2578         sysdev_unregister(&kvm_sysdev);
2579         sysdev_class_unregister(&kvm_sysdev_class);
2580         unregister_reboot_notifier(&kvm_reboot_notifier);
2581         unregister_cpu_notifier(&kvm_cpu_notifier);
2582         on_each_cpu(hardware_disable_nolock, NULL, 1);
2583         kvm_arch_hardware_unsetup();
2584         kvm_arch_exit();
2585         free_cpumask_var(cpus_hardware_enabled);
2586         __free_page(hwpoison_page);
2587         __free_page(bad_page);
2588 }
2589 EXPORT_SYMBOL_GPL(kvm_exit);