KVM: x86: trace "exit to userspace" event
[linux-2.6.git] / virt / kvm / kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "iodev.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
23 #include <linux/module.h>
24 #include <linux/errno.h>
25 #include <linux/percpu.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46 #include <linux/compat.h>
47 #include <linux/srcu.h>
48 #include <linux/hugetlb.h>
49 #include <linux/slab.h>
50
51 #include <asm/processor.h>
52 #include <asm/io.h>
53 #include <asm/uaccess.h>
54 #include <asm/pgtable.h>
55 #include <asm-generic/bitops/le.h>
56
57 #include "coalesced_mmio.h"
58 #include "async_pf.h"
59
60 #define CREATE_TRACE_POINTS
61 #include <trace/events/kvm.h>
62
63 MODULE_AUTHOR("Qumranet");
64 MODULE_LICENSE("GPL");
65
66 /*
67  * Ordering of locks:
68  *
69  *              kvm->lock --> kvm->slots_lock --> kvm->irq_lock
70  */
71
72 DEFINE_SPINLOCK(kvm_lock);
73 LIST_HEAD(vm_list);
74
75 static cpumask_var_t cpus_hardware_enabled;
76 static int kvm_usage_count = 0;
77 static atomic_t hardware_enable_failed;
78
79 struct kmem_cache *kvm_vcpu_cache;
80 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
81
82 static __read_mostly struct preempt_ops kvm_preempt_ops;
83
84 struct dentry *kvm_debugfs_dir;
85
86 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
87                            unsigned long arg);
88 static int hardware_enable_all(void);
89 static void hardware_disable_all(void);
90
91 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
92
93 static bool kvm_rebooting;
94
95 static bool largepages_enabled = true;
96
97 static struct page *hwpoison_page;
98 static pfn_t hwpoison_pfn;
99
100 static struct page *fault_page;
101 static pfn_t fault_pfn;
102
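/*
 * Editor note (inferred from the code below): a pfn is treated as MMIO if it
 * has no valid struct page backing it, or if its (compound head) page is
 * marked reserved, i.e. memory the mm does not manage as ordinary
 * refcounted pages.
 */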
103 inline int kvm_is_mmio_pfn(pfn_t pfn)
104 {
105         if (pfn_valid(pfn)) {
106                 struct page *page = compound_head(pfn_to_page(pfn));
107                 return PageReserved(page);
108         }
109
110         return true;
111 }
112
113 /*
114  * Switches to specified vcpu, until a matching vcpu_put()
115  */
116 void vcpu_load(struct kvm_vcpu *vcpu)
117 {
118         int cpu;
119
120         mutex_lock(&vcpu->mutex);
121         cpu = get_cpu();
122         preempt_notifier_register(&vcpu->preempt_notifier);
123         kvm_arch_vcpu_load(vcpu, cpu);
124         put_cpu();
125 }
126
127 void vcpu_put(struct kvm_vcpu *vcpu)
128 {
129         preempt_disable();
130         kvm_arch_vcpu_put(vcpu);
131         preempt_notifier_unregister(&vcpu->preempt_notifier);
132         preempt_enable();
133         mutex_unlock(&vcpu->mutex);
134 }
135
136 static void ack_flush(void *_completed)
137 {
138 }
139
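/*
 * Editor note (summarising the function below): set @req on every vcpu of
 * @kvm and send a dummy IPI (ack_flush) to each physical CPU currently
 * running one of those vcpus, forcing a guest exit so the request is
 * noticed.  Returns true if at least one IPI was sent, false if no vcpu
 * needed kicking.
 */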
140 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
141 {
142         int i, cpu, me;
143         cpumask_var_t cpus;
144         bool called = true;
145         struct kvm_vcpu *vcpu;
146
147         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
148
149         raw_spin_lock(&kvm->requests_lock);
150         me = smp_processor_id();
151         kvm_for_each_vcpu(i, vcpu, kvm) {
152                 if (kvm_make_check_request(req, vcpu))
153                         continue;
154                 cpu = vcpu->cpu;
155                 if (cpus != NULL && cpu != -1 && cpu != me)
156                         cpumask_set_cpu(cpu, cpus);
157         }
158         if (unlikely(cpus == NULL))
159                 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
160         else if (!cpumask_empty(cpus))
161                 smp_call_function_many(cpus, ack_flush, NULL, 1);
162         else
163                 called = false;
164         raw_spin_unlock(&kvm->requests_lock);
165         free_cpumask_var(cpus);
166         return called;
167 }
168
169 void kvm_flush_remote_tlbs(struct kvm *kvm)
170 {
171         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
172                 ++kvm->stat.remote_tlb_flush;
173 }
174
175 void kvm_reload_remote_mmus(struct kvm *kvm)
176 {
177         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
178 }
179
180 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
181 {
182         struct page *page;
183         int r;
184
185         mutex_init(&vcpu->mutex);
186         vcpu->cpu = -1;
187         vcpu->kvm = kvm;
188         vcpu->vcpu_id = id;
189         init_waitqueue_head(&vcpu->wq);
190         kvm_async_pf_vcpu_init(vcpu);
191
192         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
193         if (!page) {
194                 r = -ENOMEM;
195                 goto fail;
196         }
197         vcpu->run = page_address(page);
198
199         r = kvm_arch_vcpu_init(vcpu);
200         if (r < 0)
201                 goto fail_free_run;
202         return 0;
203
204 fail_free_run:
205         free_page((unsigned long)vcpu->run);
206 fail:
207         return r;
208 }
209 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
210
211 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
212 {
213         kvm_arch_vcpu_uninit(vcpu);
214         free_page((unsigned long)vcpu->run);
215 }
216 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
217
218 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
219 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
220 {
221         return container_of(mn, struct kvm, mmu_notifier);
222 }
223
224 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
225                                              struct mm_struct *mm,
226                                              unsigned long address)
227 {
228         struct kvm *kvm = mmu_notifier_to_kvm(mn);
229         int need_tlb_flush, idx;
230
231         /*
232          * When ->invalidate_page runs, the linux pte has been zapped
233          * already but the page is still allocated until
234          * ->invalidate_page returns. So if we increase the sequence
235          * here the kvm page fault will notice if the spte can't be
236          * established because the page is going to be freed. If
237          * instead the kvm page fault establishes the spte before
238          * ->invalidate_page runs, kvm_unmap_hva will release it
239          * before returning.
240          *
241          * The sequence increase only needs to be seen at spin_unlock
242          * time, and not at spin_lock time.
243          *
244          * Increasing the sequence after the spin_unlock would be
245          * unsafe because the kvm page fault could then establish the
246          * pte after kvm_unmap_hva returned, without noticing the page
247          * is going to be freed.
248          */
249         idx = srcu_read_lock(&kvm->srcu);
250         spin_lock(&kvm->mmu_lock);
251         kvm->mmu_notifier_seq++;
252         need_tlb_flush = kvm_unmap_hva(kvm, address);
253         spin_unlock(&kvm->mmu_lock);
254         srcu_read_unlock(&kvm->srcu, idx);
255
256         /* we have to flush the TLB before the pages can be freed */
257         if (need_tlb_flush)
258                 kvm_flush_remote_tlbs(kvm);
259
260 }
261
262 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
263                                         struct mm_struct *mm,
264                                         unsigned long address,
265                                         pte_t pte)
266 {
267         struct kvm *kvm = mmu_notifier_to_kvm(mn);
268         int idx;
269
270         idx = srcu_read_lock(&kvm->srcu);
271         spin_lock(&kvm->mmu_lock);
272         kvm->mmu_notifier_seq++;
273         kvm_set_spte_hva(kvm, address, pte);
274         spin_unlock(&kvm->mmu_lock);
275         srcu_read_unlock(&kvm->srcu, idx);
276 }
277
278 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
279                                                     struct mm_struct *mm,
280                                                     unsigned long start,
281                                                     unsigned long end)
282 {
283         struct kvm *kvm = mmu_notifier_to_kvm(mn);
284         int need_tlb_flush = 0, idx;
285
286         idx = srcu_read_lock(&kvm->srcu);
287         spin_lock(&kvm->mmu_lock);
288         /*
289          * The count increase must become visible at unlock time as no
290          * spte can be established without taking the mmu_lock and the
291          * count is also read inside the mmu_lock critical section.
292          */
293         kvm->mmu_notifier_count++;
294         for (; start < end; start += PAGE_SIZE)
295                 need_tlb_flush |= kvm_unmap_hva(kvm, start);
296         spin_unlock(&kvm->mmu_lock);
297         srcu_read_unlock(&kvm->srcu, idx);
298
299         /* we have to flush the TLB before the pages can be freed */
300         if (need_tlb_flush)
301                 kvm_flush_remote_tlbs(kvm);
302 }
303
304 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
305                                                   struct mm_struct *mm,
306                                                   unsigned long start,
307                                                   unsigned long end)
308 {
309         struct kvm *kvm = mmu_notifier_to_kvm(mn);
310
311         spin_lock(&kvm->mmu_lock);
312         /*
313          * This sequence increase will notify the kvm page fault that
314          * the page that is going to be mapped in the spte could have
315          * been freed.
316          */
317         kvm->mmu_notifier_seq++;
318         /*
319          * The above sequence increase must be visible before the
320          * below count decrease but both values are read by the kvm
321          * page fault under mmu_lock spinlock so we don't need to add
322          * an smp_wmb() here in between the two.
323          */
324         kvm->mmu_notifier_count--;
325         spin_unlock(&kvm->mmu_lock);
326
327         BUG_ON(kvm->mmu_notifier_count < 0);
328 }
329
330 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
331                                               struct mm_struct *mm,
332                                               unsigned long address)
333 {
334         struct kvm *kvm = mmu_notifier_to_kvm(mn);
335         int young, idx;
336
337         idx = srcu_read_lock(&kvm->srcu);
338         spin_lock(&kvm->mmu_lock);
339         young = kvm_age_hva(kvm, address);
340         spin_unlock(&kvm->mmu_lock);
341         srcu_read_unlock(&kvm->srcu, idx);
342
343         if (young)
344                 kvm_flush_remote_tlbs(kvm);
345
346         return young;
347 }
348
349 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
350                                      struct mm_struct *mm)
351 {
352         struct kvm *kvm = mmu_notifier_to_kvm(mn);
353         int idx;
354
355         idx = srcu_read_lock(&kvm->srcu);
356         kvm_arch_flush_shadow(kvm);
357         srcu_read_unlock(&kvm->srcu, idx);
358 }
359
360 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
361         .invalidate_page        = kvm_mmu_notifier_invalidate_page,
362         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
363         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
364         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
365         .change_pte             = kvm_mmu_notifier_change_pte,
366         .release                = kvm_mmu_notifier_release,
367 };
368
369 static int kvm_init_mmu_notifier(struct kvm *kvm)
370 {
371         kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
372         return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
373 }
374
375 #else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
376
377 static int kvm_init_mmu_notifier(struct kvm *kvm)
378 {
379         return 0;
380 }
381
382 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
383
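/*
 * Editor note (summarising kvm_create_vm below): the architecture code
 * allocates the struct kvm, hardware virtualization is enabled if this is
 * the first VM, the memslot array and I/O buses are allocated, the MMU
 * notifier is registered, a reference is taken on the creating process's
 * mm, and the VM is linked into the global vm_list.
 */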
384 static struct kvm *kvm_create_vm(void)
385 {
386         int r = 0, i;
387         struct kvm *kvm = kvm_arch_create_vm();
388
389         if (IS_ERR(kvm))
390                 goto out;
391
392         r = hardware_enable_all();
393         if (r)
394                 goto out_err_nodisable;
395
396 #ifdef CONFIG_HAVE_KVM_IRQCHIP
397         INIT_HLIST_HEAD(&kvm->mask_notifier_list);
398         INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
399 #endif
400
401         r = -ENOMEM;
402         kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
403         if (!kvm->memslots)
404                 goto out_err;
405         if (init_srcu_struct(&kvm->srcu))
406                 goto out_err;
407         for (i = 0; i < KVM_NR_BUSES; i++) {
408                 kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
409                                         GFP_KERNEL);
410                 if (!kvm->buses[i]) {
411                         cleanup_srcu_struct(&kvm->srcu);
412                         goto out_err;
413                 }
414         }
415
416         r = kvm_init_mmu_notifier(kvm);
417         if (r) {
418                 cleanup_srcu_struct(&kvm->srcu);
419                 goto out_err;
420         }
421
422         kvm->mm = current->mm;
423         atomic_inc(&kvm->mm->mm_count);
424         spin_lock_init(&kvm->mmu_lock);
425         raw_spin_lock_init(&kvm->requests_lock);
426         kvm_eventfd_init(kvm);
427         mutex_init(&kvm->lock);
428         mutex_init(&kvm->irq_lock);
429         mutex_init(&kvm->slots_lock);
430         atomic_set(&kvm->users_count, 1);
431         spin_lock(&kvm_lock);
432         list_add(&kvm->vm_list, &vm_list);
433         spin_unlock(&kvm_lock);
434 out:
435         return kvm;
436
437 out_err:
438         hardware_disable_all();
439 out_err_nodisable:
440         for (i = 0; i < KVM_NR_BUSES; i++)
441                 kfree(kvm->buses[i]);
442         kfree(kvm->memslots);
443         kfree(kvm);
444         return ERR_PTR(r);
445 }
446
447 /*
448  * Free any memory in @free but not in @dont.
449  */
450 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
451                                   struct kvm_memory_slot *dont)
452 {
453         int i;
454
455         if (!dont || free->rmap != dont->rmap)
456                 vfree(free->rmap);
457
458         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
459                 vfree(free->dirty_bitmap);
460
461
462         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
463                 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
464                         vfree(free->lpage_info[i]);
465                         free->lpage_info[i] = NULL;
466                 }
467         }
468
469         free->npages = 0;
470         free->dirty_bitmap = NULL;
471         free->rmap = NULL;
472 }
473
474 void kvm_free_physmem(struct kvm *kvm)
475 {
476         int i;
477         struct kvm_memslots *slots = kvm->memslots;
478
479         for (i = 0; i < slots->nmemslots; ++i)
480                 kvm_free_physmem_slot(&slots->memslots[i], NULL);
481
482         kfree(kvm->memslots);
483 }
484
485 static void kvm_destroy_vm(struct kvm *kvm)
486 {
487         int i;
488         struct mm_struct *mm = kvm->mm;
489
490         kvm_arch_sync_events(kvm);
491         spin_lock(&kvm_lock);
492         list_del(&kvm->vm_list);
493         spin_unlock(&kvm_lock);
494         kvm_free_irq_routing(kvm);
495         for (i = 0; i < KVM_NR_BUSES; i++)
496                 kvm_io_bus_destroy(kvm->buses[i]);
497         kvm_coalesced_mmio_free(kvm);
498 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
499         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
500 #else
501         kvm_arch_flush_shadow(kvm);
502 #endif
503         kvm_arch_destroy_vm(kvm);
504         hardware_disable_all();
505         mmdrop(mm);
506 }
507
508 void kvm_get_kvm(struct kvm *kvm)
509 {
510         atomic_inc(&kvm->users_count);
511 }
512 EXPORT_SYMBOL_GPL(kvm_get_kvm);
513
514 void kvm_put_kvm(struct kvm *kvm)
515 {
516         if (atomic_dec_and_test(&kvm->users_count))
517                 kvm_destroy_vm(kvm);
518 }
519 EXPORT_SYMBOL_GPL(kvm_put_kvm);
520
521
522 static int kvm_vm_release(struct inode *inode, struct file *filp)
523 {
524         struct kvm *kvm = filp->private_data;
525
526         kvm_irqfd_release(kvm);
527
528         kvm_put_kvm(kvm);
529         return 0;
530 }
531
532 /*
533  * Allocate some memory and give it an address in the guest physical address
534  * space.
535  *
536  * Discontiguous memory is allowed, mostly for framebuffers.
537  *
538  * Must be called holding mmap_sem for write.
539  */
540 int __kvm_set_memory_region(struct kvm *kvm,
541                             struct kvm_userspace_memory_region *mem,
542                             int user_alloc)
543 {
544         int r, flush_shadow = 0;
545         gfn_t base_gfn;
546         unsigned long npages;
547         unsigned long i;
548         struct kvm_memory_slot *memslot;
549         struct kvm_memory_slot old, new;
550         struct kvm_memslots *slots, *old_memslots;
551
552         r = -EINVAL;
553         /* General sanity checks */
554         if (mem->memory_size & (PAGE_SIZE - 1))
555                 goto out;
556         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
557                 goto out;
558         if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
559                 goto out;
560         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
561                 goto out;
562         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
563                 goto out;
564
565         memslot = &kvm->memslots->memslots[mem->slot];
566         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
567         npages = mem->memory_size >> PAGE_SHIFT;
568
569         r = -EINVAL;
570         if (npages > KVM_MEM_MAX_NR_PAGES)
571                 goto out;
572
573         if (!npages)
574                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
575
576         new = old = *memslot;
577
578         new.id = mem->slot;
579         new.base_gfn = base_gfn;
580         new.npages = npages;
581         new.flags = mem->flags;
582
583         /* Disallow changing a memory slot's size. */
584         r = -EINVAL;
585         if (npages && old.npages && npages != old.npages)
586                 goto out_free;
587
588         /* Check for overlaps */
589         r = -EEXIST;
590         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
591                 struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
592
593                 if (s == memslot || !s->npages)
594                         continue;
595                 if (!((base_gfn + npages <= s->base_gfn) ||
596                       (base_gfn >= s->base_gfn + s->npages)))
597                         goto out_free;
598         }
599
600         /* Free page dirty bitmap if unneeded */
601         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
602                 new.dirty_bitmap = NULL;
603
604         r = -ENOMEM;
605
606         /* Allocate if a slot is being created */
607 #ifndef CONFIG_S390
608         if (npages && !new.rmap) {
609                 new.rmap = vmalloc(npages * sizeof(*new.rmap));
610
611                 if (!new.rmap)
612                         goto out_free;
613
614                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
615
616                 new.user_alloc = user_alloc;
617                 new.userspace_addr = mem->userspace_addr;
618         }
619         if (!npages)
620                 goto skip_lpage;
621
622         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
623                 unsigned long ugfn;
624                 unsigned long j;
625                 int lpages;
626                 int level = i + 2;
627
628                 /* Avoid unused variable warning if no large pages */
629                 (void)level;
630
631                 if (new.lpage_info[i])
632                         continue;
633
634                 lpages = 1 + ((base_gfn + npages - 1)
635                              >> KVM_HPAGE_GFN_SHIFT(level));
636                 lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
637
638                 new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
639
640                 if (!new.lpage_info[i])
641                         goto out_free;
642
643                 memset(new.lpage_info[i], 0,
644                        lpages * sizeof(*new.lpage_info[i]));
645
646                 if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
647                         new.lpage_info[i][0].write_count = 1;
648                 if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
649                         new.lpage_info[i][lpages - 1].write_count = 1;
650                 ugfn = new.userspace_addr >> PAGE_SHIFT;
651                 /*
652                  * If the gfn and userspace address are not aligned wrt each
653                  * other, or if explicitly asked to, disable large page
654                  * support for this slot
655                  */
656                 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
657                     !largepages_enabled)
658                         for (j = 0; j < lpages; ++j)
659                                 new.lpage_info[i][j].write_count = 1;
660         }
661
662 skip_lpage:
663
664         /* Allocate page dirty bitmap if needed */
665         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
666                 unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(&new);
667
668                 new.dirty_bitmap = vmalloc(dirty_bytes);
669                 if (!new.dirty_bitmap)
670                         goto out_free;
671                 memset(new.dirty_bitmap, 0, dirty_bytes);
672                 /* destroy any largepage mappings for dirty tracking */
673                 if (old.npages)
674                         flush_shadow = 1;
675         }
676 #else  /* not defined CONFIG_S390 */
677         new.user_alloc = user_alloc;
678         if (user_alloc)
679                 new.userspace_addr = mem->userspace_addr;
680 #endif /* not defined CONFIG_S390 */
681
682         if (!npages) {
683                 r = -ENOMEM;
684                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
685                 if (!slots)
686                         goto out_free;
687                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
688                 if (mem->slot >= slots->nmemslots)
689                         slots->nmemslots = mem->slot + 1;
690                 slots->generation++;
691                 slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
692
693                 old_memslots = kvm->memslots;
694                 rcu_assign_pointer(kvm->memslots, slots);
695                 synchronize_srcu_expedited(&kvm->srcu);
696                 /* From this point no new shadow pages pointing to a deleted
697                  * memslot will be created.
698                  *
699                  * validation of sp->gfn happens in:
700                  *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
701                  *      - kvm_is_visible_gfn (mmu_check_roots)
702                  */
703                 kvm_arch_flush_shadow(kvm);
704                 kfree(old_memslots);
705         }
706
707         r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
708         if (r)
709                 goto out_free;
710
711         /* map the pages in iommu page table */
712         if (npages) {
713                 r = kvm_iommu_map_pages(kvm, &new);
714                 if (r)
715                         goto out_free;
716         }
717
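        /*
         * Editor note: publish the updated memslot array.  Readers access
         * kvm->memslots under SRCU, so the new copy is installed with
         * rcu_assign_pointer() and synchronize_srcu_expedited() waits for
         * all in-flight readers before the old array is freed below.
         */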
718         r = -ENOMEM;
719         slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
720         if (!slots)
721                 goto out_free;
722         memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
723         if (mem->slot >= slots->nmemslots)
724                 slots->nmemslots = mem->slot + 1;
725         slots->generation++;
726
727         /* actual memory is freed via old in kvm_free_physmem_slot below */
728         if (!npages) {
729                 new.rmap = NULL;
730                 new.dirty_bitmap = NULL;
731                 for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
732                         new.lpage_info[i] = NULL;
733         }
734
735         slots->memslots[mem->slot] = new;
736         old_memslots = kvm->memslots;
737         rcu_assign_pointer(kvm->memslots, slots);
738         synchronize_srcu_expedited(&kvm->srcu);
739
740         kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
741
742         kvm_free_physmem_slot(&old, &new);
743         kfree(old_memslots);
744
745         if (flush_shadow)
746                 kvm_arch_flush_shadow(kvm);
747
748         return 0;
749
750 out_free:
751         kvm_free_physmem_slot(&new, &old);
752 out:
753         return r;
754
755 }
756 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
757
758 int kvm_set_memory_region(struct kvm *kvm,
759                           struct kvm_userspace_memory_region *mem,
760                           int user_alloc)
761 {
762         int r;
763
764         mutex_lock(&kvm->slots_lock);
765         r = __kvm_set_memory_region(kvm, mem, user_alloc);
766         mutex_unlock(&kvm->slots_lock);
767         return r;
768 }
769 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
770
771 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
772                                    struct
773                                    kvm_userspace_memory_region *mem,
774                                    int user_alloc)
775 {
776         if (mem->slot >= KVM_MEMORY_SLOTS)
777                 return -EINVAL;
778         return kvm_set_memory_region(kvm, mem, user_alloc);
779 }
780
781 int kvm_get_dirty_log(struct kvm *kvm,
782                         struct kvm_dirty_log *log, int *is_dirty)
783 {
784         struct kvm_memory_slot *memslot;
785         int r, i;
786         unsigned long n;
787         unsigned long any = 0;
788
789         r = -EINVAL;
790         if (log->slot >= KVM_MEMORY_SLOTS)
791                 goto out;
792
793         memslot = &kvm->memslots->memslots[log->slot];
794         r = -ENOENT;
795         if (!memslot->dirty_bitmap)
796                 goto out;
797
798         n = kvm_dirty_bitmap_bytes(memslot);
799
800         for (i = 0; !any && i < n/sizeof(long); ++i)
801                 any = memslot->dirty_bitmap[i];
802
803         r = -EFAULT;
804         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
805                 goto out;
806
807         if (any)
808                 *is_dirty = 1;
809
810         r = 0;
811 out:
812         return r;
813 }
814
815 void kvm_disable_largepages(void)
816 {
817         largepages_enabled = false;
818 }
819 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
820
821 int is_error_page(struct page *page)
822 {
823         return page == bad_page || page == hwpoison_page || page == fault_page;
824 }
825 EXPORT_SYMBOL_GPL(is_error_page);
826
827 int is_error_pfn(pfn_t pfn)
828 {
829         return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
830 }
831 EXPORT_SYMBOL_GPL(is_error_pfn);
832
833 int is_hwpoison_pfn(pfn_t pfn)
834 {
835         return pfn == hwpoison_pfn;
836 }
837 EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
838
839 int is_fault_pfn(pfn_t pfn)
840 {
841         return pfn == fault_pfn;
842 }
843 EXPORT_SYMBOL_GPL(is_fault_pfn);
844
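/*
 * Editor note: PAGE_OFFSET marks the start of the kernel mapping and is not
 * a valid userspace address, so it doubles as the "bad hva" sentinel.
 */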
845 static inline unsigned long bad_hva(void)
846 {
847         return PAGE_OFFSET;
848 }
849
850 int kvm_is_error_hva(unsigned long addr)
851 {
852         return addr == bad_hva();
853 }
854 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
855
856 static struct kvm_memory_slot *__gfn_to_memslot(struct kvm_memslots *slots,
857                                                 gfn_t gfn)
858 {
859         int i;
860
861         for (i = 0; i < slots->nmemslots; ++i) {
862                 struct kvm_memory_slot *memslot = &slots->memslots[i];
863
864                 if (gfn >= memslot->base_gfn
865                     && gfn < memslot->base_gfn + memslot->npages)
866                         return memslot;
867         }
868         return NULL;
869 }
870
871 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
872 {
873         return __gfn_to_memslot(kvm_memslots(kvm), gfn);
874 }
875 EXPORT_SYMBOL_GPL(gfn_to_memslot);
876
877 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
878 {
879         int i;
880         struct kvm_memslots *slots = kvm_memslots(kvm);
881
882         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
883                 struct kvm_memory_slot *memslot = &slots->memslots[i];
884
885                 if (memslot->flags & KVM_MEMSLOT_INVALID)
886                         continue;
887
888                 if (gfn >= memslot->base_gfn
889                     && gfn < memslot->base_gfn + memslot->npages)
890                         return 1;
891         }
892         return 0;
893 }
894 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
895
896 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
897 {
898         struct vm_area_struct *vma;
899         unsigned long addr, size;
900
901         size = PAGE_SIZE;
902
903         addr = gfn_to_hva(kvm, gfn);
904         if (kvm_is_error_hva(addr))
905                 return PAGE_SIZE;
906
907         down_read(&current->mm->mmap_sem);
908         vma = find_vma(current->mm, addr);
909         if (!vma)
910                 goto out;
911
912         size = vma_kernel_pagesize(vma);
913
914 out:
915         up_read(&current->mm->mmap_sem);
916
917         return size;
918 }
919
920 int memslot_id(struct kvm *kvm, gfn_t gfn)
921 {
922         int i;
923         struct kvm_memslots *slots = kvm_memslots(kvm);
924         struct kvm_memory_slot *memslot = NULL;
925
926         for (i = 0; i < slots->nmemslots; ++i) {
927                 memslot = &slots->memslots[i];
928
929                 if (gfn >= memslot->base_gfn
930                     && gfn < memslot->base_gfn + memslot->npages)
931                         break;
932         }
933
934         return memslot - slots->memslots;
935 }
936
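/*
 * Editor note: translate @gfn within @slot to its userspace virtual
 * address.  If @nr_pages is non-NULL it is set to the number of pages,
 * starting at @gfn, that remain inside the slot, so callers can map
 * several consecutive pages from a single lookup.
 */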
937 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
938                                      gfn_t *nr_pages)
939 {
940         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
941                 return bad_hva();
942
943         if (nr_pages)
944                 *nr_pages = slot->npages - (gfn - slot->base_gfn);
945
946         return gfn_to_hva_memslot(slot, gfn);
947 }
948
949 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
950 {
951         return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
952 }
953 EXPORT_SYMBOL_GPL(gfn_to_hva);
954
955 static pfn_t get_fault_pfn(void)
956 {
957         get_page(fault_page);
958         return fault_pfn;
959 }
960
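/*
 * Editor note (summarising hva_to_pfn below): the fast path uses
 * __get_user_pages_fast(); if that fails and sleeping is allowed, fall back
 * to get_user_pages_fast(), upgrading read-only faults to writable mappings
 * when possible.  Addresses gup cannot resolve are checked for hardware
 * poison and for VM_PFNMAP areas (raw pfn mappings); anything else yields
 * the fault_pfn sentinel, optionally flagging an async fault.
 */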
961 static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
962                         bool *async, bool write_fault, bool *writable)
963 {
964         struct page *page[1];
965         int npages = 0;
966         pfn_t pfn;
967
968         /* we can do it either atomically or asynchronously, not both */
969         BUG_ON(atomic && async);
970
971         BUG_ON(!write_fault && !writable);
972
973         if (writable)
974                 *writable = true;
975
976         if (atomic || async)
977                 npages = __get_user_pages_fast(addr, 1, 1, page);
978
979         if (unlikely(npages != 1) && !atomic) {
980                 might_sleep();
981
982                 if (writable)
983                         *writable = write_fault;
984
985                 npages = get_user_pages_fast(addr, 1, write_fault, page);
986
987                 /* map read fault as writable if possible */
988                 if (unlikely(!write_fault) && npages == 1) {
989                         struct page *wpage[1];
990
991                         npages = __get_user_pages_fast(addr, 1, 1, wpage);
992                         if (npages == 1) {
993                                 *writable = true;
994                                 put_page(page[0]);
995                                 page[0] = wpage[0];
996                         }
997                         npages = 1;
998                 }
999         }
1000
1001         if (unlikely(npages != 1)) {
1002                 struct vm_area_struct *vma;
1003
1004                 if (atomic)
1005                         return get_fault_pfn();
1006
1007                 down_read(&current->mm->mmap_sem);
1008                 if (is_hwpoison_address(addr)) {
1009                         up_read(&current->mm->mmap_sem);
1010                         get_page(hwpoison_page);
1011                         return page_to_pfn(hwpoison_page);
1012                 }
1013
1014                 vma = find_vma_intersection(current->mm, addr, addr+1);
1015
1016                 if (vma == NULL)
1017                         pfn = get_fault_pfn();
1018                 else if ((vma->vm_flags & VM_PFNMAP)) {
1019                         pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
1020                                 vma->vm_pgoff;
1021                         BUG_ON(!kvm_is_mmio_pfn(pfn));
1022                 } else {
1023                         if (async && (vma->vm_flags & VM_WRITE))
1024                                 *async = true;
1025                         pfn = get_fault_pfn();
1026                 }
1027                 up_read(&current->mm->mmap_sem);
1028         } else
1029                 pfn = page_to_pfn(page[0]);
1030
1031         return pfn;
1032 }
1033
1034 pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr)
1035 {
1036         return hva_to_pfn(kvm, addr, true, NULL, true, NULL);
1037 }
1038 EXPORT_SYMBOL_GPL(hva_to_pfn_atomic);
1039
1040 static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
1041                           bool write_fault, bool *writable)
1042 {
1043         unsigned long addr;
1044
1045         if (async)
1046                 *async = false;
1047
1048         addr = gfn_to_hva(kvm, gfn);
1049         if (kvm_is_error_hva(addr)) {
1050                 get_page(bad_page);
1051                 return page_to_pfn(bad_page);
1052         }
1053
1054         return hva_to_pfn(kvm, addr, atomic, async, write_fault, writable);
1055 }
1056
1057 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
1058 {
1059         return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
1060 }
1061 EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
1062
1063 pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
1064                        bool write_fault, bool *writable)
1065 {
1066         return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
1067 }
1068 EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
1069
1070 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1071 {
1072         return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
1073 }
1074 EXPORT_SYMBOL_GPL(gfn_to_pfn);
1075
1076 pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1077                       bool *writable)
1078 {
1079         return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
1080 }
1081 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
1082
1083 pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
1084                          struct kvm_memory_slot *slot, gfn_t gfn)
1085 {
1086         unsigned long addr = gfn_to_hva_memslot(slot, gfn);
1087         return hva_to_pfn(kvm, addr, false, NULL, true, NULL);
1088 }
1089
1090 int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
1091                                                                   int nr_pages)
1092 {
1093         unsigned long addr;
1094         gfn_t entry;
1095
1096         addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
1097         if (kvm_is_error_hva(addr))
1098                 return -1;
1099
1100         if (entry < nr_pages)
1101                 return 0;
1102
1103         return __get_user_pages_fast(addr, nr_pages, 1, pages);
1104 }
1105 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
1106
1107 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1108 {
1109         pfn_t pfn;
1110
1111         pfn = gfn_to_pfn(kvm, gfn);
1112         if (!kvm_is_mmio_pfn(pfn))
1113                 return pfn_to_page(pfn);
1114
1115         WARN_ON(kvm_is_mmio_pfn(pfn));
1116
1117         get_page(bad_page);
1118         return bad_page;
1119 }
1120
1121 EXPORT_SYMBOL_GPL(gfn_to_page);
1122
1123 void kvm_release_page_clean(struct page *page)
1124 {
1125         kvm_release_pfn_clean(page_to_pfn(page));
1126 }
1127 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1128
1129 void kvm_release_pfn_clean(pfn_t pfn)
1130 {
1131         if (!kvm_is_mmio_pfn(pfn))
1132                 put_page(pfn_to_page(pfn));
1133 }
1134 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1135
1136 void kvm_release_page_dirty(struct page *page)
1137 {
1138         kvm_release_pfn_dirty(page_to_pfn(page));
1139 }
1140 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1141
1142 void kvm_release_pfn_dirty(pfn_t pfn)
1143 {
1144         kvm_set_pfn_dirty(pfn);
1145         kvm_release_pfn_clean(pfn);
1146 }
1147 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1148
1149 void kvm_set_page_dirty(struct page *page)
1150 {
1151         kvm_set_pfn_dirty(page_to_pfn(page));
1152 }
1153 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1154
1155 void kvm_set_pfn_dirty(pfn_t pfn)
1156 {
1157         if (!kvm_is_mmio_pfn(pfn)) {
1158                 struct page *page = pfn_to_page(pfn);
1159                 if (!PageReserved(page))
1160                         SetPageDirty(page);
1161         }
1162 }
1163 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1164
1165 void kvm_set_pfn_accessed(pfn_t pfn)
1166 {
1167         if (!kvm_is_mmio_pfn(pfn))
1168                 mark_page_accessed(pfn_to_page(pfn));
1169 }
1170 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1171
1172 void kvm_get_pfn(pfn_t pfn)
1173 {
1174         if (!kvm_is_mmio_pfn(pfn))
1175                 get_page(pfn_to_page(pfn));
1176 }
1177 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1178
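/*
 * Editor note: return how many of @len bytes can be copied without crossing
 * the page boundary that lies @offset bytes into the current page.
 */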
1179 static int next_segment(unsigned long len, int offset)
1180 {
1181         if (len > PAGE_SIZE - offset)
1182                 return PAGE_SIZE - offset;
1183         else
1184                 return len;
1185 }
1186
1187 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1188                         int len)
1189 {
1190         int r;
1191         unsigned long addr;
1192
1193         addr = gfn_to_hva(kvm, gfn);
1194         if (kvm_is_error_hva(addr))
1195                 return -EFAULT;
1196         r = copy_from_user(data, (void __user *)addr + offset, len);
1197         if (r)
1198                 return -EFAULT;
1199         return 0;
1200 }
1201 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1202
1203 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1204 {
1205         gfn_t gfn = gpa >> PAGE_SHIFT;
1206         int seg;
1207         int offset = offset_in_page(gpa);
1208         int ret;
1209
1210         while ((seg = next_segment(len, offset)) != 0) {
1211                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1212                 if (ret < 0)
1213                         return ret;
1214                 offset = 0;
1215                 len -= seg;
1216                 data += seg;
1217                 ++gfn;
1218         }
1219         return 0;
1220 }
1221 EXPORT_SYMBOL_GPL(kvm_read_guest);
1222
1223 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1224                           unsigned long len)
1225 {
1226         int r;
1227         unsigned long addr;
1228         gfn_t gfn = gpa >> PAGE_SHIFT;
1229         int offset = offset_in_page(gpa);
1230
1231         addr = gfn_to_hva(kvm, gfn);
1232         if (kvm_is_error_hva(addr))
1233                 return -EFAULT;
1234         pagefault_disable();
1235         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1236         pagefault_enable();
1237         if (r)
1238                 return -EFAULT;
1239         return 0;
1240 }
1241 EXPORT_SYMBOL(kvm_read_guest_atomic);
1242
1243 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1244                          int offset, int len)
1245 {
1246         int r;
1247         unsigned long addr;
1248
1249         addr = gfn_to_hva(kvm, gfn);
1250         if (kvm_is_error_hva(addr))
1251                 return -EFAULT;
1252         r = copy_to_user((void __user *)addr + offset, data, len);
1253         if (r)
1254                 return -EFAULT;
1255         mark_page_dirty(kvm, gfn);
1256         return 0;
1257 }
1258 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1259
1260 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1261                     unsigned long len)
1262 {
1263         gfn_t gfn = gpa >> PAGE_SHIFT;
1264         int seg;
1265         int offset = offset_in_page(gpa);
1266         int ret;
1267
1268         while ((seg = next_segment(len, offset)) != 0) {
1269                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1270                 if (ret < 0)
1271                         return ret;
1272                 offset = 0;
1273                 len -= seg;
1274                 data += seg;
1275                 ++gfn;
1276         }
1277         return 0;
1278 }
1279
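/*
 * Editor note: pre-translate @gpa into a host virtual address and remember
 * the memslot generation it was computed against.  kvm_write_guest_cached()
 * redoes the lookup only when the generation has changed, avoiding a
 * gfn_to_hva() walk on every write.
 */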
1280 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1281                               gpa_t gpa)
1282 {
1283         struct kvm_memslots *slots = kvm_memslots(kvm);
1284         int offset = offset_in_page(gpa);
1285         gfn_t gfn = gpa >> PAGE_SHIFT;
1286
1287         ghc->gpa = gpa;
1288         ghc->generation = slots->generation;
1289         ghc->memslot = __gfn_to_memslot(slots, gfn);
1290         ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
1291         if (!kvm_is_error_hva(ghc->hva))
1292                 ghc->hva += offset;
1293         else
1294                 return -EFAULT;
1295
1296         return 0;
1297 }
1298 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
1299
1300 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1301                            void *data, unsigned long len)
1302 {
1303         struct kvm_memslots *slots = kvm_memslots(kvm);
1304         int r;
1305
1306         if (slots->generation != ghc->generation)
1307                 kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
1308
1309         if (kvm_is_error_hva(ghc->hva))
1310                 return -EFAULT;
1311
1312         r = copy_to_user((void __user *)ghc->hva, data, len);
1313         if (r)
1314                 return -EFAULT;
1315         mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
1316
1317         return 0;
1318 }
1319 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
1320
1321 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1322 {
1323         return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1324 }
1325 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1326
1327 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1328 {
1329         gfn_t gfn = gpa >> PAGE_SHIFT;
1330         int seg;
1331         int offset = offset_in_page(gpa);
1332         int ret;
1333
1334         while ((seg = next_segment(len, offset)) != 0) {
1335                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1336                 if (ret < 0)
1337                         return ret;
1338                 offset = 0;
1339                 len -= seg;
1340                 ++gfn;
1341         }
1342         return 0;
1343 }
1344 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1345
1346 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1347                              gfn_t gfn)
1348 {
1349         if (memslot && memslot->dirty_bitmap) {
1350                 unsigned long rel_gfn = gfn - memslot->base_gfn;
1351
1352                 generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
1353         }
1354 }
1355
1356 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1357 {
1358         struct kvm_memory_slot *memslot;
1359
1360         memslot = gfn_to_memslot(kvm, gfn);
1361         mark_page_dirty_in_slot(kvm, memslot, gfn);
1362 }
1363
1364 /*
1365  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1366  */
1367 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1368 {
1369         DEFINE_WAIT(wait);
1370
1371         for (;;) {
1372                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1373
1374                 if (kvm_arch_vcpu_runnable(vcpu)) {
1375                         kvm_make_request(KVM_REQ_UNHALT, vcpu);
1376                         break;
1377                 }
1378                 if (kvm_cpu_has_pending_timer(vcpu))
1379                         break;
1380                 if (signal_pending(current))
1381                         break;
1382
1383                 schedule();
1384         }
1385
1386         finish_wait(&vcpu->wq, &wait);
1387 }
1388
1389 void kvm_resched(struct kvm_vcpu *vcpu)
1390 {
1391         if (!need_resched())
1392                 return;
1393         cond_resched();
1394 }
1395 EXPORT_SYMBOL_GPL(kvm_resched);
1396
1397 void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu)
1398 {
1399         ktime_t expires;
1400         DEFINE_WAIT(wait);
1401
1402         prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1403
1404         /* Sleep for 100 us and hope the lock holder gets scheduled */
1405         expires = ktime_add_ns(ktime_get(), 100000UL);
1406         schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1407
1408         finish_wait(&vcpu->wq, &wait);
1409 }
1410 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
1411
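/*
 * Editor note: back the vcpu's mmap()ed region.  Page offset 0 is the
 * kvm_run structure, with the pio data page and the coalesced MMIO ring at
 * their fixed offsets on configurations that provide them; any other offset
 * gets SIGBUS.
 */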
1412 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1413 {
1414         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1415         struct page *page;
1416
1417         if (vmf->pgoff == 0)
1418                 page = virt_to_page(vcpu->run);
1419 #ifdef CONFIG_X86
1420         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1421                 page = virt_to_page(vcpu->arch.pio_data);
1422 #endif
1423 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1424         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1425                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1426 #endif
1427         else
1428                 return VM_FAULT_SIGBUS;
1429         get_page(page);
1430         vmf->page = page;
1431         return 0;
1432 }
1433
1434 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1435         .fault = kvm_vcpu_fault,
1436 };
1437
1438 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1439 {
1440         vma->vm_ops = &kvm_vcpu_vm_ops;
1441         return 0;
1442 }
1443
1444 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1445 {
1446         struct kvm_vcpu *vcpu = filp->private_data;
1447
1448         kvm_put_kvm(vcpu->kvm);
1449         return 0;
1450 }
1451
1452 static struct file_operations kvm_vcpu_fops = {
1453         .release        = kvm_vcpu_release,
1454         .unlocked_ioctl = kvm_vcpu_ioctl,
1455         .compat_ioctl   = kvm_vcpu_ioctl,
1456         .mmap           = kvm_vcpu_mmap,
1457         .llseek         = noop_llseek,
1458 };
1459
1460 /*
1461  * Allocates an inode for the vcpu.
1462  */
1463 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1464 {
1465         return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
1466 }
1467
1468 /*
1469  * Creates some virtual cpus.  Good luck creating more than one.
1470  */
1471 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1472 {
1473         int r;
1474         struct kvm_vcpu *vcpu, *v;
1475
1476         vcpu = kvm_arch_vcpu_create(kvm, id);
1477         if (IS_ERR(vcpu))
1478                 return PTR_ERR(vcpu);
1479
1480         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1481
1482         r = kvm_arch_vcpu_setup(vcpu);
1483         if (r)
1484                 return r;
1485
1486         mutex_lock(&kvm->lock);
1487         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1488                 r = -EINVAL;
1489                 goto vcpu_destroy;
1490         }
1491
1492         kvm_for_each_vcpu(r, v, kvm)
1493                 if (v->vcpu_id == id) {
1494                         r = -EEXIST;
1495                         goto vcpu_destroy;
1496                 }
1497
1498         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1499
1500         /* Now it's all set up, let userspace reach it */
1501         kvm_get_kvm(kvm);
1502         r = create_vcpu_fd(vcpu);
1503         if (r < 0) {
1504                 kvm_put_kvm(kvm);
1505                 goto vcpu_destroy;
1506         }
1507
1508         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1509         smp_wmb();
1510         atomic_inc(&kvm->online_vcpus);
1511
1512 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1513         if (kvm->bsp_vcpu_id == id)
1514                 kvm->bsp_vcpu = vcpu;
1515 #endif
1516         mutex_unlock(&kvm->lock);
1517         return r;
1518
1519 vcpu_destroy:
1520         mutex_unlock(&kvm->lock);
1521         kvm_arch_vcpu_destroy(vcpu);
1522         return r;
1523 }
1524
1525 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1526 {
1527         if (sigset) {
1528                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1529                 vcpu->sigset_active = 1;
1530                 vcpu->sigset = *sigset;
1531         } else
1532                 vcpu->sigset_active = 0;
1533         return 0;
1534 }
1535
1536 static long kvm_vcpu_ioctl(struct file *filp,
1537                            unsigned int ioctl, unsigned long arg)
1538 {
1539         struct kvm_vcpu *vcpu = filp->private_data;
1540         void __user *argp = (void __user *)arg;
1541         int r;
1542         struct kvm_fpu *fpu = NULL;
1543         struct kvm_sregs *kvm_sregs = NULL;
1544
1545         if (vcpu->kvm->mm != current->mm)
1546                 return -EIO;
1547
1548 #if defined(CONFIG_S390) || defined(CONFIG_PPC)
1549         /*
1550          * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
1551          * so vcpu_load() would break it.
1552          */
1553         if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
1554                 return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1555 #endif
1556
1557
1558         vcpu_load(vcpu);
1559         switch (ioctl) {
1560         case KVM_RUN:
1561                 r = -EINVAL;
1562                 if (arg)
1563                         goto out;
1564                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
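                /*
                 * Editor note: trace the return to userspace with the
                 * guest's exit reason and the ioctl's return value (the
                 * "exit to userspace" event this patch adds).
                 */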
1565                 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
1566                 break;
1567         case KVM_GET_REGS: {
1568                 struct kvm_regs *kvm_regs;
1569
1570                 r = -ENOMEM;
1571                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1572                 if (!kvm_regs)
1573                         goto out;
1574                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1575                 if (r)
1576                         goto out_free1;
1577                 r = -EFAULT;
1578                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1579                         goto out_free1;
1580                 r = 0;
1581 out_free1:
1582                 kfree(kvm_regs);
1583                 break;
1584         }
1585         case KVM_SET_REGS: {
1586                 struct kvm_regs *kvm_regs;
1587
1588                 r = -ENOMEM;
1589                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1590                 if (!kvm_regs)
1591                         goto out;
1592                 r = -EFAULT;
1593                 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1594                         goto out_free2;
1595                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1596                 if (r)
1597                         goto out_free2;
1598                 r = 0;
1599 out_free2:
1600                 kfree(kvm_regs);
1601                 break;
1602         }
1603         case KVM_GET_SREGS: {
1604                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1605                 r = -ENOMEM;
1606                 if (!kvm_sregs)
1607                         goto out;
1608                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1609                 if (r)
1610                         goto out;
1611                 r = -EFAULT;
1612                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1613                         goto out;
1614                 r = 0;
1615                 break;
1616         }
1617         case KVM_SET_SREGS: {
1618                 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1619                 r = -ENOMEM;
1620                 if (!kvm_sregs)
1621                         goto out;
1622                 r = -EFAULT;
1623                 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1624                         goto out;
1625                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1626                 if (r)
1627                         goto out;
1628                 r = 0;
1629                 break;
1630         }
1631         case KVM_GET_MP_STATE: {
1632                 struct kvm_mp_state mp_state;
1633
1634                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1635                 if (r)
1636                         goto out;
1637                 r = -EFAULT;
1638                 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1639                         goto out;
1640                 r = 0;
1641                 break;
1642         }
1643         case KVM_SET_MP_STATE: {
1644                 struct kvm_mp_state mp_state;
1645
1646                 r = -EFAULT;
1647                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1648                         goto out;
1649                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1650                 if (r)
1651                         goto out;
1652                 r = 0;
1653                 break;
1654         }
1655         case KVM_TRANSLATE: {
1656                 struct kvm_translation tr;
1657
1658                 r = -EFAULT;
1659                 if (copy_from_user(&tr, argp, sizeof tr))
1660                         goto out;
1661                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
1662                 if (r)
1663                         goto out;
1664                 r = -EFAULT;
1665                 if (copy_to_user(argp, &tr, sizeof tr))
1666                         goto out;
1667                 r = 0;
1668                 break;
1669         }
1670         case KVM_SET_GUEST_DEBUG: {
1671                 struct kvm_guest_debug dbg;
1672
1673                 r = -EFAULT;
1674                 if (copy_from_user(&dbg, argp, sizeof dbg))
1675                         goto out;
1676                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
1677                 if (r)
1678                         goto out;
1679                 r = 0;
1680                 break;
1681         }
1682         case KVM_SET_SIGNAL_MASK: {
1683                 struct kvm_signal_mask __user *sigmask_arg = argp;
1684                 struct kvm_signal_mask kvm_sigmask;
1685                 sigset_t sigset, *p;
1686
1687                 p = NULL;
1688                 if (argp) {
1689                         r = -EFAULT;
1690                         if (copy_from_user(&kvm_sigmask, argp,
1691                                            sizeof kvm_sigmask))
1692                                 goto out;
1693                         r = -EINVAL;
1694                         if (kvm_sigmask.len != sizeof sigset)
1695                                 goto out;
1696                         r = -EFAULT;
1697                         if (copy_from_user(&sigset, sigmask_arg->sigset,
1698                                            sizeof sigset))
1699                                 goto out;
1700                         p = &sigset;
1701                 }
1702                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1703                 break;
1704         }
1705         case KVM_GET_FPU: {
1706                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1707                 r = -ENOMEM;
1708                 if (!fpu)
1709                         goto out;
1710                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1711                 if (r)
1712                         goto out;
1713                 r = -EFAULT;
1714                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1715                         goto out;
1716                 r = 0;
1717                 break;
1718         }
1719         case KVM_SET_FPU: {
1720                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1721                 r = -ENOMEM;
1722                 if (!fpu)
1723                         goto out;
1724                 r = -EFAULT;
1725                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1726                         goto out;
1727                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1728                 if (r)
1729                         goto out;
1730                 r = 0;
1731                 break;
1732         }
1733         default:
1734                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1735         }
1736 out:
1737         vcpu_put(vcpu);
1738         kfree(fpu);
1739         kfree(kvm_sregs);
1740         return r;
1741 }
1742
1743 static long kvm_vm_ioctl(struct file *filp,
1744                            unsigned int ioctl, unsigned long arg)
1745 {
1746         struct kvm *kvm = filp->private_data;
1747         void __user *argp = (void __user *)arg;
1748         int r;
1749
1750         if (kvm->mm != current->mm)
1751                 return -EIO;
1752         switch (ioctl) {
1753         case KVM_CREATE_VCPU:
1754                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1755                 if (r < 0)
1756                         goto out;
1757                 break;
1758         case KVM_SET_USER_MEMORY_REGION: {
1759                 struct kvm_userspace_memory_region kvm_userspace_mem;
1760
1761                 r = -EFAULT;
1762                 if (copy_from_user(&kvm_userspace_mem, argp,
1763                                                 sizeof kvm_userspace_mem))
1764                         goto out;
1765
1766                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
1767                 if (r)
1768                         goto out;
1769                 break;
1770         }
1771         case KVM_GET_DIRTY_LOG: {
1772                 struct kvm_dirty_log log;
1773
1774                 r = -EFAULT;
1775                 if (copy_from_user(&log, argp, sizeof log))
1776                         goto out;
1777                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1778                 if (r)
1779                         goto out;
1780                 break;
1781         }
1782 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1783         case KVM_REGISTER_COALESCED_MMIO: {
1784                 struct kvm_coalesced_mmio_zone zone;
1785                 r = -EFAULT;
1786                 if (copy_from_user(&zone, argp, sizeof zone))
1787                         goto out;
1788                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1789                 if (r)
1790                         goto out;
1791                 r = 0;
1792                 break;
1793         }
1794         case KVM_UNREGISTER_COALESCED_MMIO: {
1795                 struct kvm_coalesced_mmio_zone zone;
1796                 r = -EFAULT;
1797                 if (copy_from_user(&zone, argp, sizeof zone))
1798                         goto out;
1799                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1800                 if (r)
1801                         goto out;
1802                 r = 0;
1803                 break;
1804         }
1805 #endif
1806         case KVM_IRQFD: {
1807                 struct kvm_irqfd data;
1808
1809                 r = -EFAULT;
1810                 if (copy_from_user(&data, argp, sizeof data))
1811                         goto out;
1812                 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
1813                 break;
1814         }
1815         case KVM_IOEVENTFD: {
1816                 struct kvm_ioeventfd data;
1817
1818                 r = -EFAULT;
1819                 if (copy_from_user(&data, argp, sizeof data))
1820                         goto out;
1821                 r = kvm_ioeventfd(kvm, &data);
1822                 break;
1823         }
1824 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1825         case KVM_SET_BOOT_CPU_ID:
1826                 r = 0;
1827                 mutex_lock(&kvm->lock);
1828                 if (atomic_read(&kvm->online_vcpus) != 0)
1829                         r = -EBUSY;
1830                 else
1831                         kvm->bsp_vcpu_id = arg;
1832                 mutex_unlock(&kvm->lock);
1833                 break;
1834 #endif
1835         default:
1836                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1837                 if (r == -ENOTTY)
1838                         r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
1839         }
1840 out:
1841         return r;
1842 }
1843
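     /*
      * 32-bit compat handling for KVM_GET_DIRTY_LOG: the dirty-bitmap pointer
      * in struct kvm_dirty_log has a different width for 32-bit callers, so
      * the argument is converted field by field before the common handler
      * kvm_vm_ioctl_get_dirty_log() runs.
      */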
1844 #ifdef CONFIG_COMPAT
1845 struct compat_kvm_dirty_log {
1846         __u32 slot;
1847         __u32 padding1;
1848         union {
1849                 compat_uptr_t dirty_bitmap; /* one bit per page */
1850                 __u64 padding2;
1851         };
1852 };
1853
1854 static long kvm_vm_compat_ioctl(struct file *filp,
1855                            unsigned int ioctl, unsigned long arg)
1856 {
1857         struct kvm *kvm = filp->private_data;
1858         int r;
1859
1860         if (kvm->mm != current->mm)
1861                 return -EIO;
1862         switch (ioctl) {
1863         case KVM_GET_DIRTY_LOG: {
1864                 struct compat_kvm_dirty_log compat_log;
1865                 struct kvm_dirty_log log;
1866
1867                 r = -EFAULT;
1868                 if (copy_from_user(&compat_log, (void __user *)arg,
1869                                    sizeof(compat_log)))
1870                         goto out;
1871                 log.slot         = compat_log.slot;
1872                 log.padding1     = compat_log.padding1;
1873                 log.padding2     = compat_log.padding2;
1874                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
1875
1876                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1877                 if (r)
1878                         goto out;
1879                 break;
1880         }
1881         default:
1882                 r = kvm_vm_ioctl(filp, ioctl, arg);
1883         }
1884
1885 out:
1886         return r;
1887 }
1888 #endif
1889
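     /*
      * Fault handler for mmap() of a VM fd: the page offset is interpreted as
      * a guest frame number, resolved to the backing user page via
      * gfn_to_hva() and get_user_pages().
      */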
1890 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1891 {
1892         struct page *page[1];
1893         unsigned long addr;
1894         int npages;
1895         gfn_t gfn = vmf->pgoff;
1896         struct kvm *kvm = vma->vm_file->private_data;
1897
1898         addr = gfn_to_hva(kvm, gfn);
1899         if (kvm_is_error_hva(addr))
1900                 return VM_FAULT_SIGBUS;
1901
1902         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1903                                 NULL);
1904         if (unlikely(npages != 1))
1905                 return VM_FAULT_SIGBUS;
1906
1907         vmf->page = page[0];
1908         return 0;
1909 }
1910
1911 static const struct vm_operations_struct kvm_vm_vm_ops = {
1912         .fault = kvm_vm_fault,
1913 };
1914
1915 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1916 {
1917         vma->vm_ops = &kvm_vm_vm_ops;
1918         return 0;
1919 }
1920
1921 static struct file_operations kvm_vm_fops = {
1922         .release        = kvm_vm_release,
1923         .unlocked_ioctl = kvm_vm_ioctl,
1924 #ifdef CONFIG_COMPAT
1925         .compat_ioctl   = kvm_vm_compat_ioctl,
1926 #endif
1927         .mmap           = kvm_vm_mmap,
1928         .llseek         = noop_llseek,
1929 };
1930
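     /*
      * KVM_CREATE_VM: create a VM and hand it to userspace as an anonymous
      * inode file descriptor.  If installing the fd fails, the reference
      * taken by kvm_create_vm() is dropped again.
      */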
1931 static int kvm_dev_ioctl_create_vm(void)
1932 {
1933         int fd, r;
1934         struct kvm *kvm;
1935
1936         kvm = kvm_create_vm();
1937         if (IS_ERR(kvm))
1938                 return PTR_ERR(kvm);
1939 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1940         r = kvm_coalesced_mmio_init(kvm);
1941         if (r < 0) {
1942                 kvm_put_kvm(kvm);
1943                 return r;
1944         }
1945 #endif
1946         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
1947         if (fd < 0)
1948                 kvm_put_kvm(kvm);
1949
1950         return fd;
1951 }
1952
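     /*
      * Capabilities that are handled generically are answered here;
      * everything else is deferred to the architecture code.
      */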
1953 static long kvm_dev_ioctl_check_extension_generic(long arg)
1954 {
1955         switch (arg) {
1956         case KVM_CAP_USER_MEMORY:
1957         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
1958         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
1959 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1960         case KVM_CAP_SET_BOOT_CPU_ID:
1961 #endif
1962         case KVM_CAP_INTERNAL_ERROR_DATA:
1963                 return 1;
1964 #ifdef CONFIG_HAVE_KVM_IRQCHIP
1965         case KVM_CAP_IRQ_ROUTING:
1966                 return KVM_MAX_IRQ_ROUTES;
1967 #endif
1968         default:
1969                 break;
1970         }
1971         return kvm_dev_ioctl_check_extension(arg);
1972 }
1973
1974 static long kvm_dev_ioctl(struct file *filp,
1975                           unsigned int ioctl, unsigned long arg)
1976 {
1977         long r = -EINVAL;
1978
1979         switch (ioctl) {
1980         case KVM_GET_API_VERSION:
1981                 r = -EINVAL;
1982                 if (arg)
1983                         goto out;
1984                 r = KVM_API_VERSION;
1985                 break;
1986         case KVM_CREATE_VM:
1987                 r = -EINVAL;
1988                 if (arg)
1989                         goto out;
1990                 r = kvm_dev_ioctl_create_vm();
1991                 break;
1992         case KVM_CHECK_EXTENSION:
1993                 r = kvm_dev_ioctl_check_extension_generic(arg);
1994                 break;
1995         case KVM_GET_VCPU_MMAP_SIZE:
1996                 r = -EINVAL;
1997                 if (arg)
1998                         goto out;
1999                 r = PAGE_SIZE;     /* struct kvm_run */
2000 #ifdef CONFIG_X86
2001                 r += PAGE_SIZE;    /* pio data page */
2002 #endif
2003 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2004                 r += PAGE_SIZE;    /* coalesced mmio ring page */
2005 #endif
2006                 break;
2007         case KVM_TRACE_ENABLE:
2008         case KVM_TRACE_PAUSE:
2009         case KVM_TRACE_DISABLE:
2010                 r = -EOPNOTSUPP;
2011                 break;
2012         default:
2013                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
2014         }
2015 out:
2016         return r;
2017 }
2018
2019 static struct file_operations kvm_chardev_ops = {
2020         .unlocked_ioctl = kvm_dev_ioctl,
2021         .compat_ioctl   = kvm_dev_ioctl,
2022         .llseek         = noop_llseek,
2023 };
2024
2025 static struct miscdevice kvm_dev = {
2026         KVM_MINOR,
2027         "kvm",
2028         &kvm_chardev_ops,
2029 };
2030
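     /*
      * Enable hardware virtualization on the current CPU and record it in
      * cpus_hardware_enabled; failures are counted so hardware_enable_all()
      * can back out.
      */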
2031 static void hardware_enable(void *junk)
2032 {
2033         int cpu = raw_smp_processor_id();
2034         int r;
2035
2036         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2037                 return;
2038
2039         cpumask_set_cpu(cpu, cpus_hardware_enabled);
2040
2041         r = kvm_arch_hardware_enable(NULL);
2042
2043         if (r) {
2044                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2045                 atomic_inc(&hardware_enable_failed);
2046                 printk(KERN_INFO "kvm: enabling virtualization on "
2047                                  "CPU%d failed\n", cpu);
2048         }
2049 }
2050
2051 static void hardware_disable(void *junk)
2052 {
2053         int cpu = raw_smp_processor_id();
2054
2055         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2056                 return;
2057         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2058         kvm_arch_hardware_disable(NULL);
2059 }
2060
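     /*
      * Hardware virtualization is reference counted: it is enabled on all
      * CPUs when the first VM is created and disabled again when the last
      * VM goes away.  kvm_usage_count is protected by kvm_lock.
      */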
2061 static void hardware_disable_all_nolock(void)
2062 {
2063         BUG_ON(!kvm_usage_count);
2064
2065         kvm_usage_count--;
2066         if (!kvm_usage_count)
2067                 on_each_cpu(hardware_disable, NULL, 1);
2068 }
2069
2070 static void hardware_disable_all(void)
2071 {
2072         spin_lock(&kvm_lock);
2073         hardware_disable_all_nolock();
2074         spin_unlock(&kvm_lock);
2075 }
2076
2077 static int hardware_enable_all(void)
2078 {
2079         int r = 0;
2080
2081         spin_lock(&kvm_lock);
2082
2083         kvm_usage_count++;
2084         if (kvm_usage_count == 1) {
2085                 atomic_set(&hardware_enable_failed, 0);
2086                 on_each_cpu(hardware_enable, NULL, 1);
2087
2088                 if (atomic_read(&hardware_enable_failed)) {
2089                         hardware_disable_all_nolock();
2090                         r = -EBUSY;
2091                 }
2092         }
2093
2094         spin_unlock(&kvm_lock);
2095
2096         return r;
2097 }
2098
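     /*
      * CPU hotplug callback: while any VM exists, virtualization is disabled
      * on a CPU that is going down and re-enabled on one that is coming up.
      */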
2099 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2100                            void *v)
2101 {
2102         int cpu = (long)v;
2103
2104         if (!kvm_usage_count)
2105                 return NOTIFY_OK;
2106
2107         val &= ~CPU_TASKS_FROZEN;
2108         switch (val) {
2109         case CPU_DYING:
2110                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2111                        cpu);
2112                 hardware_disable(NULL);
2113                 break;
2114         case CPU_STARTING:
2115                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2116                        cpu);
2117                 spin_lock(&kvm_lock);
2118                 hardware_enable(NULL);
2119                 spin_unlock(&kvm_lock);
2120                 break;
2121         }
2122         return NOTIFY_OK;
2123 }
2124
2125
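     /*
      * Reached from the exception fixup path when a VMX/SVM instruction
      * faults because hardware virtualization has already been turned off
      * for a reboot: spin until the machine resets.  A fault outside of a
      * reboot is a real bug and is reported as such.
      */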
2126 asmlinkage void kvm_handle_fault_on_reboot(void)
2127 {
2128         if (kvm_rebooting) {
2129                 /* spin while reset goes on */
2130                 local_irq_enable();
2131                 while (true)
2132                         cpu_relax();
2133         }
2134         /* Fault while not rebooting.  We want the trace. */
2135         BUG();
2136 }
2137 EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2138
2139 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2140                       void *v)
2141 {
2142         /*
2143          * Some (well, at least mine) BIOSes hang on reboot if the CPU is
2144          * still in VMX root mode.
2145          *
2146          * Intel TXT also requires VMX to be off on all CPUs at shutdown.
2147          */
2148         printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2149         kvm_rebooting = true;
2150         on_each_cpu(hardware_disable, NULL, 1);
2151         return NOTIFY_OK;
2152 }
2153
2154 static struct notifier_block kvm_reboot_notifier = {
2155         .notifier_call = kvm_reboot,
2156         .priority = 0,
2157 };
2158
2159 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2160 {
2161         int i;
2162
2163         for (i = 0; i < bus->dev_count; i++) {
2164                 struct kvm_io_device *pos = bus->devs[i];
2165
2166                 kvm_iodevice_destructor(pos);
2167         }
2168         kfree(bus);
2169 }
2170
2171 /* kvm_io_bus_write - called under kvm->slots_lock */
2172 int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2173                      int len, const void *val)
2174 {
2175         int i;
2176         struct kvm_io_bus *bus;
2177
2178         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2179         for (i = 0; i < bus->dev_count; i++)
2180                 if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
2181                         return 0;
2182         return -EOPNOTSUPP;
2183 }
2184
2185 /* kvm_io_bus_read - called under kvm->slots_lock */
2186 int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2187                     int len, void *val)
2188 {
2189         int i;
2190         struct kvm_io_bus *bus;
2191
2192         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
2193         for (i = 0; i < bus->dev_count; i++)
2194                 if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
2195                         return 0;
2196         return -EOPNOTSUPP;
2197 }
2198
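     /*
      * Device registration copies the current bus, appends the new device to
      * the copy, publishes it with rcu_assign_pointer() and waits for an SRCU
      * grace period before freeing the old bus, so readers under kvm->srcu
      * never see a half-updated device array.
      */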
2199 /* Caller must hold slots_lock. */
2200 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2201                             struct kvm_io_device *dev)
2202 {
2203         struct kvm_io_bus *new_bus, *bus;
2204
2205         bus = kvm->buses[bus_idx];
2206         if (bus->dev_count > NR_IOBUS_DEVS-1)
2207                 return -ENOSPC;
2208
2209         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2210         if (!new_bus)
2211                 return -ENOMEM;
2212         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2213         new_bus->devs[new_bus->dev_count++] = dev;
2214         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2215         synchronize_srcu_expedited(&kvm->srcu);
2216         kfree(bus);
2217
2218         return 0;
2219 }
2220
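     /*
      * Unregistration uses the same copy-and-publish scheme: the device is
      * dropped from a copy of the bus, the copy is published, and the old
      * bus is freed only after synchronize_srcu_expedited().
      */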
2221 /* Caller must hold slots_lock. */
2222 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2223                               struct kvm_io_device *dev)
2224 {
2225         int i, r;
2226         struct kvm_io_bus *new_bus, *bus;
2227
2228         new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
2229         if (!new_bus)
2230                 return -ENOMEM;
2231
2232         bus = kvm->buses[bus_idx];
2233         memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
2234
2235         r = -ENOENT;
2236         for (i = 0; i < new_bus->dev_count; i++)
2237                 if (new_bus->devs[i] == dev) {
2238                         r = 0;
2239                         new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
2240                         break;
2241                 }
2242
2243         if (r) {
2244                 kfree(new_bus);
2245                 return r;
2246         }
2247
2248         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2249         synchronize_srcu_expedited(&kvm->srcu);
2250         kfree(bus);
2251         return r;
2252 }
2253
2254 static struct notifier_block kvm_cpu_notifier = {
2255         .notifier_call = kvm_cpu_hotplug,
2256 };
2257
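     /*
      * The debugfs statistics files sum a single counter across all VMs (or
      * all vcpus); the counter's byte offset within struct kvm or struct
      * kvm_vcpu is passed in through the debugfs private data.
      */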
2258 static int vm_stat_get(void *_offset, u64 *val)
2259 {
2260         unsigned offset = (long)_offset;
2261         struct kvm *kvm;
2262
2263         *val = 0;
2264         spin_lock(&kvm_lock);
2265         list_for_each_entry(kvm, &vm_list, vm_list)
2266                 *val += *(u32 *)((void *)kvm + offset);
2267         spin_unlock(&kvm_lock);
2268         return 0;
2269 }
2270
2271 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2272
2273 static int vcpu_stat_get(void *_offset, u64 *val)
2274 {
2275         unsigned offset = (long)_offset;
2276         struct kvm *kvm;
2277         struct kvm_vcpu *vcpu;
2278         int i;
2279
2280         *val = 0;
2281         spin_lock(&kvm_lock);
2282         list_for_each_entry(kvm, &vm_list, vm_list)
2283                 kvm_for_each_vcpu(i, vcpu, kvm)
2284                         *val += *(u32 *)((void *)vcpu + offset);
2285
2286         spin_unlock(&kvm_lock);
2287         return 0;
2288 }
2289
2290 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2291
2292 static const struct file_operations *stat_fops[] = {
2293         [KVM_STAT_VCPU] = &vcpu_stat_fops,
2294         [KVM_STAT_VM]   = &vm_stat_fops,
2295 };
2296
2297 static void kvm_init_debug(void)
2298 {
2299         struct kvm_stats_debugfs_item *p;
2300
2301         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2302         for (p = debugfs_entries; p->name; ++p)
2303                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2304                                                 (void *)(long)p->offset,
2305                                                 stat_fops[p->kind]);
2306 }
2307
2308 static void kvm_exit_debug(void)
2309 {
2310         struct kvm_stats_debugfs_item *p;
2311
2312         for (p = debugfs_entries; p->name; ++p)
2313                 debugfs_remove(p->dentry);
2314         debugfs_remove(kvm_debugfs_dir);
2315 }
2316
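     /*
      * Suspend/resume hooks: if any VMs exist, hardware virtualization is
      * switched off before the system sleeps and switched back on afterwards.
      */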
2317 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2318 {
2319         if (kvm_usage_count)
2320                 hardware_disable(NULL);
2321         return 0;
2322 }
2323
2324 static int kvm_resume(struct sys_device *dev)
2325 {
2326         if (kvm_usage_count) {
2327                 WARN_ON(spin_is_locked(&kvm_lock));
2328                 hardware_enable(NULL);
2329         }
2330         return 0;
2331 }
2332
2333 static struct sysdev_class kvm_sysdev_class = {
2334         .name = "kvm",
2335         .suspend = kvm_suspend,
2336         .resume = kvm_resume,
2337 };
2338
2339 static struct sys_device kvm_sysdev = {
2340         .id = 0,
2341         .cls = &kvm_sysdev_class,
2342 };
2343
2344 struct page *bad_page;
2345 pfn_t bad_pfn;
2346
2347 static inline
2348 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2349 {
2350         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2351 }
2352
2353 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2354 {
2355         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2356
2357         kvm_arch_vcpu_load(vcpu, cpu);
2358 }
2359
2360 static void kvm_sched_out(struct preempt_notifier *pn,
2361                           struct task_struct *next)
2362 {
2363         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2364
2365         kvm_arch_vcpu_put(vcpu);
2366 }
2367
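     /*
      * Module initialization: set up the architecture code and the special
      * bad/hwpoison/fault pages, verify processor compatibility on every
      * online CPU, register the hotplug, reboot and sysdev hooks, create the
      * vcpu kmem cache and async page fault machinery, and finally expose
      * /dev/kvm.
      */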
2368 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
2369                   struct module *module)
2370 {
2371         int r;
2372         int cpu;
2373
2374         r = kvm_arch_init(opaque);
2375         if (r)
2376                 goto out_fail;
2377
2378         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2379
2380         if (bad_page == NULL) {
2381                 r = -ENOMEM;
2382                 goto out;
2383         }
2384
2385         bad_pfn = page_to_pfn(bad_page);
2386
2387         hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2388
2389         if (hwpoison_page == NULL) {
2390                 r = -ENOMEM;
2391                 goto out_free_0;
2392         }
2393
2394         hwpoison_pfn = page_to_pfn(hwpoison_page);
2395
2396         fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2397
2398         if (fault_page == NULL) {
2399                 r = -ENOMEM;
2400                 goto out_free_0;
2401         }
2402
2403         fault_pfn = page_to_pfn(fault_page);
2404
2405         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2406                 r = -ENOMEM;
2407                 goto out_free_0;
2408         }
2409
2410         r = kvm_arch_hardware_setup();
2411         if (r < 0)
2412                 goto out_free_0a;
2413
2414         for_each_online_cpu(cpu) {
2415                 smp_call_function_single(cpu,
2416                                 kvm_arch_check_processor_compat,
2417                                 &r, 1);
2418                 if (r < 0)
2419                         goto out_free_1;
2420         }
2421
2422         r = register_cpu_notifier(&kvm_cpu_notifier);
2423         if (r)
2424                 goto out_free_2;
2425         register_reboot_notifier(&kvm_reboot_notifier);
2426
2427         r = sysdev_class_register(&kvm_sysdev_class);
2428         if (r)
2429                 goto out_free_3;
2430
2431         r = sysdev_register(&kvm_sysdev);
2432         if (r)
2433                 goto out_free_4;
2434
2435         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2436         if (!vcpu_align)
2437                 vcpu_align = __alignof__(struct kvm_vcpu);
2438         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
2439                                            0, NULL);
2440         if (!kvm_vcpu_cache) {
2441                 r = -ENOMEM;
2442                 goto out_free_5;
2443         }
2444
2445         r = kvm_async_pf_init();
2446         if (r)
2447                 goto out_free;
2448
2449         kvm_chardev_ops.owner = module;
2450         kvm_vm_fops.owner = module;
2451         kvm_vcpu_fops.owner = module;
2452
2453         r = misc_register(&kvm_dev);
2454         if (r) {
2455                 printk(KERN_ERR "kvm: misc device register failed\n");
2456                 goto out_unreg;
2457         }
2458
2459         kvm_preempt_ops.sched_in = kvm_sched_in;
2460         kvm_preempt_ops.sched_out = kvm_sched_out;
2461
2462         kvm_init_debug();
2463
2464         return 0;
2465
2466 out_unreg:
2467         kvm_async_pf_deinit();
2468 out_free:
2469         kmem_cache_destroy(kvm_vcpu_cache);
2470 out_free_5:
2471         sysdev_unregister(&kvm_sysdev);
2472 out_free_4:
2473         sysdev_class_unregister(&kvm_sysdev_class);
2474 out_free_3:
2475         unregister_reboot_notifier(&kvm_reboot_notifier);
2476         unregister_cpu_notifier(&kvm_cpu_notifier);
2477 out_free_2:
2478 out_free_1:
2479         kvm_arch_hardware_unsetup();
2480 out_free_0a:
2481         free_cpumask_var(cpus_hardware_enabled);
2482 out_free_0:
2483         if (fault_page)
2484                 __free_page(fault_page);
2485         if (hwpoison_page)
2486                 __free_page(hwpoison_page);
2487         __free_page(bad_page);
2488 out:
2489         kvm_arch_exit();
2490 out_fail:
2491         return r;
2492 }
2493 EXPORT_SYMBOL_GPL(kvm_init);
2494
2495 void kvm_exit(void)
2496 {
2497         kvm_exit_debug();
2498         misc_deregister(&kvm_dev);
2499         kmem_cache_destroy(kvm_vcpu_cache);
2500         kvm_async_pf_deinit();
2501         sysdev_unregister(&kvm_sysdev);
2502         sysdev_class_unregister(&kvm_sysdev_class);
2503         unregister_reboot_notifier(&kvm_reboot_notifier);
2504         unregister_cpu_notifier(&kvm_cpu_notifier);
2505         on_each_cpu(hardware_disable, NULL, 1);
2506         kvm_arch_hardware_unsetup();
2507         kvm_arch_exit();
2508         free_cpumask_var(cpus_hardware_enabled);
             __free_page(fault_page);
2509         __free_page(hwpoison_page);
2510         __free_page(bad_page);
2511 }
2512 EXPORT_SYMBOL_GPL(kvm_exit);