KVM: split out kvm_free_assigned_irq()
virt/kvm/kvm_main.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef CONFIG_X86
#include <asm/msidef.h>
#endif

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

#ifdef KVM_CAP_DEVICE_ASSIGNMENT
#include <linux/pci.h>
#include <linux/interrupt.h>
#include "irq.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static int msi2intx = 1;
module_param(msi2intx, bool, 0);

DEFINE_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);

static cpumask_t cpus_hardware_enabled;

struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);

static __read_mostly struct preempt_ops kvm_preempt_ops;

struct dentry *kvm_debugfs_dir;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);

static bool kvm_rebooting;

#ifdef KVM_CAP_DEVICE_ASSIGNMENT

#ifdef CONFIG_X86
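/*
 * Deliver a guest-programmed MSI for an assigned device: decode
 * destination, vector and modes from the guest's MSI message and
 * inject the interrupt via the in-kernel ioapic/lapic helpers.
 */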
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev)
{
        int vcpu_id;
        struct kvm_vcpu *vcpu;
        struct kvm_ioapic *ioapic = ioapic_irqchip(dev->kvm);
        int dest_id = (dev->guest_msi.address_lo & MSI_ADDR_DEST_ID_MASK)
                        >> MSI_ADDR_DEST_ID_SHIFT;
        int vector = (dev->guest_msi.data & MSI_DATA_VECTOR_MASK)
                        >> MSI_DATA_VECTOR_SHIFT;
        int dest_mode = test_bit(MSI_ADDR_DEST_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.address_lo);
        int trig_mode = test_bit(MSI_DATA_TRIGGER_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        int delivery_mode = test_bit(MSI_DATA_DELIVERY_MODE_SHIFT,
                                (unsigned long *)&dev->guest_msi.data);
        u32 deliver_bitmask;

        BUG_ON(!ioapic);

        deliver_bitmask = kvm_ioapic_get_delivery_bitmask(ioapic,
                                dest_id, dest_mode);
        /* IOAPIC delivery mode value is the same as MSI here */
        switch (delivery_mode) {
        case IOAPIC_LOWEST_PRIORITY:
                vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
                                deliver_bitmask);
                if (vcpu != NULL)
                        kvm_apic_set_irq(vcpu, vector, trig_mode);
                else
                        printk(KERN_INFO "kvm: null lowest priority vcpu!\n");
                break;
        case IOAPIC_FIXED:
                for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
                        if (!(deliver_bitmask & (1 << vcpu_id)))
                                continue;
                        deliver_bitmask &= ~(1 << vcpu_id);
                        vcpu = ioapic->kvm->vcpus[vcpu_id];
                        if (vcpu)
                                kvm_apic_set_irq(vcpu, vector, trig_mode);
                }
                break;
        default:
                printk(KERN_INFO "kvm: unsupported MSI delivery mode\n");
        }
}
#else
static void assigned_device_msi_dispatch(struct kvm_assigned_dev_kernel *dev) {}
#endif

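/* Look up an assigned device by its user-provided device id. */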
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);

        /* The lock is taken to safely inject an irq into the guest.
         * Once the interrupt injection (or the ioapic code) uses a
         * finer-grained lock, update this.
         */
        mutex_lock(&assigned_dev->kvm->lock);
        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_INTX)
                kvm_set_irq(assigned_dev->kvm,
                            assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);
        else if (assigned_dev->irq_requested_type &
                                KVM_ASSIGNED_DEV_GUEST_MSI) {
                assigned_device_msi_dispatch(assigned_dev);
                enable_irq(assigned_dev->host_irq);
        }
        mutex_unlock(&assigned_dev->kvm->lock);
        kvm_put_kvm(assigned_dev->kvm);
}

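/*
 * Host-side interrupt handler for an assigned device: mask the host
 * line and defer the injection into the guest to process context via
 * interrupt_work.
 */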
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        kvm_get_kvm(assigned_dev->kvm);
        schedule_work(&assigned_dev->interrupt_work);
        disable_irq_nosync(irq);
        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);
        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
        enable_irq(dev->host_irq);
}

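/*
 * Tear down the host-side interrupt state of an assigned device:
 * unregister the ack notifier, drop the irq source id, cancel any
 * pending injection work and release the host irq.
 */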
static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        if (!irqchip_in_kernel(kvm))
                return;

        kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;

        if (!assigned_dev->irq_requested_type)
                return;

        if (cancel_work_sync(&assigned_dev->interrupt_work))
                /* We had pending work. That means we will have to take
                 * care of kvm_put_kvm.
                 */
                kvm_put_kvm(kvm);

        free_irq(assigned_dev->host_irq, (void *)assigned_dev);

        if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                pci_disable_msi(assigned_dev->dev);

        assigned_dev->irq_requested_type = 0;
}

static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

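/* Switch an assigned device (back) to INTx interrupt delivery. */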
static int assigned_device_update_intx(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        adev->guest_irq = airq->guest_irq;
        adev->ack_notifier.gsi = airq->guest_irq;

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx &&
                    adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) {
                        free_irq(adev->host_irq, (void *)adev);
                        pci_disable_msi(adev->dev);
                }

                if (!capable(CAP_SYS_RAWIO))
                        return -EPERM;

                if (airq->host_irq)
                        adev->host_irq = airq->host_irq;
                else
                        adev->host_irq = adev->dev->irq;

                /* Even though this is PCI, we don't want to use shared
                 * interrupts. Sharing host devices with guest-assigned devices
                 * on the same interrupt line is not a happy situation: there
                 * are going to be long delays in accepting, acking, etc.
                 */
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
                                0, "kvm_assigned_intx_device", (void *)adev))
                        return -EIO;
        }

        adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
                                   KVM_ASSIGNED_DEV_HOST_INTX;
        return 0;
}

#ifdef CONFIG_X86
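/* Switch an assigned device to MSI delivery on the host and/or guest side. */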
static int assigned_device_update_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *adev,
                        struct kvm_assigned_irq *airq)
{
        int r;

        if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
                /* x86 doesn't care about the upper address of the guest MSI message */
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->guest_msi.address_lo = airq->guest_msi.addr_lo;
                adev->guest_msi.data = airq->guest_msi.data;
                adev->ack_notifier.gsi = -1;
        } else if (msi2intx) {
                adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
                adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
                adev->guest_irq = airq->guest_irq;
                adev->ack_notifier.gsi = airq->guest_irq;
        }

        if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
                return 0;

        if (irqchip_in_kernel(kvm)) {
                if (!msi2intx) {
                        if (adev->irq_requested_type &
                                        KVM_ASSIGNED_DEV_HOST_INTX)
                                free_irq(adev->host_irq, (void *)adev);

                        r = pci_enable_msi(adev->dev);
                        if (r)
                                return r;
                }

                adev->host_irq = adev->dev->irq;
                if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msi_device", (void *)adev))
                        return -EIO;
        }

        if (!msi2intx)
                adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;

        adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
        return 0;
}
#endif

static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match) {
                mutex_unlock(&kvm->lock);
                return -EINVAL;
        }

        if (!match->irq_requested_type) {
                INIT_WORK(&match->interrupt_work,
                                kvm_assigned_dev_interrupt_work_handler);
                if (irqchip_in_kernel(kvm)) {
                        /* Register ack notifier */
                        match->ack_notifier.gsi = -1;
                        match->ack_notifier.irq_acked =
                                        kvm_assigned_dev_ack_irq;
                        kvm_register_irq_ack_notifier(kvm,
                                        &match->ack_notifier);

                        /* Request IRQ source ID */
                        r = kvm_request_irq_source_id(kvm);
                        if (r < 0)
                                goto out_release;
                        else
                                match->irq_source_id = r;

#ifdef CONFIG_X86
                        /* Determine the host device irq type; the
                         * result can be read back from dev->msi_enabled */
                        if (msi2intx)
                                pci_enable_msi(match->dev);
#endif
                }
        }

        if ((!msi2intx &&
             (assigned_irq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI)) ||
            (msi2intx && match->dev->msi_enabled)) {
#ifdef CONFIG_X86
                r = assigned_device_update_msi(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "MSI device!\n");
                        goto out_release;
                }
#else
                r = -ENOTTY;
#endif
        } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
                /* A host device IRQ of 0 means INTx is not supported */
                if (!msi2intx) {
                        printk(KERN_WARNING
                               "kvm: waiting for device to enable MSI!\n");
                        r = 0;
                } else {
                        printk(KERN_WARNING
                               "kvm: failed to enable MSI device!\n");
                        r = -ENOTTY;
                        goto out_release;
                }
        } else {
                /* Non-sharing INTx mode */
                r = assigned_device_update_intx(kvm, match, assigned_irq);
                if (r) {
                        printk(KERN_WARNING "kvm: failed to enable "
                                        "INTx device!\n");
                        goto out_release;
                }
        }

        mutex_unlock(&kvm->lock);
        return r;
out_release:
        mutex_unlock(&kvm->lock);
        kvm_free_assigned_device(kvm, match);
        return r;
}

static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EINVAL;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_bus_and_slot(assigned_dev->busnr,
                                   assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->dev = dev;
        match->irq_source_id = -1;
        match->kvm = kvm;

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                r = kvm_iommu_map_guest(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        mutex_unlock(&kvm->lock);
        return r;
}
#endif

static inline int valid_vcpu(int n)
{
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
}

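/*
 * A pfn is "MMIO" if it is not backed by a valid struct page or its
 * page is marked reserved.
 */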
inline int kvm_is_mmio_pfn(pfn_t pfn)
{
        if (pfn_valid(pfn))
                return PageReserved(pfn_to_page(pfn));

        return true;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu;

        mutex_lock(&vcpu->mutex);
        cpu = get_cpu();
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        preempt_enable();
        mutex_unlock(&vcpu->mutex);
}

static void ack_flush(void *_completed)
{
}

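/*
 * Set KVM_REQ_TLB_FLUSH on every vcpu and kick the ones currently in
 * guest mode with an (empty) IPI so they service the request on the
 * next entry.
 */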
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        int i, cpu, me;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        me = get_cpu();
        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != me)
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                goto out;
        ++kvm->stat.remote_tlb_flush;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
        put_cpu();
}

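/* Same scheme as kvm_flush_remote_tlbs(), but requests an MMU reload. */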
void kvm_reload_remote_mmus(struct kvm *kvm)
{
        int i, cpu, me;
        cpumask_t cpus;
        struct kvm_vcpu *vcpu;

        me = get_cpu();
        cpus_clear(cpus);
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                vcpu = kvm->vcpus[i];
                if (!vcpu)
                        continue;
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                cpu = vcpu->cpu;
                if (cpu != -1 && cpu != me)
                        cpu_set(cpu, cpus);
        }
        if (cpus_empty(cpus))
                goto out;
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
out:
        put_cpu();
}

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        struct page *page;
        int r;

        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        init_waitqueue_head(&vcpu->wq);

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                r = -ENOMEM;
                goto fail;
        }
        vcpu->run = page_address(page);

        r = kvm_arch_vcpu_init(vcpu);
        if (r < 0)
                goto fail_free_run;
        return 0;

fail_free_run:
        free_page((unsigned long)vcpu->run);
fail:
        return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);

void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_uninit(vcpu);
        free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush;

        /*
         * When ->invalidate_page runs, the linux pte has been zapped
         * already but the page is still allocated until
         * ->invalidate_page returns. So if we increase the sequence
         * here the kvm page fault will notice if the spte can't be
         * established because the page is going to be freed. If
         * instead the kvm page fault establishes the spte before
         * ->invalidate_page runs, kvm_unmap_hva will release it
         * before returning.
         *
         * The sequence increase only needs to be seen at spin_unlock
         * time, and not at spin_lock time.
         *
         * Increasing the sequence after the spin_unlock would be
         * unsafe because the kvm page fault could then establish the
         * pte after kvm_unmap_hva returned, without noticing the page
         * is going to be freed.
         */
        spin_lock(&kvm->mmu_lock);
        kvm->mmu_notifier_seq++;
        need_tlb_flush = kvm_unmap_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the TLB before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int need_tlb_flush = 0;

        spin_lock(&kvm->mmu_lock);
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_notifier_count++;
        for (; start < end; start += PAGE_SIZE)
                need_tlb_flush |= kvm_unmap_hva(kvm, start);
        spin_unlock(&kvm->mmu_lock);

        /* we have to flush the TLB before the pages can be freed */
        if (need_tlb_flush)
                kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        spin_lock(&kvm->mmu_lock);
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_notifier_seq++;
        /*
         * The above sequence increase must be visible before the
         * below count decrease but both values are read by the kvm
         * page fault under mmu_lock spinlock so we don't need to add
         * an smp_wmb() here in between the two.
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);

        BUG_ON(kvm->mmu_notifier_count < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int young;

        spin_lock(&kvm->mmu_lock);
        young = kvm_age_hva(kvm, address);
        spin_unlock(&kvm->mmu_lock);

        if (young)
                kvm_flush_remote_tlbs(kvm);

        return young;
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_page        = kvm_mmu_notifier_invalidate_page,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
};
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

static struct kvm *kvm_create_vm(void)
{
        struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct page *page;
#endif

        if (IS_ERR(kvm))
                goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
                kfree(kvm);
                return ERR_PTR(-ENOMEM);
        }
        kvm->coalesced_mmio_ring =
                        (struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        {
                int err;
                kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
                err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
                if (err) {
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
                        put_page(page);
#endif
                        kfree(kvm);
                        return ERR_PTR(err);
                }
        }
#endif

        kvm->mm = current->mm;
        atomic_inc(&kvm->mm->mm_count);
        spin_lock_init(&kvm->mmu_lock);
        kvm_io_bus_init(&kvm->pio_bus);
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
        atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        kvm_coalesced_mmio_init(kvm);
#endif
out:
        return kvm;
}

/*
 * Free any memory in @free but not in @dont.
 */
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
                                  struct kvm_memory_slot *dont)
{
        if (!dont || free->rmap != dont->rmap)
                vfree(free->rmap);

        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);

        if (!dont || free->lpage_info != dont->lpage_info)
                vfree(free->lpage_info);

        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
        free->lpage_info = NULL;
}

void kvm_free_physmem(struct kvm *kvm)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i)
                kvm_free_physmem_slot(&kvm->memslots[i], NULL);
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        struct mm_struct *mm = kvm->mm;

        spin_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        spin_unlock(&kvm_lock);
        kvm_io_bus_destroy(&kvm->pio_bus);
        kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        if (kvm->coalesced_mmio_ring != NULL)
                free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#endif
        kvm_arch_destroy_vm(kvm);
        mmdrop(mm);
}

void kvm_get_kvm(struct kvm *kvm)
{
        atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

void kvm_put_kvm(struct kvm *kvm)
{
        if (atomic_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocate some memory and give it an address in the guest physical address
 * space.
 *
 * Discontiguous memory is allowed, mostly for framebuffers.
 *
 * Must be called holding kvm->slots_lock for write.
 */
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc)
{
        int r;
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long i;
        struct kvm_memory_slot *memslot;
        struct kvm_memory_slot old, new;

        r = -EINVAL;
        /* General sanity checks */
        if (mem->memory_size & (PAGE_SIZE - 1))
                goto out;
        if (mem->guest_phys_addr & (PAGE_SIZE - 1))
                goto out;
        if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
                goto out;
        if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
                goto out;
        if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
                goto out;

        memslot = &kvm->memslots[mem->slot];
        base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
        npages = mem->memory_size >> PAGE_SHIFT;

        if (!npages)
                mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

        new = old = *memslot;

        new.base_gfn = base_gfn;
        new.npages = npages;
        new.flags = mem->flags;

        /* Disallow changing a memory slot's size. */
        r = -EINVAL;
        if (npages && old.npages && npages != old.npages)
                goto out_free;

        /* Check for overlaps */
        r = -EEXIST;
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *s = &kvm->memslots[i];

                if (s == memslot)
                        continue;
                if (!((base_gfn + npages <= s->base_gfn) ||
                      (base_gfn >= s->base_gfn + s->npages)))
                        goto out_free;
        }

        /* Free page dirty bitmap if unneeded */
        if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
                new.dirty_bitmap = NULL;

        r = -ENOMEM;

        /* Allocate if a slot is being created */
#ifndef CONFIG_S390
        if (npages && !new.rmap) {
                new.rmap = vmalloc(npages * sizeof(struct page *));

                if (!new.rmap)
                        goto out_free;

                memset(new.rmap, 0, npages * sizeof(*new.rmap));

                new.user_alloc = user_alloc;
                /*
                 * hva_to_rmmap() serializes with the mmu_lock and, to be
                 * safe, it has to ignore memslots with !user_alloc &&
                 * !userspace_addr.
                 */
                if (user_alloc)
                        new.userspace_addr = mem->userspace_addr;
                else
                        new.userspace_addr = 0;
        }
        if (npages && !new.lpage_info) {
                int largepages = npages / KVM_PAGES_PER_HPAGE;
                if (npages % KVM_PAGES_PER_HPAGE)
                        largepages++;
                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        largepages++;

                new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));

                if (!new.lpage_info)
                        goto out_free;

                memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));

                if (base_gfn % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[0].write_count = 1;
                if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
                        new.lpage_info[largepages-1].write_count = 1;
        }

        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
                unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

                new.dirty_bitmap = vmalloc(dirty_bytes);
                if (!new.dirty_bitmap)
                        goto out_free;
                memset(new.dirty_bitmap, 0, dirty_bytes);
        }
#endif /* not defined CONFIG_S390 */

        if (!npages)
                kvm_arch_flush_shadow(kvm);

        spin_lock(&kvm->mmu_lock);
        if (mem->slot >= kvm->nmemslots)
                kvm->nmemslots = mem->slot + 1;

        *memslot = new;
        spin_unlock(&kvm->mmu_lock);

        r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
        if (r) {
                spin_lock(&kvm->mmu_lock);
                *memslot = old;
                spin_unlock(&kvm->mmu_lock);
                goto out_free;
        }

        kvm_free_physmem_slot(&old, &new);
#ifdef CONFIG_DMAR
        /* map the pages in iommu page table */
        r = kvm_iommu_map_pages(kvm, base_gfn, npages);
        if (r)
                goto out;
#endif
        return 0;

out_free:
        kvm_free_physmem_slot(&new, &old);
out:
        return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc)
{
        int r;

        down_write(&kvm->slots_lock);
        r = __kvm_set_memory_region(kvm, mem, user_alloc);
        up_write(&kvm->slots_lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        if (mem->slot >= KVM_MEMORY_SLOTS)
                return -EINVAL;
        return kvm_set_memory_region(kvm, mem, user_alloc);
}

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty)
{
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
        unsigned long any = 0;

        r = -EINVAL;
        if (log->slot >= KVM_MEMORY_SLOTS)
                goto out;

        memslot = &kvm->memslots[log->slot];
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

        for (i = 0; !any && i < n/sizeof(long); ++i)
                any = memslot->dirty_bitmap[i];

        r = -EFAULT;
        if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
                goto out;

        if (any)
                *is_dirty = 1;

        r = 0;
out:
        return r;
}

int is_error_page(struct page *page)
{
        return page == bad_page;
}
EXPORT_SYMBOL_GPL(is_error_page);

int is_error_pfn(pfn_t pfn)
{
        return pfn == bad_pfn;
}
EXPORT_SYMBOL_GPL(is_error_pfn);

static inline unsigned long bad_hva(void)
{
        return PAGE_OFFSET;
}

int kvm_is_error_hva(unsigned long addr)
{
        return addr == bad_hva();
}
EXPORT_SYMBOL_GPL(kvm_is_error_hva);

struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
        gfn = unalias_gfn(kvm, gfn);
        return gfn_to_memslot_unaliased(kvm, gfn);
}

int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
        int i;

        gfn = unalias_gfn(kvm, gfn);
        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *memslot = &kvm->memslots[i];

                if (gfn >= memslot->base_gfn
                    && gfn < memslot->base_gfn + memslot->npages)
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;

        gfn = unalias_gfn(kvm, gfn);
        slot = gfn_to_memslot_unaliased(kvm, gfn);
        if (!slot)
                return bad_hva();
        return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);

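/*
 * Translate a gfn to a host pfn: try get_user_pages_fast() first and
 * fall back to a manual VM_PFNMAP lookup; returns bad_page's pfn on
 * failure.
 */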
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
        struct page *page[1];
        unsigned long addr;
        int npages;
        pfn_t pfn;

        might_sleep();

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
                return page_to_pfn(bad_page);
        }

        npages = get_user_pages_fast(addr, 1, 1, page);

        if (unlikely(npages != 1)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, addr);

                if (vma == NULL || addr < vma->vm_start ||
                    !(vma->vm_flags & VM_PFNMAP)) {
                        up_read(&current->mm->mmap_sem);
                        get_page(bad_page);
                        return page_to_pfn(bad_page);
                }

                pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
                up_read(&current->mm->mmap_sem);
                BUG_ON(!kvm_is_mmio_pfn(pfn));
        } else
                pfn = page_to_pfn(page[0]);

        return pfn;
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        pfn_t pfn;

        pfn = gfn_to_pfn(kvm, gfn);
        if (!kvm_is_mmio_pfn(pfn))
                return pfn_to_page(pfn);

        WARN_ON(kvm_is_mmio_pfn(pfn));

        get_page(bad_page);
        return bad_page;
}
EXPORT_SYMBOL_GPL(gfn_to_page);

void kvm_release_page_clean(struct page *page)
{
        kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);

void kvm_release_pfn_clean(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);

void kvm_release_page_dirty(struct page *page)
{
        kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);

void kvm_release_pfn_dirty(pfn_t pfn)
{
        kvm_set_pfn_dirty(pfn);
        kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);

void kvm_set_page_dirty(struct page *page)
{
        kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);

void kvm_set_pfn_dirty(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn)) {
                struct page *page = pfn_to_page(pfn);
                if (!PageReserved(page))
                        SetPageDirty(page);
        }
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);

void kvm_set_pfn_accessed(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);

void kvm_get_pfn(pfn_t pfn)
{
        if (!kvm_is_mmio_pfn(pfn))
                get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);

static int next_segment(unsigned long len, int offset)
{
        if (len > PAGE_SIZE - offset)
                return PAGE_SIZE - offset;
        else
                return len;
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_from_user(data, (void __user *)addr + offset, len);
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);

int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);

int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len)
{
        int r;
        unsigned long addr;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int offset = offset_in_page(gpa);

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
        pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);

int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len)
{
        int r;
        unsigned long addr;

        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
        r = copy_to_user((void __user *)addr + offset, data, len);
        if (r)
                return -EFAULT;
        mark_page_dirty(kvm, gfn);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);

int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                data += seg;
                ++gfn;
        }
        return 0;
}

int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
        return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int seg;
        int offset = offset_in_page(gpa);
        int ret;

        while ((seg = next_segment(len, offset)) != 0) {
                ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
                if (ret < 0)
                        return ret;
                offset = 0;
                len -= seg;
                ++gfn;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);

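/* Set the dirty bit for a gfn in its memslot's dirty bitmap, if any. */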
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        gfn = unalias_gfn(kvm, gfn);
        memslot = gfn_to_memslot_unaliased(kvm, gfn);
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;

                /* avoid RMW */
                if (!test_bit(rel_gfn, memslot->dirty_bitmap))
                        set_bit(rel_gfn, memslot->dirty_bitmap);
        }
}

/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        DEFINE_WAIT(wait);

        for (;;) {
                prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

                if (kvm_cpu_has_interrupt(vcpu) ||
                    kvm_cpu_has_pending_timer(vcpu) ||
                    kvm_arch_vcpu_runnable(vcpu)) {
                        set_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        break;
                }
                if (signal_pending(current))
                        break;

                vcpu_put(vcpu);
                schedule();
                vcpu_load(vcpu);
        }

        finish_wait(&vcpu->wq, &wait);
}

void kvm_resched(struct kvm_vcpu *vcpu)
{
        if (!need_resched())
                return;
        cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);

static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct kvm_vcpu *vcpu = vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct kvm_vcpu_vm_ops = {
        .fault = kvm_vcpu_fault,
};

static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_vcpu_vm_ops;
        return 0;
}

static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
        struct kvm_vcpu *vcpu = filp->private_data;

        kvm_put_kvm(vcpu->kvm);
        return 0;
}

static const struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
        .mmap           = kvm_vcpu_mmap,
};

/*
 * Allocates an inode for the vcpu.
 */
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
        int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
        if (fd < 0)
                kvm_put_kvm(vcpu->kvm);
        return fd;
}

/*
 * Creates some virtual cpus.  Good luck creating more than one.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
{
        int r;
        struct kvm_vcpu *vcpu;

        if (!valid_vcpu(n))
                return -EINVAL;

        vcpu = kvm_arch_vcpu_create(kvm, n);
        if (IS_ERR(vcpu))
                return PTR_ERR(vcpu);

        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

        r = kvm_arch_vcpu_setup(vcpu);
        if (r)
                return r;

        mutex_lock(&kvm->lock);
        if (kvm->vcpus[n]) {
                r = -EEXIST;
                goto vcpu_destroy;
        }
        kvm->vcpus[n] = vcpu;
        mutex_unlock(&kvm->lock);

        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
        return r;

unlink:
        mutex_lock(&kvm->lock);
        kvm->vcpus[n] = NULL;
vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_arch_vcpu_destroy(vcpu);
        return r;
}

static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
        if (sigset) {
                sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
                vcpu->sigset_active = 1;
                vcpu->sigset = *sigset;
        } else
                vcpu->sigset_active = 0;
        return 0;
}

static long kvm_vcpu_ioctl(struct file *filp,
                           unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;
        struct kvm_fpu *fpu = NULL;
        struct kvm_sregs *kvm_sregs = NULL;

        if (vcpu->kvm->mm != current->mm)
                return -EIO;
        switch (ioctl) {
        case KVM_RUN:
                r = -EINVAL;
                if (arg)
                        goto out;
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free1;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
                        goto out_free1;
                r = 0;
out_free1:
                kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
                struct kvm_regs *kvm_regs;

                r = -ENOMEM;
                kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
                if (!kvm_regs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
                        goto out_free2;
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
                r = 0;
out_free2:
                kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
                kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SREGS: {
                kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
                r = -ENOMEM;
                if (!kvm_sregs)
                        goto out;
                r = -EFAULT;
                if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_GET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &mp_state, sizeof mp_state))
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_MP_STATE: {
                struct kvm_mp_state mp_state;

                r = -EFAULT;
                if (copy_from_user(&mp_state, argp, sizeof mp_state))
                        goto out;
                r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;

                r = -EFAULT;
                if (copy_from_user(&tr, argp, sizeof tr))
                        goto out;
                r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
                if (r)
                        goto out;
                r = -EFAULT;
                if (copy_to_user(argp, &tr, sizeof tr))
                        goto out;
                r = 0;
                break;
        }
        case KVM_DEBUG_GUEST: {
                struct kvm_debug_guest dbg;

                r = -EFAULT;
                if (copy_from_user(&dbg, argp, sizeof dbg))
                        goto out;
                r = kvm_arch_vcpu_ioctl_debug_guest(vcpu, &dbg);
                if (r)
                        goto out;
                r = 0;
                break;
        }
        case KVM_SET_SIGNAL_MASK: {
                struct kvm_signal_mask __user *sigmask_arg = argp;
                struct kvm_signal_mask kvm_sigmask;
                sigset_t sigset, *p;

                p = NULL;
                if (argp) {
                        r = -EFAULT;
                        if (copy_from_user(&kvm_sigmask, argp,
                                           sizeof kvm_sigmask))
                                goto out;
                        r = -EINVAL;
                        if (kvm_sigmask.len != sizeof sigset)
                                goto out;
                        r = -EFAULT;
                        if (copy_from_user(&sigset, sigmask_arg->sigset,
                                           sizeof sigset))
                                goto out;
                        p = &sigset;
                }
1725                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1726                 break;
1727         }
1728         case KVM_GET_FPU: {
1729                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1730                 r = -ENOMEM;
1731                 if (!fpu)
1732                         goto out;
1733                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1734                 if (r)
1735                         goto out;
1736                 r = -EFAULT;
1737                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1738                         goto out;
1739                 r = 0;
1740                 break;
1741         }
1742         case KVM_SET_FPU: {
1743                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1744                 r = -ENOMEM;
1745                 if (!fpu)
1746                         goto out;
1747                 r = -EFAULT;
1748                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1749                         goto out;
1750                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1751                 if (r)
1752                         goto out;
1753                 r = 0;
1754                 break;
1755         }
1756         default:
1757                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1758         }
1759 out:
1760         kfree(fpu);
1761         kfree(kvm_sregs);
1762         return r;
1763 }
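
A minimal userspace sketch of the loop this handler serves (assumptions: an x86 host, a vcpu fd from KVM_CREATE_VCPU, and run_vcpu() as an invented name). KVM_RUN takes no argument, which is why the handler rejects a non-zero arg; exit information is carried in the shared kvm_run page mmap'ed from the vcpu fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int run_vcpu(int kvm_fd, int vcpu_fd)
{
	int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (mmap_size < 0)
		return -1;
	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;
		switch (run->exit_reason) {
		case KVM_EXIT_HLT:
			return 0;			/* guest halted */
		case KVM_EXIT_IO:
			/* decode run->io and emulate the port access */
			break;
		default:
			fprintf(stderr, "unhandled exit %u\n",
				run->exit_reason);
			return -1;
		}
	}
}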
1764
1765 static long kvm_vm_ioctl(struct file *filp,
1766                            unsigned int ioctl, unsigned long arg)
1767 {
1768         struct kvm *kvm = filp->private_data;
1769         void __user *argp = (void __user *)arg;
1770         int r;
1771
1772         if (kvm->mm != current->mm)
1773                 return -EIO;
1774         switch (ioctl) {
1775         case KVM_CREATE_VCPU:
1776                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1777                 if (r < 0)
1778                         goto out;
1779                 break;
1780         case KVM_SET_USER_MEMORY_REGION: {
1781                 struct kvm_userspace_memory_region kvm_userspace_mem;
1782
1783                 r = -EFAULT;
1784                 if (copy_from_user(&kvm_userspace_mem, argp,
1785                                                 sizeof kvm_userspace_mem))
1786                         goto out;
1787
1788                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
1789                 if (r)
1790                         goto out;
1791                 break;
1792         }
1793         case KVM_GET_DIRTY_LOG: {
1794                 struct kvm_dirty_log log;
1795
1796                 r = -EFAULT;
1797                 if (copy_from_user(&log, argp, sizeof log))
1798                         goto out;
1799                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
1800                 if (r)
1801                         goto out;
1802                 break;
1803         }
1804 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1805         case KVM_REGISTER_COALESCED_MMIO: {
1806                 struct kvm_coalesced_mmio_zone zone;
1807                 r = -EFAULT;
1808                 if (copy_from_user(&zone, argp, sizeof zone))
1809                         goto out;
1810                 r = -ENXIO;
1811                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1812                 if (r)
1813                         goto out;
1814                 r = 0;
1815                 break;
1816         }
1817         case KVM_UNREGISTER_COALESCED_MMIO: {
1818                 struct kvm_coalesced_mmio_zone zone;
1819                 r = -EFAULT;
1820                 if (copy_from_user(&zone, argp, sizeof zone))
1821                         goto out;
1822                 r = -ENXIO;
1823                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1824                 if (r)
1825                         goto out;
1826                 r = 0;
1827                 break;
1828         }
1829 #endif
1830 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
1831         case KVM_ASSIGN_PCI_DEVICE: {
1832                 struct kvm_assigned_pci_dev assigned_dev;
1833
1834                 r = -EFAULT;
1835                 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
1836                         goto out;
1837                 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
1838                 if (r)
1839                         goto out;
1840                 break;
1841         }
1842         case KVM_ASSIGN_IRQ: {
1843                 struct kvm_assigned_irq assigned_irq;
1844
1845                 r = -EFAULT;
1846                 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
1847                         goto out;
1848                 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
1849                 if (r)
1850                         goto out;
1851                 break;
1852         }
1853 #endif
1854         default:
1855                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
1856         }
1857 out:
1858         return r;
1859 }
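
The workhorse here is KVM_SET_USER_MEMORY_REGION; a hedged userspace sketch of backing guest physical memory with an anonymous mapping (slot 0 is assumed free, add_guest_ram() is an invented helper):

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int add_guest_ram(int vm_fd, unsigned long long gpa, size_t size)
{
	struct kvm_userspace_memory_region region;
	void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ram == MAP_FAILED)
		return -1;

	region.slot = 0;			/* assumed unused slot */
	region.flags = 0;			/* or KVM_MEM_LOG_DIRTY_PAGES,
						   feeding KVM_GET_DIRTY_LOG */
	region.guest_phys_addr = gpa;
	region.memory_size = size;
	region.userspace_addr = (unsigned long)ram;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}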
1860
1861 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1862 {
1863         struct page *page[1];
1864         unsigned long addr;
1865         int npages;
1866         gfn_t gfn = vmf->pgoff;
1867         struct kvm *kvm = vma->vm_file->private_data;
1868
1869         addr = gfn_to_hva(kvm, gfn);
1870         if (kvm_is_error_hva(addr))
1871                 return VM_FAULT_SIGBUS;
1872
1873         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1874                                 NULL);
1875         if (unlikely(npages != 1))
1876                 return VM_FAULT_SIGBUS;
1877
1878         vmf->page = page[0];
1879         return 0;
1880 }
1881
1882 static struct vm_operations_struct kvm_vm_vm_ops = {
1883         .fault = kvm_vm_fault,
1884 };
1885
1886 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
1887 {
1888         vma->vm_ops = &kvm_vm_vm_ops;
1889         return 0;
1890 }
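
Since kvm_vm_fault() interprets vmf->pgoff as a gfn, mmap() on a VM fd exposes individual guest frames to userspace. A sketch assuming 4 KiB pages (map_guest_frame() is an invented name):

#include <sys/mman.h>

static void *map_guest_frame(int vm_fd, unsigned long gfn)
{
	/* the file offset selects the guest frame: offset = gfn * page size */
	return mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vm_fd, gfn * 4096UL);
}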
1891
1892 static const struct file_operations kvm_vm_fops = {
1893         .release        = kvm_vm_release,
1894         .unlocked_ioctl = kvm_vm_ioctl,
1895         .compat_ioctl   = kvm_vm_ioctl,
1896         .mmap           = kvm_vm_mmap,
1897 };
1898
1899 static int kvm_dev_ioctl_create_vm(void)
1900 {
1901         int fd;
1902         struct kvm *kvm;
1903
1904         kvm = kvm_create_vm();
1905         if (IS_ERR(kvm))
1906                 return PTR_ERR(kvm);
1907         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
1908         if (fd < 0)
1909                 kvm_put_kvm(kvm);
1910
1911         return fd;
1912 }
1913
1914 static long kvm_dev_ioctl(struct file *filp,
1915                           unsigned int ioctl, unsigned long arg)
1916 {
1917         long r = -EINVAL;
1918
1919         switch (ioctl) {
1920         case KVM_GET_API_VERSION:
1921                 r = -EINVAL;
1922                 if (arg)
1923                         goto out;
1924                 r = KVM_API_VERSION;
1925                 break;
1926         case KVM_CREATE_VM:
1927                 r = -EINVAL;
1928                 if (arg)
1929                         goto out;
1930                 r = kvm_dev_ioctl_create_vm();
1931                 break;
1932         case KVM_CHECK_EXTENSION:
1933                 r = kvm_dev_ioctl_check_extension(arg);
1934                 break;
1935         case KVM_GET_VCPU_MMAP_SIZE:
1936                 r = -EINVAL;
1937                 if (arg)
1938                         goto out;
1939                 r = PAGE_SIZE;     /* struct kvm_run */
1940 #ifdef CONFIG_X86
1941                 r += PAGE_SIZE;    /* pio data page */
1942 #endif
1943 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1944                 r += PAGE_SIZE;    /* coalesced mmio ring page */
1945 #endif
1946                 break;
1947         case KVM_TRACE_ENABLE:
1948         case KVM_TRACE_PAUSE:
1949         case KVM_TRACE_DISABLE:
1950                 r = kvm_trace_ioctl(ioctl, arg);
1951                 break;
1952         default:
1953                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
1954         }
1955 out:
1956         return r;
1957 }
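
Taken together with the VM and vcpu ioctls above, the /dev/kvm handshake is short; a hedged userspace sketch (open_kvm_and_create_vm() is an invented helper):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int open_kvm_and_create_vm(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);

	if (kvm_fd < 0)
		return -1;
	/* refuse to run against a kernel ABI we were not built for */
	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;
	return ioctl(kvm_fd, KVM_CREATE_VM, 0);	/* anon-inode VM fd */
}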
1958
1959 static struct file_operations kvm_chardev_ops = {
1960         .unlocked_ioctl = kvm_dev_ioctl,
1961         .compat_ioctl   = kvm_dev_ioctl,
1962 };
1963
1964 static struct miscdevice kvm_dev = {
1965         KVM_MINOR,
1966         "kvm",
1967         &kvm_chardev_ops,
1968 };
1969
1970 static void hardware_enable(void *junk)
1971 {
1972         int cpu = raw_smp_processor_id();
1973
1974         if (cpu_isset(cpu, cpus_hardware_enabled))
1975                 return;
1976         cpu_set(cpu, cpus_hardware_enabled);
1977         kvm_arch_hardware_enable(NULL);
1978 }
1979
1980 static void hardware_disable(void *junk)
1981 {
1982         int cpu = raw_smp_processor_id();
1983
1984         if (!cpu_isset(cpu, cpus_hardware_enabled))
1985                 return;
1986         cpu_clear(cpu, cpus_hardware_enabled);
1987         kvm_arch_hardware_disable(NULL);
1988 }
1989
1990 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
1991                            void *v)
1992 {
1993         int cpu = (long)v;
1994
1995         val &= ~CPU_TASKS_FROZEN;
1996         switch (val) {
1997         case CPU_DYING:
1998                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
1999                        cpu);
2000                 hardware_disable(NULL);
2001                 break;
2002         case CPU_UP_CANCELED:
2003                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2004                        cpu);
2005                 smp_call_function_single(cpu, hardware_disable, NULL, 1);
2006                 break;
2007         case CPU_ONLINE:
2008                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2009                        cpu);
2010                 smp_call_function_single(cpu, hardware_enable, NULL, 1);
2011                 break;
2012         }
2013         return NOTIFY_OK;
2014 }
2015
2016
2017 asmlinkage void kvm_handle_fault_on_reboot(void)
2018 {
2019         if (kvm_rebooting)
2020                 /* spin while reset goes on */
2021                 while (true)
2022                         ;
2023         /* Fault while not rebooting.  We want the trace. */
2024         BUG();
2025 }
2026 EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
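
This handler is reached through an exception-table fixup that the x86 code wraps around instructions which start faulting once virtualization has been turned off mid-reboot. A condensed sketch of that wrapper (modeled on this era's __kvm_handle_fault_on_reboot in asm/kvm_host.h; details are approximate and the demo_ name is invented):

/* a fault in "insn" jumps to kvm_handle_fault_on_reboot instead of oopsing */
#define demo_fault_on_reboot(insn)				\
	"666: " insn "\n\t"					\
	".pushsection .fixup, \"ax\"\n"				\
	"667: jmp kvm_handle_fault_on_reboot\n\t"		\
	".popsection\n\t"					\
	".pushsection __ex_table, \"a\"\n\t"			\
	_ASM_PTR " 666b, 667b\n\t"				\
	".popsection"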
2027
2028 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2029                       void *v)
2030 {
2031         if (val == SYS_RESTART) {
2032                 /*
2033                  * Some (well, at least mine) BIOSes hang on reboot if
2034                  * in vmx root mode.
2035                  */
2036                 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2037                 kvm_rebooting = true;
2038                 on_each_cpu(hardware_disable, NULL, 1);
2039         }
2040         return NOTIFY_OK;
2041 }
2042
2043 static struct notifier_block kvm_reboot_notifier = {
2044         .notifier_call = kvm_reboot,
2045         .priority = 0,
2046 };
2047
2048 void kvm_io_bus_init(struct kvm_io_bus *bus)
2049 {
2050         memset(bus, 0, sizeof(*bus));
2051 }
2052
2053 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2054 {
2055         int i;
2056
2057         for (i = 0; i < bus->dev_count; i++) {
2058                 struct kvm_io_device *pos = bus->devs[i];
2059
2060                 kvm_iodevice_destructor(pos);
2061         }
2062 }
2063
2064 struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
2065                                           gpa_t addr, int len, int is_write)
2066 {
2067         int i;
2068
2069         for (i = 0; i < bus->dev_count; i++) {
2070                 struct kvm_io_device *pos = bus->devs[i];
2071
2072                 if (pos->in_range(pos, addr, len, is_write))
2073                         return pos;
2074         }
2075
2076         return NULL;
2077 }
2078
2079 void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2080 {
2081         BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2082
2083         bus->devs[bus->dev_count++] = dev;
2084 }
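
A hedged in-kernel sketch of the contract these bus helpers rely on: a device claims a gpa window via in_range(), and registration simply appends it to the fixed devs[] array. The demo_ device and its MMIO window are invented; the function-pointer fields follow this era's iodev.h:

#define DEMO_BASE 0xfed00000UL		/* hypothetical MMIO window */
#define DEMO_SIZE 0x1000UL

static int demo_in_range(struct kvm_io_device *this, gpa_t addr, int len,
			 int is_write)
{
	return addr >= DEMO_BASE && addr + len <= DEMO_BASE + DEMO_SIZE;
}

static void demo_write(struct kvm_io_device *this, gpa_t addr, int len,
		       const void *val)
{
	/* consume the guest's MMIO write here */
}

static void demo_attach(struct kvm *kvm, struct kvm_io_device *dev)
{
	dev->in_range = demo_in_range;
	dev->write = demo_write;
	/* dev->read and dev->destructor would be wired up likewise */
	kvm_io_bus_register_dev(&kvm->mmio_bus, dev);
}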
2085
2086 static struct notifier_block kvm_cpu_notifier = {
2087         .notifier_call = kvm_cpu_hotplug,
2088         .priority = 20, /* must be > scheduler priority */
2089 };
2090
2091 static int vm_stat_get(void *_offset, u64 *val)
2092 {
2093         unsigned offset = (long)_offset;
2094         struct kvm *kvm;
2095
2096         *val = 0;
2097         spin_lock(&kvm_lock);
2098         list_for_each_entry(kvm, &vm_list, vm_list)
2099                 *val += *(u32 *)((void *)kvm + offset);
2100         spin_unlock(&kvm_lock);
2101         return 0;
2102 }
2103
2104 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2105
2106 static int vcpu_stat_get(void *_offset, u64 *val)
2107 {
2108         unsigned offset = (long)_offset;
2109         struct kvm *kvm;
2110         struct kvm_vcpu *vcpu;
2111         int i;
2112
2113         *val = 0;
2114         spin_lock(&kvm_lock);
2115         list_for_each_entry(kvm, &vm_list, vm_list)
2116                 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
2117                         vcpu = kvm->vcpus[i];
2118                         if (vcpu)
2119                                 *val += *(u32 *)((void *)vcpu + offset);
2120                 }
2121         spin_unlock(&kvm_lock);
2122         return 0;
2123 }
2124
2125 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2126
2127 static struct file_operations *stat_fops[] = {
2128         [KVM_STAT_VCPU] = &vcpu_stat_fops,
2129         [KVM_STAT_VM]   = &vm_stat_fops,
2130 };
2131
2132 static void kvm_init_debug(void)
2133 {
2134         struct kvm_stats_debugfs_item *p;
2135
2136         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2137         for (p = debugfs_entries; p->name; ++p)
2138                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2139                                                 (void *)(long)p->offset,
2140                                                 stat_fops[p->kind]);
2141 }
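
Each debugfs_entries item becomes a read-only file that prints the summed counter with "%llu\n" via the simple-attribute fops above. A userspace sketch of reading one back (assumes debugfs mounted at /sys/kernel/debug; read_kvm_stat() is an invented helper):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static long long read_kvm_stat(const char *name)
{
	char path[128], buf[32];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/sys/kernel/debug/kvm/%s", name);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf) - 1);
	close(fd);
	if (n <= 0)
		return -1;
	buf[n] = '\0';
	return atoll(buf);		/* the files print "%llu\n" */
}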
2142
2143 static void kvm_exit_debug(void)
2144 {
2145         struct kvm_stats_debugfs_item *p;
2146
2147         for (p = debugfs_entries; p->name; ++p)
2148                 debugfs_remove(p->dentry);
2149         debugfs_remove(kvm_debugfs_dir);
2150 }
2151
2152 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2153 {
2154         hardware_disable(NULL);
2155         return 0;
2156 }
2157
2158 static int kvm_resume(struct sys_device *dev)
2159 {
2160         hardware_enable(NULL);
2161         return 0;
2162 }
2163
2164 static struct sysdev_class kvm_sysdev_class = {
2165         .name = "kvm",
2166         .suspend = kvm_suspend,
2167         .resume = kvm_resume,
2168 };
2169
2170 static struct sys_device kvm_sysdev = {
2171         .id = 0,
2172         .cls = &kvm_sysdev_class,
2173 };
2174
2175 struct page *bad_page;
2176 pfn_t bad_pfn;
2177
2178 static inline
2179 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2180 {
2181         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2182 }
2183
2184 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2185 {
2186         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2187
2188         kvm_arch_vcpu_load(vcpu, cpu);
2189 }
2190
2191 static void kvm_sched_out(struct preempt_notifier *pn,
2192                           struct task_struct *next)
2193 {
2194         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2195
2196         kvm_arch_vcpu_put(vcpu);
2197 }
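
These hooks only fire for tasks that registered a notifier; a condensed sketch of the registration pattern (it resembles vcpu_load() earlier in this file, but treat the details here as approximate):

static void demo_vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu = get_cpu();	/* pin to a cpu while loading state */

	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_arch_vcpu_load(vcpu, cpu);
	put_cpu();		/* from here on, sched_in/sched_out track us */
}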
2198
2199 int kvm_init(void *opaque, unsigned int vcpu_size,
2200                   struct module *module)
2201 {
2202         int r;
2203         int cpu;
2204
2205         kvm_init_debug();
2206
2207         r = kvm_arch_init(opaque);
2208         if (r)
2209                 goto out_fail;
2210
2211         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2212
2213         if (bad_page == NULL) {
2214                 r = -ENOMEM;
2215                 goto out;
2216         }
2217
2218         bad_pfn = page_to_pfn(bad_page);
2219
2220         r = kvm_arch_hardware_setup();
2221         if (r < 0)
2222                 goto out_free_0;
2223
2224         for_each_online_cpu(cpu) {
2225                 smp_call_function_single(cpu,
2226                                 kvm_arch_check_processor_compat,
2227                                 &r, 1);
2228                 if (r < 0)
2229                         goto out_free_1;
2230         }
2231
2232         on_each_cpu(hardware_enable, NULL, 1);
2233         r = register_cpu_notifier(&kvm_cpu_notifier);
2234         if (r)
2235                 goto out_free_2;
2236         register_reboot_notifier(&kvm_reboot_notifier);
2237
2238         r = sysdev_class_register(&kvm_sysdev_class);
2239         if (r)
2240                 goto out_free_3;
2241
2242         r = sysdev_register(&kvm_sysdev);
2243         if (r)
2244                 goto out_free_4;
2245
2246         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2247         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2248                                            __alignof__(struct kvm_vcpu),
2249                                            0, NULL);
2250         if (!kvm_vcpu_cache) {
2251                 r = -ENOMEM;
2252                 goto out_free_5;
2253         }
2254
2255         kvm_chardev_ops.owner = module;
2256
2257         r = misc_register(&kvm_dev);
2258         if (r) {
2259                 printk(KERN_ERR "kvm: misc device register failed\n");
2260                 goto out_free;
2261         }
2262
2263         kvm_preempt_ops.sched_in = kvm_sched_in;
2264         kvm_preempt_ops.sched_out = kvm_sched_out;
2265 #ifndef CONFIG_X86
2266         msi2intx = 0;
2267 #endif
2268
2269         return 0;
2270
2271 out_free:
2272         kmem_cache_destroy(kvm_vcpu_cache);
2273 out_free_5:
2274         sysdev_unregister(&kvm_sysdev);
2275 out_free_4:
2276         sysdev_class_unregister(&kvm_sysdev_class);
2277 out_free_3:
2278         unregister_reboot_notifier(&kvm_reboot_notifier);
2279         unregister_cpu_notifier(&kvm_cpu_notifier);
2280 out_free_2:
2281         on_each_cpu(hardware_disable, NULL, 1);
2282 out_free_1:
2283         kvm_arch_hardware_unsetup();
2284 out_free_0:
2285         __free_page(bad_page);
2286 out:
2287         kvm_arch_exit();
2288         kvm_exit_debug();
2289 out_fail:
2290         return r;
2291 }
2292 EXPORT_SYMBOL_GPL(kvm_init);
2293
2294 void kvm_exit(void)
2295 {
2296         kvm_trace_cleanup();
2297         misc_deregister(&kvm_dev);
2298         kmem_cache_destroy(kvm_vcpu_cache);
2299         sysdev_unregister(&kvm_sysdev);
2300         sysdev_class_unregister(&kvm_sysdev_class);
2301         unregister_reboot_notifier(&kvm_reboot_notifier);
2302         unregister_cpu_notifier(&kvm_cpu_notifier);
2303         on_each_cpu(hardware_disable, NULL, 1);
2304         kvm_arch_hardware_unsetup();
2305         kvm_arch_exit();
2306         kvm_exit_debug();
2307         __free_page(bad_page);
2308 }
2309 EXPORT_SYMBOL_GPL(kvm_exit);
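
For reference, a sketch of how an architecture module enters through these two exports (cf. vmx_init() in this era's vmx.c; all demo_ names are invented):

struct demo_vcpu {
	struct kvm_vcpu vcpu;	/* arch vcpus embed kvm_vcpu first */
	/* arch register state would follow */
};

static int demo_arch_opaque;	/* handed through to kvm_arch_init() */

static int __init demo_module_init(void)
{
	/* vcpu_size sizes the kvm_vcpu kmem cache created in kvm_init();
	 * THIS_MODULE becomes the owner of the /dev/kvm chardev. */
	return kvm_init(&demo_arch_opaque, sizeof(struct demo_vcpu),
			THIS_MODULE);
}

static void __exit demo_module_exit(void)
{
	kvm_exit();
}

module_init(demo_module_init);
module_exit(demo_module_exit);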