Use Little Endian for Dirty Bitmap
[linux-2.6.git] / virt / kvm / kvm_main.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "iodev.h"
19
20 #include <linux/kvm_host.h>
21 #include <linux/kvm.h>
22 #include <linux/module.h>
23 #include <linux/errno.h>
24 #include <linux/percpu.h>
25 #include <linux/gfp.h>
26 #include <linux/mm.h>
27 #include <linux/miscdevice.h>
28 #include <linux/vmalloc.h>
29 #include <linux/reboot.h>
30 #include <linux/debugfs.h>
31 #include <linux/highmem.h>
32 #include <linux/file.h>
33 #include <linux/sysdev.h>
34 #include <linux/cpu.h>
35 #include <linux/sched.h>
36 #include <linux/cpumask.h>
37 #include <linux/smp.h>
38 #include <linux/anon_inodes.h>
39 #include <linux/profile.h>
40 #include <linux/kvm_para.h>
41 #include <linux/pagemap.h>
42 #include <linux/mman.h>
43 #include <linux/swap.h>
44 #include <linux/bitops.h>
45 #include <linux/spinlock.h>
46
47 #include <asm/processor.h>
48 #include <asm/io.h>
49 #include <asm/uaccess.h>
50 #include <asm/pgtable.h>
51 #include <asm-generic/bitops/le.h>
52
53 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
54 #include "coalesced_mmio.h"
55 #endif
56
57 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
58 #include <linux/pci.h>
59 #include <linux/interrupt.h>
60 #include "irq.h"
61 #endif
62
63 #define CREATE_TRACE_POINTS
64 #include <trace/events/kvm.h>
65
66 MODULE_AUTHOR("Qumranet");
67 MODULE_LICENSE("GPL");
68
69 /*
70  * Ordering of locks:
71  *
72  *              kvm->slots_lock --> kvm->lock --> kvm->irq_lock
73  */
74
75 DEFINE_SPINLOCK(kvm_lock);
76 LIST_HEAD(vm_list);
77
78 static cpumask_var_t cpus_hardware_enabled;
79
80 struct kmem_cache *kvm_vcpu_cache;
81 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
82
83 static __read_mostly struct preempt_ops kvm_preempt_ops;
84
85 struct dentry *kvm_debugfs_dir;
86
87 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
88                            unsigned long arg);
89
90 static bool kvm_rebooting;
91
92 static bool largepages_enabled = true;
93
94 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
95 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
96                                                       int assigned_dev_id)
97 {
98         struct list_head *ptr;
99         struct kvm_assigned_dev_kernel *match;
100
101         list_for_each(ptr, head) {
102                 match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
103                 if (match->assigned_dev_id == assigned_dev_id)
104                         return match;
105         }
106         return NULL;
107 }
108
109 static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
110                                     *assigned_dev, int irq)
111 {
112         int i, index;
113         struct msix_entry *host_msix_entries;
114
115         host_msix_entries = assigned_dev->host_msix_entries;
116
117         index = -1;
118         for (i = 0; i < assigned_dev->entries_nr; i++)
119                 if (irq == host_msix_entries[i].vector) {
120                         index = i;
121                         break;
122                 }
123         if (index < 0) {
124                 printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
125                 return index;
126         }
127
128         return index;
129 }
130
131 static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
132 {
133         struct kvm_assigned_dev_kernel *assigned_dev;
134         struct kvm *kvm;
135         int i;
136
137         assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
138                                     interrupt_work);
139         kvm = assigned_dev->kvm;
140
141         mutex_lock(&kvm->irq_lock);
142         spin_lock_irq(&assigned_dev->assigned_dev_lock);
143         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
144                 struct kvm_guest_msix_entry *guest_entries =
145                         assigned_dev->guest_msix_entries;
146                 for (i = 0; i < assigned_dev->entries_nr; i++) {
147                         if (!(guest_entries[i].flags &
148                                         KVM_ASSIGNED_MSIX_PENDING))
149                                 continue;
150                         guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
151                         kvm_set_irq(assigned_dev->kvm,
152                                     assigned_dev->irq_source_id,
153                                     guest_entries[i].vector, 1);
154                 }
155         } else
156                 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
157                             assigned_dev->guest_irq, 1);
158
159         spin_unlock_irq(&assigned_dev->assigned_dev_lock);
160         mutex_unlock(&assigned_dev->kvm->irq_lock);
161 }
162
163 static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
164 {
165         unsigned long flags;
166         struct kvm_assigned_dev_kernel *assigned_dev =
167                 (struct kvm_assigned_dev_kernel *) dev_id;
168
169         spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
170         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
171                 int index = find_index_from_host_irq(assigned_dev, irq);
172                 if (index < 0)
173                         goto out;
174                 assigned_dev->guest_msix_entries[index].flags |=
175                         KVM_ASSIGNED_MSIX_PENDING;
176         }
177
178         schedule_work(&assigned_dev->interrupt_work);
179
180         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
181                 disable_irq_nosync(irq);
182                 assigned_dev->host_irq_disabled = true;
183         }
184
185 out:
186         spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
187         return IRQ_HANDLED;
188 }
189
190 /* Ack the irq line for an assigned device */
191 static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
192 {
193         struct kvm_assigned_dev_kernel *dev;
194         unsigned long flags;
195
196         if (kian->gsi == -1)
197                 return;
198
199         dev = container_of(kian, struct kvm_assigned_dev_kernel,
200                            ack_notifier);
201
202         kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
203
204         /* The guest irq may be shared so this ack may be
205          * from another device.
206          */
207         spin_lock_irqsave(&dev->assigned_dev_lock, flags);
208         if (dev->host_irq_disabled) {
209                 enable_irq(dev->host_irq);
210                 dev->host_irq_disabled = false;
211         }
212         spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
213 }
214
215 static void deassign_guest_irq(struct kvm *kvm,
216                                struct kvm_assigned_dev_kernel *assigned_dev)
217 {
218         kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
219         assigned_dev->ack_notifier.gsi = -1;
220
221         if (assigned_dev->irq_source_id != -1)
222                 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
223         assigned_dev->irq_source_id = -1;
224         assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
225 }
226
227 /* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
228 static void deassign_host_irq(struct kvm *kvm,
229                               struct kvm_assigned_dev_kernel *assigned_dev)
230 {
231         /*
232          * In kvm_free_device_irq, cancel_work_sync() returns true if:
233          * 1. the work was scheduled and then cancelled, or
234          * 2. the work callback has already executed.
235          *
236          * The first case guarantees that the irq is disabled and no more
237          * events will arrive. In the second case the irq may still be
238          * enabled (e.g. for MSI), so we disable it here to prevent
239          * further events.
240          *
241          * Note that this may result in a nested disable if the interrupt
242          * type is INTx, but that is fine since we are about to free it.
243          *
244          * If this function is part of VM destruction, ensure the kvm state
245          * is still valid here; we may have to wait for interrupt_work too.
246          */
247         if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
248                 int i;
249                 for (i = 0; i < assigned_dev->entries_nr; i++)
250                         disable_irq_nosync(assigned_dev->
251                                            host_msix_entries[i].vector);
252
253                 cancel_work_sync(&assigned_dev->interrupt_work);
254
255                 for (i = 0; i < assigned_dev->entries_nr; i++)
256                         free_irq(assigned_dev->host_msix_entries[i].vector,
257                                  (void *)assigned_dev);
258
259                 assigned_dev->entries_nr = 0;
260                 kfree(assigned_dev->host_msix_entries);
261                 kfree(assigned_dev->guest_msix_entries);
262                 pci_disable_msix(assigned_dev->dev);
263         } else {
264                 /* Deal with MSI and INTx */
265                 disable_irq_nosync(assigned_dev->host_irq);
266                 cancel_work_sync(&assigned_dev->interrupt_work);
267
268                 free_irq(assigned_dev->host_irq, (void *)assigned_dev);
269
270                 if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
271                         pci_disable_msi(assigned_dev->dev);
272         }
273
274         assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
275 }
276
277 static int kvm_deassign_irq(struct kvm *kvm,
278                             struct kvm_assigned_dev_kernel *assigned_dev,
279                             unsigned long irq_requested_type)
280 {
281         unsigned long guest_irq_type, host_irq_type;
282
283         if (!irqchip_in_kernel(kvm))
284                 return -EINVAL;
285         /* no irq assignment to deassign */
286         if (!assigned_dev->irq_requested_type)
287                 return -ENXIO;
288
289         host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
290         guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
291
292         if (host_irq_type)
293                 deassign_host_irq(kvm, assigned_dev);
294         if (guest_irq_type)
295                 deassign_guest_irq(kvm, assigned_dev);
296
297         return 0;
298 }
299
300 static void kvm_free_assigned_irq(struct kvm *kvm,
301                                   struct kvm_assigned_dev_kernel *assigned_dev)
302 {
303         kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
304 }
305
306 static void kvm_free_assigned_device(struct kvm *kvm,
307                                      struct kvm_assigned_dev_kernel
308                                      *assigned_dev)
309 {
310         kvm_free_assigned_irq(kvm, assigned_dev);
311
312         pci_reset_function(assigned_dev->dev);
313
314         pci_release_regions(assigned_dev->dev);
315         pci_disable_device(assigned_dev->dev);
316         pci_dev_put(assigned_dev->dev);
317
318         list_del(&assigned_dev->list);
319         kfree(assigned_dev);
320 }
321
322 void kvm_free_all_assigned_devices(struct kvm *kvm)
323 {
324         struct list_head *ptr, *ptr2;
325         struct kvm_assigned_dev_kernel *assigned_dev;
326
327         list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
328                 assigned_dev = list_entry(ptr,
329                                           struct kvm_assigned_dev_kernel,
330                                           list);
331
332                 kvm_free_assigned_device(kvm, assigned_dev);
333         }
334 }
335
336 static int assigned_device_enable_host_intx(struct kvm *kvm,
337                                             struct kvm_assigned_dev_kernel *dev)
338 {
339         dev->host_irq = dev->dev->irq;
340         /* Even though this is PCI, we don't want to use shared
341          * interrupts. Sharing host devices with guest-assigned devices
342          * on the same interrupt line is not a happy situation: there
343          * are going to be long delays in accepting, acking, etc.
344          */
345         if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
346                         0, "kvm_assigned_intx_device", (void *)dev))
347                 return -EIO;
348         return 0;
349 }
350
351 #ifdef __KVM_HAVE_MSI
352 static int assigned_device_enable_host_msi(struct kvm *kvm,
353                                            struct kvm_assigned_dev_kernel *dev)
354 {
355         int r;
356
357         if (!dev->dev->msi_enabled) {
358                 r = pci_enable_msi(dev->dev);
359                 if (r)
360                         return r;
361         }
362
363         dev->host_irq = dev->dev->irq;
364         if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
365                         "kvm_assigned_msi_device", (void *)dev)) {
366                 pci_disable_msi(dev->dev);
367                 return -EIO;
368         }
369
370         return 0;
371 }
372 #endif
373
374 #ifdef __KVM_HAVE_MSIX
375 static int assigned_device_enable_host_msix(struct kvm *kvm,
376                                             struct kvm_assigned_dev_kernel *dev)
377 {
378         int i, r = -EINVAL;
379
380         /* host_msix_entries and guest_msix_entries should have been
381          * initialized */
382         if (dev->entries_nr == 0)
383                 return r;
384
385         r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
386         if (r)
387                 return r;
388
389         for (i = 0; i < dev->entries_nr; i++) {
390                 r = request_irq(dev->host_msix_entries[i].vector,
391                                 kvm_assigned_dev_intr, 0,
392                                 "kvm_assigned_msix_device",
393                                 (void *)dev);
394                 /* FIXME: free requested_irq's on failure */
395                 if (r)
396                         return r;
397         }
398
399         return 0;
400 }
401
402 #endif
403
404 static int assigned_device_enable_guest_intx(struct kvm *kvm,
405                                 struct kvm_assigned_dev_kernel *dev,
406                                 struct kvm_assigned_irq *irq)
407 {
408         dev->guest_irq = irq->guest_irq;
409         dev->ack_notifier.gsi = irq->guest_irq;
410         return 0;
411 }
412
413 #ifdef __KVM_HAVE_MSI
414 static int assigned_device_enable_guest_msi(struct kvm *kvm,
415                         struct kvm_assigned_dev_kernel *dev,
416                         struct kvm_assigned_irq *irq)
417 {
418         dev->guest_irq = irq->guest_irq;
419         dev->ack_notifier.gsi = -1;
420         dev->host_irq_disabled = false;
421         return 0;
422 }
423 #endif
424 #ifdef __KVM_HAVE_MSIX
425 static int assigned_device_enable_guest_msix(struct kvm *kvm,
426                         struct kvm_assigned_dev_kernel *dev,
427                         struct kvm_assigned_irq *irq)
428 {
429         dev->guest_irq = irq->guest_irq;
430         dev->ack_notifier.gsi = -1;
431         dev->host_irq_disabled = false;
432         return 0;
433 }
434 #endif
435
436 static int assign_host_irq(struct kvm *kvm,
437                            struct kvm_assigned_dev_kernel *dev,
438                            __u32 host_irq_type)
439 {
440         int r = -EEXIST;
441
442         if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
443                 return r;
444
445         switch (host_irq_type) {
446         case KVM_DEV_IRQ_HOST_INTX:
447                 r = assigned_device_enable_host_intx(kvm, dev);
448                 break;
449 #ifdef __KVM_HAVE_MSI
450         case KVM_DEV_IRQ_HOST_MSI:
451                 r = assigned_device_enable_host_msi(kvm, dev);
452                 break;
453 #endif
454 #ifdef __KVM_HAVE_MSIX
455         case KVM_DEV_IRQ_HOST_MSIX:
456                 r = assigned_device_enable_host_msix(kvm, dev);
457                 break;
458 #endif
459         default:
460                 r = -EINVAL;
461         }
462
463         if (!r)
464                 dev->irq_requested_type |= host_irq_type;
465
466         return r;
467 }
468
469 static int assign_guest_irq(struct kvm *kvm,
470                             struct kvm_assigned_dev_kernel *dev,
471                             struct kvm_assigned_irq *irq,
472                             unsigned long guest_irq_type)
473 {
474         int id;
475         int r = -EEXIST;
476
477         if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
478                 return r;
479
480         id = kvm_request_irq_source_id(kvm);
481         if (id < 0)
482                 return id;
483
484         dev->irq_source_id = id;
485
486         switch (guest_irq_type) {
487         case KVM_DEV_IRQ_GUEST_INTX:
488                 r = assigned_device_enable_guest_intx(kvm, dev, irq);
489                 break;
490 #ifdef __KVM_HAVE_MSI
491         case KVM_DEV_IRQ_GUEST_MSI:
492                 r = assigned_device_enable_guest_msi(kvm, dev, irq);
493                 break;
494 #endif
495 #ifdef __KVM_HAVE_MSIX
496         case KVM_DEV_IRQ_GUEST_MSIX:
497                 r = assigned_device_enable_guest_msix(kvm, dev, irq);
498                 break;
499 #endif
500         default:
501                 r = -EINVAL;
502         }
503
504         if (!r) {
505                 dev->irq_requested_type |= guest_irq_type;
506                 kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
507         } else
508                 kvm_free_irq_source_id(kvm, dev->irq_source_id);
509
510         return r;
511 }
512
513 /* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
514 static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
515                                    struct kvm_assigned_irq *assigned_irq)
516 {
517         int r = -EINVAL;
518         struct kvm_assigned_dev_kernel *match;
519         unsigned long host_irq_type, guest_irq_type;
520
521         if (!capable(CAP_SYS_RAWIO))
522                 return -EPERM;
523
524         if (!irqchip_in_kernel(kvm))
525                 return r;
526
527         mutex_lock(&kvm->lock);
528         r = -ENODEV;
529         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
530                                       assigned_irq->assigned_dev_id);
531         if (!match)
532                 goto out;
533
534         host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
535         guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
536
537         r = -EINVAL;
538         /* can only assign one type at a time */
539         if (hweight_long(host_irq_type) > 1)
540                 goto out;
541         if (hweight_long(guest_irq_type) > 1)
542                 goto out;
543         if (host_irq_type == 0 && guest_irq_type == 0)
544                 goto out;
545
546         r = 0;
547         if (host_irq_type)
548                 r = assign_host_irq(kvm, match, host_irq_type);
549         if (r)
550                 goto out;
551
552         if (guest_irq_type)
553                 r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
554 out:
555         mutex_unlock(&kvm->lock);
556         return r;
557 }
558
559 static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
560                                          struct kvm_assigned_irq
561                                          *assigned_irq)
562 {
563         int r = -ENODEV;
564         struct kvm_assigned_dev_kernel *match;
565
566         mutex_lock(&kvm->lock);
567
568         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
569                                       assigned_irq->assigned_dev_id);
570         if (!match)
571                 goto out;
572
573         r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
574 out:
575         mutex_unlock(&kvm->lock);
576         return r;
577 }
578
579 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
580                                       struct kvm_assigned_pci_dev *assigned_dev)
581 {
582         int r = 0;
583         struct kvm_assigned_dev_kernel *match;
584         struct pci_dev *dev;
585
586         down_read(&kvm->slots_lock);
587         mutex_lock(&kvm->lock);
588
589         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
590                                       assigned_dev->assigned_dev_id);
591         if (match) {
592                 /* device already assigned */
593                 r = -EEXIST;
594                 goto out;
595         }
596
597         match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
598         if (match == NULL) {
599                 printk(KERN_INFO "%s: Couldn't allocate memory\n",
600                        __func__);
601                 r = -ENOMEM;
602                 goto out;
603         }
604         dev = pci_get_bus_and_slot(assigned_dev->busnr,
605                                    assigned_dev->devfn);
606         if (!dev) {
607                 printk(KERN_INFO "%s: host device not found\n", __func__);
608                 r = -EINVAL;
609                 goto out_free;
610         }
611         if (pci_enable_device(dev)) {
612                 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
613                 r = -EBUSY;
614                 goto out_put;
615         }
616         r = pci_request_regions(dev, "kvm_assigned_device");
617         if (r) {
618                 printk(KERN_INFO "%s: Could not get access to device regions\n",
619                        __func__);
620                 goto out_disable;
621         }
622
623         pci_reset_function(dev);
624
625         match->assigned_dev_id = assigned_dev->assigned_dev_id;
626         match->host_busnr = assigned_dev->busnr;
627         match->host_devfn = assigned_dev->devfn;
628         match->flags = assigned_dev->flags;
629         match->dev = dev;
630         spin_lock_init(&match->assigned_dev_lock);
631         match->irq_source_id = -1;
632         match->kvm = kvm;
633         match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
634         INIT_WORK(&match->interrupt_work,
635                   kvm_assigned_dev_interrupt_work_handler);
636
637         list_add(&match->list, &kvm->arch.assigned_dev_head);
638
639         if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
640                 if (!kvm->arch.iommu_domain) {
641                         r = kvm_iommu_map_guest(kvm);
642                         if (r)
643                                 goto out_list_del;
644                 }
645                 r = kvm_assign_device(kvm, match);
646                 if (r)
647                         goto out_list_del;
648         }
649
650 out:
651         mutex_unlock(&kvm->lock);
652         up_read(&kvm->slots_lock);
653         return r;
654 out_list_del:
655         list_del(&match->list);
656         pci_release_regions(dev);
657 out_disable:
658         pci_disable_device(dev);
659 out_put:
660         pci_dev_put(dev);
661 out_free:
662         kfree(match);
663         mutex_unlock(&kvm->lock);
664         up_read(&kvm->slots_lock);
665         return r;
666 }
667 #endif
668
669 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
670 static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
671                 struct kvm_assigned_pci_dev *assigned_dev)
672 {
673         int r = 0;
674         struct kvm_assigned_dev_kernel *match;
675
676         mutex_lock(&kvm->lock);
677
678         match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
679                                       assigned_dev->assigned_dev_id);
680         if (!match) {
681                 printk(KERN_INFO "%s: device hasn't been assigned before, "
682                   "so cannot be deassigned\n", __func__);
683                 r = -EINVAL;
684                 goto out;
685         }
686
687         if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
688                 kvm_deassign_device(kvm, match);
689
690         kvm_free_assigned_device(kvm, match);
691
692 out:
693         mutex_unlock(&kvm->lock);
694         return r;
695 }
696 #endif
697
698 inline int kvm_is_mmio_pfn(pfn_t pfn)
699 {
700         if (pfn_valid(pfn)) {
701                 struct page *page = compound_head(pfn_to_page(pfn));
702                 return PageReserved(page);
703         }
704
705         return true;
706 }
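
/*
 * Note: a pfn with no valid struct page, or whose compound head is
 * marked reserved, is treated as MMIO. Such pfns are never reference
 * counted by KVM (see kvm_release_pfn_clean() below).
 */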
707
708 /*
709  * Switches to specified vcpu, until a matching vcpu_put()
710  */
711 void vcpu_load(struct kvm_vcpu *vcpu)
712 {
713         int cpu;
714
715         mutex_lock(&vcpu->mutex);
716         cpu = get_cpu();
717         preempt_notifier_register(&vcpu->preempt_notifier);
718         kvm_arch_vcpu_load(vcpu, cpu);
719         put_cpu();
720 }
721
722 void vcpu_put(struct kvm_vcpu *vcpu)
723 {
724         preempt_disable();
725         kvm_arch_vcpu_put(vcpu);
726         preempt_notifier_unregister(&vcpu->preempt_notifier);
727         preempt_enable();
728         mutex_unlock(&vcpu->mutex);
729 }
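
/*
 * Typical usage (illustrative): operations on a vcpu bracket their
 * work with vcpu_load()/vcpu_put() so that the architecture state is
 * resident on the current cpu, e.g.:
 *
 *	vcpu_load(vcpu);
 *	... access or run the vcpu ...
 *	vcpu_put(vcpu);
 */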
730
731 static void ack_flush(void *_completed)
732 {
733 }
734
735 static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
736 {
737         int i, cpu, me;
738         cpumask_var_t cpus;
739         bool called = true;
740         struct kvm_vcpu *vcpu;
741
742         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
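        /* if the mask allocation failed, cpus is NULL and we broadcast below */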
743
744         spin_lock(&kvm->requests_lock);
745         me = smp_processor_id();
746         kvm_for_each_vcpu(i, vcpu, kvm) {
747                 if (test_and_set_bit(req, &vcpu->requests))
748                         continue;
749                 cpu = vcpu->cpu;
750                 if (cpus != NULL && cpu != -1 && cpu != me)
751                         cpumask_set_cpu(cpu, cpus);
752         }
753         if (unlikely(cpus == NULL))
754                 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
755         else if (!cpumask_empty(cpus))
756                 smp_call_function_many(cpus, ack_flush, NULL, 1);
757         else
758                 called = false;
759         spin_unlock(&kvm->requests_lock);
760         free_cpumask_var(cpus);
761         return called;
762 }
763
764 void kvm_flush_remote_tlbs(struct kvm *kvm)
765 {
766         if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
767                 ++kvm->stat.remote_tlb_flush;
768 }
769
770 void kvm_reload_remote_mmus(struct kvm *kvm)
771 {
772         make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
773 }
774
775 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
776 {
777         struct page *page;
778         int r;
779
780         mutex_init(&vcpu->mutex);
781         vcpu->cpu = -1;
782         vcpu->kvm = kvm;
783         vcpu->vcpu_id = id;
784         init_waitqueue_head(&vcpu->wq);
785
786         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
787         if (!page) {
788                 r = -ENOMEM;
789                 goto fail;
790         }
791         vcpu->run = page_address(page);
792
793         r = kvm_arch_vcpu_init(vcpu);
794         if (r < 0)
795                 goto fail_free_run;
796         return 0;
797
798 fail_free_run:
799         free_page((unsigned long)vcpu->run);
800 fail:
801         return r;
802 }
803 EXPORT_SYMBOL_GPL(kvm_vcpu_init);
804
805 void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
806 {
807         kvm_arch_vcpu_uninit(vcpu);
808         free_page((unsigned long)vcpu->run);
809 }
810 EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
811
812 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
813 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
814 {
815         return container_of(mn, struct kvm, mmu_notifier);
816 }
817
818 static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
819                                              struct mm_struct *mm,
820                                              unsigned long address)
821 {
822         struct kvm *kvm = mmu_notifier_to_kvm(mn);
823         int need_tlb_flush;
824
825         /*
826          * When ->invalidate_page runs, the linux pte has been zapped
827          * already but the page is still allocated until
828          * ->invalidate_page returns. So if we increase the sequence
829          * here the kvm page fault will notice if the spte can't be
830          * established because the page is going to be freed. If
831          * instead the kvm page fault establishes the spte before
832          * ->invalidate_page runs, kvm_unmap_hva will release it
833          * before returning.
834          *
835          * The sequence increase only needs to be seen at spin_unlock
836          * time, and not at spin_lock time.
837          *
838          * Increasing the sequence after the spin_unlock would be
839          * unsafe because the kvm page fault could then establish the
840          * pte after kvm_unmap_hva returned, without noticing the page
841          * is going to be freed.
842          */
843         spin_lock(&kvm->mmu_lock);
844         kvm->mmu_notifier_seq++;
845         need_tlb_flush = kvm_unmap_hva(kvm, address);
846         spin_unlock(&kvm->mmu_lock);
847
848         /* we have to flush the tlb before the pages can be freed */
849         if (need_tlb_flush)
850                 kvm_flush_remote_tlbs(kvm);
851
852 }
853
854 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
855                                         struct mm_struct *mm,
856                                         unsigned long address,
857                                         pte_t pte)
858 {
859         struct kvm *kvm = mmu_notifier_to_kvm(mn);
860
861         spin_lock(&kvm->mmu_lock);
862         kvm->mmu_notifier_seq++;
863         kvm_set_spte_hva(kvm, address, pte);
864         spin_unlock(&kvm->mmu_lock);
865 }
866
867 static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
868                                                     struct mm_struct *mm,
869                                                     unsigned long start,
870                                                     unsigned long end)
871 {
872         struct kvm *kvm = mmu_notifier_to_kvm(mn);
873         int need_tlb_flush = 0;
874
875         spin_lock(&kvm->mmu_lock);
876         /*
877          * The count increase must become visible at unlock time as no
878          * spte can be established without taking the mmu_lock and
879          * count is also read inside the mmu_lock critical section.
880          */
881         kvm->mmu_notifier_count++;
882         for (; start < end; start += PAGE_SIZE)
883                 need_tlb_flush |= kvm_unmap_hva(kvm, start);
884         spin_unlock(&kvm->mmu_lock);
885
886         /* we have to flush the tlb before the pages can be freed */
887         if (need_tlb_flush)
888                 kvm_flush_remote_tlbs(kvm);
889 }
890
891 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
892                                                   struct mm_struct *mm,
893                                                   unsigned long start,
894                                                   unsigned long end)
895 {
896         struct kvm *kvm = mmu_notifier_to_kvm(mn);
897
898         spin_lock(&kvm->mmu_lock);
899         /*
900          * This sequence increase will notify the kvm page fault that
901          * the page that is going to be mapped in the spte could have
902          * been freed.
903          */
904         kvm->mmu_notifier_seq++;
905         /*
906          * The above sequence increase must be visible before the
907          * below count decrease but both values are read by the kvm
908          * page fault under mmu_lock spinlock so we don't need to add
909          * a smp_wmb() here in between the two.
910          */
911         kvm->mmu_notifier_count--;
912         spin_unlock(&kvm->mmu_lock);
913
914         BUG_ON(kvm->mmu_notifier_count < 0);
915 }
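
/*
 * Illustrative sketch of the consumer side: an arch page fault samples
 * mmu_notifier_seq before resolving the gfn, then rechecks under
 * mmu_lock that no invalidation ran or is in progress before it
 * installs the spte (names follow the x86 implementation and are not
 * defined in this file):
 *
 *	seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *	spin_lock(&kvm->mmu_lock);
 *	if (kvm->mmu_notifier_count || kvm->mmu_notifier_seq != seq)
 *		goto retry;
 */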
916
917 static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
918                                               struct mm_struct *mm,
919                                               unsigned long address)
920 {
921         struct kvm *kvm = mmu_notifier_to_kvm(mn);
922         int young;
923
924         spin_lock(&kvm->mmu_lock);
925         young = kvm_age_hva(kvm, address);
926         spin_unlock(&kvm->mmu_lock);
927
928         if (young)
929                 kvm_flush_remote_tlbs(kvm);
930
931         return young;
932 }
933
934 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
935                                      struct mm_struct *mm)
936 {
937         struct kvm *kvm = mmu_notifier_to_kvm(mn);
938         kvm_arch_flush_shadow(kvm);
939 }
940
941 static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
942         .invalidate_page        = kvm_mmu_notifier_invalidate_page,
943         .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
944         .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
945         .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
946         .change_pte             = kvm_mmu_notifier_change_pte,
947         .release                = kvm_mmu_notifier_release,
948 };
949 #endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
950
951 static struct kvm *kvm_create_vm(void)
952 {
953         struct kvm *kvm = kvm_arch_create_vm();
954 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
955         struct page *page;
956 #endif
957
958         if (IS_ERR(kvm))
959                 goto out;
960 #ifdef CONFIG_HAVE_KVM_IRQCHIP
961         INIT_LIST_HEAD(&kvm->irq_routing);
962         INIT_HLIST_HEAD(&kvm->mask_notifier_list);
963 #endif
964
965 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
966         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
967         if (!page) {
968                 kfree(kvm);
969                 return ERR_PTR(-ENOMEM);
970         }
971         kvm->coalesced_mmio_ring =
972                         (struct kvm_coalesced_mmio_ring *)page_address(page);
973 #endif
974
975 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
976         {
977                 int err;
978                 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
979                 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
980                 if (err) {
981 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
982                         put_page(page);
983 #endif
984                         kfree(kvm);
985                         return ERR_PTR(err);
986                 }
987         }
988 #endif
989
990         kvm->mm = current->mm;
991         atomic_inc(&kvm->mm->mm_count);
992         spin_lock_init(&kvm->mmu_lock);
993         spin_lock_init(&kvm->requests_lock);
994         kvm_io_bus_init(&kvm->pio_bus);
995         kvm_eventfd_init(kvm);
996         mutex_init(&kvm->lock);
997         mutex_init(&kvm->irq_lock);
998         kvm_io_bus_init(&kvm->mmio_bus);
999         init_rwsem(&kvm->slots_lock);
1000         atomic_set(&kvm->users_count, 1);
1001         spin_lock(&kvm_lock);
1002         list_add(&kvm->vm_list, &vm_list);
1003         spin_unlock(&kvm_lock);
1004 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1005         kvm_coalesced_mmio_init(kvm);
1006 #endif
1007 out:
1008         return kvm;
1009 }
1010
1011 /*
1012  * Free any memory in @free but not in @dont.
1013  */
1014 static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
1015                                   struct kvm_memory_slot *dont)
1016 {
1017         int i;
1018
1019         if (!dont || free->rmap != dont->rmap)
1020                 vfree(free->rmap);
1021
1022         if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
1023                 vfree(free->dirty_bitmap);
1024
1025
1026         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1027                 if (!dont || free->lpage_info[i] != dont->lpage_info[i]) {
1028                         vfree(free->lpage_info[i]);
1029                         free->lpage_info[i] = NULL;
1030                 }
1031         }
1032
1033         free->npages = 0;
1034         free->dirty_bitmap = NULL;
1035         free->rmap = NULL;
1036 }
1037
1038 void kvm_free_physmem(struct kvm *kvm)
1039 {
1040         int i;
1041
1042         for (i = 0; i < kvm->nmemslots; ++i)
1043                 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
1044 }
1045
1046 static void kvm_destroy_vm(struct kvm *kvm)
1047 {
1048         struct mm_struct *mm = kvm->mm;
1049
1050         kvm_arch_sync_events(kvm);
1051         spin_lock(&kvm_lock);
1052         list_del(&kvm->vm_list);
1053         spin_unlock(&kvm_lock);
1054         kvm_free_irq_routing(kvm);
1055         kvm_io_bus_destroy(&kvm->pio_bus);
1056         kvm_io_bus_destroy(&kvm->mmio_bus);
1057 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1058         if (kvm->coalesced_mmio_ring != NULL)
1059                 free_page((unsigned long)kvm->coalesced_mmio_ring);
1060 #endif
1061 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1062         mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1063 #else
1064         kvm_arch_flush_shadow(kvm);
1065 #endif
1066         kvm_arch_destroy_vm(kvm);
1067         mmdrop(mm);
1068 }
1069
1070 void kvm_get_kvm(struct kvm *kvm)
1071 {
1072         atomic_inc(&kvm->users_count);
1073 }
1074 EXPORT_SYMBOL_GPL(kvm_get_kvm);
1075
1076 void kvm_put_kvm(struct kvm *kvm)
1077 {
1078         if (atomic_dec_and_test(&kvm->users_count))
1079                 kvm_destroy_vm(kvm);
1080 }
1081 EXPORT_SYMBOL_GPL(kvm_put_kvm);
1082
1083
1084 static int kvm_vm_release(struct inode *inode, struct file *filp)
1085 {
1086         struct kvm *kvm = filp->private_data;
1087
1088         kvm_irqfd_release(kvm);
1089
1090         kvm_put_kvm(kvm);
1091         return 0;
1092 }
1093
1094 /*
1095  * Allocate some memory and give it an address in the guest physical address
1096  * space.
1097  *
1098  * Discontiguous memory is allowed, mostly for framebuffers.
1099  *
1100  * Must be called holding kvm->slots_lock for write.
1101  */
1102 int __kvm_set_memory_region(struct kvm *kvm,
1103                             struct kvm_userspace_memory_region *mem,
1104                             int user_alloc)
1105 {
1106         int r;
1107         gfn_t base_gfn;
1108         unsigned long npages;
1109         unsigned long i;
1110         struct kvm_memory_slot *memslot;
1111         struct kvm_memory_slot old, new;
1112
1113         r = -EINVAL;
1114         /* General sanity checks */
1115         if (mem->memory_size & (PAGE_SIZE - 1))
1116                 goto out;
1117         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1118                 goto out;
1119         if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
1120                 goto out;
1121         if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
1122                 goto out;
1123         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1124                 goto out;
1125
1126         memslot = &kvm->memslots[mem->slot];
1127         base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
1128         npages = mem->memory_size >> PAGE_SHIFT;
1129
1130         if (!npages)
1131                 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
1132
1133         new = old = *memslot;
1134
1135         new.base_gfn = base_gfn;
1136         new.npages = npages;
1137         new.flags = mem->flags;
1138
1139         /* Disallow changing a memory slot's size. */
1140         r = -EINVAL;
1141         if (npages && old.npages && npages != old.npages)
1142                 goto out_free;
1143
1144         /* Check for overlaps */
1145         r = -EEXIST;
1146         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1147                 struct kvm_memory_slot *s = &kvm->memslots[i];
1148
1149                 if (s == memslot || !s->npages)
1150                         continue;
1151                 if (!((base_gfn + npages <= s->base_gfn) ||
1152                       (base_gfn >= s->base_gfn + s->npages)))
1153                         goto out_free;
1154         }
1155
1156         /* Free page dirty bitmap if unneeded */
1157         if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
1158                 new.dirty_bitmap = NULL;
1159
1160         r = -ENOMEM;
1161
1162         /* Allocate if a slot is being created */
1163 #ifndef CONFIG_S390
1164         if (npages && !new.rmap) {
1165                 new.rmap = vmalloc(npages * sizeof(struct page *));
1166
1167                 if (!new.rmap)
1168                         goto out_free;
1169
1170                 memset(new.rmap, 0, npages * sizeof(*new.rmap));
1171
1172                 new.user_alloc = user_alloc;
1173                 /*
1174                  * hva_to_rmmap() serializes with the mmu_lock and to be
1175                  * safe it has to ignore memslots with !user_alloc &&
1176                  * !userspace_addr.
1177                  */
1178                 if (user_alloc)
1179                         new.userspace_addr = mem->userspace_addr;
1180                 else
1181                         new.userspace_addr = 0;
1182         }
1183         if (!npages)
1184                 goto skip_lpage;
1185
1186         for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
1187                 unsigned long ugfn;
1188                 unsigned long j;
1189                 int lpages;
1190                 int level = i + 2;
1191
1192                 /* Avoid unused variable warning if no large pages */
1193                 (void)level;
1194
1195                 if (new.lpage_info[i])
1196                         continue;
1197
1198                 lpages = 1 + (base_gfn + npages - 1) /
1199                              KVM_PAGES_PER_HPAGE(level);
1200                 lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
1201
1202                 new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
1203
1204                 if (!new.lpage_info[i])
1205                         goto out_free;
1206
1207                 memset(new.lpage_info[i], 0,
1208                        lpages * sizeof(*new.lpage_info[i]));
1209
1210                 if (base_gfn % KVM_PAGES_PER_HPAGE(level))
1211                         new.lpage_info[i][0].write_count = 1;
1212                 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
1213                         new.lpage_info[i][lpages - 1].write_count = 1;
1214                 ugfn = new.userspace_addr >> PAGE_SHIFT;
1215                 /*
1216                  * If the gfn and userspace address are not aligned wrt each
1217                  * other, or if explicitly asked to, disable large page
1218                  * support for this slot
1219                  */
1220                 if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
1221                     !largepages_enabled)
1222                         for (j = 0; j < lpages; ++j)
1223                                 new.lpage_info[i][j].write_count = 1;
1224         }
1225
1226 skip_lpage:
1227
1228         /* Allocate page dirty bitmap if needed */
1229         if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
1230                 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
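                /* one dirty bit per page, rounded up to a whole number of longs */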
1231
1232                 new.dirty_bitmap = vmalloc(dirty_bytes);
1233                 if (!new.dirty_bitmap)
1234                         goto out_free;
1235                 memset(new.dirty_bitmap, 0, dirty_bytes);
1236                 if (old.npages)
1237                         kvm_arch_flush_shadow(kvm);
1238         }
1239 #else  /* not defined CONFIG_S390 */
1240         new.user_alloc = user_alloc;
1241         if (user_alloc)
1242                 new.userspace_addr = mem->userspace_addr;
1243 #endif /* not defined CONFIG_S390 */
1244
1245         if (!npages)
1246                 kvm_arch_flush_shadow(kvm);
1247
1248         spin_lock(&kvm->mmu_lock);
1249         if (mem->slot >= kvm->nmemslots)
1250                 kvm->nmemslots = mem->slot + 1;
1251
1252         *memslot = new;
1253         spin_unlock(&kvm->mmu_lock);
1254
1255         r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
1256         if (r) {
1257                 spin_lock(&kvm->mmu_lock);
1258                 *memslot = old;
1259                 spin_unlock(&kvm->mmu_lock);
1260                 goto out_free;
1261         }
1262
1263         kvm_free_physmem_slot(&old, npages ? &new : NULL);
1264         /* Slot deletion case: we have to update the current slot */
1265         spin_lock(&kvm->mmu_lock);
1266         if (!npages)
1267                 *memslot = old;
1268         spin_unlock(&kvm->mmu_lock);
1269 #ifdef CONFIG_DMAR
1270         /* map the pages into the iommu page table */
1271         r = kvm_iommu_map_pages(kvm, base_gfn, npages);
1272         if (r)
1273                 goto out;
1274 #endif
1275         return 0;
1276
1277 out_free:
1278         kvm_free_physmem_slot(&new, &old);
1279 out:
1280         return r;
1281
1282 }
1283 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1284
1285 int kvm_set_memory_region(struct kvm *kvm,
1286                           struct kvm_userspace_memory_region *mem,
1287                           int user_alloc)
1288 {
1289         int r;
1290
1291         down_write(&kvm->slots_lock);
1292         r = __kvm_set_memory_region(kvm, mem, user_alloc);
1293         up_write(&kvm->slots_lock);
1294         return r;
1295 }
1296 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1297
1298 int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1299                                    struct
1300                                    kvm_userspace_memory_region *mem,
1301                                    int user_alloc)
1302 {
1303         if (mem->slot >= KVM_MEMORY_SLOTS)
1304                 return -EINVAL;
1305         return kvm_set_memory_region(kvm, mem, user_alloc);
1306 }
1307
1308 int kvm_get_dirty_log(struct kvm *kvm,
1309                         struct kvm_dirty_log *log, int *is_dirty)
1310 {
1311         struct kvm_memory_slot *memslot;
1312         int r, i;
1313         int n;
1314         unsigned long any = 0;
1315
1316         r = -EINVAL;
1317         if (log->slot >= KVM_MEMORY_SLOTS)
1318                 goto out;
1319
1320         memslot = &kvm->memslots[log->slot];
1321         r = -ENOENT;
1322         if (!memslot->dirty_bitmap)
1323                 goto out;
1324
1325         n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
1326
1327         for (i = 0; !any && i < n/sizeof(long); ++i)
1328                 any = memslot->dirty_bitmap[i];
1329
1330         r = -EFAULT;
1331         if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1332                 goto out;
1333
1334         if (any)
1335                 *is_dirty = 1;
1336
1337         r = 0;
1338 out:
1339         return r;
1340 }
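
/*
 * Illustrative sketch of a caller: an architecture's KVM_GET_DIRTY_LOG
 * ioctl handler typically wraps this helper, write-protecting the slot
 * and clearing the bitmap when anything was dirty (modelled on the x86
 * handler; kvm_mmu_slot_remove_write_access() is x86-specific):
 *
 *	down_write(&kvm->slots_lock);
 *	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 *	if (!r && is_dirty) {
 *		spin_lock(&kvm->mmu_lock);
 *		kvm_mmu_slot_remove_write_access(kvm, log->slot);
 *		spin_unlock(&kvm->mmu_lock);
 *		memset(memslot->dirty_bitmap, 0, n);
 *	}
 *	up_write(&kvm->slots_lock);
 */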
1341
1342 void kvm_disable_largepages(void)
1343 {
1344         largepages_enabled = false;
1345 }
1346 EXPORT_SYMBOL_GPL(kvm_disable_largepages);
1347
1348 int is_error_page(struct page *page)
1349 {
1350         return page == bad_page;
1351 }
1352 EXPORT_SYMBOL_GPL(is_error_page);
1353
1354 int is_error_pfn(pfn_t pfn)
1355 {
1356         return pfn == bad_pfn;
1357 }
1358 EXPORT_SYMBOL_GPL(is_error_pfn);
1359
1360 static inline unsigned long bad_hva(void)
1361 {
1362         return PAGE_OFFSET;
1363 }
1364
1365 int kvm_is_error_hva(unsigned long addr)
1366 {
1367         return addr == bad_hva();
1368 }
1369 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
1370
1371 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
1372 {
1373         int i;
1374
1375         for (i = 0; i < kvm->nmemslots; ++i) {
1376                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1377
1378                 if (gfn >= memslot->base_gfn
1379                     && gfn < memslot->base_gfn + memslot->npages)
1380                         return memslot;
1381         }
1382         return NULL;
1383 }
1384 EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
1385
1386 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1387 {
1388         gfn = unalias_gfn(kvm, gfn);
1389         return gfn_to_memslot_unaliased(kvm, gfn);
1390 }
1391
1392 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1393 {
1394         int i;
1395
1396         gfn = unalias_gfn(kvm, gfn);
1397         for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1398                 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1399
1400                 if (gfn >= memslot->base_gfn
1401                     && gfn < memslot->base_gfn + memslot->npages)
1402                         return 1;
1403         }
1404         return 0;
1405 }
1406 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1407
1408 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
1409 {
1410         struct kvm_memory_slot *slot;
1411
1412         gfn = unalias_gfn(kvm, gfn);
1413         slot = gfn_to_memslot_unaliased(kvm, gfn);
1414         if (!slot)
1415                 return bad_hva();
1416         return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
1417 }
1418 EXPORT_SYMBOL_GPL(gfn_to_hva);
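
/*
 * Example: for a slot with base_gfn 0x100 and userspace_addr
 * 0x7f0000000000, gfn 0x180 maps to 0x7f0000000000 + 0x80 * PAGE_SIZE,
 * i.e. hva 0x7f0000080000 with 4K pages.
 */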
1419
1420 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1421 {
1422         struct page *page[1];
1423         unsigned long addr;
1424         int npages;
1425         pfn_t pfn;
1426
1427         might_sleep();
1428
1429         addr = gfn_to_hva(kvm, gfn);
1430         if (kvm_is_error_hva(addr)) {
1431                 get_page(bad_page);
1432                 return page_to_pfn(bad_page);
1433         }
1434
1435         npages = get_user_pages_fast(addr, 1, 1, page);
1436
1437         if (unlikely(npages != 1)) {
1438                 struct vm_area_struct *vma;
1439
1440                 down_read(&current->mm->mmap_sem);
1441                 vma = find_vma(current->mm, addr);
1442
1443                 if (vma == NULL || addr < vma->vm_start ||
1444                     !(vma->vm_flags & VM_PFNMAP)) {
1445                         up_read(&current->mm->mmap_sem);
1446                         get_page(bad_page);
1447                         return page_to_pfn(bad_page);
1448                 }
1449
1450                 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
1451                 up_read(&current->mm->mmap_sem);
1452                 BUG_ON(!kvm_is_mmio_pfn(pfn));
1453         } else
1454                 pfn = page_to_pfn(page[0]);
1455
1456         return pfn;
1457 }
1458
1459 EXPORT_SYMBOL_GPL(gfn_to_pfn);
1460
1461 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1462 {
1463         pfn_t pfn;
1464
1465         pfn = gfn_to_pfn(kvm, gfn);
1466         if (!kvm_is_mmio_pfn(pfn))
1467                 return pfn_to_page(pfn);
1468
1469         WARN_ON(kvm_is_mmio_pfn(pfn));
1470
1471         get_page(bad_page);
1472         return bad_page;
1473 }
1474
1475 EXPORT_SYMBOL_GPL(gfn_to_page);
1476
1477 void kvm_release_page_clean(struct page *page)
1478 {
1479         kvm_release_pfn_clean(page_to_pfn(page));
1480 }
1481 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1482
1483 void kvm_release_pfn_clean(pfn_t pfn)
1484 {
1485         if (!kvm_is_mmio_pfn(pfn))
1486                 put_page(pfn_to_page(pfn));
1487 }
1488 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1489
1490 void kvm_release_page_dirty(struct page *page)
1491 {
1492         kvm_release_pfn_dirty(page_to_pfn(page));
1493 }
1494 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
1495
1496 void kvm_release_pfn_dirty(pfn_t pfn)
1497 {
1498         kvm_set_pfn_dirty(pfn);
1499         kvm_release_pfn_clean(pfn);
1500 }
1501 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1502
1503 void kvm_set_page_dirty(struct page *page)
1504 {
1505         kvm_set_pfn_dirty(page_to_pfn(page));
1506 }
1507 EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1508
1509 void kvm_set_pfn_dirty(pfn_t pfn)
1510 {
1511         if (!kvm_is_mmio_pfn(pfn)) {
1512                 struct page *page = pfn_to_page(pfn);
1513                 if (!PageReserved(page))
1514                         SetPageDirty(page);
1515         }
1516 }
1517 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1518
1519 void kvm_set_pfn_accessed(pfn_t pfn)
1520 {
1521         if (!kvm_is_mmio_pfn(pfn))
1522                 mark_page_accessed(pfn_to_page(pfn));
1523 }
1524 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1525
1526 void kvm_get_pfn(pfn_t pfn)
1527 {
1528         if (!kvm_is_mmio_pfn(pfn))
1529                 get_page(pfn_to_page(pfn));
1530 }
1531 EXPORT_SYMBOL_GPL(kvm_get_pfn);
1532
1533 static int next_segment(unsigned long len, int offset)
1534 {
1535         if (len > PAGE_SIZE - offset)
1536                 return PAGE_SIZE - offset;
1537         else
1538                 return len;
1539 }
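
/*
 * Example: with 4K pages, len 5000 at offset 3000 is split into a 1096
 * byte segment on the first page and a 3904 byte segment on the next.
 */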
1540
1541 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1542                         int len)
1543 {
1544         int r;
1545         unsigned long addr;
1546
1547         addr = gfn_to_hva(kvm, gfn);
1548         if (kvm_is_error_hva(addr))
1549                 return -EFAULT;
1550         r = copy_from_user(data, (void __user *)addr + offset, len);
1551         if (r)
1552                 return -EFAULT;
1553         return 0;
1554 }
1555 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1556
1557 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1558 {
1559         gfn_t gfn = gpa >> PAGE_SHIFT;
1560         int seg;
1561         int offset = offset_in_page(gpa);
1562         int ret;
1563
1564         while ((seg = next_segment(len, offset)) != 0) {
1565                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1566                 if (ret < 0)
1567                         return ret;
1568                 offset = 0;
1569                 len -= seg;
1570                 data += seg;
1571                 ++gfn;
1572         }
1573         return 0;
1574 }
1575 EXPORT_SYMBOL_GPL(kvm_read_guest);
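
/*
 * Illustrative use (the structure name is hypothetical): reading a
 * guest-physical structure that may straddle page boundaries:
 *
 *	struct my_shared_area area;
 *
 *	if (kvm_read_guest(kvm, gpa, &area, sizeof(area)))
 *		return -EFAULT;
 */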
1576
1577 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1578                           unsigned long len)
1579 {
1580         int r;
1581         unsigned long addr;
1582         gfn_t gfn = gpa >> PAGE_SHIFT;
1583         int offset = offset_in_page(gpa);
1584
1585         addr = gfn_to_hva(kvm, gfn);
1586         if (kvm_is_error_hva(addr))
1587                 return -EFAULT;
1588         pagefault_disable();
1589         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
1590         pagefault_enable();
1591         if (r)
1592                 return -EFAULT;
1593         return 0;
1594 }
1595 EXPORT_SYMBOL(kvm_read_guest_atomic);
1596
1597 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1598                          int offset, int len)
1599 {
1600         int r;
1601         unsigned long addr;
1602
1603         addr = gfn_to_hva(kvm, gfn);
1604         if (kvm_is_error_hva(addr))
1605                 return -EFAULT;
1606         r = copy_to_user((void __user *)addr + offset, data, len);
1607         if (r)
1608                 return -EFAULT;
1609         mark_page_dirty(kvm, gfn);
1610         return 0;
1611 }
1612 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1613
1614 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1615                     unsigned long len)
1616 {
1617         gfn_t gfn = gpa >> PAGE_SHIFT;
1618         int seg;
1619         int offset = offset_in_page(gpa);
1620         int ret;
1621
1622         while ((seg = next_segment(len, offset)) != 0) {
1623                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1624                 if (ret < 0)
1625                         return ret;
1626                 offset = 0;
1627                 len -= seg;
1628                 data += seg;
1629                 ++gfn;
1630         }
1631         return 0;
1632 }
1633
1634 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1635 {
1636         return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
1637 }
1638 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1639
1640 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1641 {
1642         gfn_t gfn = gpa >> PAGE_SHIFT;
1643         int seg;
1644         int offset = offset_in_page(gpa);
1645         int ret;
1646
1647         while ((seg = next_segment(len, offset)) != 0) {
1648                 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1649                 if (ret < 0)
1650                         return ret;
1651                 offset = 0;
1652                 len -= seg;
1653                 ++gfn;
1654         }
1655         return 0;
1656 }
1657 EXPORT_SYMBOL_GPL(kvm_clear_guest);
1658
1659 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1660 {
1661         struct kvm_memory_slot *memslot;
1662
1663         gfn = unalias_gfn(kvm, gfn);
1664         memslot = gfn_to_memslot_unaliased(kvm, gfn);
1665         if (memslot && memslot->dirty_bitmap) {
1666                 unsigned long rel_gfn = gfn - memslot->base_gfn;
1667
1668                 /* test first to avoid a needless read-modify-write */
1669                 if (!generic_test_le_bit(rel_gfn, memslot->dirty_bitmap))
1670                         generic___set_le_bit(rel_gfn, memslot->dirty_bitmap);
1671         }
1672 }
1673
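/*
 * Userspace-side sketch (compiled out) of why the little-endian bit order
 * above matters: with generic_*_le_bit(), bit N of the dirty bitmap always
 * lives in byte N / 8 at bit N % 8, so a plain byte walk of the buffer
 * returned by KVM_GET_DIRTY_LOG is endian-safe on any host.  `log`,
 * `npages` and the callback are assumed inputs.
 */
#if 0
static void demo_scan_dirty(struct kvm_dirty_log *log, unsigned long npages,
			    void (*mark)(unsigned long rel_gfn))
{
	unsigned char *bm = log->dirty_bitmap;
	unsigned long i;

	for (i = 0; i < npages; i++)
		if (bm[i / 8] & (1 << (i % 8)))
			mark(i);	/* i is the gfn offset into the slot */
}
#endif
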
1674 /*
1675  * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1676  */
1677 void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1678 {
1679         DEFINE_WAIT(wait);
1680
1681         for (;;) {
1682                 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
1683
1684                 if (kvm_arch_vcpu_runnable(vcpu)) {
1685                         set_bit(KVM_REQ_UNHALT, &vcpu->requests);
1686                         break;
1687                 }
1688                 if (kvm_cpu_has_pending_timer(vcpu))
1689                         break;
1690                 if (signal_pending(current))
1691                         break;
1692
1693                 vcpu_put(vcpu);
1694                 schedule();
1695                 vcpu_load(vcpu);
1696         }
1697
1698         finish_wait(&vcpu->wq, &wait);
1699 }
1700
1701 void kvm_resched(struct kvm_vcpu *vcpu)
1702 {
1703         if (!need_resched())
1704                 return;
1705         cond_resched();
1706 }
1707 EXPORT_SYMBOL_GPL(kvm_resched);
1708
1709 static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1710 {
1711         struct kvm_vcpu *vcpu = vma->vm_file->private_data;
1712         struct page *page;
1713
1714         if (vmf->pgoff == 0)
1715                 page = virt_to_page(vcpu->run);
1716 #ifdef CONFIG_X86
1717         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
1718                 page = virt_to_page(vcpu->arch.pio_data);
1719 #endif
1720 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1721         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1722                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1723 #endif
1724         else
1725                 return VM_FAULT_SIGBUS;
1726         get_page(page);
1727         vmf->page = page;
1728         return 0;
1729 }
1730
1731 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
1732         .fault = kvm_vcpu_fault,
1733 };
1734
1735 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1736 {
1737         vma->vm_ops = &kvm_vcpu_vm_ops;
1738         return 0;
1739 }
1740
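/*
 * Userspace counterpart sketch (compiled out): the fault handler above
 * backs an mmap of the vcpu fd, where page 0 is struct kvm_run.  The
 * mapping size must be queried with KVM_GET_VCPU_MMAP_SIZE on the
 * /dev/kvm fd (see kvm_dev_ioctl() below).  Assumes <sys/ioctl.h>,
 * <sys/mman.h> and <linux/kvm.h>.
 */
#if 0
static struct kvm_run *demo_map_run(int sys_fd, int vcpu_fd)
{
	long sz = ioctl(sys_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *p;

	if (sz < 0)
		return NULL;
	p = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	return p == MAP_FAILED ? NULL : p;
}
#endif
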
1741 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1742 {
1743         struct kvm_vcpu *vcpu = filp->private_data;
1744
1745         kvm_put_kvm(vcpu->kvm);
1746         return 0;
1747 }
1748
1749 static struct file_operations kvm_vcpu_fops = {
1750         .release        = kvm_vcpu_release,
1751         .unlocked_ioctl = kvm_vcpu_ioctl,
1752         .compat_ioctl   = kvm_vcpu_ioctl,
1753         .mmap           = kvm_vcpu_mmap,
1754 };
1755
1756 /*
1757  * Allocates an inode for the vcpu.
1758  */
1759 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1760 {
1761         return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
1762 }
1763
1764 /*
1765  * Creates some virtual cpus.  Good luck creating more than one.
1766  */
1767 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
1768 {
1769         int r;
1770         struct kvm_vcpu *vcpu, *v;
1771
1772         vcpu = kvm_arch_vcpu_create(kvm, id);
1773         if (IS_ERR(vcpu))
1774                 return PTR_ERR(vcpu);
1775
1776         preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1777
1778         r = kvm_arch_vcpu_setup(vcpu);
1779         if (r)
1780                 goto vcpu_setup_fail;
1781
1782         mutex_lock(&kvm->lock);
1783         if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
1784                 r = -EINVAL;
1785                 goto vcpu_destroy;
1786         }
1787
1788         kvm_for_each_vcpu(r, v, kvm)
1789                 if (v->vcpu_id == id) {
1790                         r = -EEXIST;
1791                         goto vcpu_destroy;
1792                 }
1793
1794         BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
1795
1796         /* Now it's all set up, let userspace reach it */
1797         kvm_get_kvm(kvm);
1798         r = create_vcpu_fd(vcpu);
1799         if (r < 0) {
1800                 kvm_put_kvm(kvm);
1801                 goto vcpu_destroy;
1802         }
1803
1804         kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
1805         smp_wmb();      /* publish the vcpu before raising online_vcpus */
1806         atomic_inc(&kvm->online_vcpus);
1807
1808 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
1809         if (kvm->bsp_vcpu_id == id)
1810                 kvm->bsp_vcpu = vcpu;
1811 #endif
1812         mutex_unlock(&kvm->lock);
1813         return r;
1814
1815 vcpu_destroy:
1816         mutex_unlock(&kvm->lock);
1817 vcpu_setup_fail:
1818         kvm_arch_vcpu_destroy(vcpu);
1819         return r;
1820 }
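
/*
 * Userspace sketch (compiled out) for the ioctl above: KVM_CREATE_VCPU
 * takes the vcpu id as the ioctl argument and returns a new vcpu fd.
 * Ids must be unique per VM (-EEXIST) and creation is bounded by
 * KVM_MAX_VCPUS (-EINVAL).
 */
#if 0
static int demo_create_vcpu(int vm_fd, unsigned long id)
{
	return ioctl(vm_fd, KVM_CREATE_VCPU, id);	/* fd, or -1/errno */
}
#endif
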
1821 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1822 {
1823         if (sigset) {
1824                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1825                 vcpu->sigset_active = 1;
1826                 vcpu->sigset = *sigset;
1827         } else
1828                 vcpu->sigset_active = 0;
1829         return 0;
1830 }
1831
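/*
 * Userspace sketch (compiled out): KVM_SET_SIGNAL_MASK installs a mask
 * honoured only while KVM_RUN executes.  Note that the length check in
 * kvm_vcpu_ioctl() below compares against the *kernel* sigset_t (8 bytes
 * on x86-64), not glibc's much larger sigset_t; the literal 8 below is
 * that x86-64 assumption.
 */
#if 0
static int demo_set_run_sigmask(int vcpu_fd, const sigset_t *set)
{
	struct {
		struct kvm_signal_mask hdr;
		sigset_t set;
	} arg = { .hdr.len = 8 /* kernel sigset_t size on x86-64 */ };

	arg.set = *set;
	return ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, &arg);
}
#endif
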
1832 #ifdef __KVM_HAVE_MSIX
1833 static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
1834                                     struct kvm_assigned_msix_nr *entry_nr)
1835 {
1836         int r = 0;
1837         struct kvm_assigned_dev_kernel *adev;
1838
1839         mutex_lock(&kvm->lock);
1840
1841         adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1842                                       entry_nr->assigned_dev_id);
1843         if (!adev) {
1844                 r = -EINVAL;
1845                 goto msix_nr_out;
1846         }
1847
1848         if (adev->entries_nr == 0) {
1849                 adev->entries_nr = entry_nr->entry_nr;
1850                 if (adev->entries_nr == 0 ||
1851                     adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
1852                         r = -EINVAL;
1853                         goto msix_nr_out;
1854                 }
1855
1856                 adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
1857                                                 entry_nr->entry_nr,
1858                                                 GFP_KERNEL);
1859                 if (!adev->host_msix_entries) {
1860                         r = -ENOMEM;
1861                         goto msix_nr_out;
1862                 }
1863                 adev->guest_msix_entries = kzalloc(
1864                                 sizeof(struct kvm_guest_msix_entry) *
1865                                 entry_nr->entry_nr, GFP_KERNEL);
1866                 if (!adev->guest_msix_entries) {
1867                         kfree(adev->host_msix_entries);
1868                         r = -ENOMEM;
1869                         goto msix_nr_out;
1870                 }
1871         } else /* setting the MSI-X entry count twice is not allowed */
1872                 r = -EINVAL;
1873 msix_nr_out:
1874         mutex_unlock(&kvm->lock);
1875         return r;
1876 }
1877
1878 static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
1879                                        struct kvm_assigned_msix_entry *entry)
1880 {
1881         int r = 0, i;
1882         struct kvm_assigned_dev_kernel *adev;
1883
1884         mutex_lock(&kvm->lock);
1885
1886         adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1887                                       entry->assigned_dev_id);
1888
1889         if (!adev) {
1890                 r = -EINVAL;
1891                 goto msix_entry_out;
1892         }
1893
1894         for (i = 0; i < adev->entries_nr; i++)
1895                 if (adev->guest_msix_entries[i].vector == 0 ||
1896                     adev->guest_msix_entries[i].entry == entry->entry) {
1897                         adev->guest_msix_entries[i].entry = entry->entry;
1898                         adev->guest_msix_entries[i].vector = entry->gsi;
1899                         adev->host_msix_entries[i].entry = entry->entry;
1900                         break;
1901                 }
1902         if (i == adev->entries_nr) {
1903                 r = -ENOSPC;
1904                 goto msix_entry_out;
1905         }
1906
1907 msix_entry_out:
1908         mutex_unlock(&kvm->lock);
1909
1910         return r;
1911 }
1912 #endif
1913
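/*
 * Userspace ordering sketch (compiled out) for the two helpers above: the
 * entry count must be set exactly once per assigned device (a second call
 * fails with -EINVAL) before the individual entries are routed.  base_gsi
 * and the 1:1 entry layout are illustrative.
 */
#if 0
static int demo_assign_msix(int vm_fd, __u32 dev_id, __u16 nr, __u32 base_gsi)
{
	struct kvm_assigned_msix_nr nr_arg = {
		.assigned_dev_id = dev_id,
		.entry_nr = nr,
	};
	int i, r;

	r = ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr_arg);
	for (i = 0; !r && i < nr; i++) {
		struct kvm_assigned_msix_entry ent = {
			.assigned_dev_id = dev_id,
			.entry = i,
			.gsi = base_gsi + i,
		};

		r = ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &ent);
	}
	return r;
}
#endif
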
1914 static long kvm_vcpu_ioctl(struct file *filp,
1915                            unsigned int ioctl, unsigned long arg)
1916 {
1917         struct kvm_vcpu *vcpu = filp->private_data;
1918         void __user *argp = (void __user *)arg;
1919         int r;
1920         struct kvm_fpu *fpu = NULL;
1921         struct kvm_sregs *kvm_sregs = NULL;
1922
1923         if (vcpu->kvm->mm != current->mm)
1924                 return -EIO;
1925         switch (ioctl) {
1926         case KVM_RUN:
1927                 r = -EINVAL;
1928                 if (arg)
1929                         goto out;
1930                 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
1931                 break;
1932         case KVM_GET_REGS: {
1933                 struct kvm_regs *kvm_regs;
1934
1935                 r = -ENOMEM;
1936                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1937                 if (!kvm_regs)
1938                         goto out;
1939                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
1940                 if (r)
1941                         goto out_free1;
1942                 r = -EFAULT;
1943                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1944                         goto out_free1;
1945                 r = 0;
1946 out_free1:
1947                 kfree(kvm_regs);
1948                 break;
1949         }
1950         case KVM_SET_REGS: {
1951                 struct kvm_regs *kvm_regs;
1952
1953                 r = -ENOMEM;
1954                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1955                 if (!kvm_regs)
1956                         goto out;
1957                 r = -EFAULT;
1958                 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1959                         goto out_free2;
1960                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
1961                 if (r)
1962                         goto out_free2;
1963                 r = 0;
1964 out_free2:
1965                 kfree(kvm_regs);
1966                 break;
1967         }
1968         case KVM_GET_SREGS: {
1969                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1970                 r = -ENOMEM;
1971                 if (!kvm_sregs)
1972                         goto out;
1973                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1974                 if (r)
1975                         goto out;
1976                 r = -EFAULT;
1977                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1978                         goto out;
1979                 r = 0;
1980                 break;
1981         }
1982         case KVM_SET_SREGS: {
1983                 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1984                 r = -ENOMEM;
1985                 if (!kvm_sregs)
1986                         goto out;
1987                 r = -EFAULT;
1988                 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1989                         goto out;
1990                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1991                 if (r)
1992                         goto out;
1993                 r = 0;
1994                 break;
1995         }
1996         case KVM_GET_MP_STATE: {
1997                 struct kvm_mp_state mp_state;
1998
1999                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
2000                 if (r)
2001                         goto out;
2002                 r = -EFAULT;
2003                 if (copy_to_user(argp, &mp_state, sizeof mp_state))
2004                         goto out;
2005                 r = 0;
2006                 break;
2007         }
2008         case KVM_SET_MP_STATE: {
2009                 struct kvm_mp_state mp_state;
2010
2011                 r = -EFAULT;
2012                 if (copy_from_user(&mp_state, argp, sizeof mp_state))
2013                         goto out;
2014                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
2015                 if (r)
2016                         goto out;
2017                 r = 0;
2018                 break;
2019         }
2020         case KVM_TRANSLATE: {
2021                 struct kvm_translation tr;
2022
2023                 r = -EFAULT;
2024                 if (copy_from_user(&tr, argp, sizeof tr))
2025                         goto out;
2026                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
2027                 if (r)
2028                         goto out;
2029                 r = -EFAULT;
2030                 if (copy_to_user(argp, &tr, sizeof tr))
2031                         goto out;
2032                 r = 0;
2033                 break;
2034         }
2035         case KVM_SET_GUEST_DEBUG: {
2036                 struct kvm_guest_debug dbg;
2037
2038                 r = -EFAULT;
2039                 if (copy_from_user(&dbg, argp, sizeof dbg))
2040                         goto out;
2041                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
2042                 if (r)
2043                         goto out;
2044                 r = 0;
2045                 break;
2046         }
2047         case KVM_SET_SIGNAL_MASK: {
2048                 struct kvm_signal_mask __user *sigmask_arg = argp;
2049                 struct kvm_signal_mask kvm_sigmask;
2050                 sigset_t sigset, *p;
2051
2052                 p = NULL;
2053                 if (argp) {
2054                         r = -EFAULT;
2055                         if (copy_from_user(&kvm_sigmask, argp,
2056                                            sizeof kvm_sigmask))
2057                                 goto out;
2058                         r = -EINVAL;
2059                         if (kvm_sigmask.len != sizeof sigset)
2060                                 goto out;
2061                         r = -EFAULT;
2062                         if (copy_from_user(&sigset, sigmask_arg->sigset,
2063                                            sizeof sigset))
2064                                 goto out;
2065                         p = &sigset;
2066                 }
2067                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p); /* NULL clears it */
2068                 break;
2069         }
2070         case KVM_GET_FPU: {
2071                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2072                 r = -ENOMEM;
2073                 if (!fpu)
2074                         goto out;
2075                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
2076                 if (r)
2077                         goto out;
2078                 r = -EFAULT;
2079                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
2080                         goto out;
2081                 r = 0;
2082                 break;
2083         }
2084         case KVM_SET_FPU: {
2085                 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
2086                 r = -ENOMEM;
2087                 if (!fpu)
2088                         goto out;
2089                 r = -EFAULT;
2090                 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
2091                         goto out;
2092                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
2093                 if (r)
2094                         goto out;
2095                 r = 0;
2096                 break;
2097         }
2098         default:
2099                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
2100         }
2101 out:
2102         kfree(fpu);
2103         kfree(kvm_sregs);
2104         return r;
2105 }
2106
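/*
 * Userspace sketch (compiled out) of the canonical run loop driven by the
 * KVM_RUN case above: the argument must be zero, a pending signal surfaces
 * as -EINTR, and exit state is read from the mmap'ed kvm_run page.
 * Assumes <errno.h> and <sys/ioctl.h>.
 */
#if 0
static void demo_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			return;
		switch (run->exit_reason) {
		case KVM_EXIT_IO:
			/* decode run->io.* and emulate the port access */
			break;
		case KVM_EXIT_MMIO:
			/* decode run->mmio.* and emulate the access */
			break;
		default:
			return;	/* e.g. KVM_EXIT_HLT or KVM_EXIT_FAIL_ENTRY */
		}
	}
}
#endif
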
2107 static long kvm_vm_ioctl(struct file *filp,
2108                            unsigned int ioctl, unsigned long arg)
2109 {
2110         struct kvm *kvm = filp->private_data;
2111         void __user *argp = (void __user *)arg;
2112         int r;
2113
2114         if (kvm->mm != current->mm)
2115                 return -EIO;
2116         switch (ioctl) {
2117         case KVM_CREATE_VCPU:
2118                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
2119                 if (r < 0)
2120                         goto out;
2121                 break;
2122         case KVM_SET_USER_MEMORY_REGION: {
2123                 struct kvm_userspace_memory_region kvm_userspace_mem;
2124
2125                 r = -EFAULT;
2126                 if (copy_from_user(&kvm_userspace_mem, argp,
2127                                                 sizeof kvm_userspace_mem))
2128                         goto out;
2129
2130                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
2131                 if (r)
2132                         goto out;
2133                 break;
2134         }
2135         case KVM_GET_DIRTY_LOG: {
2136                 struct kvm_dirty_log log;
2137
2138                 r = -EFAULT;
2139                 if (copy_from_user(&log, argp, sizeof log))
2140                         goto out;
2141                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
2142                 if (r)
2143                         goto out;
2144                 break;
2145         }
2146 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2147         case KVM_REGISTER_COALESCED_MMIO: {
2148                 struct kvm_coalesced_mmio_zone zone;
2149
2150                 r = -EFAULT;
2151                 if (copy_from_user(&zone, argp, sizeof zone))
2152                         goto out;
2153                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
2154                 if (r)
2155                         goto out;
2156                 r = 0;
2157                 break;
2158         }
2159         case KVM_UNREGISTER_COALESCED_MMIO: {
2160                 struct kvm_coalesced_mmio_zone zone;
2161
2162                 r = -EFAULT;
2163                 if (copy_from_user(&zone, argp, sizeof zone))
2164                         goto out;
2165                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
2166                 if (r)
2167                         goto out;
2168                 r = 0;
2169                 break;
2170         }
2171 #endif
2172 #ifdef KVM_CAP_DEVICE_ASSIGNMENT
2173         case KVM_ASSIGN_PCI_DEVICE: {
2174                 struct kvm_assigned_pci_dev assigned_dev;
2175
2176                 r = -EFAULT;
2177                 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2178                         goto out;
2179                 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
2180                 if (r)
2181                         goto out;
2182                 break;
2183         }
2184         case KVM_ASSIGN_IRQ: {
2185                 r = -EOPNOTSUPP;
2186                 break;
2187         }
2188 #ifdef KVM_CAP_ASSIGN_DEV_IRQ
2189         case KVM_ASSIGN_DEV_IRQ: {
2190                 struct kvm_assigned_irq assigned_irq;
2191
2192                 r = -EFAULT;
2193                 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2194                         goto out;
2195                 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
2196                 if (r)
2197                         goto out;
2198                 break;
2199         }
2200         case KVM_DEASSIGN_DEV_IRQ: {
2201                 struct kvm_assigned_irq assigned_irq;
2202
2203                 r = -EFAULT;
2204                 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2205                         goto out;
2206                 r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
2207                 if (r)
2208                         goto out;
2209                 break;
2210         }
2211 #endif
2212 #endif
2213 #ifdef KVM_CAP_DEVICE_DEASSIGNMENT
2214         case KVM_DEASSIGN_PCI_DEVICE: {
2215                 struct kvm_assigned_pci_dev assigned_dev;
2216
2217                 r = -EFAULT;
2218                 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2219                         goto out;
2220                 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
2221                 if (r)
2222                         goto out;
2223                 break;
2224         }
2225 #endif
2226 #ifdef KVM_CAP_IRQ_ROUTING
2227         case KVM_SET_GSI_ROUTING: {
2228                 struct kvm_irq_routing routing;
2229                 struct kvm_irq_routing __user *urouting;
2230                 struct kvm_irq_routing_entry *entries;
2231
2232                 r = -EFAULT;
2233                 if (copy_from_user(&routing, argp, sizeof(routing)))
2234                         goto out;
2235                 r = -EINVAL;
2236                 if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2237                         goto out;
2238                 if (routing.flags)
2239                         goto out;
2240                 r = -ENOMEM;
2241                 entries = vmalloc(routing.nr * sizeof(*entries));
2242                 if (!entries)
2243                         goto out;
2244                 r = -EFAULT;
2245                 urouting = argp;
2246                 if (copy_from_user(entries, urouting->entries,
2247                                    routing.nr * sizeof(*entries)))
2248                         goto out_free_irq_routing;
2249                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2250                                         routing.flags);
2251         out_free_irq_routing:
2252                 vfree(entries);
2253                 break;
2254         }
2255 #endif /* KVM_CAP_IRQ_ROUTING */
2256 #ifdef __KVM_HAVE_MSIX
2257         case KVM_ASSIGN_SET_MSIX_NR: {
2258                 struct kvm_assigned_msix_nr entry_nr;
2259                 r = -EFAULT;
2260                 if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
2261                         goto out;
2262                 r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
2263                 if (r)
2264                         goto out;
2265                 break;
2266         }
2267         case KVM_ASSIGN_SET_MSIX_ENTRY: {
2268                 struct kvm_assigned_msix_entry entry;
2269                 r = -EFAULT;
2270                 if (copy_from_user(&entry, argp, sizeof entry))
2271                         goto out;
2272                 r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
2273                 if (r)
2274                         goto out;
2275                 break;
2276         }
2277 #endif
2278         case KVM_IRQFD: {
2279                 struct kvm_irqfd data;
2280
2281                 r = -EFAULT;
2282                 if (copy_from_user(&data, argp, sizeof data))
2283                         goto out;
2284                 r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
2285                 break;
2286         }
2287         case KVM_IOEVENTFD: {
2288                 struct kvm_ioeventfd data;
2289
2290                 r = -EFAULT;
2291                 if (copy_from_user(&data, argp, sizeof data))
2292                         goto out;
2293                 r = kvm_ioeventfd(kvm, &data);
2294                 break;
2295         }
2296 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2297         case KVM_SET_BOOT_CPU_ID:
2298                 r = 0;
2299                 mutex_lock(&kvm->lock);
2300                 if (atomic_read(&kvm->online_vcpus) != 0)
2301                         r = -EBUSY;
2302                 else
2303                         kvm->bsp_vcpu_id = arg;
2304                 mutex_unlock(&kvm->lock);
2305                 break;
2306 #endif
2307         default:
2308                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2309         }
2310 out:
2311         return r;
2312 }
2313
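/*
 * Userspace sketch (compiled out) for KVM_SET_USER_MEMORY_REGION above:
 * guest RAM is just a page-aligned mapping in the caller's address space
 * (the kvm->mm != current->mm check ties every vm ioctl to the creating
 * process).  Slot 0 and the identity layout are illustrative.
 */
#if 0
static int demo_add_ram(int vm_fd, __u64 gpa, __u64 size, void *host)
{
	struct kvm_userspace_memory_region mr = {
		.slot = 0,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (unsigned long)host,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);
}
#endif
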
2314 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2315 {
2316         struct page *page[1];
2317         unsigned long addr;
2318         int npages;
2319         gfn_t gfn = vmf->pgoff;
2320         struct kvm *kvm = vma->vm_file->private_data;
2321
2322         addr = gfn_to_hva(kvm, gfn);
2323         if (kvm_is_error_hva(addr))
2324                 return VM_FAULT_SIGBUS;
2325
2326         npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2327                                 NULL);
2328         if (unlikely(npages != 1))
2329                 return VM_FAULT_SIGBUS;
2330
2331         vmf->page = page[0];
2332         return 0;
2333 }
2334
2335 static const struct vm_operations_struct kvm_vm_vm_ops = {
2336         .fault = kvm_vm_fault,
2337 };
2338
2339 static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2340 {
2341         vma->vm_ops = &kvm_vm_vm_ops;
2342         return 0;
2343 }
2344
2345 static struct file_operations kvm_vm_fops = {
2346         .release        = kvm_vm_release,
2347         .unlocked_ioctl = kvm_vm_ioctl,
2348         .compat_ioctl   = kvm_vm_ioctl,
2349         .mmap           = kvm_vm_mmap,
2350 };
2351
2352 static int kvm_dev_ioctl_create_vm(void)
2353 {
2354         int fd;
2355         struct kvm *kvm;
2356
2357         kvm = kvm_create_vm();
2358         if (IS_ERR(kvm))
2359                 return PTR_ERR(kvm);
2360         fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
2361         if (fd < 0)
2362                 kvm_put_kvm(kvm);
2363
2364         return fd;
2365 }
2366
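/*
 * Userspace bootstrap sketch (compiled out): open the character device
 * registered as kvm_dev below, verify the ABI, then ask for a VM fd via
 * the path above.  Assumes <fcntl.h> and <sys/ioctl.h>.
 */
#if 0
static int demo_open_vm(void)
{
	int sys_fd = open("/dev/kvm", O_RDWR);

	if (sys_fd < 0)
		return -1;
	if (ioctl(sys_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
		return -1;
	return ioctl(sys_fd, KVM_CREATE_VM, 0);	/* a new VM fd */
}
#endif
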
2367 static long kvm_dev_ioctl_check_extension_generic(long arg)
2368 {
2369         switch (arg) {
2370         case KVM_CAP_USER_MEMORY:
2371         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
2372         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
2373 #ifdef CONFIG_KVM_APIC_ARCHITECTURE
2374         case KVM_CAP_SET_BOOT_CPU_ID:
2375 #endif
2376                 return 1;
2377 #ifdef CONFIG_HAVE_KVM_IRQCHIP
2378         case KVM_CAP_IRQ_ROUTING:
2379                 return KVM_MAX_IRQ_ROUTES;
2380 #endif
2381         default:
2382                 break;
2383         }
2384         return kvm_dev_ioctl_check_extension(arg);
2385 }
2386
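/*
 * Usage sketch (compiled out): capabilities are probed on the /dev/kvm fd;
 * zero means absent, while positive values may also carry a limit
 * (KVM_CAP_IRQ_ROUTING reports KVM_MAX_IRQ_ROUTES above).
 */
#if 0
static int demo_has_cap(int sys_fd, long cap)
{
	return ioctl(sys_fd, KVM_CHECK_EXTENSION, cap) > 0;
}
#endif
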
2387 static long kvm_dev_ioctl(struct file *filp,
2388                           unsigned int ioctl, unsigned long arg)
2389 {
2390         long r = -EINVAL;
2391
2392         switch (ioctl) {
2393         case KVM_GET_API_VERSION:
2394                 r = -EINVAL;
2395                 if (arg)
2396                         goto out;
2397                 r = KVM_API_VERSION;
2398                 break;
2399         case KVM_CREATE_VM:
2400                 r = -EINVAL;
2401                 if (arg)
2402                         goto out;
2403                 r = kvm_dev_ioctl_create_vm();
2404                 break;
2405         case KVM_CHECK_EXTENSION:
2406                 r = kvm_dev_ioctl_check_extension_generic(arg);
2407                 break;
2408         case KVM_GET_VCPU_MMAP_SIZE:
2409                 r = -EINVAL;
2410                 if (arg)
2411                         goto out;
2412                 r = PAGE_SIZE;     /* struct kvm_run */
2413 #ifdef CONFIG_X86
2414                 r += PAGE_SIZE;    /* pio data page */
2415 #endif
2416 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2417                 r += PAGE_SIZE;    /* coalesced mmio ring page */
2418 #endif
2419                 break;
2420         case KVM_TRACE_ENABLE:
2421         case KVM_TRACE_PAUSE:
2422         case KVM_TRACE_DISABLE:
2423                 r = -EOPNOTSUPP;
2424                 break;
2425         default:
2426                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
2427         }
2428 out:
2429         return r;
2430 }
2431
2432 static struct file_operations kvm_chardev_ops = {
2433         .unlocked_ioctl = kvm_dev_ioctl,
2434         .compat_ioctl   = kvm_dev_ioctl,
2435 };
2436
2437 static struct miscdevice kvm_dev = {
2438         .minor = KVM_MINOR,
2439         .name  = "kvm",
2440         .fops  = &kvm_chardev_ops,
2441 };
2442
2443 static void hardware_enable(void *junk)
2444 {
2445         int cpu = raw_smp_processor_id();
2446
2447         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
2448                 return;
2449         cpumask_set_cpu(cpu, cpus_hardware_enabled);
2450         kvm_arch_hardware_enable(NULL);
2451 }
2452
2453 static void hardware_disable(void *junk)
2454 {
2455         int cpu = raw_smp_processor_id();
2456
2457         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
2458                 return;
2459         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
2460         kvm_arch_hardware_disable(NULL);
2461 }
2462
2463 static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2464                            void *v)
2465 {
2466         int cpu = (long)v;
2467
2468         val &= ~CPU_TASKS_FROZEN;
2469         switch (val) {
2470         case CPU_DYING:
2471                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2472                        cpu);
2473                 hardware_disable(NULL);
2474                 break;
2475         case CPU_UP_CANCELED:
2476                 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2477                        cpu);
2478                 smp_call_function_single(cpu, hardware_disable, NULL, 1);
2479                 break;
2480         case CPU_ONLINE:
2481                 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2482                        cpu);
2483                 smp_call_function_single(cpu, hardware_enable, NULL, 1);
2484                 break;
2485         }
2486         return NOTIFY_OK;
2487 }
2488
2489
2490 asmlinkage void kvm_handle_fault_on_reboot(void)
2491 {
2492         if (kvm_rebooting)
2493                 /* spin while reset goes on */
2494                 while (true)
2495                         cpu_relax();
2496         /* Fault while not rebooting.  We want the trace. */
2497         BUG();
2498 }
2499 EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2500
2501 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
2502                       void *v)
2503 {
2504         /*
2505          * Some (well, at least mine) BIOSes hang on reboot if
2506          * in vmx root mode.
2507          *
2508          * Intel TXT also requires VMX off on all CPUs at shutdown.
2509          */
2510         printk(KERN_INFO "kvm: exiting hardware virtualization\n");
2511         kvm_rebooting = true;
2512         on_each_cpu(hardware_disable, NULL, 1);
2513         return NOTIFY_OK;
2514 }
2515
2516 static struct notifier_block kvm_reboot_notifier = {
2517         .notifier_call = kvm_reboot,
2518         .priority = 0,
2519 };
2520
2521 void kvm_io_bus_init(struct kvm_io_bus *bus)
2522 {
2523         memset(bus, 0, sizeof(*bus));
2524 }
2525
2526 void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2527 {
2528         int i;
2529
2530         for (i = 0; i < bus->dev_count; i++) {
2531                 struct kvm_io_device *pos = bus->devs[i];
2532
2533                 kvm_iodevice_destructor(pos);
2534         }
2535 }
2536
2537 /* kvm_io_bus_write - called under kvm->slots_lock */
2538 int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
2539                      int len, const void *val)
2540 {
2541         int i;
2542         for (i = 0; i < bus->dev_count; i++)
2543                 if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
2544                         return 0;
2545         return -EOPNOTSUPP;
2546 }
2547
2548 /* kvm_io_bus_read - called under kvm->slots_lock */
2549 int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
2550 {
2551         int i;
2552         for (i = 0; i < bus->dev_count; i++)
2553                 if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
2554                         return 0;
2555         return -EOPNOTSUPP;
2556 }
2557
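/*
 * In-kernel sketch (compiled out) of a bus client: the scans above treat
 * 0 as "handled" and -EOPNOTSUPP as "not mine, keep looking".  A device
 * embeds a struct kvm_io_device, initialises it with kvm_iodevice_init()
 * and attaches it with kvm_io_bus_register_dev().  The names and the
 * MMIO address are illustrative.
 */
#if 0
struct demo_mmio_dev {
	struct kvm_io_device dev;
	u32 reg;
};

static int demo_mmio_write(struct kvm_io_device *this, gpa_t addr,
			   int len, const void *val)
{
	struct demo_mmio_dev *d = container_of(this, struct demo_mmio_dev, dev);

	if (addr != 0xd0000000ULL || len != 4)
		return -EOPNOTSUPP;	/* let the bus keep scanning */
	d->reg = *(const u32 *)val;
	return 0;
}

static const struct kvm_io_device_ops demo_mmio_ops = {
	.write = demo_mmio_write,
};
#endif
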
2558 int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
2559                              struct kvm_io_device *dev)
2560 {
2561         int ret;
2562
2563         down_write(&kvm->slots_lock);
2564         ret = __kvm_io_bus_register_dev(bus, dev);
2565         up_write(&kvm->slots_lock);
2566
2567         return ret;
2568 }
2569
2570 /* An unlocked version. Caller must have write lock on slots_lock. */
2571 int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
2572                               struct kvm_io_device *dev)
2573 {
2574         if (bus->dev_count >= NR_IOBUS_DEVS)
2575                 return -ENOSPC;
2576
2577         bus->devs[bus->dev_count++] = dev;
2578
2579         return 0;
2580 }
2581
2582 void kvm_io_bus_unregister_dev(struct kvm *kvm,
2583                                struct kvm_io_bus *bus,
2584                                struct kvm_io_device *dev)
2585 {
2586         down_write(&kvm->slots_lock);
2587         __kvm_io_bus_unregister_dev(bus, dev);
2588         up_write(&kvm->slots_lock);
2589 }
2590
2591 /* An unlocked version. Caller must have write lock on slots_lock. */
2592 void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
2593                                  struct kvm_io_device *dev)
2594 {
2595         int i;
2596
2597         for (i = 0; i < bus->dev_count; i++)
2598                 if (bus->devs[i] == dev) {
2599                         bus->devs[i] = bus->devs[--bus->dev_count];
2600                         break;
2601                 }
2602 }
2603
2604 static struct notifier_block kvm_cpu_notifier = {
2605         .notifier_call = kvm_cpu_hotplug,
2606         .priority = 20, /* must be > scheduler priority */
2607 };
2608
2609 static int vm_stat_get(void *_offset, u64 *val)
2610 {
2611         unsigned offset = (long)_offset;
2612         struct kvm *kvm;
2613
2614         *val = 0;
2615         spin_lock(&kvm_lock);
2616         list_for_each_entry(kvm, &vm_list, vm_list)
2617                 *val += *(u32 *)((void *)kvm + offset);
2618         spin_unlock(&kvm_lock);
2619         return 0;
2620 }
2621
2622 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2623
2624 static int vcpu_stat_get(void *_offset, u64 *val)
2625 {
2626         unsigned offset = (long)_offset;
2627         struct kvm *kvm;
2628         struct kvm_vcpu *vcpu;
2629         int i;
2630
2631         *val = 0;
2632         spin_lock(&kvm_lock);
2633         list_for_each_entry(kvm, &vm_list, vm_list)
2634                 kvm_for_each_vcpu(i, vcpu, kvm)
2635                         *val += *(u32 *)((void *)vcpu + offset);
2636
2637         spin_unlock(&kvm_lock);
2638         return 0;
2639 }
2640
2641 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2642
2643 static const struct file_operations *stat_fops[] = {
2644         [KVM_STAT_VCPU] = &vcpu_stat_fops,
2645         [KVM_STAT_VM]   = &vm_stat_fops,
2646 };
2647
2648 static void kvm_init_debug(void)
2649 {
2650         struct kvm_stats_debugfs_item *p;
2651
2652         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
2653         for (p = debugfs_entries; p->name; ++p)
2654                 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
2655                                                 (void *)(long)p->offset,
2656                                                 stat_fops[p->kind]);
2657 }
2658
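/*
 * Sketch (compiled out) of how a stat is wired up: each debugfs_entries[]
 * item stores a byte offset into struct kvm or struct kvm_vcpu, and
 * vm_stat_get() / vcpu_stat_get() above sum the u32 found at that offset
 * across all live objects.  The "exits" counter is an illustrative arch
 * field.
 */
#if 0
static struct kvm_stats_debugfs_item demo_entries[] = {
	{ "exits", offsetof(struct kvm_vcpu, stat.exits), KVM_STAT_VCPU },
	{ NULL }
};
#endif
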
2659 static void kvm_exit_debug(void)
2660 {
2661         struct kvm_stats_debugfs_item *p;
2662
2663         for (p = debugfs_entries; p->name; ++p)
2664                 debugfs_remove(p->dentry);
2665         debugfs_remove(kvm_debugfs_dir);
2666 }
2667
2668 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2669 {
2670         hardware_disable(NULL);
2671         return 0;
2672 }
2673
2674 static int kvm_resume(struct sys_device *dev)
2675 {
2676         hardware_enable(NULL);
2677         return 0;
2678 }
2679
2680 static struct sysdev_class kvm_sysdev_class = {
2681         .name = "kvm",
2682         .suspend = kvm_suspend,
2683         .resume = kvm_resume,
2684 };
2685
2686 static struct sys_device kvm_sysdev = {
2687         .id = 0,
2688         .cls = &kvm_sysdev_class,
2689 };
2690
2691 struct page *bad_page;
2692 pfn_t bad_pfn;
2693
2694 static inline
2695 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2696 {
2697         return container_of(pn, struct kvm_vcpu, preempt_notifier);
2698 }
2699
2700 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2701 {
2702         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2703
2704         kvm_arch_vcpu_load(vcpu, cpu);
2705 }
2706
2707 static void kvm_sched_out(struct preempt_notifier *pn,
2708                           struct task_struct *next)
2709 {
2710         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2711
2712         kvm_arch_vcpu_put(vcpu);
2713 }
2714
2715 int kvm_init(void *opaque, unsigned int vcpu_size,
2716                   struct module *module)
2717 {
2718         int r;
2719         int cpu;
2720
2721         r = kvm_arch_init(opaque);
2722         if (r)
2723                 goto out_fail;
2724
2725         bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2726
2727         if (bad_page == NULL) {
2728                 r = -ENOMEM;
2729                 goto out;
2730         }
2731
2732         bad_pfn = page_to_pfn(bad_page);
2733
2734         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
2735                 r = -ENOMEM;
2736                 goto out_free_0;
2737         }
2738
2739         r = kvm_arch_hardware_setup();
2740         if (r < 0)
2741                 goto out_free_0a;
2742
2743         for_each_online_cpu(cpu) {
2744                 smp_call_function_single(cpu,
2745                                 kvm_arch_check_processor_compat,
2746                                 &r, 1);
2747                 if (r < 0)
2748                         goto out_free_1;
2749         }
2750
2751         on_each_cpu(hardware_enable, NULL, 1);
2752         r = register_cpu_notifier(&kvm_cpu_notifier);
2753         if (r)
2754                 goto out_free_2;
2755         register_reboot_notifier(&kvm_reboot_notifier);
2756
2757         r = sysdev_class_register(&kvm_sysdev_class);
2758         if (r)
2759                 goto out_free_3;
2760
2761         r = sysdev_register(&kvm_sysdev);
2762         if (r)
2763                 goto out_free_4;
2764
2765         /* A kmem cache lets us meet the alignment requirements of fx_save. */
2766         kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
2767                                            __alignof__(struct kvm_vcpu),
2768                                            0, NULL);
2769         if (!kvm_vcpu_cache) {
2770                 r = -ENOMEM;
2771                 goto out_free_5;
2772         }
2773
2774         kvm_chardev_ops.owner = module;
2775         kvm_vm_fops.owner = module;
2776         kvm_vcpu_fops.owner = module;
2777
2778         r = misc_register(&kvm_dev);
2779         if (r) {
2780                 printk(KERN_ERR "kvm: misc device register failed\n");
2781                 goto out_free;
2782         }
2783
2784         kvm_preempt_ops.sched_in = kvm_sched_in;
2785         kvm_preempt_ops.sched_out = kvm_sched_out;
2786
2787         kvm_init_debug();
2788
2789         return 0;
2790
2791 out_free:
2792         kmem_cache_destroy(kvm_vcpu_cache);
2793 out_free_5:
2794         sysdev_unregister(&kvm_sysdev);
2795 out_free_4:
2796         sysdev_class_unregister(&kvm_sysdev_class);
2797 out_free_3:
2798         unregister_reboot_notifier(&kvm_reboot_notifier);
2799         unregister_cpu_notifier(&kvm_cpu_notifier);
2800 out_free_2:
2801         on_each_cpu(hardware_disable, NULL, 1);
2802 out_free_1:
2803         kvm_arch_hardware_unsetup();
2804 out_free_0a:
2805         free_cpumask_var(cpus_hardware_enabled);
2806 out_free_0:
2807         __free_page(bad_page);
2808 out:
2809         kvm_arch_exit();
2810 out_fail:
2811         return r;
2812 }
2813 EXPORT_SYMBOL_GPL(kvm_init);
2814
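/*
 * Arch-module sketch (compiled out): kvm_init() above is the entry point
 * an arch module calls from its module_init, passing its opaque ops and
 * the size of its vcpu container so the kmem cache fits the arch vcpu.
 * The demo names are illustrative stand-ins for the vmx/svm equivalents.
 */
#if 0
static int __init demo_arch_module_init(void)
{
	return kvm_init(&demo_arch_ops, sizeof(struct demo_arch_vcpu),
			THIS_MODULE);
}
module_init(demo_arch_module_init);
#endif
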
2815 void kvm_exit(void)
2816 {
2817         tracepoint_synchronize_unregister();
2818         kvm_exit_debug();
2819         misc_deregister(&kvm_dev);
2820         kmem_cache_destroy(kvm_vcpu_cache);
2821         sysdev_unregister(&kvm_sysdev);
2822         sysdev_class_unregister(&kvm_sysdev_class);
2823         unregister_reboot_notifier(&kvm_reboot_notifier);
2824         unregister_cpu_notifier(&kvm_cpu_notifier);
2825         on_each_cpu(hardware_disable, NULL, 1);
2826         kvm_arch_hardware_unsetup();
2827         kvm_arch_exit();
2828         free_cpumask_var(cpus_hardware_enabled);
2829         __free_page(bad_page);
2830 }
2831 EXPORT_SYMBOL_GPL(kvm_exit);