KVM: kvm_io_device: extend in_range() to manage len and write attribute
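
The call-site change to kvm_io_bus_find_dev() in the diff below implies a matching change to the in_range() callback of struct kvm_io_device, which is defined in virt/kvm/iodev.h and is not part of this file. As a rough sketch, assuming the kvm_io_device layout of this kernel generation, the extended callback and a hypothetical device-side implementation would look roughly like this:

/*
 * Sketch only: the real definition lives in virt/kvm/iodev.h and in the
 * individual device models; the shape below is inferred from the
 * kvm_io_bus_find_dev() call site in this diff.
 */
struct kvm_io_device {
	void (*read)(struct kvm_io_device *this, gpa_t addr, int len,
		     void *val);
	void (*write)(struct kvm_io_device *this, gpa_t addr, int len,
		      const void *val);
	/* now also told the access length and whether it is a write */
	int (*in_range)(struct kvm_io_device *this, gpa_t addr, int len,
			int is_write);
	void (*destructor)(struct kvm_io_device *this);
	void *private;
};

/*
 * Hypothetical device that claims a fixed 4K MMIO window for any access
 * length, reads and writes alike.
 */
static int example_dev_in_range(struct kvm_io_device *this, gpa_t addr,
				int len, int is_write)
{
	const gpa_t base = 0xfed00000ULL;	/* illustrative address */
	const gpa_t size = 0x1000;

	return addr >= base && addr + len <= base + size;
}

Passing len and is_write to in_range() lets a device refuse, for example, misaligned or write accesses to a read-only range before its read()/write() handlers are ever reached.
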
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2d29e260da3d1b56830794c8dfb1719119195086..9330fad2b918ae7e36da2cacc2a9580798736017 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -65,6 +65,8 @@ struct dentry *kvm_debugfs_dir;
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
 
+bool kvm_rebooting;
+
 static inline int valid_vcpu(int n)
 {
        return likely(n >= 0 && n < KVM_MAX_VCPUS);
@@ -532,6 +534,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
        struct page *page[1];
        unsigned long addr;
        int npages;
+       pfn_t pfn;
 
        might_sleep();
 
@@ -544,19 +547,38 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
                                NULL);
 
-       if (npages != 1) {
-               get_page(bad_page);
-               return page_to_pfn(bad_page);
-       }
+       if (unlikely(npages != 1)) {
+               struct vm_area_struct *vma;
 
-       return page_to_pfn(page[0]);
+               vma = find_vma(current->mm, addr);
+               if (vma == NULL || addr < vma->vm_start ||
+                   !(vma->vm_flags & VM_PFNMAP)) {
+                       get_page(bad_page);
+                       return page_to_pfn(bad_page);
+               }
+
+               pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+               BUG_ON(pfn_valid(pfn));
+       } else
+               pfn = page_to_pfn(page[0]);
+
+       return pfn;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 {
-       return pfn_to_page(gfn_to_pfn(kvm, gfn));
+       pfn_t pfn;
+
+       pfn = gfn_to_pfn(kvm, gfn);
+       if (pfn_valid(pfn))
+               return pfn_to_page(pfn);
+
+       WARN_ON(!pfn_valid(pfn));
+
+       get_page(bad_page);
+       return bad_page;
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
@@ -569,7 +591,8 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-       put_page(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
 
@@ -594,21 +617,25 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-       struct page *page = pfn_to_page(pfn);
-       if (!PageReserved(page))
-               SetPageDirty(page);
+       if (pfn_valid(pfn)) {
+               struct page *page = pfn_to_page(pfn);
+               if (!PageReserved(page))
+                       SetPageDirty(page);
+       }
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-       mark_page_accessed(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-       get_page(pfn_to_page(pfn));
+       if (pfn_valid(pfn))
+               get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
@@ -1179,7 +1206,6 @@ static int kvm_dev_ioctl_create_vm(void)
 static long kvm_dev_ioctl(struct file *filp,
                          unsigned int ioctl, unsigned long arg)
 {
-       void __user *argp = (void __user *)arg;
        long r = -EINVAL;
 
        switch (ioctl) {
@@ -1196,7 +1222,7 @@ static long kvm_dev_ioctl(struct file *filp,
                r = kvm_dev_ioctl_create_vm();
                break;
        case KVM_CHECK_EXTENSION:
-               r = kvm_dev_ioctl_check_extension((long)argp);
+               r = kvm_dev_ioctl_check_extension(arg);
                break;
        case KVM_GET_VCPU_MMAP_SIZE:
                r = -EINVAL;
@@ -1247,7 +1273,6 @@ static void hardware_disable(void *junk)
        if (!cpu_isset(cpu, cpus_hardware_enabled))
                return;
        cpu_clear(cpu, cpus_hardware_enabled);
-       decache_vcpus_on_cpu(cpu);
        kvm_arch_hardware_disable(NULL);
 }
 
@@ -1266,17 +1291,29 @@ static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
        case CPU_UP_CANCELED:
                printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
+               smp_call_function_single(cpu, hardware_disable, NULL, 1);
                break;
        case CPU_ONLINE:
                printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
                       cpu);
-               smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
+               smp_call_function_single(cpu, hardware_enable, NULL, 1);
                break;
        }
        return NOTIFY_OK;
 }
 
+
+asmlinkage void kvm_handle_fault_on_reboot(void)
+{
+       if (kvm_rebooting)
+               /* spin while reset goes on */
+               while (true)
+                       ;
+       /* Fault while not rebooting.  We want the trace. */
+       BUG();
+}
+EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
+
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                      void *v)
 {
@@ -1286,7 +1323,8 @@ static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
                 * in vmx root mode.
                 */
                printk(KERN_INFO "kvm: exiting hardware virtualization\n");
-               on_each_cpu(hardware_disable, NULL, 0, 1);
+               kvm_rebooting = true;
+               on_each_cpu(hardware_disable, NULL, 1);
        }
        return NOTIFY_OK;
 }
@@ -1312,14 +1350,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
        }
 }
 
-struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
+struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
+                                         gpa_t addr, int len, int is_write)
 {
        int i;
 
        for (i = 0; i < bus->dev_count; i++) {
                struct kvm_io_device *pos = bus->devs[i];
 
-               if (pos->in_range(pos, addr))
+               if (pos->in_range(pos, addr, len, is_write))
                        return pos;
        }
 
@@ -1474,12 +1513,12 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu,
                                kvm_arch_check_processor_compat,
-                               &r, 0, 1);
+                               &r, 1);
                if (r < 0)
                        goto out_free_1;
        }
 
-       on_each_cpu(hardware_enable, NULL, 0, 1);
+       on_each_cpu(hardware_enable, NULL, 1);
        r = register_cpu_notifier(&kvm_cpu_notifier);
        if (r)
                goto out_free_2;
@@ -1525,7 +1564,7 @@ out_free_3:
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
 out_free_2:
-       on_each_cpu(hardware_disable, NULL, 0, 1);
+       on_each_cpu(hardware_disable, NULL, 1);
 out_free_1:
        kvm_arch_hardware_unsetup();
 out_free_0:
@@ -1547,7 +1586,7 @@ void kvm_exit(void)
        sysdev_class_unregister(&kvm_sysdev_class);
        unregister_reboot_notifier(&kvm_reboot_notifier);
        unregister_cpu_notifier(&kvm_cpu_notifier);
-       on_each_cpu(hardware_disable, NULL, 0, 1);
+       on_each_cpu(hardware_disable, NULL, 1);
        kvm_arch_hardware_unsetup();
        kvm_arch_exit();
        kvm_exit_debug();
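
A note on the gfn_to_pfn()/pfn_valid() hunks above: with the VM_PFNMAP handling, gfn_to_pfn() can return a raw MMIO page frame that has no struct page behind it, which is why every helper that touches struct page is now guarded by pfn_valid(). A hypothetical caller (not from this file) showing the intended usage pattern:

/*
 * Illustration only; the real users are in the arch MMU code.  The point
 * is that refcounting and dirty/accessed tracking silently become no-ops
 * when the pfn is not RAM-backed.
 */
static void example_map_gfn(struct kvm *kvm, gfn_t gfn, int make_dirty)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	/* ... install pfn into the shadow page tables here ... */

	kvm_set_pfn_accessed(pfn);	/* skipped internally for MMIO pfns */
	if (make_dirty)
		kvm_set_pfn_dirty(pfn);	/* likewise guarded by pfn_valid() */

	kvm_release_pfn_clean(pfn);	/* drops the page reference only if one was taken */
}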