KVM: Add barriers to allow mmu_notifier_retry to be used locklessly
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e289486..9f32bff 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -357,11 +357,11 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
         * been freed.
         */
        kvm->mmu_notifier_seq++;
+       smp_wmb();
        /*
         * The above sequence increase must be visible before the
-        * below count decrease but both values are read by the kvm
-        * page fault under mmu_lock spinlock so we don't need to add
-        * a smb_wmb() here in between the two.
+        * below count decrease, which is ensured by the smp_wmb above
+        * in conjunction with the smp_rmb in mmu_notifier_retry().
         */
        kvm->mmu_notifier_count--;
        spin_unlock(&kvm->mmu_lock);
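The smp_wmb() above is only half of the pairing; the page-fault side must read mmu_notifier_count before mmu_notifier_seq with a matching read barrier. A minimal sketch of the reader, assuming mmu_notifier_retry() in include/linux/kvm_host.h keeps its current two-field check:

static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Pairs with the smp_wmb() added above: either the elevated
         * count is still visible, or the seq read below observes the
         * increment made before the count was dropped.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}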
@@ -449,7 +449,7 @@ static void kvm_init_memslots_id(struct kvm *kvm)
                slots->id_to_index[i] = slots->memslots[i].id = i;
 }
 
-static struct kvm *kvm_create_vm(void)
+static struct kvm *kvm_create_vm(unsigned long type)
 {
        int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();
@@ -457,7 +457,7 @@ static struct kvm *kvm_create_vm(void)
        if (!kvm)
                return ERR_PTR(-ENOMEM);
 
-       r = kvm_arch_init_vm(kvm);
+       r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_nodisable;
 
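kvm_create_vm() now forwards the KVM_CREATE_VM ioctl argument to the architecture hook. Purely as an illustration (each architecture defines its own policy), an arch that only supports the default machine type could consume it like this:

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)               /* only the default type (0) is recognised here */
                return -EINVAL;
        /* ... existing per-arch VM initialisation ... */
        return 0;
}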
@@ -1543,7 +1543,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-               if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
+               if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
                        memslot->nr_dirty_pages++;
        }
 }
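The switch to the locked bit operation matters because __test_and_set_bit_le() is a plain read-modify-write; two writers dirtying pages whose bits share a bitmap word can lose an update. Illustrative interleaving, not taken from the patch:

/*
 *   CPU0                           CPU1
 *   old = word;                    old = word;
 *   word = old | BIT(gfn_a);
 *                                  word = old | BIT(gfn_b);   <- gfn_a's bit lost
 *
 * test_and_set_bit_le() makes the read-modify-write atomic, so both
 * dirty bits survive even when the callers are not serialised.
 */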
@@ -1657,7 +1657,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
 #endif
        else
-               return VM_FAULT_SIGBUS;
+               return kvm_arch_vcpu_fault(vcpu, vmf);
        get_page(page);
        vmf->page = page;
        return 0;
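Unknown vcpu mmap offsets are now deferred to the architecture instead of failing outright. A hedged sketch of the fallback an architecture with nothing extra to map would provide:

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;         /* nothing arch-specific to map */
}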
@@ -1743,10 +1743,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        smp_wmb();
        atomic_inc(&kvm->online_vcpus);
 
-#ifdef CONFIG_KVM_APIC_ARCHITECTURE
-       if (kvm->bsp_vcpu_id == id)
-               kvm->bsp_vcpu = vcpu;
-#endif
        mutex_unlock(&kvm->lock);
        return r;
 
@@ -1821,12 +1817,11 @@ out_free1:
                struct kvm_regs *kvm_regs;
 
                r = -ENOMEM;
-               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
-               if (!kvm_regs)
+               kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
+               if (IS_ERR(kvm_regs)) {
+                       r = PTR_ERR(kvm_regs);
                        goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
-                       goto out_free2;
+               }
                r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
                        goto out_free2;
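This and the KVM_SET_SREGS / KVM_SET_FPU cases below replace the kmalloc()/copy_from_user() pairs with memdup_user(), which allocates and copies in one step and reports failure via ERR_PTR(). Roughly, as a simplified sketch rather than the exact mm/util.c implementation:

void *memdup_user(const void __user *src, size_t len)
{
        void *p = kmalloc(len, GFP_KERNEL);

        if (!p)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        return p;
}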
@@ -1850,13 +1845,11 @@ out_free2:
                break;
        }
        case KVM_SET_SREGS: {
-               kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!kvm_sregs)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
+               kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
+               if (IS_ERR(kvm_sregs)) {
+                       r = PTR_ERR(kvm_sregs);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
                if (r)
                        goto out;
@@ -1952,13 +1945,11 @@ out_free2:
                break;
        }
        case KVM_SET_FPU: {
-               fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
-               r = -ENOMEM;
-               if (!fpu)
-                       goto out;
-               r = -EFAULT;
-               if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
+               fpu = memdup_user(argp, sizeof(*fpu));
+               if (IS_ERR(fpu)) {
+                       r = PTR_ERR(fpu);
                        goto out;
+               }
                r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
                if (r)
                        goto out;
@@ -2207,12 +2198,12 @@ static struct file_operations kvm_vm_fops = {
        .llseek         = noop_llseek,
 };
 
-static int kvm_dev_ioctl_create_vm(void)
+static int kvm_dev_ioctl_create_vm(unsigned long type)
 {
        int r;
        struct kvm *kvm;
 
-       kvm = kvm_create_vm();
+       kvm = kvm_create_vm(type);
        if (IS_ERR(kvm))
                return PTR_ERR(kvm);
 #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
@@ -2263,10 +2254,7 @@ static long kvm_dev_ioctl(struct file *filp,
                r = KVM_API_VERSION;
                break;
        case KVM_CREATE_VM:
-               r = -EINVAL;
-               if (arg)
-                       goto out;
-               r = kvm_dev_ioctl_create_vm();
+               r = kvm_dev_ioctl_create_vm(arg);
                break;
        case KVM_CHECK_EXTENSION:
                r = kvm_dev_ioctl_check_extension_generic(arg);
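With the -EINVAL check gone, the ioctl argument is treated as a machine-type selector and handed straight to kvm_dev_ioctl_create_vm(). The call is unchanged for existing userspace; an illustrative fragment:

int sys_fd = open("/dev/kvm", O_RDWR);
int vm_fd  = ioctl(sys_fd, KVM_CREATE_VM, 0UL); /* type 0 keeps the default VM */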
@@ -2591,13 +2579,12 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
        int i, r;
        struct kvm_io_bus *new_bus, *bus;
 
-       new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+       bus = kvm->buses[bus_idx];
+
+       new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
        if (!new_bus)
                return -ENOMEM;
 
-       bus = kvm->buses[bus_idx];
-       memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
-
        r = -ENOENT;
        for (i = 0; i < new_bus->dev_count; i++)
                if (new_bus->range[i].dev == dev) {
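kmemdup() is allocate-and-copy in one call, so hoisting the bus lookup lets the kzalloc()+memcpy() pair go away without changing behaviour. Sketch of the equivalence:

/*
 *   new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
 * behaves like:
 *   new_bus = kmalloc(sizeof(*bus), GFP_KERNEL);
 *   if (new_bus)
 *           memcpy(new_bus, bus, sizeof(*bus));
 */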
@@ -2664,15 +2651,29 @@ static const struct file_operations *stat_fops[] = {
        [KVM_STAT_VM]   = &vm_stat_fops,
 };
 
-static void kvm_init_debug(void)
+static int kvm_init_debug(void)
 {
+       int r = -EFAULT;
        struct kvm_stats_debugfs_item *p;
 
        kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
-       for (p = debugfs_entries; p->name; ++p)
+       if (kvm_debugfs_dir == NULL)
+               goto out;
+
+       for (p = debugfs_entries; p->name; ++p) {
                p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
+               if (p->dentry == NULL)
+                       goto out_dir;
+       }
+
+       return 0;
+
+out_dir:
+       debugfs_remove_recursive(kvm_debugfs_dir);
+out:
+       return r;
 }
 
 static void kvm_exit_debug(void)
@@ -2816,10 +2817,16 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
        kvm_preempt_ops.sched_in = kvm_sched_in;
        kvm_preempt_ops.sched_out = kvm_sched_out;
 
-       kvm_init_debug();
+       r = kvm_init_debug();
+       if (r) {
+               printk(KERN_ERR "kvm: create debugfs files failed\n");
+               goto out_undebugfs;
+       }
 
        return 0;
 
+out_undebugfs:
+       unregister_syscore_ops(&kvm_syscore_ops);
 out_unreg:
        kvm_async_pf_deinit();
 out_free:
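The new out_undebugfs label only has to undo the register_syscore_ops(&kvm_syscore_ops) call made earlier in kvm_init(), then falls through the pre-existing labels so teardown still runs in reverse order of setup. The resulting chain, using the labels already present in the function:

out_undebugfs:
        unregister_syscore_ops(&kvm_syscore_ops);      /* undo register_syscore_ops() */
out_unreg:
        kvm_async_pf_deinit();                          /* undo kvm_async_pf_init() */
out_free:
        /* ... earlier setup unwound by the remaining labels ... */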