KVM: Clean up unneeded void pointer casts
[linux-2.6.git] / virt / kvm / eventfd.c
index 486c604..f59c1e8 100644 (file)
@@ -2,6 +2,7 @@
  * kvm eventfd support - use eventfd objects to signal various KVM events
  *
  * Copyright 2009 Novell.  All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Author:
  *     Gregory Haskins <ghaskins@novell.com>
@@ -30,6 +31,7 @@
 #include <linux/list.h>
 #include <linux/eventfd.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
 
 #include "iodev.h"
 
  */
 
 struct _irqfd {
-       struct kvm               *kvm;
-       struct eventfd_ctx       *eventfd;
-       int                       gsi;
-       struct list_head          list;
-       poll_table                pt;
-       wait_queue_head_t        *wqh;
-       wait_queue_t              wait;
-       struct work_struct        inject;
-       struct work_struct        shutdown;
+       /* Used for MSI fast-path */
+       struct kvm *kvm;
+       wait_queue_t wait;
+       /* Update side is protected by irqfds.lock */
+       struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
+       /* Used for level IRQ fast-path */
+       int gsi;
+       struct work_struct inject;
+       /* Used for setup/shutdown */
+       struct eventfd_ctx *eventfd;
+       struct list_head list;
+       poll_table pt;
+       struct work_struct shutdown;
 };
 
 static struct workqueue_struct *irqfd_cleanup_wq;
@@ -84,7 +90,7 @@ irqfd_shutdown(struct work_struct *work)
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
-       flush_work(&irqfd->inject);
+       flush_work_sync(&irqfd->inject);
 
        /*
         * It is now safe to release the object's resources
@@ -124,14 +130,22 @@ irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
 {
        struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
        unsigned long flags = (unsigned long)key;
+       struct kvm_kernel_irq_routing_entry *irq;
+       struct kvm *kvm = irqfd->kvm;
 
-       if (flags & POLLIN)
+       if (flags & POLLIN) {
+               rcu_read_lock();
+               irq = rcu_dereference(irqfd->irq_entry);
                /* An event has been signaled, inject an interrupt */
-               schedule_work(&irqfd->inject);
+               if (irq)
+                       kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
+               else
+                       schedule_work(&irqfd->inject);
+               rcu_read_unlock();
+       }
 
        if (flags & POLLHUP) {
                /* The eventfd is closing, detach from KVM */
-               struct kvm *kvm = irqfd->kvm;
                unsigned long flags;
 
                spin_lock_irqsave(&kvm->irqfds.lock, flags);
@@ -159,14 +173,34 @@ irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
 {
        struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
-
-       irqfd->wqh = wqh;
        add_wait_queue(wqh, &irqfd->wait);
 }
 
+/* Must be called under irqfds.lock */
+static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
+                        struct kvm_irq_routing_table *irq_rt)
+{
+       struct kvm_kernel_irq_routing_entry *e;
+       struct hlist_node *n;
+
+       if (irqfd->gsi >= irq_rt->nr_rt_entries) {
+               rcu_assign_pointer(irqfd->irq_entry, NULL);
+               return;
+       }
+
+       hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
+               /* Only fast-path MSI. */
+               if (e->type == KVM_IRQ_ROUTING_MSI)
+                       rcu_assign_pointer(irqfd->irq_entry, e);
+               else
+                       rcu_assign_pointer(irqfd->irq_entry, NULL);
+       }
+}
+
 static int
 kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
 {
+       struct kvm_irq_routing_table *irq_rt;
        struct _irqfd *irqfd, *tmp;
        struct file *file = NULL;
        struct eventfd_ctx *eventfd = NULL;
@@ -216,10 +250,13 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
                goto fail;
        }
 
+       irq_rt = rcu_dereference_protected(kvm->irq_routing,
+                                          lockdep_is_held(&kvm->irqfds.lock));
+       irqfd_update(kvm, irqfd, irq_rt);
+
        events = file->f_op->poll(file, &irqfd->pt);
 
        list_add_tail(&irqfd->list, &kvm->irqfds.items);
-       spin_unlock_irq(&kvm->irqfds.lock);
 
        /*
         * Check if there was an event already pending on the eventfd
@@ -228,6 +265,8 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
        if (events & POLLIN)
                schedule_work(&irqfd->inject);
 
+       spin_unlock_irq(&kvm->irqfds.lock);
+
        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
         * we might race against the POLLHUP
@@ -271,8 +310,18 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
        spin_lock_irq(&kvm->irqfds.lock);
 
        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
-               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
+               if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+                       /*
+                        * This rcu_assign_pointer is needed for the case where
+                        * another thread calls kvm_irq_routing_update before
+                        * we flush the workqueue below (we synchronize with
+                        * kvm_irq_routing_update using irqfds.lock).
+                        * It is paired with the synchronize_rcu done by the
+                        * caller of that function.
+                        */
+                       rcu_assign_pointer(irqfd->irq_entry, NULL);
                        irqfd_deactivate(irqfd);
+               }
        }
 
        spin_unlock_irq(&kvm->irqfds.lock);
@@ -322,6 +371,25 @@ kvm_irqfd_release(struct kvm *kvm)
 }
 
 /*
+ * Change irq_routing and irqfd.
+ * Caller must invoke synchronize_rcu afterwards.
+ */
+void kvm_irq_routing_update(struct kvm *kvm,
+                           struct kvm_irq_routing_table *irq_rt)
+{
+       struct _irqfd *irqfd;
+
+       spin_lock_irq(&kvm->irqfds.lock);
+
+       rcu_assign_pointer(kvm->irq_routing, irq_rt);
+
+       list_for_each_entry(irqfd, &kvm->irqfds.items, list)
+               irqfd_update(kvm, irqfd, irq_rt);
+
+       spin_unlock_irq(&kvm->irqfds.lock);
+}
+
+/*
  * create a host-wide workqueue for issuing deferred shutdown requests
  * aggregated from all vm* instances. We need our own isolated single-thread
  * queue to prevent deadlock against flushing the normal work-queue.
@@ -510,7 +578,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
        mutex_lock(&kvm->slots_lock);
 
-       /* Verify that there isnt a match already */
+       /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
@@ -518,7 +586,8 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
 
        kvm_iodevice_init(&p->dev, &ioeventfd_ops);
 
-       ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
+       ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
+                                     &p->dev);
        if (ret < 0)
                goto unlock_fail;