vhost: lock receive queue, not the socket
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index b5c4947..2ab2912 100644
@@ -15,6 +15,7 @@
 #include <linux/vhost.h>
 #include <linux/virtio_net.h>
 #include <linux/mm.h>
+#include <linux/mmu_context.h>
 #include <linux/miscdevice.h>
 #include <linux/mutex.h>
 #include <linux/rcupdate.h>
@@ -29,8 +30,6 @@
 #include <linux/if_packet.h>
 #include <linux/if_arp.h>
 
-#include <net/sock.h>
-
 #include "vhost.h"
 
 enum {
@@ -42,8 +41,8 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
                            poll_table *pt)
 {
        struct vhost_poll *poll;
-       poll = container_of(pt, struct vhost_poll, table);
 
+       poll = container_of(pt, struct vhost_poll, table);
        poll->wqh = wqh;
        add_wait_queue(wqh, &poll->wait);
 }
@@ -60,22 +59,25 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
        return 0;
 }
 
+static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+{
+       INIT_LIST_HEAD(&work->node);
+       work->fn = fn;
+       init_waitqueue_head(&work->done);
+       work->flushing = 0;
+       work->queue_seq = work->done_seq = 0;
+}
+
 /* Init poll structure */
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
                     unsigned long mask, struct vhost_dev *dev)
 {
-       struct vhost_work *work = &poll->work;
-
        init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
        init_poll_funcptr(&poll->table, vhost_poll_func);
        poll->mask = mask;
        poll->dev = dev;
 
-       INIT_LIST_HEAD(&work->node);
-       work->fn = fn;
-       init_waitqueue_head(&work->done);
-       work->flushing = 0;
-       work->queue_seq = work->done_seq = 0;
+       vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
@@ -83,6 +85,7 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 void vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
        unsigned long mask;
+
        mask = file->f_op->poll(file, &poll->table);
        if (mask)
                vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
@@ -95,35 +98,43 @@ void vhost_poll_stop(struct vhost_poll *poll)
        remove_wait_queue(poll->wqh, &poll->wait);
 }
 
-/* Flush any work that has been scheduled. When calling this, don't hold any
- * locks that are also used by the callback. */
-void vhost_poll_flush(struct vhost_poll *poll)
+static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
+                               unsigned seq)
 {
-       struct vhost_work *work = &poll->work;
-       unsigned seq;
        int left;
+
+       spin_lock_irq(&dev->work_lock);
+       left = seq - work->done_seq;
+       spin_unlock_irq(&dev->work_lock);
+       return left <= 0;
+}
+
+static void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
+{
+       unsigned seq;
        int flushing;
 
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_lock_irq(&dev->work_lock);
        seq = work->queue_seq;
        work->flushing++;
-       spin_unlock_irq(&poll->dev->work_lock);
-       wait_event(work->done, ({
-                  spin_lock_irq(&poll->dev->work_lock);
-                  left = seq - work->done_seq <= 0;
-                  spin_unlock_irq(&poll->dev->work_lock);
-                  left;
-       }));
-       spin_lock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
+       wait_event(work->done, vhost_work_seq_done(dev, work, seq));
+       spin_lock_irq(&dev->work_lock);
        flushing = --work->flushing;
-       spin_unlock_irq(&poll->dev->work_lock);
+       spin_unlock_irq(&dev->work_lock);
        BUG_ON(flushing < 0);
 }
 
-void vhost_poll_queue(struct vhost_poll *poll)
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+       vhost_work_flush(poll->dev, &poll->work);
+}
+
+static inline void vhost_work_queue(struct vhost_dev *dev,
+                                   struct vhost_work *work)
 {
-       struct vhost_dev *dev = poll->dev;
-       struct vhost_work *work = &poll->work;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->work_lock, flags);
@@ -135,6 +146,11 @@ void vhost_poll_queue(struct vhost_poll *poll)
        spin_unlock_irqrestore(&dev->work_lock, flags);
 }
 
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+       vhost_work_queue(poll->dev, &poll->work);
+}
+
 static void vhost_vq_reset(struct vhost_dev *dev,
                           struct vhost_virtqueue *vq)
 {
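
The hunks above pull the queue/flush machinery out of the poll structure
into bare vhost_work helpers, so a work item no longer needs a vhost_poll
wrapper around it. A minimal sketch of the resulting pattern (the names
my_work_fn and run_on_vhost_worker are hypothetical; the cgroup-attach
code further down in this diff uses exactly this shape with a
stack-allocated work item):

        static void my_work_fn(struct vhost_work *work)
        {
                /* runs in the vhost worker kthread */
        }

        static void run_on_vhost_worker(struct vhost_dev *dev)
        {
                struct vhost_work work;

                vhost_work_init(&work, my_work_fn);
                vhost_work_queue(dev, &work);
                /* vhost_work_flush() waits until done_seq catches up
                 * with the queue_seq it sampled, so the stack-allocated
                 * work item is safe to let go of after this returns */
                vhost_work_flush(dev, &work);
        }
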
@@ -146,7 +162,6 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        vq->avail_idx = 0;
        vq->last_used_idx = 0;
        vq->used_flags = 0;
-       vq->used_flags = 0;
        vq->log_used = false;
        vq->log_addr = -1ull;
        vq->vhost_hlen = 0;
@@ -167,6 +182,8 @@ static int vhost_worker(void *data)
        struct vhost_work *work = NULL;
        unsigned uninitialized_var(seq);
 
+       use_mm(dev->mm);
+
        for (;;) {
                /* mb paired w/ kthread_stop */
                set_current_state(TASK_INTERRUPTIBLE);
@@ -181,7 +198,7 @@ static int vhost_worker(void *data)
                if (kthread_should_stop()) {
                        spin_unlock_irq(&dev->work_lock);
                        __set_current_state(TASK_RUNNING);
-                       return 0;
+                       break;
                }
                if (!list_empty(&dev->work_list)) {
                        work = list_first_entry(&dev->work_list,
@@ -199,6 +216,50 @@ static int vhost_worker(void *data)
                        schedule();
 
        }
+       unuse_mm(dev->mm);
+       return 0;
+}
+
+/* Helper to allocate iovec buffers for all vqs. */
+static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->nvqs; ++i) {
+               dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+                                              UIO_MAXIOV, GFP_KERNEL);
+               dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+                                         GFP_KERNEL);
+               dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+                                           UIO_MAXIOV, GFP_KERNEL);
+
+               if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
+                       !dev->vqs[i].heads)
+                       goto err_nomem;
+       }
+       return 0;
+
+err_nomem:
+       for (; i >= 0; --i) {
+               kfree(dev->vqs[i].indirect);
+               kfree(dev->vqs[i].log);
+               kfree(dev->vqs[i].heads);
+       }
+       return -ENOMEM;
+}
+
+static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+{
+       int i;
+
+       for (i = 0; i < dev->nvqs; ++i) {
+               kfree(dev->vqs[i].indirect);
+               dev->vqs[i].indirect = NULL;
+               kfree(dev->vqs[i].log);
+               dev->vqs[i].log = NULL;
+               kfree(dev->vqs[i].heads);
+               dev->vqs[i].heads = NULL;
+       }
 }
 
 long vhost_dev_init(struct vhost_dev *dev,
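
Assembled from the fragments above, the worker thread now borrows the
device owner's mm for its whole lifetime, so work callbacks can copy to
and from the guest's address space directly. A condensed sketch of the
resulting loop (not the verbatim function):

        static int vhost_worker(void *data)
        {
                struct vhost_dev *dev = data;

                use_mm(dev->mm);        /* user accesses resolve in the owner's mm */
                for (;;) {
                        if (kthread_should_stop())
                                break;  /* fall through to unuse_mm() rather
                                         * than the old "return 0" */
                        /* otherwise: pop dev->work_list under work_lock,
                         * run work->fn(work), advance done_seq and wake
                         * flushers, or schedule() when idle */
                }
                unuse_mm(dev->mm);
                return 0;
        }
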
@@ -218,6 +279,9 @@ long vhost_dev_init(struct vhost_dev *dev,
        dev->worker = NULL;
 
        for (i = 0; i < dev->nvqs; ++i) {
+               dev->vqs[i].log = NULL;
+               dev->vqs[i].indirect = NULL;
+               dev->vqs[i].heads = NULL;
                dev->vqs[i].dev = dev;
                mutex_init(&dev->vqs[i].mutex);
                vhost_vq_reset(dev, dev->vqs + i);
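
These NULL initializations, together with vhost_dev_alloc_iovecs() and
vhost_dev_free_iovecs() above, only make sense alongside a matching
vhost.h change (not shown in this diff) that turns the per-vq
scatter-gather state into heap-allocated pointers; the old code's
ARRAY_SIZE(vq->indirect), removed in the get_indirect() hunk below, only
compiles against a real in-struct array. A sketch of the assumed header
counterpart:

        struct vhost_virtqueue {
                /* ... */
                struct iovec *indirect;         /* previously fixed in-struct arrays, */
                struct vhost_log *log;          /* now kmalloc()ed UIO_MAXIOV entries */
                struct vring_used_elem *heads;  /* by vhost_dev_alloc_iovecs() */
                /* ... */
        };
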
@@ -236,16 +300,43 @@ long vhost_dev_check_owner(struct vhost_dev *dev)
        return dev->mm == current->mm ? 0 : -EPERM;
 }
 
+struct vhost_attach_cgroups_struct {
+       struct vhost_work work;
+       struct task_struct *owner;
+       int ret;
+};
+
+static void vhost_attach_cgroups_work(struct vhost_work *work)
+{
+       struct vhost_attach_cgroups_struct *s;
+
+       s = container_of(work, struct vhost_attach_cgroups_struct, work);
+       s->ret = cgroup_attach_task_all(s->owner, current);
+}
+
+static int vhost_attach_cgroups(struct vhost_dev *dev)
+{
+       struct vhost_attach_cgroups_struct attach;
+
+       attach.owner = current;
+       vhost_work_init(&attach.work, vhost_attach_cgroups_work);
+       vhost_work_queue(dev, &attach.work);
+       vhost_work_flush(dev, &attach.work);
+       return attach.ret;
+}
+
 /* Caller should have device mutex */
 static long vhost_dev_set_owner(struct vhost_dev *dev)
 {
        struct task_struct *worker;
        int err;
+
        /* Is there an owner already? */
        if (dev->mm) {
                err = -EBUSY;
                goto err_mm;
        }
+
        /* No owner, become one */
        dev->mm = get_task_mm(current);
        worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
@@ -255,14 +346,20 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
        }
 
        dev->worker = worker;
-       err = cgroup_attach_task_current_cg(worker);
+       wake_up_process(worker);        /* avoid contributing to loadavg */
+
+       err = vhost_attach_cgroups(dev);
+       if (err)
+               goto err_cgroup;
+
+       err = vhost_dev_alloc_iovecs(dev);
        if (err)
                goto err_cgroup;
-       wake_up_process(worker);        /* avoid contributing to loadavg */
 
        return 0;
 err_cgroup:
        kthread_stop(worker);
+       dev->worker = NULL;
 err_worker:
        if (dev->mm)
                mmput(dev->mm);
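
Note the reordering in this hunk: wake_up_process() now precedes the
cgroup attach, because vhost_attach_cgroups() queues a work item on that
very worker and flushes it, and the flush can only complete once the
worker is running. The error path also clears dev->worker so a later
vhost_dev_cleanup() does not try to stop the already-stopped thread.
Annotated excerpt:

        dev->worker = worker;
        wake_up_process(worker);                /* worker must be live before...   */
        err = vhost_attach_cgroups(dev);        /* ...this queue-and-flush returns */
        if (err)
                goto err_cgroup;                /* stops the worker and resets
                                                 * dev->worker to NULL */
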
@@ -292,6 +389,7 @@ long vhost_dev_reset_owner(struct vhost_dev *dev)
 void vhost_dev_cleanup(struct vhost_dev *dev)
 {
        int i;
+
        for (i = 0; i < dev->nvqs; ++i) {
                if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
                        vhost_poll_stop(&dev->vqs[i].poll);
@@ -309,6 +407,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
                        fput(dev->vqs[i].call);
                vhost_vq_reset(dev, dev->vqs + i);
        }
+       vhost_dev_free_iovecs(dev);
        if (dev->log_ctx)
                eventfd_ctx_put(dev->log_ctx);
        dev->log_ctx = NULL;
@@ -319,21 +418,24 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
        kfree(rcu_dereference_protected(dev->memory,
                                        lockdep_is_held(&dev->mutex)));
        RCU_INIT_POINTER(dev->memory, NULL);
+       WARN_ON(!list_empty(&dev->work_list));
+       if (dev->worker) {
+               kthread_stop(dev->worker);
+               dev->worker = NULL;
+       }
        if (dev->mm)
                mmput(dev->mm);
        dev->mm = NULL;
-
-       WARN_ON(!list_empty(&dev->work_list));
-       kthread_stop(dev->worker);
 }
 
 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
 {
        u64 a = addr / VHOST_PAGE_SIZE / 8;
+
        /* Make sure 64 bit math will not overflow. */
        if (a > ULONG_MAX - (unsigned long)log_base ||
            a + (unsigned long)log_base > ULONG_MAX)
-               return -EFAULT;
+               return 0;
 
        return access_ok(VERIFY_WRITE, log_base + a,
                         (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
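
log_access_ok() is used by its callers as a boolean predicate, so the
overflow branch now returns 0 (false); the old -EFAULT was nonzero and
therefore read as success. The arithmetic maps an address to its
dirty-bitmap byte, one bit per VHOST_PAGE_SIZE page and eight pages per
byte. A worked example, assuming VHOST_PAGE_SIZE is 4096:

        /* addr = 0x10000 (64 KiB into the region):
         *   page index    = 0x10000 / 4096 = 16
         *   bitmap byte a = 16 / 8 = 2, bit = 16 % 8 = 0
         * i.e. the write is tracked by bit 0 of log_base[2]. */
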
@@ -370,6 +472,7 @@ static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
                            int log_all)
 {
        int i;
+
        for (i = 0; i < d->nvqs; ++i) {
                int ok;
                mutex_lock(&d->vqs[i].mutex);
@@ -436,6 +539,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
        struct vhost_memory mem, *newmem, *oldmem;
        unsigned long size = offsetof(struct vhost_memory, regions);
+
        if (copy_from_user(&mem, m, size))
                return -EFAULT;
        if (mem.padding)
@@ -453,7 +557,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
                return -EFAULT;
        }
 
-       if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+       if (!memory_access_ok(d, newmem,
+                             vhost_has_feature(d, VHOST_F_LOG_ALL))) {
                kfree(newmem);
                return -EFAULT;
        }
@@ -469,6 +574,7 @@ static int init_used(struct vhost_virtqueue *vq,
                     struct vring_used __user *used)
 {
        int r = put_user(vq->used_flags, &used->flags);
+
        if (r)
                return r;
        return get_user(vq->last_used_idx, &used->idx);
@@ -758,6 +864,7 @@ static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
 {
        struct vhost_memory_region *reg;
        int i;
+
        /* linear search is not brilliant, but we really have on the order of 6
         * regions in practice */
        for (i = 0; i < mem->nregions; ++i) {
@@ -780,6 +887,7 @@ static int set_bit_to_user(int nr, void __user *addr)
        void *base;
        int bit = nr + (log % PAGE_SIZE) * 8;
        int r;
+
        r = get_user_pages_fast(log, 1, 1, &page);
        if (r < 0)
                return r;
@@ -795,14 +903,16 @@ static int set_bit_to_user(int nr, void __user *addr)
 static int log_write(void __user *log_base,
                     u64 write_address, u64 write_length)
 {
+       u64 write_page = write_address / VHOST_PAGE_SIZE;
        int r;
+
        if (!write_length)
                return 0;
-       write_address /= VHOST_PAGE_SIZE;
+       write_length += write_address % VHOST_PAGE_SIZE;
        for (;;) {
                u64 base = (u64)(unsigned long)log_base;
-               u64 log = base + write_address / 8;
-               int bit = write_address % 8;
+               u64 log = base + write_page / 8;
+               int bit = write_page % 8;
                if ((u64)(unsigned long)log != log)
                        return -EFAULT;
                r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
@@ -811,7 +921,7 @@ static int log_write(void __user *log_base,
                if (write_length <= VHOST_PAGE_SIZE)
                        break;
                write_length -= VHOST_PAGE_SIZE;
-               write_address += VHOST_PAGE_SIZE;
+               write_page += 1;
        }
        return r;
 }
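
The old code reused write_address as both the byte address and the page
index: after the up-front division, the loop still advanced it by
VHOST_PAGE_SIZE (jumping thousands of page indexes per iteration on
multi-page writes), and the intra-page offset never counted against
write_length, so a short write straddling a page boundary dirtied only
the first page. A worked example, assuming VHOST_PAGE_SIZE is 4096:

        /* a 0x20-byte write at 0x1ff0 touches pages 1 and 2:
         *   old: page = 0x1ff0 / 4096 = 1; length 0x20 <= 4096,
         *        so only page 1 is marked.
         *   new: write_page = 1; write_length = 0x20 + 0xff0 = 0x1010,
         *        which exceeds 4096, so page 1 is marked, then page 2. */
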
@@ -829,11 +939,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
                if (r < 0)
                        return r;
                len -= l;
-               if (!len)
+               if (!len) {
+                       if (vq->log_ctx)
+                               eventfd_signal(vq->log_ctx, 1);
                        return 0;
+               }
        }
-       if (vq->log_ctx)
-               eventfd_signal(vq->log_ctx, 1);
        /* Length written exceeds what we have stored. This is a bug. */
        BUG();
        return 0;
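
In the old layout the eventfd_signal() sat after the loop, but every
successful call returns from inside the loop via the !len branch; the
loop only runs to completion when the written length exceeds what was
stored, i.e. on the BUG() path. So the log eventfd never fired on
success. Moving the signal into the !len branch fixes that:

        /* old: for (...) { ... if (!len) return 0; }  <- success exits here
         *      eventfd_signal(vq->log_ctx, 1);        <- reachable only on
         *      BUG();                                    the bug path
         * new: signal just before the return, once the write is logged */
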
@@ -917,7 +1028,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
        }
 
        ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
-                            ARRAY_SIZE(vq->indirect));
+                            UIO_MAXIOV);
        if (unlikely(ret < 0)) {
                vq_err(vq, "Translation failure %d in indirect.\n", ret);
                return ret;
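
With vq->indirect now a pointer rather than an embedded array,
ARRAY_SIZE(vq->indirect) no longer means "capacity of the indirect
table" (the kernel's ARRAY_SIZE() is defined to reject pointers via
__must_be_array), so the limit is spelled out as UIO_MAXIOV, the same
count vhost_dev_alloc_iovecs() allocated:

        /* struct iovec *indirect:
         *   ARRAY_SIZE(vq->indirect) -> build error once it is a pointer
         *   UIO_MAXIOV               -> the allocated element count */
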
@@ -944,8 +1055,8 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                               i, count);
                        return -EINVAL;
                }
-               if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
-                                             sizeof desc))) {
+               if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
+                                             vq->indirect, sizeof desc))) {
                        vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
                               i, (size_t)indirect->addr + i * sizeof desc);
                        return -EINVAL;
@@ -1005,7 +1116,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
        /* Check it isn't doing very strange things with descriptor numbers. */
        last_avail_idx = vq->last_avail_idx;
-       if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
+       if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
                vq_err(vq, "Failed to access avail idx at %p\n",
                       &vq->avail->idx);
                return -EFAULT;
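
This and the following hunks switch get_user()/copy_{to,from}_user() to
their double-underscore variants, which skip the per-access access_ok()
range check. That is safe here only because the desc/avail/used ring
pointers are range-checked once when userspace configures the ring
(vhost's vq_access_ok() at VHOST_SET_VRING_ADDR time, not shown in this
diff), so the per-descriptor hot path need not re-verify them:

        /* ring setup, once:
         *   access_ok(VERIFY_READ, vq->avail, <ring size>)
         * hot path, per descriptor, after this change:
         *   __get_user(vq->avail_idx, &vq->avail->idx);  // no access_ok */
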
@@ -1026,8 +1137,8 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
        /* Grab the next descriptor number they're advertising, and increment
         * the index we've seen. */
-       if (unlikely(get_user(head,
-                             &vq->avail->ring[last_avail_idx % vq->num]))) {
+       if (unlikely(__get_user(head,
+                               &vq->avail->ring[last_avail_idx % vq->num]))) {
                vq_err(vq, "Failed to read head: idx %d address %p\n",
                       last_avail_idx,
                       &vq->avail->ring[last_avail_idx % vq->num]);
@@ -1060,7 +1171,7 @@ int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
                               i, vq->num, head);
                        return -EINVAL;
                }
-               ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
+               ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
                if (unlikely(ret)) {
                        vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
                               i, vq->desc + i);
@@ -1126,17 +1237,17 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
        /* The virtqueue contains a ring of used buffers.  Get a pointer to the
         * next entry in that used ring. */
        used = &vq->used->ring[vq->last_used_idx % vq->num];
-       if (put_user(head, &used->id)) {
+       if (__put_user(head, &used->id)) {
                vq_err(vq, "Failed to write used id");
                return -EFAULT;
        }
-       if (put_user(len, &used->len)) {
+       if (__put_user(len, &used->len)) {
                vq_err(vq, "Failed to write used len");
                return -EFAULT;
        }
        /* Make sure buffer is written before we update index. */
        smp_wmb();
-       if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
+       if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
                vq_err(vq, "Failed to increment used idx");
                return -EFAULT;
        }
@@ -1168,7 +1279,7 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
 
        start = vq->last_used_idx % vq->num;
        used = vq->used->ring + start;
-       if (copy_to_user(used, heads, count * sizeof *used)) {
+       if (__copy_to_user(used, heads, count * sizeof *used)) {
                vq_err(vq, "Failed to write used");
                return -EFAULT;
        }
@@ -1224,12 +1335,13 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 {
        __u16 flags;
+
        /* Flush out used index updates. This is paired
         * with the barrier that the Guest executes when enabling
         * interrupts. */
        smp_mb();
 
-       if (get_user(flags, &vq->avail->flags)) {
+       if (__get_user(flags, &vq->avail->flags)) {
                vq_err(vq, "Failed to get flags");
                return;
        }
@@ -1268,6 +1380,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 {
        u16 avail_idx;
        int r;
+
        if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
                return false;
        vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -1280,7 +1393,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
        /* They could have slipped one in as we were doing that: make
         * sure it's written, then check again. */
        smp_mb();
-       r = get_user(avail_idx, &vq->avail->idx);
+       r = __get_user(avail_idx, &vq->avail->idx);
        if (r) {
                vq_err(vq, "Failed to check avail idx at %p: %d\n",
                       &vq->avail->idx, r);
@@ -1294,6 +1407,7 @@ bool vhost_enable_notify(struct vhost_virtqueue *vq)
 void vhost_disable_notify(struct vhost_virtqueue *vq)
 {
        int r;
+
        if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
                return;
        vq->used_flags |= VRING_USED_F_NO_NOTIFY;