diff --git a/drivers/base/sync.c b/drivers/base/sync.c
index 80749ec68b8d868ef37d66acb1b0ebd9ed2bdf35..1cc3e4e99461704b4d38bca15556af1bfcb5d7ac 100644
--- a/drivers/base/sync.c
+++ b/drivers/base/sync.c
  */
 
 #include <linux/debugfs.h>
+#include <linux/export.h>
 #include <linux/file.h>
 #include <linux/fs.h>
 #include <linux/kernel.h>
+#include <linux/poll.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
@@ -28,6 +30,8 @@
 
 static void sync_fence_signal_pt(struct sync_pt *pt);
 static int _sync_pt_has_signaled(struct sync_pt *pt);
+static void sync_fence_free(struct kref *kref);
+static void sync_dump(void);
 
 static LIST_HEAD(sync_timeline_list_head);
 static DEFINE_SPINLOCK(sync_timeline_list_lock);
@@ -48,6 +52,7 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
        if (obj == NULL)
                return NULL;
 
+       kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));
 
@@ -63,9 +68,12 @@ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
 
        return obj;
 }
+EXPORT_SYMBOL(sync_timeline_create);
 
-static void sync_timeline_free(struct sync_timeline *obj)
+static void sync_timeline_free(struct kref *kref)
 {
+       struct sync_timeline *obj =
+               container_of(kref, struct sync_timeline, kref);
        unsigned long flags;
 
        if (obj->ops->release_obj)
@@ -80,19 +88,17 @@ static void sync_timeline_free(struct sync_timeline *obj)
 
 void sync_timeline_destroy(struct sync_timeline *obj)
 {
-       unsigned long flags;
-       bool needs_freeing;
-
-       spin_lock_irqsave(&obj->child_list_lock, flags);
        obj->destroyed = true;
-       needs_freeing = list_empty(&obj->child_list_head);
-       spin_unlock_irqrestore(&obj->child_list_lock, flags);
 
-       if (needs_freeing)
-               sync_timeline_free(obj);
-       else
+       /*
+        * If this is not the last reference, signal any children
+        * that their parent is going away.
+        */
+
+       if (!kref_put(&obj->kref, sync_timeline_free))
                sync_timeline_signal(obj);
 }
+EXPORT_SYMBOL(sync_timeline_destroy);
 
 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
 {
@@ -109,7 +115,6 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
 {
        struct sync_timeline *obj = pt->parent;
        unsigned long flags;
-       bool needs_freeing;
 
        spin_lock_irqsave(&obj->active_list_lock, flags);
        if (!list_empty(&pt->active_list))
@@ -117,12 +122,10 @@ static void sync_timeline_remove_pt(struct sync_pt *pt)
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
 
        spin_lock_irqsave(&obj->child_list_lock, flags);
-       list_del(&pt->child_list);
-       needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
+       if (!list_empty(&pt->child_list)) {
+               list_del_init(&pt->child_list);
+       }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
-
-       if (needs_freeing)
-               sync_timeline_free(obj);
 }
 
 void sync_timeline_signal(struct sync_timeline *obj)
@@ -137,20 +140,25 @@ void sync_timeline_signal(struct sync_timeline *obj)
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);
 
-               if (_sync_pt_has_signaled(pt))
-                       list_move(pos, &signaled_pts);
+               if (_sync_pt_has_signaled(pt)) {
+                       list_del_init(pos);
+                       list_add(&pt->signaled_list, &signaled_pts);
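+                       /* hold a fence ref so it can't be freed before we
+                        * signal this pt below, after dropping the lock
+                        */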
+                       kref_get(&pt->fence->kref);
+               }
        }
 
        spin_unlock_irqrestore(&obj->active_list_lock, flags);
 
        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
-                       container_of(pos, struct sync_pt, active_list);
+                       container_of(pos, struct sync_pt, signaled_list);
 
                list_del_init(pos);
                sync_fence_signal_pt(pt);
+               kref_put(&pt->fence->kref, sync_fence_free);
        }
 }
+EXPORT_SYMBOL(sync_timeline_signal);
 
 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
 {
@@ -164,10 +172,12 @@ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
                return NULL;
 
        INIT_LIST_HEAD(&pt->active_list);
+       kref_get(&parent->kref);
        sync_timeline_add_pt(parent, pt);
 
        return pt;
 }
+EXPORT_SYMBOL(sync_pt_create);
 
 void sync_pt_free(struct sync_pt *pt)
 {
@@ -176,8 +186,11 @@ void sync_pt_free(struct sync_pt *pt)
 
        sync_timeline_remove_pt(pt);
 
+       kref_put(&pt->parent->kref, sync_timeline_free);
+
        kfree(pt);
 }
+EXPORT_SYMBOL(sync_pt_free);
 
 /* call with pt->parent->active_list_lock held */
 static int _sync_pt_has_signaled(struct sync_pt *pt)
@@ -221,12 +234,14 @@ out:
 }
 
 static int sync_fence_release(struct inode *inode, struct file *file);
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);
 
 
 static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
+       .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
 };
 
@@ -244,6 +259,7 @@ static struct sync_fence *sync_fence_alloc(const char *name)
        if (fence->file == NULL)
                goto err;
 
+       kref_init(&fence->kref);
        strlcpy(fence->name, name, sizeof(fence->name));
 
        INIT_LIST_HEAD(&fence->pt_list_head);
@@ -281,6 +297,7 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
 
        return fence;
 }
+EXPORT_SYMBOL(sync_fence_create);
 
 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
 {
@@ -302,6 +319,65 @@ static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
        return 0;
 }
 
+static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
+{
+       struct list_head *src_pos, *dst_pos, *n;
+
+       list_for_each(src_pos, &src->pt_list_head) {
+               struct sync_pt *src_pt =
+                       container_of(src_pos, struct sync_pt, pt_list);
+               bool collapsed = false;
+
+               list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
+                       struct sync_pt *dst_pt =
+                               container_of(dst_pos, struct sync_pt, pt_list);
+                       /* collapse two sync_pts on the same timeline
+                        * to a single sync_pt that will signal at
+                        * the later of the two
+                        */
+                       if (dst_pt->parent == src_pt->parent) {
+                               if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
+                                       struct sync_pt *new_pt =
+                                               sync_pt_dup(src_pt);
+                                       if (new_pt == NULL)
+                                               return -ENOMEM;
+
+                                       new_pt->fence = dst;
+                                       list_replace(&dst_pt->pt_list,
+                                                    &new_pt->pt_list);
+                                       sync_pt_activate(new_pt);
+                                       sync_pt_free(dst_pt);
+                               }
+                               collapsed = true;
+                               break;
+                       }
+               }
+
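+               /* src_pt's timeline has no pt in dst yet; carry over a copy */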
+               if (!collapsed) {
+                       struct sync_pt *new_pt = sync_pt_dup(src_pt);
+
+                       if (new_pt == NULL)
+                               return -ENOMEM;
+
+                       new_pt->fence = dst;
+                       list_add(&new_pt->pt_list, &dst->pt_list_head);
+                       sync_pt_activate(new_pt);
+               }
+       }
+
+       return 0;
+}
+
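+/*
+ * Detach all of this fence's pts from their parent timelines so that
+ * sync_timeline_signal() can no longer reference the fence.
+ */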
+static void sync_fence_detach_pts(struct sync_fence *fence)
+{
+       struct list_head *pos, *n;
+
+       list_for_each_safe(pos, n, &fence->pt_list_head) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+               sync_timeline_remove_pt(pt);
+       }
+}
+
 static void sync_fence_free_pts(struct sync_fence *fence)
 {
        struct list_head *pos, *n;
@@ -328,16 +404,19 @@ err:
        fput(file);
        return NULL;
 }
+EXPORT_SYMBOL(sync_fence_fdget);
 
 void sync_fence_put(struct sync_fence *fence)
 {
        fput(fence->file);
 }
+EXPORT_SYMBOL(sync_fence_put);
 
 void sync_fence_install(struct sync_fence *fence, int fd)
 {
        fd_install(fd, fence->file);
 }
+EXPORT_SYMBOL(sync_fence_install);
 
 static int sync_fence_get_status(struct sync_fence *fence)
 {
@@ -373,7 +452,7 @@ struct sync_fence *sync_fence_merge(const char *name,
        if (err < 0)
                goto err;
 
-       err = sync_fence_copy_pts(fence, b);
+       err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;
 
@@ -385,6 +464,7 @@ err:
        kfree(fence);
        return NULL;
 }
+EXPORT_SYMBOL(sync_fence_merge);
 
 static void sync_fence_signal_pt(struct sync_pt *pt)
 {
@@ -418,33 +498,22 @@ static void sync_fence_signal_pt(struct sync_pt *pt)
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);
 
-                       waiter->callback(fence, waiter->callback_data);
                        list_del(pos);
-                       kfree(waiter);
+                       waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
 }
 
 int sync_fence_wait_async(struct sync_fence *fence,
-                         void (*callback)(struct sync_fence *, void *data),
-                         void *callback_data)
+                         struct sync_fence_waiter *waiter)
 {
-       struct sync_fence_waiter *waiter;
        unsigned long flags;
        int err = 0;
 
-       waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
-       if (waiter == NULL)
-               return -ENOMEM;
-
-       waiter->callback = callback;
-       waiter->callback_data = callback_data;
-
        spin_lock_irqsave(&fence->waiter_list_lock, flags);
 
        if (fence->status) {
-               kfree(waiter);
                err = fence->status;
                goto out;
        }
@@ -455,17 +524,47 @@ out:
 
        return err;
 }
+EXPORT_SYMBOL(sync_fence_wait_async);
+
+int sync_fence_cancel_async(struct sync_fence *fence,
+                            struct sync_fence_waiter *waiter)
+{
+       struct list_head *pos;
+       struct list_head *n;
+       unsigned long flags;
+       int ret = -ENOENT;
+
+       spin_lock_irqsave(&fence->waiter_list_lock, flags);
+       /*
+        * Make sure waiter is still in waiter_list because it is possible for
+        * the waiter to be removed from the list while the callback is still
+        * pending.
+        */
+       list_for_each_safe(pos, n, &fence->waiter_list_head) {
+               struct sync_fence_waiter *list_waiter =
+                       container_of(pos, struct sync_fence_waiter,
+                                    waiter_list);
+               if (list_waiter == waiter) {
+                       list_del(pos);
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL(sync_fence_cancel_async);
 
 int sync_fence_wait(struct sync_fence *fence, long timeout)
 {
-       int err;
+       int err = 0;
 
-       if (timeout) {
+       if (timeout > 0) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       fence->status != 0,
                                                       timeout);
-       } else {
+       } else if (timeout < 0) {
                err = wait_event_interruptible(fence->wq, fence->status != 0);
        }
 
@@ -475,31 +574,69 @@ int sync_fence_wait(struct sync_fence *fence, long timeout)
        if (fence->status < 0)
                return fence->status;
 
-       if (fence->status == 0)
+       if (fence->status == 0) {
+               pr_info("fence timeout on [%p] after %dms\n", fence,
+                       jiffies_to_msecs(timeout));
+               sync_dump();
                return -ETIME;
+       }
 
        return 0;
 }
+EXPORT_SYMBOL(sync_fence_wait);
+
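+/* kref release handler: frees the fence's pts and then the fence itself */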
+static void sync_fence_free(struct kref *kref)
+{
+       struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
+
+       sync_fence_free_pts(fence);
+
+       kfree(fence);
+}
 
 static int sync_fence_release(struct inode *inode, struct file *file)
 {
        struct sync_fence *fence = file->private_data;
        unsigned long flags;
 
-       sync_fence_free_pts(fence);
-
+       /*
+        * We need to remove all ways to access this fence before dropping
+        * our ref.
+        *
+        * start with its membership in the global fence list
+        */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
 
-       kfree(fence);
+       /*
+        * remove its pts from their parents so that sync_timeline_signal()
+        * can't reference the fence.
+        */
+       sync_fence_detach_pts(fence);
+
+       kref_put(&fence->kref, sync_fence_free);
 
        return 0;
 }
 
+static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
+{
+       struct sync_fence *fence = file->private_data;
+
+       poll_wait(file, &fence->wq, wait);
+
+       if (fence->status == 1)
+               return POLLIN;
+       else if (fence->status < 0)
+               return POLLERR;
+       else
+               return 0;
+}
+
 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
 {
-       __u32 value;
+       __s32 value;
 
        if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
                return -EFAULT;
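
With .poll wired up, userspace can block on a sync fence fd with an ordinary poll() instead of the SYNC_IOC_WAIT ioctl. The snippet below is a minimal consumer-side sketch, not part of this patch: the fence fd is assumed to come from whichever driver ioctl hands out sync fences, and wait_sync_fd() is an invented helper name.

#include <errno.h>
#include <poll.h>
#include <stdio.h>

/* Illustrative helper: block until a sync fence fd signals or fails.
 * sync_fence_poll() reports POLLIN once the fence status is 1 and
 * POLLERR when the status has gone negative. */
static int wait_sync_fd(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fence_fd,
		.events = POLLIN,
	};
	int ret;

	do {
		ret = poll(&pfd, 1, timeout_ms);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0) {
		perror("poll");
		return -1;
	}
	if (ret == 0) {
		fprintf(stderr, "timed out waiting for fence\n");
		return -1;
	}
	if (pfd.revents & POLLERR) {
		fprintf(stderr, "fence signaled in error state\n");
		return -1;
	}
	return 0;	/* POLLIN: fence signaled */
}
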
@@ -514,8 +651,13 @@ static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;
 
-       if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-               return -EFAULT;
+       if (fd < 0)
+               return fd;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+               err = -EFAULT;
+               goto err_put_fd;
+       }
 
        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
@@ -708,7 +850,8 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
        struct list_head *pos;
        unsigned long flags;
 
-       seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
+       seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
+                  sync_status_str(fence->status));
 
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
@@ -722,8 +865,7 @@ static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);
 
-               seq_printf(s, "waiter %pF %p\n", waiter->callback,
-                          waiter->callback_data);
+               seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
 }
@@ -777,7 +919,34 @@ static __init int sync_debugfs_init(void)
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
 }
-
 late_initcall(sync_debugfs_init);
 
+#define DUMP_CHUNK 256
+static char sync_dump_buf[64 * 1024];
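+/*
+ * Dump the same state exposed through debugfs to the kernel log, in
+ * DUMP_CHUNK-sized pieces.
+ */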
+static void sync_dump(void)
+{
+       struct seq_file s = {
+               .buf = sync_dump_buf,
+               .size = sizeof(sync_dump_buf) - 1,
+       };
+       int i;
+
+       sync_debugfs_show(&s, NULL);
+
+       for (i = 0; i < s.count; i += DUMP_CHUNK) {
+               if ((s.count - i) > DUMP_CHUNK) {
+                       char c = s.buf[i + DUMP_CHUNK];
+                       s.buf[i + DUMP_CHUNK] = 0;
+                       pr_cont("%s", s.buf + i);
+                       s.buf[i + DUMP_CHUNK] = c;
+               } else {
+                       s.buf[s.count] = 0;
+                       pr_cont("%s", s.buf + i);
+               }
+       }
+}
+#else
+static void sync_dump(void)
+{
+}
 #endif
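
The reworked async API drops the heap-allocated callback_data: callers now embed a struct sync_fence_waiter in their own state and get the waiter pointer back in the callback. The kernel-side sketch below is an illustration under stated assumptions, not code from this patch — my_job, my_fence_signaled() and my_job_queue() are invented names, the header path is assumed, and the callback is assigned directly since any waiter-init helper in the header is not shown here.

#include <linux/kernel.h>
#include <linux/sync.h>		/* assumed location of the sync API header in this tree */

struct my_job {
	struct sync_fence *fence;		/* fence this job waits on */
	struct sync_fence_waiter waiter;	/* embedded waiter, no separate allocation */
};

static void my_fence_signaled(struct sync_fence *fence,
			      struct sync_fence_waiter *waiter)
{
	/* recover the enclosing object from the embedded waiter */
	struct my_job *job = container_of(waiter, struct my_job, waiter);

	/* ...kick off whatever work for 'job' was gated on 'fence'... */
}

static int my_job_queue(struct my_job *job)
{
	int err;

	job->waiter.callback = my_fence_signaled;

	/* returns 0 if the waiter was queued, 1 if the fence had already
	 * signaled (callback will not run), or a negative fence status */
	err = sync_fence_wait_async(job->fence, &job->waiter);
	if (err == 1) {
		my_fence_signaled(job->fence, &job->waiter);
		return 0;
	}
	return err < 0 ? err : 0;
}

If the job is torn down before the fence fires, sync_fence_cancel_async(job->fence, &job->waiter) removes the embedded waiter; a -ENOENT return means the waiter is no longer on the list because the callback has already run or is about to.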