sync: add poll support
[linux-2.6.git] / drivers / base / sync.c
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>
29
/* forward declarations for the signaling helpers defined below */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);

/* global registry of all live timelines, walked by the debugfs dump */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* global registry of all live fences, walked by the debugfs dump */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
38
39 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
40                                            int size, const char *name)
41 {
42         struct sync_timeline *obj;
43         unsigned long flags;
44
45         if (size < sizeof(struct sync_timeline))
46                 return NULL;
47
48         obj = kzalloc(size, GFP_KERNEL);
49         if (obj == NULL)
50                 return NULL;
51
52         obj->ops = ops;
53         strlcpy(obj->name, name, sizeof(obj->name));
54
55         INIT_LIST_HEAD(&obj->child_list_head);
56         spin_lock_init(&obj->child_list_lock);
57
58         INIT_LIST_HEAD(&obj->active_list_head);
59         spin_lock_init(&obj->active_list_lock);
60
61         spin_lock_irqsave(&sync_timeline_list_lock, flags);
62         list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
63         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
64
65         return obj;
66 }
67
68 static void sync_timeline_free(struct sync_timeline *obj)
69 {
70         unsigned long flags;
71
72         if (obj->ops->release_obj)
73                 obj->ops->release_obj(obj);
74
75         spin_lock_irqsave(&sync_timeline_list_lock, flags);
76         list_del(&obj->sync_timeline_list);
77         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
78
79         kfree(obj);
80 }
81
82 void sync_timeline_destroy(struct sync_timeline *obj)
83 {
84         unsigned long flags;
85         bool needs_freeing;
86
87         spin_lock_irqsave(&obj->child_list_lock, flags);
88         obj->destroyed = true;
89         needs_freeing = list_empty(&obj->child_list_head);
90         spin_unlock_irqrestore(&obj->child_list_lock, flags);
91
92         if (needs_freeing)
93                 sync_timeline_free(obj);
94         else
95                 sync_timeline_signal(obj);
96 }
97
98 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
99 {
100         unsigned long flags;
101
102         pt->parent = obj;
103
104         spin_lock_irqsave(&obj->child_list_lock, flags);
105         list_add_tail(&pt->child_list, &obj->child_list_head);
106         spin_unlock_irqrestore(&obj->child_list_lock, flags);
107 }
108
109 static void sync_timeline_remove_pt(struct sync_pt *pt)
110 {
111         struct sync_timeline *obj = pt->parent;
112         unsigned long flags;
113         bool needs_freeing;
114
115         spin_lock_irqsave(&obj->active_list_lock, flags);
116         if (!list_empty(&pt->active_list))
117                 list_del_init(&pt->active_list);
118         spin_unlock_irqrestore(&obj->active_list_lock, flags);
119
120         spin_lock_irqsave(&obj->child_list_lock, flags);
121         list_del(&pt->child_list);
122         needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
123         spin_unlock_irqrestore(&obj->child_list_lock, flags);
124
125         if (needs_freeing)
126                 sync_timeline_free(obj);
127 }
128
/*
 * sync_timeline_signal() - re-check the active pts after the timeline advances
 * @obj: timeline whose counter/state just moved forward
 *
 * Walks the active queue under active_list_lock, moving every pt whose
 * status has become non-zero onto a private list; their fences are then
 * notified with the lock dropped, since sync_fence_signal_pt() takes
 * the fence's waiter lock and runs waiter callbacks.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt))
                        list_move(pos, &signaled_pts);
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        /* notify the fences outside the active-list lock */
        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                /* list_del_init so sync_timeline_remove_pt() sees it off-queue */
                list_del_init(pos);
                sync_fence_signal_pt(pt);
        }
}
155
156 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
157 {
158         struct sync_pt *pt;
159
160         if (size < sizeof(struct sync_pt))
161                 return NULL;
162
163         pt = kzalloc(size, GFP_KERNEL);
164         if (pt == NULL)
165                 return NULL;
166
167         INIT_LIST_HEAD(&pt->active_list);
168         sync_timeline_add_pt(parent, pt);
169
170         return pt;
171 }
172
173 void sync_pt_free(struct sync_pt *pt)
174 {
175         if (pt->parent->ops->free_pt)
176                 pt->parent->ops->free_pt(pt);
177
178         sync_timeline_remove_pt(pt);
179
180         kfree(pt);
181 }
182
/*
 * Refresh and return @pt's cached status.
 * 0 = still pending, >0 = signaled, <0 = error.
 *
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
        int old_status = pt->status;

        /* ask the driver only while the pt is still pending */
        if (!pt->status)
                pt->status = pt->parent->ops->has_signaled(pt);

        /* pts on a dying timeline can never signal; error them out */
        if (!pt->status && pt->parent->destroyed)
                pt->status = -ENOENT;

        /* record the moment of the pending -> signaled/error transition */
        if (pt->status != old_status)
                pt->timestamp = ktime_get();

        return pt->status;
}
199
/* Clone @pt via its driver; the copy has no fence membership yet. */
static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
        return pt->parent->ops->dup(pt);
}
204
205 /* Adds a sync pt to the active queue.  Called when added to a fence */
206 static void sync_pt_activate(struct sync_pt *pt)
207 {
208         struct sync_timeline *obj = pt->parent;
209         unsigned long flags;
210         int err;
211
212         spin_lock_irqsave(&obj->active_list_lock, flags);
213
214         err = _sync_pt_has_signaled(pt);
215         if (err != 0)
216                 goto out;
217
218         list_add_tail(&pt->active_list, &obj->active_list_head);
219
220 out:
221         spin_unlock_irqrestore(&obj->active_list_lock, flags);
222 }
223
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);


/* file_operations backing the anon_inode file that represents a fence */
static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};
235
236 static struct sync_fence *sync_fence_alloc(const char *name)
237 {
238         struct sync_fence *fence;
239         unsigned long flags;
240
241         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
242         if (fence == NULL)
243                 return NULL;
244
245         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
246                                          fence, 0);
247         if (fence->file == NULL)
248                 goto err;
249
250         strlcpy(fence->name, name, sizeof(fence->name));
251
252         INIT_LIST_HEAD(&fence->pt_list_head);
253         INIT_LIST_HEAD(&fence->waiter_list_head);
254         spin_lock_init(&fence->waiter_list_lock);
255
256         init_waitqueue_head(&fence->wq);
257
258         spin_lock_irqsave(&sync_fence_list_lock, flags);
259         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
260         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
261
262         return fence;
263
264 err:
265         kfree(fence);
266         return NULL;
267 }
268
269 /* TODO: implement a create which takes more that one sync_pt */
270 struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
271 {
272         struct sync_fence *fence;
273
274         if (pt->fence)
275                 return NULL;
276
277         fence = sync_fence_alloc(name);
278         if (fence == NULL)
279                 return NULL;
280
281         pt->fence = fence;
282         list_add(&pt->pt_list, &fence->pt_list_head);
283         sync_pt_activate(pt);
284
285         return fence;
286 }
287
288 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
289 {
290         struct list_head *pos;
291
292         list_for_each(pos, &src->pt_list_head) {
293                 struct sync_pt *orig_pt =
294                         container_of(pos, struct sync_pt, pt_list);
295                 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
296
297                 if (new_pt == NULL)
298                         return -ENOMEM;
299
300                 new_pt->fence = dst;
301                 list_add(&new_pt->pt_list, &dst->pt_list_head);
302                 sync_pt_activate(new_pt);
303         }
304
305         return 0;
306 }
307
308 static void sync_fence_free_pts(struct sync_fence *fence)
309 {
310         struct list_head *pos, *n;
311
312         list_for_each_safe(pos, n, &fence->pt_list_head) {
313                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
314                 sync_pt_free(pt);
315         }
316 }
317
318 struct sync_fence *sync_fence_fdget(int fd)
319 {
320         struct file *file = fget(fd);
321
322         if (file == NULL)
323                 return NULL;
324
325         if (file->f_op != &sync_fence_fops)
326                 goto err;
327
328         return file->private_data;
329
330 err:
331         fput(file);
332         return NULL;
333 }
334
/* Drop the caller's reference; the fence dies with its file refcount. */
void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}

/* Publish the fence's file at @fd (fd must come from get_unused_fd()). */
void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
344
345 static int sync_fence_get_status(struct sync_fence *fence)
346 {
347         struct list_head *pos;
348         int status = 1;
349
350         list_for_each(pos, &fence->pt_list_head) {
351                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
352                 int pt_status = pt->status;
353
354                 if (pt_status < 0) {
355                         status = pt_status;
356                         break;
357                 } else if (status == 1) {
358                         status = pt_status;
359                 }
360         }
361
362         return status;
363 }
364
365 struct sync_fence *sync_fence_merge(const char *name,
366                                     struct sync_fence *a, struct sync_fence *b)
367 {
368         struct sync_fence *fence;
369         int err;
370
371         fence = sync_fence_alloc(name);
372         if (fence == NULL)
373                 return NULL;
374
375         err = sync_fence_copy_pts(fence, a);
376         if (err < 0)
377                 goto err;
378
379         err = sync_fence_copy_pts(fence, b);
380         if (err < 0)
381                 goto err;
382
383         fence->status = sync_fence_get_status(fence);
384
385         return fence;
386 err:
387         sync_fence_free_pts(fence);
388         kfree(fence);
389         return NULL;
390 }
391
/*
 * Recompute the owning fence's status after @pt signaled and, on the
 * fence's pending -> signaled/error transition, run the async waiter
 * callbacks and wake sleepers.  The waiter list is spliced onto a
 * private list under the lock so the callbacks run without holding
 * waiter_list_lock.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                /* first signaler: claim all waiters for dispatch below */
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                /* lost the race, or the fence is still pending: no-op */
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        waiter->callback(fence, waiter->callback_data);
                        list_del(pos);
                        kfree(waiter);
                }
                /* wake sync_fence_wait() sleepers and pollers */
                wake_up(&fence->wq);
        }
}
431
432 int sync_fence_wait_async(struct sync_fence *fence,
433                           void (*callback)(struct sync_fence *, void *data),
434                           void *callback_data)
435 {
436         struct sync_fence_waiter *waiter;
437         unsigned long flags;
438         int err = 0;
439
440         waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
441         if (waiter == NULL)
442                 return -ENOMEM;
443
444         waiter->callback = callback;
445         waiter->callback_data = callback_data;
446
447         spin_lock_irqsave(&fence->waiter_list_lock, flags);
448
449         if (fence->status) {
450                 kfree(waiter);
451                 err = fence->status;
452                 goto out;
453         }
454
455         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
456 out:
457         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
458
459         return err;
460 }
461
/**
 * sync_fence_wait() - sleep until @fence signals
 * @fence: fence to wait on
 * @timeout: timeout in milliseconds, or 0 to wait forever
 *
 * Returns 0 once the fence signals, the fence's negative status if it
 * errored, -ETIME if the timeout expired first, or -ERESTARTSYS if the
 * sleep was interrupted by a signal.
 *
 * NOTE(review): fence->status is read without waiter_list_lock here;
 * presumably the wake_up() in sync_fence_signal_pt() provides the
 * needed ordering -- confirm.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err;

        if (timeout) {
                timeout = msecs_to_jiffies(timeout);
                /* returns <0 on signal, 0 on timeout, >0 if condition hit */
                err = wait_event_interruptible_timeout(fence->wq,
                                                       fence->status != 0,
                                                       timeout);
        } else {
                err = wait_event_interruptible(fence->wq, fence->status != 0);
        }

        if (err < 0)
                return err;

        if (fence->status < 0)
                return fence->status;

        /* status still 0 means the timeout path woke us, not a signal */
        if (fence->status == 0)
                return -ETIME;

        return 0;
}
486
487 static int sync_fence_release(struct inode *inode, struct file *file)
488 {
489         struct sync_fence *fence = file->private_data;
490         unsigned long flags;
491
492         sync_fence_free_pts(fence);
493
494         spin_lock_irqsave(&sync_fence_list_lock, flags);
495         list_del(&fence->sync_fence_list);
496         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
497
498         kfree(fence);
499
500         return 0;
501 }
502
503 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
504 {
505         struct sync_fence *fence = file->private_data;
506
507         poll_wait(file, &fence->wq, wait);
508
509         if (fence->status == 1)
510                 return POLLIN;
511         else if (fence->status < 0)
512                 return POLLERR;
513         else
514                 return 0;
515 }
516
517 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
518 {
519         __u32 value;
520
521         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
522                 return -EFAULT;
523
524         return sync_fence_wait(fence, value);
525 }
526
527 static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
528 {
529         int fd = get_unused_fd();
530         int err;
531         struct sync_fence *fence2, *fence3;
532         struct sync_merge_data data;
533
534         if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
535                 return -EFAULT;
536
537         fence2 = sync_fence_fdget(data.fd2);
538         if (fence2 == NULL) {
539                 err = -ENOENT;
540                 goto err_put_fd;
541         }
542
543         data.name[sizeof(data.name) - 1] = '\0';
544         fence3 = sync_fence_merge(data.name, fence, fence2);
545         if (fence3 == NULL) {
546                 err = -ENOMEM;
547                 goto err_put_fence2;
548         }
549
550         data.fence = fd;
551         if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
552                 err = -EFAULT;
553                 goto err_put_fence3;
554         }
555
556         sync_fence_install(fence3, fd);
557         sync_fence_put(fence2);
558         return 0;
559
560 err_put_fence3:
561         sync_fence_put(fence3);
562
563 err_put_fence2:
564         sync_fence_put(fence2);
565
566 err_put_fd:
567         put_unused_fd(fd);
568         return err;
569 }
570
/*
 * Serialize one pt into @data as a struct sync_pt_info (fixed header
 * followed by an optional driver blob).  Returns the total number of
 * bytes written, -ENOMEM if @size cannot hold even the header, or a
 * negative error from the driver's fill_driver_data().
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                /* driver blob lands directly after the fixed header */
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}
598
/*
 * SYNC_IOC_FENCE_INFO: copy a struct sync_fence_info_data (header plus
 * one sync_pt_info per pt) back to userspace.  Userspace passes its
 * buffer size as the first __u32 of @arg; the bytes actually written
 * are reported back via data->len.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        /* cap the kernel-side scratch buffer at one 4 KiB page */
        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        /*
         * NOTE(review): pt_list is walked unlocked; presumably safe
         * because pts are only attached at fence-creation time -- confirm.
         */
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}
649
650 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
651                              unsigned long arg)
652 {
653         struct sync_fence *fence = file->private_data;
654         switch (cmd) {
655         case SYNC_IOC_WAIT:
656                 return sync_fence_ioctl_wait(fence, arg);
657
658         case SYNC_IOC_MERGE:
659                 return sync_fence_ioctl_merge(fence, arg);
660
661         case SYNC_IOC_FENCE_INFO:
662                 return sync_fence_ioctl_fence_info(fence, arg);
663
664         default:
665                 return -ENOTTY;
666         }
667 }
668
669 #ifdef CONFIG_DEBUG_FS
/* Human-readable form of a pt/fence status for the debugfs dump. */
static const char *sync_status_str(int status)
{
        if (status < 0)
                return "error";
        return status ? "signaled" : "active";
}
679
/*
 * Dump one pt line: "  [<obj>_]pt <status>[@sec.usec][: driver data]".
 * @fence selects the fence-view prefix (timeline name + '_').
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;
        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                /* signaled or errored: show when the transition happened */
                struct timeval tv = ktime_to_timeval(pt->timestamp);
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}
699
/* Dump one timeline header line followed by all of its child pts. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        /* hold child_list_lock so pts cannot be freed mid-walk */
        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
722
/* Dump one fence: its status, its pts, and any pending async waiters. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

        /*
         * NOTE(review): pt_list is walked unlocked here, unlike the
         * waiter list below -- presumably safe because pts are attached
         * only at fence creation; confirm.
         */
        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF %p\n", waiter->callback,
                           waiter->callback_data);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
747
/* debugfs "sync" file: dump every timeline, then every fence. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        /* the registry lock keeps timelines alive for the walk */
        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}
779
/* Bind the single-shot seq_file show routine to the debugfs file. */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}
784
/* seq_file boilerplate for the read-only debugfs "sync" file */
static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
791
/* Create the debugfs "sync" entry; debugfs is best-effort, so the
 * dentry/error return is deliberately ignored. */
static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}

late_initcall(sync_debugfs_init);
799
800 #endif