sync: use correct signed type when handling SYNC_IOC_WAIT
[linux-2.6.git] / drivers / base / sync.c
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>
30
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

/*
 * Global registries of all live timelines and fences, used only by
 * the debugfs dumper (sync_debugfs_show()) and sync_dump().
 */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
41
/*
 * sync_timeline_create() - creates a sync object
 * @ops:	specialization ops for the new timeline
 * @size:	size to allocate; must be at least
 *		sizeof(struct sync_timeline) so drivers can embed the
 *		timeline at the head of a larger private struct
 * @name:	timeline name (copied, may be truncated to fit obj->name)
 *
 * Returns the new timeline with a single reference held, registered on
 * the global debug list, or NULL on bad size / allocation failure.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	/* the driver's struct must embed sync_timeline at offset 0 */
	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	/* make the timeline visible to the debugfs dumper */
	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
72
/*
 * kref release callback for a timeline: give the driver a chance to
 * release its private state, unregister from the global debug list,
 * then free the object.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
88
/*
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	timeline to destroy
 *
 * Marks the timeline destroyed (so still-pending pts report -ENOENT
 * via _sync_pt_has_signaled()) and drops the creator's reference.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
102
/*
 * Attach @pt to @obj: set its parent and add it to the timeline's
 * child list.  The caller is expected to hold a timeline reference
 * on the pt's behalf (see sync_pt_create()).
 */
static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
113
114 static void sync_timeline_remove_pt(struct sync_pt *pt)
115 {
116         struct sync_timeline *obj = pt->parent;
117         unsigned long flags;
118
119         spin_lock_irqsave(&obj->active_list_lock, flags);
120         if (!list_empty(&pt->active_list))
121                 list_del_init(&pt->active_list);
122         spin_unlock_irqrestore(&obj->active_list_lock, flags);
123
124         spin_lock_irqsave(&obj->child_list_lock, flags);
125         if (!list_empty(&pt->child_list)) {
126                 list_del_init(&pt->child_list);
127         }
128         spin_unlock_irqrestore(&obj->child_list_lock, flags);
129 }
130
/*
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	timeline whose pts may now have signaled
 *
 * Phase 1 (under active_list_lock): move every pt that now reports
 * signaled onto a private list, taking a reference on each pt's fence
 * so the fence cannot be freed before it is notified.
 * Phase 2 (lock dropped): notify each fence and drop that reference.
 * Fence waiter callbacks are deliberately run without the spinlock.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
162
/*
 * sync_pt_create() - creates a sync pt
 * @parent:	sync_timeline the pt will live on
 * @size:	size to allocate; must be at least sizeof(struct sync_pt)
 *		so drivers can embed the pt in a larger private struct
 *
 * Takes a reference on @parent for the pt's lifetime (dropped in
 * sync_pt_free()).  Returns NULL on bad size or allocation failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
181
/*
 * sync_pt_free() - frees a sync pt
 * @pt:	sync_pt to free
 *
 * Lets the driver release its private state, unlinks the pt from its
 * timeline, then drops the timeline reference taken in sync_pt_create().
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
194
/*
 * Query and cache the signaled state of @pt.
 *
 * The driver's ->has_signaled() is only consulted while the pt is
 * still pending (status == 0).  A pending pt whose timeline has been
 * destroyed is forced to -ENOENT.  The timestamp is recorded on any
 * status transition.
 *
 * Returns 0 (pending), positive (signaled) or a negative error.
 *
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
211
212 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
213 {
214         return pt->parent->ops->dup(pt);
215 }
216
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	/* an already signaled (or errored) pt never enters the active list */
	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
235
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


/* file_operations backing the anon inode handed to userspace per fence */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
247
248 static struct sync_fence *sync_fence_alloc(const char *name)
249 {
250         struct sync_fence *fence;
251         unsigned long flags;
252
253         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
254         if (fence == NULL)
255                 return NULL;
256
257         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
258                                          fence, 0);
259         if (fence->file == NULL)
260                 goto err;
261
262         kref_init(&fence->kref);
263         strlcpy(fence->name, name, sizeof(fence->name));
264
265         INIT_LIST_HEAD(&fence->pt_list_head);
266         INIT_LIST_HEAD(&fence->waiter_list_head);
267         spin_lock_init(&fence->waiter_list_lock);
268
269         init_waitqueue_head(&fence->wq);
270
271         spin_lock_irqsave(&sync_fence_list_lock, flags);
272         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
273         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
274
275         return fence;
276
277 err:
278         kfree(fence);
279         return NULL;
280 }
281
/* TODO: implement a create which takes more than one sync_pt */
/*
 * sync_fence_create() - creates a fence containing a single sync_pt
 * @name:	fence name
 * @pt:		pt to wrap; must not already belong to a fence
 *
 * Takes ownership of @pt (the fence frees it on release).  Returns
 * NULL if @pt is already owned or allocation fails.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
301
/*
 * Duplicate every pt of @src into @dst.  Returns 0 or -ENOMEM; on
 * failure @dst may hold a partial copy, which the caller is expected
 * to clean up (see sync_fence_merge()).
 */
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}
321
/*
 * Merge the pts of @src into @dst.  Where @dst already carries a pt
 * on the same timeline as a src pt, the two are collapsed into one pt
 * that signals at the later of the two; pts on timelines @dst doesn't
 * yet cover are duplicated and appended.  Returns 0 or -ENOMEM; on
 * failure @dst may hold a partial merge for the caller to clean up.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				/* compare() == -1: dst is earlier, replace it */
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			/* first pt on this timeline: just duplicate it */
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}
370
371 static void sync_fence_detach_pts(struct sync_fence *fence)
372 {
373         struct list_head *pos, *n;
374
375         list_for_each_safe(pos, n, &fence->pt_list_head) {
376                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377                 sync_timeline_remove_pt(pt);
378         }
379 }
380
381 static void sync_fence_free_pts(struct sync_fence *fence)
382 {
383         struct list_head *pos, *n;
384
385         list_for_each_safe(pos, n, &fence->pt_list_head) {
386                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387                 sync_pt_free(pt);
388         }
389 }
390
391 struct sync_fence *sync_fence_fdget(int fd)
392 {
393         struct file *file = fget(fd);
394
395         if (file == NULL)
396                 return NULL;
397
398         if (file->f_op != &sync_fence_fops)
399                 goto err;
400
401         return file->private_data;
402
403 err:
404         fput(file);
405         return NULL;
406 }
407 EXPORT_SYMBOL(sync_fence_fdget);
408
/*
 * sync_fence_put() - drop the file reference taken by
 * sync_fence_fdget(); the last fput triggers sync_fence_release().
 */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
414
/*
 * sync_fence_install() - install the fence's file on a reserved fd,
 * publishing it to userspace.  @fd must come from get_unused_fd().
 */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
420
421 static int sync_fence_get_status(struct sync_fence *fence)
422 {
423         struct list_head *pos;
424         int status = 1;
425
426         list_for_each(pos, &fence->pt_list_head) {
427                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428                 int pt_status = pt->status;
429
430                 if (pt_status < 0) {
431                         status = pt_status;
432                         break;
433                 } else if (status == 1) {
434                         status = pt_status;
435                 }
436         }
437
438         return status;
439 }
440
441 struct sync_fence *sync_fence_merge(const char *name,
442                                     struct sync_fence *a, struct sync_fence *b)
443 {
444         struct sync_fence *fence;
445         int err;
446
447         fence = sync_fence_alloc(name);
448         if (fence == NULL)
449                 return NULL;
450
451         err = sync_fence_copy_pts(fence, a);
452         if (err < 0)
453                 goto err;
454
455         err = sync_fence_merge_pts(fence, b);
456         if (err < 0)
457                 goto err;
458
459         fence->status = sync_fence_get_status(fence);
460
461         return fence;
462 err:
463         sync_fence_free_pts(fence);
464         kfree(fence);
465         return NULL;
466 }
467 EXPORT_SYMBOL(sync_fence_merge);
468
/*
 * Called when @pt signals: recompute the owning fence's aggregate
 * status and, on the fence's pending -> signaled transition, run all
 * async waiter callbacks and wake sleeping waiters.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* we won the transition: steal the whole waiter list */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* fence still pending, or another thread beat us to it */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* run the callbacks without holding the spinlock */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
507
/*
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Queues @waiter to be called back when @fence signals.  If the fence
 * has already signaled, the waiter is NOT queued and the fence's
 * non-zero status is returned instead; returns 0 when queued.
 */
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
528
/*
 * sync_fence_cancel_async() - cancels an async wait
 * @fence:	fence the wait was registered on
 * @waiter:	waiter to remove
 *
 * Returns 0 if @waiter was removed before its callback fired,
 * -ENOENT if it was no longer queued (already dispatched).
 */
int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
557
/*
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms; < 0 waits indefinitely, 0 just checks the
 *		current state without sleeping
 *
 * Returns 0 once the fence signals, the fence's negative status if it
 * signaled in error, -ERESTARTSYS if the wait was interrupted, or
 * -ETIME (after dumping sync state to the kernel log) if the fence is
 * still pending when the wait ends.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0) {
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
585
/* kref release callback for a fence: free its pts and the fence itself. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
594
/* file release: the last userspace reference to the fence is gone */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
620
621 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
622 {
623         struct sync_fence *fence = file->private_data;
624
625         poll_wait(file, &fence->wq, wait);
626
627         if (fence->status == 1)
628                 return POLLIN;
629         else if (fence->status < 0)
630                 return POLLERR;
631         else
632                 return 0;
633 }
634
/*
 * SYNC_IOC_WAIT: wait for the fence with a timeout supplied by
 * userspace.  The timeout is read as a *signed* 32-bit value: a
 * negative timeout means "wait forever", which would be lost if the
 * value were treated as unsigned.
 */
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
644
/*
 * SYNC_IOC_MERGE: merge this fence with the fence whose fd is in the
 * sync_merge_data at @arg, and return a new fd for the merged fence
 * in data.fence.  The fd is reserved up front so every error path can
 * release it with put_unused_fd(); the file is only installed once
 * the result has been copied back to userspace.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* guard against an unterminated name from userspace */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
693
/*
 * Serialize one sync_pt into the userspace-visible sync_pt_info
 * record at @data.  Returns the number of bytes written (info->len,
 * fixed header plus any driver payload), a driver error, or -ENOMEM
 * when @size cannot hold even the fixed header.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
721
/*
 * SYNC_IOC_FENCE_INFO: fill the user buffer at @arg with the fence's
 * name/status followed by one sync_pt_info record per pt.  The first
 * __u32 of the buffer gives its total size (clamped to 4096); on
 * success the buffer is written back with data->len set to the bytes
 * actually used.  Returns -ENOMEM (via sync_fill_pt_info()) when the
 * caller's buffer is too small for all pts.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the kernel-side scratch allocation */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
772
/* ioctl dispatcher for the sync fence fd */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
791
792 #ifdef CONFIG_DEBUG_FS
/* Map a sync status code (>0 / 0 / <0) to a human-readable string. */
static const char *sync_status_str(int status)
{
	if (status == 0)
		return "active";

	return (status > 0) ? "signaled" : "error";
}
802
/*
 * Dump one pt to the seq_file.  @fence selects the naming style:
 * true (printing within a fence listing) prefixes the pt with its
 * timeline name, false (printing within a timeline listing) doesn't.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* timestamp is only set once the status has transitioned */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
822
/* Dump a timeline and all of its child pts to the seq_file. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
845
/* Dump a fence, its pts and its pending async waiters to the seq_file. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
869
/* debugfs "sync" file: dump every registered timeline, then every fence */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
901
/* open handler wiring the debugfs file to the single-shot dumper */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
906
/* standard single_open()-based seq_file fops for the debugfs entry */
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
913
/* create the read-only /sys/kernel/debug/sync entry at late init */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
920
921 #define DUMP_CHUNK 256
922 static char sync_dump_buf[64 * 1024];
923 void sync_dump(void)
924 {
925        struct seq_file s = {
926                .buf = sync_dump_buf,
927                .size = sizeof(sync_dump_buf) - 1,
928        };
929        int i;
930
931        sync_debugfs_show(&s, NULL);
932
933        for (i = 0; i < s.count; i += DUMP_CHUNK) {
934                if ((s.count - i) > DUMP_CHUNK) {
935                        char c = s.buf[i + DUMP_CHUNK];
936                        s.buf[i + DUMP_CHUNK] = 0;
937                        pr_cont("%s", s.buf + i);
938                        s.buf[i + DUMP_CHUNK] = c;
939                } else {
940                        s.buf[s.count] = 0;
941                        pr_cont("%s", s.buf + i);
942                }
943        }
944 }
945 #else
946 static void sync_dump(void)
947 {
948 }
949 #endif