drivers/base/sync.c — Android synchronization (timeline/fence) driver
[linux-2.6.git] / drivers / base / sync.c
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>
30
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

/* global registries of all timelines and fences, walked by the debugfs dump */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
40
/*
 * sync_timeline_create() - allocate and register a new sync timeline.
 * @ops:  driver callbacks for this timeline
 * @size: total allocation size; drivers embed sync_timeline at the start
 *        of a larger private struct, so size must be >= the base struct
 * @name: human-readable name, copied (and truncated) into obj->name
 *
 * Returns the new timeline, or NULL on bad size / allocation failure.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
                                           int size, const char *name)
{
        struct sync_timeline *obj;
        unsigned long flags;

        if (size < sizeof(struct sync_timeline))
                return NULL;

        obj = kzalloc(size, GFP_KERNEL);
        if (obj == NULL)
                return NULL;

        kref_init(&obj->kref);
        obj->ops = ops;
        strlcpy(obj->name, name, sizeof(obj->name));

        INIT_LIST_HEAD(&obj->child_list_head);
        spin_lock_init(&obj->child_list_lock);

        INIT_LIST_HEAD(&obj->active_list_head);
        spin_lock_init(&obj->active_list_lock);

        /* make the timeline visible to the debugfs dump */
        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
71
/*
 * kref release callback for a sync_timeline: give the driver a chance to
 * clean up, unregister from the global debugfs list, then free the object.
 */
static void sync_timeline_free(struct kref *kref)
{
        struct sync_timeline *obj =
                container_of(kref, struct sync_timeline, kref);
        unsigned long flags;

        /* let the driver release whatever it attached to the timeline */
        if (obj->ops->release_obj)
                obj->ops->release_obj(obj);

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_del(&obj->sync_timeline_list);
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        kfree(obj);
}
87
88 void sync_timeline_destroy(struct sync_timeline *obj)
89 {
90         unsigned long flags;
91         bool needs_freeing;
92
93         obj->destroyed = true;
94
95         /*
96          * If this is not the last reference, signal any children
97          * that their parent is going away.
98          */
99
100         if (!kref_put(&obj->kref, sync_timeline_free))
101                 sync_timeline_signal(obj);
102 }
103 EXPORT_SYMBOL(sync_timeline_destroy);
104
105 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
106 {
107         unsigned long flags;
108
109         pt->parent = obj;
110
111         spin_lock_irqsave(&obj->child_list_lock, flags);
112         list_add_tail(&pt->child_list, &obj->child_list_head);
113         spin_unlock_irqrestore(&obj->child_list_lock, flags);
114 }
115
116 static void sync_timeline_remove_pt(struct sync_pt *pt)
117 {
118         struct sync_timeline *obj = pt->parent;
119         unsigned long flags;
120         bool needs_freeing = false;
121
122         spin_lock_irqsave(&obj->active_list_lock, flags);
123         if (!list_empty(&pt->active_list))
124                 list_del_init(&pt->active_list);
125         spin_unlock_irqrestore(&obj->active_list_lock, flags);
126
127         spin_lock_irqsave(&obj->child_list_lock, flags);
128         if (!list_empty(&pt->child_list)) {
129                 list_del_init(&pt->child_list);
130         }
131         spin_unlock_irqrestore(&obj->child_list_lock, flags);
132 }
133
/*
 * sync_timeline_signal() - called by the driver when the timeline advances.
 *
 * Phase 1 (under active_list_lock): move every pt that has now signaled
 * onto a private list, taking a temporary fence reference per pt so the
 * fence cannot be freed between the phases.
 * Phase 2 (lock dropped): run the fence signaling / waiter callbacks,
 * which may take other locks or free pts, then drop the temporary refs.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
        unsigned long flags;
        LIST_HEAD(signaled_pts);
        struct list_head *pos, *n;

        spin_lock_irqsave(&obj->active_list_lock, flags);

        list_for_each_safe(pos, n, &obj->active_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, active_list);

                if (_sync_pt_has_signaled(pt)) {
                        list_del_init(pos);
                        list_add(&pt->signaled_list, &signaled_pts);
                        /* keep the fence alive until phase 2 has run */
                        kref_get(&pt->fence->kref);
                }
        }

        spin_unlock_irqrestore(&obj->active_list_lock, flags);

        /* phase 2: notify fences without holding active_list_lock */
        list_for_each_safe(pos, n, &signaled_pts) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, signaled_list);

                list_del_init(pos);
                sync_fence_signal_pt(pt);
                kref_put(&pt->fence->kref, sync_fence_free);
        }
}
EXPORT_SYMBOL(sync_timeline_signal);
165
166 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
167 {
168         struct sync_pt *pt;
169
170         if (size < sizeof(struct sync_pt))
171                 return NULL;
172
173         pt = kzalloc(size, GFP_KERNEL);
174         if (pt == NULL)
175                 return NULL;
176
177         INIT_LIST_HEAD(&pt->active_list);
178         kref_get(&parent->kref);
179         sync_timeline_add_pt(parent, pt);
180
181         return pt;
182 }
183 EXPORT_SYMBOL(sync_pt_create);
184
/*
 * sync_pt_free() - release a pt.  Order matters: driver teardown first,
 * then detach from the parent timeline's lists, then drop the timeline
 * reference taken in sync_pt_create() (which may free the timeline),
 * and finally free the pt itself.
 */
void sync_pt_free(struct sync_pt *pt)
{
        if (pt->parent->ops->free_pt)
                pt->parent->ops->free_pt(pt);

        sync_timeline_remove_pt(pt);

        kref_put(&pt->parent->kref, sync_timeline_free);

        kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
197
198 /* call with pt->parent->active_list_lock held */
199 static int _sync_pt_has_signaled(struct sync_pt *pt)
200 {
201         int old_status = pt->status;
202
203         if (!pt->status)
204                 pt->status = pt->parent->ops->has_signaled(pt);
205
206         if (!pt->status && pt->parent->destroyed)
207                 pt->status = -ENOENT;
208
209         if (pt->status != old_status)
210                 pt->timestamp = ktime_get();
211
212         return pt->status;
213 }
214
215 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
216 {
217         return pt->parent->ops->dup(pt);
218 }
219
220 /* Adds a sync pt to the active queue.  Called when added to a fence */
221 static void sync_pt_activate(struct sync_pt *pt)
222 {
223         struct sync_timeline *obj = pt->parent;
224         unsigned long flags;
225         int err;
226
227         spin_lock_irqsave(&obj->active_list_lock, flags);
228
229         err = _sync_pt_has_signaled(pt);
230         if (err != 0)
231                 goto out;
232
233         list_add_tail(&pt->active_list, &obj->active_list_head);
234
235 out:
236         spin_unlock_irqrestore(&obj->active_list_lock, flags);
237 }
238
/* file_operations backing the anonymous fence fd handed to userspace */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg);


static const struct file_operations sync_fence_fops = {
        .release = sync_fence_release,
        .poll = sync_fence_poll,
        .unlocked_ioctl = sync_fence_ioctl,
};
250
251 static struct sync_fence *sync_fence_alloc(const char *name)
252 {
253         struct sync_fence *fence;
254         unsigned long flags;
255
256         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
257         if (fence == NULL)
258                 return NULL;
259
260         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
261                                          fence, 0);
262         if (fence->file == NULL)
263                 goto err;
264
265         kref_init(&fence->kref);
266         strlcpy(fence->name, name, sizeof(fence->name));
267
268         INIT_LIST_HEAD(&fence->pt_list_head);
269         INIT_LIST_HEAD(&fence->waiter_list_head);
270         spin_lock_init(&fence->waiter_list_lock);
271
272         init_waitqueue_head(&fence->wq);
273
274         spin_lock_irqsave(&sync_fence_list_lock, flags);
275         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
276         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
277
278         return fence;
279
280 err:
281         kfree(fence);
282         return NULL;
283 }
284
/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
        struct sync_fence *fence;

        /* a pt can belong to at most one fence */
        if (pt->fence)
                return NULL;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        pt->fence = fence;
        list_add(&pt->pt_list, &fence->pt_list_head);
        /* queue the pt on its timeline; signals immediately if already done */
        sync_pt_activate(pt);

        return fence;
}
EXPORT_SYMBOL(sync_fence_create);
304
/*
 * Duplicate every pt of @src onto @dst.  On -ENOMEM the pts copied so far
 * are left on @dst; the caller (sync_fence_merge) frees them on its error
 * path via sync_fence_free_pts().
 */
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *pos;

        list_for_each(pos, &src->pt_list_head) {
                struct sync_pt *orig_pt =
                        container_of(pos, struct sync_pt, pt_list);
                struct sync_pt *new_pt = sync_pt_dup(orig_pt);

                if (new_pt == NULL)
                        return -ENOMEM;

                new_pt->fence = dst;
                list_add(&new_pt->pt_list, &dst->pt_list_head);
                sync_pt_activate(new_pt);
        }

        return 0;
}
324
/*
 * Merge the pts of @src into @dst.  Pts on timelines @dst does not have
 * yet are duplicated onto @dst; where both fences have a pt on the same
 * timeline the pair is collapsed to whichever signals later.  On -ENOMEM
 * the partially merged @dst is cleaned up by the caller.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
        struct list_head *src_pos, *dst_pos, *n;

        list_for_each(src_pos, &src->pt_list_head) {
                struct sync_pt *src_pt =
                        container_of(src_pos, struct sync_pt, pt_list);
                bool collapsed = false;

                list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
                        struct sync_pt *dst_pt =
                                container_of(dst_pos, struct sync_pt, pt_list);
                        /* collapse two sync_pts on the same timeline
                         * to a single sync_pt that will signal at
                         * the later of the two
                         */
                        if (dst_pt->parent == src_pt->parent) {
                                /* compare() == -1: dst_pt orders earlier,
                                 * so replace it with a copy of src_pt */
                                if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
                                        struct sync_pt *new_pt =
                                                sync_pt_dup(src_pt);
                                        if (new_pt == NULL)
                                                return -ENOMEM;

                                        new_pt->fence = dst;
                                        list_replace(&dst_pt->pt_list,
                                                     &new_pt->pt_list);
                                        sync_pt_activate(new_pt);
                                        sync_pt_free(dst_pt);
                                }
                                collapsed = true;
                                break;
                        }
                }

                if (!collapsed) {
                        /* dst has no pt on this timeline yet: copy it over */
                        struct sync_pt *new_pt = sync_pt_dup(src_pt);

                        if (new_pt == NULL)
                                return -ENOMEM;

                        new_pt->fence = dst;
                        list_add(&new_pt->pt_list, &dst->pt_list_head);
                        sync_pt_activate(new_pt);
                }
        }

        return 0;
}
373
374 static void sync_fence_detach_pts(struct sync_fence *fence)
375 {
376         struct list_head *pos, *n;
377
378         list_for_each_safe(pos, n, &fence->pt_list_head) {
379                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
380                 sync_timeline_remove_pt(pt);
381         }
382 }
383
384 static void sync_fence_free_pts(struct sync_fence *fence)
385 {
386         struct list_head *pos, *n;
387
388         list_for_each_safe(pos, n, &fence->pt_list_head) {
389                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
390                 sync_pt_free(pt);
391         }
392 }
393
394 struct sync_fence *sync_fence_fdget(int fd)
395 {
396         struct file *file = fget(fd);
397
398         if (file == NULL)
399                 return NULL;
400
401         if (file->f_op != &sync_fence_fops)
402                 goto err;
403
404         return file->private_data;
405
406 err:
407         fput(file);
408         return NULL;
409 }
410 EXPORT_SYMBOL(sync_fence_fdget);
411
/*
 * sync_fence_put() - drop the file reference taken by sync_fence_fdget();
 * the fence is torn down once all file references are gone.
 */
void sync_fence_put(struct sync_fence *fence)
{
        fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
417
/*
 * sync_fence_install() - publish the fence on fd @fd, transferring the
 * caller's file reference to the fd table.
 */
void sync_fence_install(struct sync_fence *fence, int fd)
{
        fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
423
424 static int sync_fence_get_status(struct sync_fence *fence)
425 {
426         struct list_head *pos;
427         int status = 1;
428
429         list_for_each(pos, &fence->pt_list_head) {
430                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
431                 int pt_status = pt->status;
432
433                 if (pt_status < 0) {
434                         status = pt_status;
435                         break;
436                 } else if (status == 1) {
437                         status = pt_status;
438                 }
439         }
440
441         return status;
442 }
443
/*
 * sync_fence_merge() - create a fence that signals once both @a and @b
 * have signaled (pts sharing a timeline are collapsed to the later one).
 * Returns NULL on allocation failure; @a and @b are left untouched.
 *
 * NOTE(review): on the error path the file created by sync_fence_alloc()
 * (fence->file) is not released before kfree(fence) — looks like a
 * struct-file leak; verify against later upstream fixes.
 */
struct sync_fence *sync_fence_merge(const char *name,
                                    struct sync_fence *a, struct sync_fence *b)
{
        struct sync_fence *fence;
        int err;

        fence = sync_fence_alloc(name);
        if (fence == NULL)
                return NULL;

        err = sync_fence_copy_pts(fence, a);
        if (err < 0)
                goto err;

        err = sync_fence_merge_pts(fence, b);
        if (err < 0)
                goto err;

        /* the merged pts may already have signaled; set initial status */
        fence->status = sync_fence_get_status(fence);

        return fence;
err:
        sync_fence_free_pts(fence);
        kfree(fence);
        return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
471
/*
 * Called (via sync_timeline_signal) when @pt has signaled.  Recomputes the
 * fence's aggregate status; on the fence's active -> signaled transition,
 * claims the waiter list under the lock, then runs the callbacks and wakes
 * sleepers on fence->wq with the lock dropped.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
        LIST_HEAD(signaled_waiters);
        struct sync_fence *fence = pt->fence;
        struct list_head *pos;
        struct list_head *n;
        unsigned long flags;
        int status;

        status = sync_fence_get_status(fence);

        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        /*
         * this should protect against two threads racing on the signaled
         * false -> true transition
         */
        if (status && !fence->status) {
                /* we won the race: take all waiters while holding the lock */
                list_for_each_safe(pos, n, &fence->waiter_list_head)
                        list_move(pos, &signaled_waiters);

                fence->status = status;
        } else {
                status = 0;
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

        if (status) {
                /* run callbacks outside the lock; they may re-enter sync */
                list_for_each_safe(pos, n, &signaled_waiters) {
                        struct sync_fence_waiter *waiter =
                                container_of(pos, struct sync_fence_waiter,
                                             waiter_list);

                        list_del(pos);
                        waiter->callback(fence, waiter);
                }
                wake_up(&fence->wq);
        }
}
510
511 int sync_fence_wait_async(struct sync_fence *fence,
512                           struct sync_fence_waiter *waiter)
513 {
514         unsigned long flags;
515         int err = 0;
516
517         spin_lock_irqsave(&fence->waiter_list_lock, flags);
518
519         if (fence->status) {
520                 err = fence->status;
521                 goto out;
522         }
523
524         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
525 out:
526         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
527
528         return err;
529 }
530 EXPORT_SYMBOL(sync_fence_wait_async);
531
532 int sync_fence_cancel_async(struct sync_fence *fence,
533                              struct sync_fence_waiter *waiter)
534 {
535         struct list_head *pos;
536         struct list_head *n;
537         unsigned long flags;
538         int ret = -ENOENT;
539
540         spin_lock_irqsave(&fence->waiter_list_lock, flags);
541         /*
542          * Make sure waiter is still in waiter_list because it is possible for
543          * the waiter to be removed from the list while the callback is still
544          * pending.
545          */
546         list_for_each_safe(pos, n, &fence->waiter_list_head) {
547                 struct sync_fence_waiter *list_waiter =
548                         container_of(pos, struct sync_fence_waiter,
549                                      waiter_list);
550                 if (list_waiter == waiter) {
551                         list_del(pos);
552                         ret = 0;
553                         break;
554                 }
555         }
556         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
557         return ret;
558 }
559 EXPORT_SYMBOL(sync_fence_cancel_async);
560
/*
 * sync_fence_wait() - block until the fence signals.
 * @timeout: timeout in milliseconds; 0 means wait with no timeout.
 *
 * Returns 0 once signaled, -ETIME if the timeout expired first, the
 * fence's negative error status if it signaled in error, or a negative
 * value (e.g. -ERESTARTSYS) if the wait was interrupted.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
        int err;

        if (timeout) {
                timeout = msecs_to_jiffies(timeout);
                err = wait_event_interruptible_timeout(fence->wq,
                                                       fence->status != 0,
                                                       timeout);
        } else {
                err = wait_event_interruptible(fence->wq, fence->status != 0);
        }

        if (err < 0)
                return err;

        if (fence->status < 0)
                return fence->status;

        /* status still 0 here means the timed wait expired */
        if (fence->status == 0)
                return -ETIME;

        return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
586
/* kref release: last reference dropped, free the pts then the fence */
static void sync_fence_free(struct kref *kref)
{
        struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

        sync_fence_free_pts(fence);

        kfree(fence);
}
595
/* fd release: last userspace reference to the fence fd was closed */
static int sync_fence_release(struct inode *inode, struct file *file)
{
        struct sync_fence *fence = file->private_data;
        unsigned long flags;

        /*
         * We need to remove all ways to access this fence before dropping
         * our ref.
         *
         * start with its membership in the global fence list
         */
        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_del(&fence->sync_fence_list);
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);

        /*
         * remove its pts from their parents so that sync_timeline_signal()
         * can't reference the fence.
         */
        sync_fence_detach_pts(fence);

        kref_put(&fence->kref, sync_fence_free);

        return 0;
}
621
622 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
623 {
624         struct sync_fence *fence = file->private_data;
625
626         poll_wait(file, &fence->wq, wait);
627
628         if (fence->status == 1)
629                 return POLLIN;
630         else if (fence->status < 0)
631                 return POLLERR;
632         else
633                 return 0;
634 }
635
636 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
637 {
638         __u32 value;
639
640         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
641                 return -EFAULT;
642
643         return sync_fence_wait(fence, value);
644 }
645
/*
 * SYNC_IOC_MERGE: merge the fence behind this fd with the fence in
 * data.fd2 and return the result on a fresh fd via data.fence.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
        int fd = get_unused_fd();
        int err;
        struct sync_fence *fence2, *fence3;
        struct sync_merge_data data;

        if (fd < 0)
                return fd;

        if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fd;
        }

        fence2 = sync_fence_fdget(data.fd2);
        if (fence2 == NULL) {
                err = -ENOENT;
                goto err_put_fd;
        }

        /* name comes from userspace and may not be NUL-terminated */
        data.name[sizeof(data.name) - 1] = '\0';
        fence3 = sync_fence_merge(data.name, fence, fence2);
        if (fence3 == NULL) {
                err = -ENOMEM;
                goto err_put_fence2;
        }

        data.fence = fd;
        if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
                err = -EFAULT;
                goto err_put_fence3;
        }

        /* transfers the fence3 file reference into the fd table */
        sync_fence_install(fence3, fd);
        sync_fence_put(fence2);
        return 0;

err_put_fence3:
        sync_fence_put(fence3);

err_put_fence2:
        sync_fence_put(fence2);

err_put_fd:
        put_unused_fd(fd);
        return err;
}
694
/*
 * Serialize one pt as a struct sync_pt_info (fixed header plus optional
 * driver blob) into @data.  @size is the space remaining in the buffer.
 * Returns the number of bytes written or a negative errno (-ENOMEM when
 * the fixed header alone doesn't fit).
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
        struct sync_pt_info *info = data;
        int ret;

        if (size < sizeof(struct sync_pt_info))
                return -ENOMEM;

        info->len = sizeof(struct sync_pt_info);

        if (pt->parent->ops->fill_driver_data) {
                ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
                                                        size - sizeof(*info));
                if (ret < 0)
                        return ret;

                /* driver blob is appended directly after the fixed header */
                info->len += ret;
        }

        strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
        strlcpy(info->driver_name, pt->parent->ops->driver_name,
                sizeof(info->driver_name));
        info->status = pt->status;
        info->timestamp_ns = ktime_to_ns(pt->timestamp);

        return info->len;
}
722
/*
 * SYNC_IOC_FENCE_INFO: copy a sync_fence_info_data header followed by one
 * sync_pt_info record per pt back to userspace.  Userspace supplies its
 * buffer size in the first __u32 of the buffer; it is clamped to 4096.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
                                        unsigned long arg)
{
        struct sync_fence_info_data *data;
        struct list_head *pos;
        __u32 size;
        __u32 len = 0;
        int ret;

        if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
                return -EFAULT;

        if (size < sizeof(struct sync_fence_info_data))
                return -EINVAL;

        if (size > 4096)
                size = 4096;

        data = kzalloc(size, GFP_KERNEL);
        if (data == NULL)
                return -ENOMEM;

        strlcpy(data->name, fence->name, sizeof(data->name));
        data->status = fence->status;
        len = sizeof(struct sync_fence_info_data);

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);

                /* append each pt record right after what we've written */
                ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

                if (ret < 0)
                        goto out;

                len += ret;
        }

        data->len = len;

        if (copy_to_user((void __user *)arg, data, len))
                ret = -EFAULT;
        else
                ret = 0;

out:
        kfree(data);

        return ret;
}
773
774 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
775                              unsigned long arg)
776 {
777         struct sync_fence *fence = file->private_data;
778         switch (cmd) {
779         case SYNC_IOC_WAIT:
780                 return sync_fence_ioctl_wait(fence, arg);
781
782         case SYNC_IOC_MERGE:
783                 return sync_fence_ioctl_merge(fence, arg);
784
785         case SYNC_IOC_FENCE_INFO:
786                 return sync_fence_ioctl_fence_info(fence, arg);
787
788         default:
789                 return -ENOTTY;
790         }
791 }
792
793 #ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status to the word used in the debugfs dump. */
static const char *sync_status_str(int status)
{
        if (status < 0)
                return "error";

        return status ? "signaled" : "active";
}
803
/*
 * debugfs: print one pt.  @fence selects the fence-view prefix (timeline
 * name + "_") vs. the timeline view (no prefix).  Signaled/errored pts
 * also get their signal timestamp and any driver-specific detail.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
        int status = pt->status;
        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }

        if (pt->parent->ops->print_pt) {
                seq_printf(s, ": ");
                pt->parent->ops->print_pt(s, pt);
        }

        seq_printf(s, "\n");
}
823
/* debugfs: dump one timeline and all of its child pts */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

        if (obj->ops->print_obj) {
                seq_printf(s, ": ");
                obj->ops->print_obj(s, obj);
        }

        seq_printf(s, "\n");

        /* hold child_list_lock so pts can't be added/removed mid-dump */
        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, child_list);
                sync_print_pt(s, pt, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
846
/* debugfs: dump one fence, its pts and its pending async waiters */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
        struct list_head *pos;
        unsigned long flags;

        seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

        list_for_each(pos, &fence->pt_list_head) {
                struct sync_pt *pt =
                        container_of(pos, struct sync_pt, pt_list);
                sync_print_pt(s, pt, true);
        }

        /* waiter list is mutated under waiter_list_lock; hold it to walk */
        spin_lock_irqsave(&fence->waiter_list_lock, flags);
        list_for_each(pos, &fence->waiter_list_head) {
                struct sync_fence_waiter *waiter =
                        container_of(pos, struct sync_fence_waiter,
                                     waiter_list);

                seq_printf(s, "waiter %pF\n", waiter->callback);
        }
        spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
870
/* debugfs "sync" file body: dump every timeline, then every fence */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
        unsigned long flags;
        struct list_head *pos;

        seq_printf(s, "objs:\n--------------\n");

        spin_lock_irqsave(&sync_timeline_list_lock, flags);
        list_for_each(pos, &sync_timeline_list_head) {
                struct sync_timeline *obj =
                        container_of(pos, struct sync_timeline,
                                     sync_timeline_list);

                sync_print_obj(s, obj);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

        seq_printf(s, "fences:\n--------------\n");

        spin_lock_irqsave(&sync_fence_list_lock, flags);
        list_for_each(pos, &sync_fence_list_head) {
                struct sync_fence *fence =
                        container_of(pos, struct sync_fence, sync_fence_list);

                sync_print_fence(s, fence);
                seq_printf(s, "\n");
        }
        spin_unlock_irqrestore(&sync_fence_list_lock, flags);
        return 0;
}
902
/* open handler: bind the seq_file show routine for the "sync" node */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, sync_debugfs_show, inode->i_private);
}
907
static const struct file_operations sync_debugfs_fops = {
        .open           = sync_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* create the read-only <debugfs>/sync dump node */
static __init int sync_debugfs_init(void)
{
        debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
        return 0;
}

late_initcall(sync_debugfs_init);
922
923 #endif