/* drivers/base/sync.c — Android sync timeline/fence framework */
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sync.h>
#include <linux/uaccess.h>

#include <linux/anon_inodes.h>
30
/* forward declarations for helpers defined later in this file */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

/* global list of all timelines (for the debugfs dump), with its lock */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* global list of all fences (for the debugfs dump), with its lock */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
41
/*
 * sync_timeline_create() - creates a sync object
 * @ops: specialization ops for this timeline
 * @size: size to allocate; must be at least sizeof(struct sync_timeline)
 *        (drivers pass a larger size to append their own state)
 * @name: debug name, copied (and possibly truncated) into obj->name
 *
 * Returns the new timeline with one kref held, linked on the global
 * timeline list used by debugfs, or NULL on bad @size or allocation
 * failure.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
72
/* kref release: tear down a timeline once the last reference is gone */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	/* let the driver release its per-timeline state first */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
88
89 void sync_timeline_destroy(struct sync_timeline *obj)
90 {
91         obj->destroyed = true;
92
93         /*
94          * If this is not the last reference, signal any children
95          * that their parent is going away.
96          */
97
98         if (!kref_put(&obj->kref, sync_timeline_free))
99                 sync_timeline_signal(obj);
100 }
101 EXPORT_SYMBOL(sync_timeline_destroy);
102
103 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
104 {
105         unsigned long flags;
106
107         pt->parent = obj;
108
109         spin_lock_irqsave(&obj->child_list_lock, flags);
110         list_add_tail(&pt->child_list, &obj->child_list_head);
111         spin_unlock_irqrestore(&obj->child_list_lock, flags);
112 }
113
114 static void sync_timeline_remove_pt(struct sync_pt *pt)
115 {
116         struct sync_timeline *obj = pt->parent;
117         unsigned long flags;
118
119         spin_lock_irqsave(&obj->active_list_lock, flags);
120         if (!list_empty(&pt->active_list))
121                 list_del_init(&pt->active_list);
122         spin_unlock_irqrestore(&obj->active_list_lock, flags);
123
124         spin_lock_irqsave(&obj->child_list_lock, flags);
125         if (!list_empty(&pt->child_list)) {
126                 list_del_init(&pt->child_list);
127         }
128         spin_unlock_irqrestore(&obj->child_list_lock, flags);
129 }
130
/*
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj: timeline that may have advanced
 *
 * Phase 1 (under active_list_lock): move every pt that now reports
 * signaled onto a private list, taking a kref on each pt's fence so the
 * fence cannot be freed between the phases.  Phase 2 (outside the
 * lock): run fence-side signaling — waiter callbacks may sleep or take
 * other locks, so it must not happen under active_list_lock.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
162
/*
 * sync_pt_create() - creates a sync pt
 * @parent: timeline the pt belongs to
 * @size: size to allocate; must be at least sizeof(struct sync_pt)
 *        (drivers pass a larger size to append their own state)
 *
 * Takes a reference on @parent for the lifetime of the pt and links the
 * pt onto the timeline's child list.  Returns NULL on bad @size or
 * allocation failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
181
/*
 * sync_pt_free() - frees a sync pt
 * @pt: pt to free
 *
 * Gives the driver a chance to release per-pt state, unhooks the pt
 * from its timeline and drops the timeline reference taken in
 * sync_pt_create().  Note: does NOT unlink pt->pt_list from a fence;
 * fence-side callers iterate with the _safe list variants.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
194
/* call with pt->parent->active_list_lock held */
/*
 * Refresh and return @pt's cached status: >0 signaled, 0 pending,
 * <0 error.  The driver is queried only while the pt is still pending;
 * once nonzero the value is sticky.  A pending pt on a destroyed
 * timeline is forced to -ENOENT, and the timestamp is recorded on any
 * transition.
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
211
212 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
213 {
214         return pt->parent->ops->dup(pt);
215 }
216
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	/* a pt that already signaled (or errored) never needs to queue */
	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
235
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


/* file_operations backing the anon-inode fd handed to userspace */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
247
248 static struct sync_fence *sync_fence_alloc(const char *name)
249 {
250         struct sync_fence *fence;
251         unsigned long flags;
252
253         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
254         if (fence == NULL)
255                 return NULL;
256
257         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
258                                          fence, 0);
259         if (fence->file == NULL)
260                 goto err;
261
262         kref_init(&fence->kref);
263         strlcpy(fence->name, name, sizeof(fence->name));
264
265         INIT_LIST_HEAD(&fence->pt_list_head);
266         INIT_LIST_HEAD(&fence->waiter_list_head);
267         spin_lock_init(&fence->waiter_list_lock);
268
269         init_waitqueue_head(&fence->wq);
270
271         spin_lock_irqsave(&sync_fence_list_lock, flags);
272         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
273         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
274
275         return fence;
276
277 err:
278         kfree(fence);
279         return NULL;
280 }
281
/* TODO: implement a create which takes more that one sync_pt */
/*
 * sync_fence_create() - creates a fence containing a single sync_pt
 * @name: debug name for the fence
 * @pt: pt to wrap; must not already belong to a fence
 *
 * Ownership of @pt passes to the fence.  Returns NULL if @pt is
 * already owned or allocation fails.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
301
/*
 * sync_fence_copy_pts() - duplicate all of @src's pts onto @dst
 *
 * Returns 0 or -ENOMEM.  On failure, pts copied so far remain on @dst;
 * the caller is expected to dispose of @dst (see sync_fence_merge()).
 */
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}
321
/*
 * sync_fence_merge_pts() - merge @src's pts into @dst
 *
 * Pts on a timeline @dst does not yet reference are duplicated onto
 * @dst; where both fences hold a pt on the same timeline, @dst keeps
 * (or is upgraded to) whichever signals later.  Returns 0 or -ENOMEM;
 * on failure, pts merged so far remain on @dst for the caller to free.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				/* compare() == -1 means dst signals first */
				if (dst_pt->parent->ops->compare(dst_pt, src_pt) == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			/* first pt on this timeline: plain duplicate */
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}
370
371 static void sync_fence_detach_pts(struct sync_fence *fence)
372 {
373         struct list_head *pos, *n;
374
375         list_for_each_safe(pos, n, &fence->pt_list_head) {
376                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377                 sync_timeline_remove_pt(pt);
378         }
379 }
380
381 static void sync_fence_free_pts(struct sync_fence *fence)
382 {
383         struct list_head *pos, *n;
384
385         list_for_each_safe(pos, n, &fence->pt_list_head) {
386                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387                 sync_pt_free(pt);
388         }
389 }
390
391 struct sync_fence *sync_fence_fdget(int fd)
392 {
393         struct file *file = fget(fd);
394
395         if (file == NULL)
396                 return NULL;
397
398         if (file->f_op != &sync_fence_fops)
399                 goto err;
400
401         return file->private_data;
402
403 err:
404         fput(file);
405         return NULL;
406 }
407 EXPORT_SYMBOL(sync_fence_fdget);
408
/* drop the file reference taken by sync_fence_fdget()/sync_fence_alloc() */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
414
/* publish @fence's file on @fd (an fd previously reserved by the caller) */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
420
421 static int sync_fence_get_status(struct sync_fence *fence)
422 {
423         struct list_head *pos;
424         int status = 1;
425
426         list_for_each(pos, &fence->pt_list_head) {
427                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428                 int pt_status = pt->status;
429
430                 if (pt_status < 0) {
431                         status = pt_status;
432                         break;
433                 } else if (status == 1) {
434                         status = pt_status;
435                 }
436         }
437
438         return status;
439 }
440
441 struct sync_fence *sync_fence_merge(const char *name,
442                                     struct sync_fence *a, struct sync_fence *b)
443 {
444         struct sync_fence *fence;
445         int err;
446
447         fence = sync_fence_alloc(name);
448         if (fence == NULL)
449                 return NULL;
450
451         err = sync_fence_copy_pts(fence, a);
452         if (err < 0)
453                 goto err;
454
455         err = sync_fence_merge_pts(fence, b);
456         if (err < 0)
457                 goto err;
458
459         fence->status = sync_fence_get_status(fence);
460
461         return fence;
462 err:
463         sync_fence_free_pts(fence);
464         kfree(fence);
465         return NULL;
466 }
467 EXPORT_SYMBOL(sync_fence_merge);
468
/*
 * sync_fence_signal_pt() - propagate a pt's signal to its fence
 * @pt: pt that (possibly) completed
 *
 * Recomputes the fence's aggregate status and, on the single
 * 0 -> nonzero transition, detaches all async waiters under the lock,
 * then fires their callbacks and wakes synchronous waiters outside it.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* run the callbacks without holding waiter_list_lock */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
507
508 int sync_fence_wait_async(struct sync_fence *fence,
509                           struct sync_fence_waiter *waiter)
510 {
511         unsigned long flags;
512         int err = 0;
513
514         spin_lock_irqsave(&fence->waiter_list_lock, flags);
515
516         if (fence->status) {
517                 err = fence->status;
518                 goto out;
519         }
520
521         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
522 out:
523         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
524
525         return err;
526 }
527 EXPORT_SYMBOL(sync_fence_wait_async);
528
/*
 * sync_fence_cancel_async() - deregister an async waiter
 *
 * Returns 0 if @waiter was removed before its callback ran, -ENOENT if
 * it was no longer queued (the callback already ran or is running).
 */
int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
557
558 int sync_fence_wait(struct sync_fence *fence, long timeout)
559 {
560         int err = 0;
561
562         if (timeout > 0) {
563                 timeout = msecs_to_jiffies(timeout);
564                 err = wait_event_interruptible_timeout(fence->wq,
565                                                        fence->status != 0,
566                                                        timeout);
567         } else if (timeout < 0) {
568                 err = wait_event_interruptible(fence->wq, fence->status != 0);
569         }
570
571         if (err < 0)
572                 return err;
573
574         if (fence->status < 0)
575                 return fence->status;
576
577         if (fence->status == 0) {
578                 pr_info("fence timeout on [%p] after %dms\n", fence,
579                         jiffies_to_msecs(timeout));
580                 sync_dump();
581                 return -ETIME;
582         }
583
584         return 0;
585 }
586 EXPORT_SYMBOL(sync_fence_wait);
587
/* kref release: free a fence whose pts were already detached from timelines */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
596
/* ->release for the fence fd: runs when the last file reference drops */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
622
623 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
624 {
625         struct sync_fence *fence = file->private_data;
626
627         poll_wait(file, &fence->wq, wait);
628
629         if (fence->status == 1)
630                 return POLLIN;
631         else if (fence->status < 0)
632                 return POLLERR;
633         else
634                 return 0;
635 }
636
637 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
638 {
639         __s32 value;
640
641         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
642                 return -EFAULT;
643
644         return sync_fence_wait(fence, value);
645 }
646
/*
 * SYNC_IOC_MERGE handler: merge this fence with data.fd2 into a new
 * fence, install it on a fresh fd and report that fd in data.fence.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* ensure the user-supplied name is NUL-terminated */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	/* point of no return: fd_install() publishes the fd to userspace */
	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
695
/*
 * sync_fill_pt_info() - serialize one pt into a userspace info record
 * @pt: pt to describe
 * @data: destination (a struct sync_pt_info slot inside the info blob)
 * @size: bytes available at @data
 *
 * Appends driver-specific payload after the fixed header when the
 * driver implements fill_driver_data.  Returns the record length in
 * bytes, -ENOMEM if @size is too small, or the driver's error code.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
723
/*
 * SYNC_IOC_FENCE_INFO handler: fill the userspace buffer at @arg with
 * a sync_fence_info_data header followed by one sync_pt_info per pt.
 *
 * Userspace passes its buffer size in the leading __u32; it is clamped
 * to 4096.  On success the total bytes written are reported back in
 * data->len.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the kernel allocation (and therefore the copy-out) */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
774
/* ioctl dispatcher for the sync fence fd */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
793
794 #ifdef CONFIG_DEBUG_FS
/* human-readable form of a pt/fence status: >0 / 0 / <0 */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";

	return status == 0 ? "active" : "error";
}
804
/* debugfs: print one line for @pt; @fence selects fence- vs timeline-view */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* only signaled/errored pts carry a meaningful timestamp */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
824
/* debugfs: print a timeline and all of its child pts */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
847
/* debugfs: print a fence, its pts and any pending async waiters */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
872
/* debugfs "sync" file: dump every timeline, then every fence */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
904
/* open handler: bind the seq_file to sync_debugfs_show() */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
909
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

/* create /sys/kernel/debug/sync; failure is ignored (debug-only aid) */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
923
#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
/*
 * sync_dump() - print the debugfs "sync" state to the kernel log
 *
 * Renders sync_debugfs_show() into a static buffer and emits it in
 * DUMP_CHUNK-sized pr_cont() pieces (printk limits line length),
 * temporarily NUL-terminating each chunk in place.
 */
void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
/* no debugfs: nothing useful to dump */
static void sync_dump(void)
{
}
#endif