61c27bdc5d0bafe19f2bd47592fe3cc78c8c12c7
[cascardo/linux.git] / drivers / staging / android / sync.c
1 /*
2  * drivers/base/sync.c
3  *
4  * Copyright (C) 2012 Google, Inc.
5  *
6  * This software is licensed under the terms of the GNU General Public
7  * License version 2, as published by the Free Software Foundation, and
8  * may be copied, distributed, and modified under those terms.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
28
29 #include "sync.h"
30
31 static void sync_fence_signal_pt(struct sync_pt *pt);
32 static int _sync_pt_has_signaled(struct sync_pt *pt);
33 static void sync_fence_free(struct kref *kref);
34
35 static LIST_HEAD(sync_timeline_list_head);
36 static DEFINE_SPINLOCK(sync_timeline_list_lock);
37
38 static LIST_HEAD(sync_fence_list_head);
39 static DEFINE_SPINLOCK(sync_fence_list_lock);
40
/*
 * sync_timeline_create() - creates a sync object
 * @ops:  implementation ops for the timeline
 * @size: bytes to allocate; must be >= sizeof(struct sync_timeline) so
 *        drivers can embed the base struct at the head of a larger one
 * @name: timeline name, copied (and truncated if needed) into obj->name
 *
 * Returns the new timeline with one reference held (dropped by
 * sync_timeline_destroy()), or NULL on bad size / allocation failure.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	/* make the new timeline visible to the debugfs dump */
	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
71
/*
 * Final kref release for a timeline: let the driver clean up its private
 * state (release_obj), unlink from the global debugfs list, then free.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
87
/*
 * sync_timeline_destroy() - destroys a sync object
 * @obj: timeline to destroy
 *
 * Marks the timeline destroyed and drops the creator's reference.
 * Outstanding sync_pts hold their own refs, so the object may outlive
 * this call; _sync_pt_has_signaled() converts the destroyed flag into
 * -ENOENT status for any pts that are still active.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
101
102 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
103 {
104         unsigned long flags;
105
106         pt->parent = obj;
107
108         spin_lock_irqsave(&obj->child_list_lock, flags);
109         list_add_tail(&pt->child_list, &obj->child_list_head);
110         spin_unlock_irqrestore(&obj->child_list_lock, flags);
111 }
112
113 static void sync_timeline_remove_pt(struct sync_pt *pt)
114 {
115         struct sync_timeline *obj = pt->parent;
116         unsigned long flags;
117
118         spin_lock_irqsave(&obj->active_list_lock, flags);
119         if (!list_empty(&pt->active_list))
120                 list_del_init(&pt->active_list);
121         spin_unlock_irqrestore(&obj->active_list_lock, flags);
122
123         spin_lock_irqsave(&obj->child_list_lock, flags);
124         if (!list_empty(&pt->child_list)) {
125                 list_del_init(&pt->child_list);
126         }
127         spin_unlock_irqrestore(&obj->child_list_lock, flags);
128 }
129
/*
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj: timeline whose pts may have changed state
 *
 * Two-phase design: under active_list_lock, move every pt that now
 * reports signaled onto a private list, taking a ref on each pt's fence
 * so it cannot be freed in between.  Then, with the spinlock dropped,
 * run the fence signal path (which invokes waiter callbacks and may
 * sleep indirectly) and release the fence refs.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* keep the fence alive until we can signal it below */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/* lock dropped: safe to run waiter callbacks now */
	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
161
/*
 * sync_pt_create() - creates a sync pt
 * @parent: timeline the pt belongs to
 * @size:   bytes to allocate; must be >= sizeof(struct sync_pt) so the
 *          driver can embed the base struct in a larger private one
 *
 * Takes a reference on @parent (dropped in sync_pt_free()) and links
 * the new pt onto the parent's child list.  Returns NULL on bad size or
 * allocation failure.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
180
/*
 * sync_pt_free() - frees a sync pt
 * @pt: pt to free
 *
 * Gives the driver a chance to release its state first (free_pt), then
 * unlinks the pt from its timeline, drops the parent ref taken in
 * sync_pt_create() (possibly freeing the timeline), and frees the pt.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
193
/*
 * Re-evaluate and return pt->status.
 * Call with pt->parent->active_list_lock held.
 *
 * Status convention: 0 = active, 1 = signaled, <0 = error.  An active
 * pt whose timeline has been destroyed is forced to -ENOENT.  The
 * timestamp records the moment of the first status transition.
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
210
211 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
212 {
213         return pt->parent->ops->dup(pt);
214 }
215
/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	/* a pt that is already signaled (or errored) never goes active */
	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}
234
/* fences are handed to userspace as anonymous fds backed by these ops */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
246
247 static struct sync_fence *sync_fence_alloc(const char *name)
248 {
249         struct sync_fence *fence;
250         unsigned long flags;
251
252         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
253         if (fence == NULL)
254                 return NULL;
255
256         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
257                                          fence, 0);
258         if (fence->file == NULL)
259                 goto err;
260
261         kref_init(&fence->kref);
262         strlcpy(fence->name, name, sizeof(fence->name));
263
264         INIT_LIST_HEAD(&fence->pt_list_head);
265         INIT_LIST_HEAD(&fence->waiter_list_head);
266         spin_lock_init(&fence->waiter_list_lock);
267
268         init_waitqueue_head(&fence->wq);
269
270         spin_lock_irqsave(&sync_fence_list_lock, flags);
271         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
272         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
273
274         return fence;
275
276 err:
277         kfree(fence);
278         return NULL;
279 }
280
/* TODO: implement a create which takes more that one sync_pt */
/*
 * sync_fence_create() - creates a fence containing a single sync_pt
 * @name: fence name
 * @pt:   pt to wrap; must not already belong to a fence
 *
 * On success the fence owns @pt (freed with the fence).  Returns NULL
 * if @pt is already owned or allocation fails.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
300
/*
 * Duplicate every pt in @src into @dst and activate the copies.
 * Returns 0 or -ENOMEM if a dup fails; on failure the caller must free
 * the partially-built @dst (see sync_fence_merge()).
 */
static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}
320
/*
 * Merge @src's pts into @dst, which already holds pts (a copy of fence
 * "a" in sync_fence_merge()).  Pts on distinct timelines are simply
 * duplicated in; when both fences have a pt on the same timeline the
 * two collapse to whichever signals later, per ops->compare.
 * Returns 0 or -ENOMEM (caller frees the partially-built @dst).
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				/* compare() == -1 means dst_pt is earlier */
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			/* no pt on this timeline in dst yet: just copy it */
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}
370
371 static void sync_fence_detach_pts(struct sync_fence *fence)
372 {
373         struct list_head *pos, *n;
374
375         list_for_each_safe(pos, n, &fence->pt_list_head) {
376                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377                 sync_timeline_remove_pt(pt);
378         }
379 }
380
381 static void sync_fence_free_pts(struct sync_fence *fence)
382 {
383         struct list_head *pos, *n;
384
385         list_for_each_safe(pos, n, &fence->pt_list_head) {
386                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387                 sync_pt_free(pt);
388         }
389 }
390
391 struct sync_fence *sync_fence_fdget(int fd)
392 {
393         struct file *file = fget(fd);
394
395         if (file == NULL)
396                 return NULL;
397
398         if (file->f_op != &sync_fence_fops)
399                 goto err;
400
401         return file->private_data;
402
403 err:
404         fput(file);
405         return NULL;
406 }
407 EXPORT_SYMBOL(sync_fence_fdget);
408
/* Drop the file reference taken by sync_fence_fdget()/sync_fence_alloc(). */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
414
/* Publish the fence's file in @fd; ownership of the file ref moves to the fd table. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
420
421 static int sync_fence_get_status(struct sync_fence *fence)
422 {
423         struct list_head *pos;
424         int status = 1;
425
426         list_for_each(pos, &fence->pt_list_head) {
427                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428                 int pt_status = pt->status;
429
430                 if (pt_status < 0) {
431                         status = pt_status;
432                         break;
433                 } else if (status == 1) {
434                         status = pt_status;
435                 }
436         }
437
438         return status;
439 }
440
/*
 * sync_fence_merge() - merge two fences
 * @name: name of the new fence
 * @a, @b: fences to merge
 *
 * Creates a new fence that signals when both @a and @b have signaled:
 * @a's pts are copied in, then @b's are merged (collapsing pts that
 * share a timeline).  @a and @b are not consumed.  Returns NULL on
 * allocation failure.
 */
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	/* all source pts may already be signaled; compute initial status */
	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
468
/*
 * Called (no locks held) after @pt changed state.  Recomputes the
 * fence's aggregate status; on the single 0 -> nonzero transition
 * (guarded by waiter_list_lock) all async waiters are moved to a
 * private list, then their callbacks run and sleepers are woken with
 * the lock dropped.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* another thread won the transition; nothing to do */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* lock dropped: callbacks may take their own locks */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
507
508 int sync_fence_wait_async(struct sync_fence *fence,
509                           struct sync_fence_waiter *waiter)
510 {
511         unsigned long flags;
512         int err = 0;
513
514         spin_lock_irqsave(&fence->waiter_list_lock, flags);
515
516         if (fence->status) {
517                 err = fence->status;
518                 goto out;
519         }
520
521         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
522 out:
523         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
524
525         return err;
526 }
527 EXPORT_SYMBOL(sync_fence_wait_async);
528
/*
 * sync_fence_cancel_async() - remove a previously-registered waiter
 * @fence:  fence the waiter was registered on
 * @waiter: waiter to remove
 *
 * Returns 0 if the waiter was found and removed before its callback
 * fired, -ENOENT otherwise.
 */
int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
557
/*
 * sync_fence_wait() - wait (interruptibly) on a fence
 * @fence:   fence to wait on
 * @timeout: timeout in milliseconds, or 0 to wait indefinitely
 *
 * Returns 0 once the fence signals, -ETIME if the timeout expires
 * first, -ERESTARTSYS if interrupted by a signal, or the fence's own
 * negative error status.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	/* timed out: status never left 0 */
	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
583
/* Final kref release for a fence: free its pts, then the fence itself. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
592
/* file_operations .release: last userspace handle to the fence closed */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
618
619 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
620 {
621         struct sync_fence *fence = file->private_data;
622
623         poll_wait(file, &fence->wq, wait);
624
625         if (fence->status == 1)
626                 return POLLIN;
627         else if (fence->status < 0)
628                 return POLLERR;
629         else
630                 return 0;
631 }
632
633 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
634 {
635         __s32 value;
636
637         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
638                 return -EFAULT;
639
640         return sync_fence_wait(fence, value);
641 }
642
643 static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
644 {
645         int fd = get_unused_fd();
646         int err;
647         struct sync_fence *fence2, *fence3;
648         struct sync_merge_data data;
649
650         if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
651                 return -EFAULT;
652
653         fence2 = sync_fence_fdget(data.fd2);
654         if (fence2 == NULL) {
655                 err = -ENOENT;
656                 goto err_put_fd;
657         }
658
659         data.name[sizeof(data.name) - 1] = '\0';
660         fence3 = sync_fence_merge(data.name, fence, fence2);
661         if (fence3 == NULL) {
662                 err = -ENOMEM;
663                 goto err_put_fence2;
664         }
665
666         data.fence = fd;
667         if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
668                 err = -EFAULT;
669                 goto err_put_fence3;
670         }
671
672         sync_fence_install(fence3, fd);
673         sync_fence_put(fence2);
674         return 0;
675
676 err_put_fence3:
677         sync_fence_put(fence3);
678
679 err_put_fence2:
680         sync_fence_put(fence2);
681
682 err_put_fd:
683         put_unused_fd(fd);
684         return err;
685 }
686
/*
 * Serialize one pt into the userspace sync_pt_info record at @data.
 * Returns the record length written (base struct plus any driver
 * payload), -ENOMEM if @size is too small for the base struct, or a
 * negative error from the driver's fill_driver_data().
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		/* driver payload extends the variable-length record */
		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
714
/*
 * SYNC_IOC_FENCE_INFO: copy the fence's name/status followed by one
 * sync_pt_info record per pt back to userspace.  The user-supplied
 * buffer size is clamped to 4096 bytes; fails with -ENOMEM (via
 * sync_fill_pt_info) if the records don't fit.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* bound the kernel allocation regardless of what userspace asks */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
765
/* Dispatch the sync fence ioctls (wait / merge / fence-info). */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;
	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
784
785 #ifdef CONFIG_DEBUG_FS
/* Human-readable form of a pt/fence status (>0 / 0 / <0). */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
795
796 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
797 {
798         int status = pt->status;
799         seq_printf(s, "  %s%spt %s",
800                    fence ? pt->parent->name : "",
801                    fence ? "_" : "",
802                    sync_status_str(status));
803         if (pt->status) {
804                 struct timeval tv = ktime_to_timeval(pt->timestamp);
805                 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
806         }
807
808         if (pt->parent->ops->print_pt) {
809                 seq_printf(s, ": ");
810                 pt->parent->ops->print_pt(s, pt);
811         }
812
813         seq_printf(s, "\n");
814 }
815
816 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
817 {
818         struct list_head *pos;
819         unsigned long flags;
820
821         seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
822
823         if (obj->ops->print_obj) {
824                 seq_printf(s, ": ");
825                 obj->ops->print_obj(s, obj);
826         }
827
828         seq_printf(s, "\n");
829
830         spin_lock_irqsave(&obj->child_list_lock, flags);
831         list_for_each(pos, &obj->child_list_head) {
832                 struct sync_pt *pt =
833                         container_of(pos, struct sync_pt, child_list);
834                 sync_print_pt(s, pt, false);
835         }
836         spin_unlock_irqrestore(&obj->child_list_lock, flags);
837 }
838
/*
 * Dump one fence, its pts, and any pending async waiters for debugfs.
 * NOTE(review): pt_list_head is walked here without a lock — appears to
 * rely on the fence staying alive via the global list lock held by the
 * caller; confirm against sync_debugfs_show().
 */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
862
/* seq_file show: dump every timeline, then every fence, to debugfs. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
894
/* debugfs open hook: bind the seq_file to sync_debugfs_show(). */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
899
/* file ops for the read-only debugfs "sync" file (single-shot seq file) */
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
906
/* Create the debugfs "sync" dump file; late_initcall so debugfs is up. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);
914
915 #endif