4 * Copyright (C) 2012 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/debugfs.h>
18 #include <linux/export.h>
19 #include <linux/file.h>
21 #include <linux/kernel.h>
22 #include <linux/poll.h>
23 #include <linux/sched.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/uaccess.h>
27 #include <linux/anon_inodes.h>
/* Forward declarations for helpers defined later in this file. */
31 static void sync_fence_signal_pt(struct sync_pt *pt);
32 static int _sync_pt_has_signaled(struct sync_pt *pt);
33 static void sync_fence_free(struct kref *kref);
/*
 * Global registries of every live timeline and fence; they exist solely so
 * the debugfs dump (sync_debugfs_show, below) can walk all objects.  Each
 * list is guarded by its paired spinlock, taken with irqsave.
 */
35 static LIST_HEAD(sync_timeline_list_head);
36 static DEFINE_SPINLOCK(sync_timeline_list_lock);
38 static LIST_HEAD(sync_fence_list_head);
39 static DEFINE_SPINLOCK(sync_fence_list_lock);
/*
 * sync_timeline_create() - allocate and register a new sync_timeline.
 * @ops:  driver callback table for this timeline
 * @size: total allocation size; must be >= sizeof(struct sync_timeline) so
 *        a driver can embed the timeline at the head of a larger private
 *        structure
 * @name: human-readable name, copied (with truncation) into obj->name
 *
 * Zero-allocates the object, initializes its refcount, child/active lists
 * and their locks, and links it onto the global timeline list for debugfs.
 * NOTE(review): this listing elides interior lines -- the error returns for
 * the size check and a failed kzalloc(), plus the ops/name assignments and
 * final return, are not visible here.
 */
41 struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
42                                            int size, const char *name)
44         struct sync_timeline *obj;
47         if (size < sizeof(struct sync_timeline))
50         obj = kzalloc(size, GFP_KERNEL);
54         kref_init(&obj->kref);
56         strlcpy(obj->name, name, sizeof(obj->name));
58         INIT_LIST_HEAD(&obj->child_list_head);
59         spin_lock_init(&obj->child_list_lock);
61         INIT_LIST_HEAD(&obj->active_list_head);
62         spin_lock_init(&obj->active_list_lock);
/* Make the new timeline visible to the debugfs dump. */
64         spin_lock_irqsave(&sync_timeline_list_lock, flags);
65         list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
66         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
70 EXPORT_SYMBOL(sync_timeline_create);
/*
 * sync_timeline_free() - kref release callback for a sync_timeline.
 * Invoked when the last reference is dropped (see the kref_put() calls in
 * sync_timeline_destroy() and sync_pt_free()).  Gives the driver a chance
 * to release its private state via ops->release_obj, then unlinks the
 * object from the global debugfs list.
 * NOTE(review): the kfree() of obj itself is presumably on an elided line.
 */
72 static void sync_timeline_free(struct kref *kref)
74         struct sync_timeline *obj =
75                 container_of(kref, struct sync_timeline, kref);
78         if (obj->ops->release_obj)
79                 obj->ops->release_obj(obj);
81         spin_lock_irqsave(&sync_timeline_list_lock, flags);
82         list_del(&obj->sync_timeline_list);
83         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
/*
 * sync_timeline_destroy() - mark a timeline dead and drop the caller's ref.
 * Sets obj->destroyed before dropping the reference so that
 * _sync_pt_has_signaled() can report -ENOENT for points on a dying
 * timeline.  If other references remain (child sync_pts each hold one, see
 * sync_pt_create()), a final signal pass flushes any now-errored points to
 * their fences.
 */
88 void sync_timeline_destroy(struct sync_timeline *obj)
90         obj->destroyed = true;
93          * If this is not the last reference, signal any children
94          * that their parent is going away.
97         if (!kref_put(&obj->kref, sync_timeline_free))
98                 sync_timeline_signal(obj);
100 EXPORT_SYMBOL(sync_timeline_destroy);
/*
 * sync_timeline_add_pt() - attach a new sync_pt to its parent timeline's
 * child list.  Called from sync_pt_create(); the pt->parent assignment is
 * presumably on an elided line -- TODO confirm against the full source.
 */
102 static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
108         spin_lock_irqsave(&obj->child_list_lock, flags);
109         list_add_tail(&pt->child_list, &obj->child_list_head);
110         spin_unlock_irqrestore(&obj->child_list_lock, flags);
/*
 * sync_timeline_remove_pt() - detach a sync_pt from its parent timeline.
 * Removes the pt from both the active list (waiting-to-signal queue) and
 * the child list.  list_del_init() is used so the emptiness checks stay
 * valid if this runs concurrently with, or again after, another removal.
 * Note the two lists are protected by different locks, taken separately.
 */
113 static void sync_timeline_remove_pt(struct sync_pt *pt)
115         struct sync_timeline *obj = pt->parent;
118         spin_lock_irqsave(&obj->active_list_lock, flags);
119         if (!list_empty(&pt->active_list))
120                 list_del_init(&pt->active_list);
121         spin_unlock_irqrestore(&obj->active_list_lock, flags);
123         spin_lock_irqsave(&obj->child_list_lock, flags);
124         if (!list_empty(&pt->child_list)) {
125                 list_del_init(&pt->child_list);
127         spin_unlock_irqrestore(&obj->child_list_lock, flags);
/*
 * sync_timeline_signal() - propagate a timeline advance to waiting fences.
 * Two-phase design so fence callbacks never run under active_list_lock:
 * phase 1 (under the lock) collects every active pt that now reports
 * signaled onto a local list, taking a kref on each pt's fence so the
 * fence cannot be freed between phases; phase 2 (lock dropped) notifies
 * each fence via sync_fence_signal_pt() and drops the temporary ref.
 */
130 void sync_timeline_signal(struct sync_timeline *obj)
133         LIST_HEAD(signaled_pts);
134         struct list_head *pos, *n;
136         spin_lock_irqsave(&obj->active_list_lock, flags);
138         list_for_each_safe(pos, n, &obj->active_list_head) {
140                         container_of(pos, struct sync_pt, active_list);
142                 if (_sync_pt_has_signaled(pt)) {
/* Hold the fence across the unlocked callback phase below. */
144                         list_add(&pt->signaled_list, &signaled_pts);
145                         kref_get(&pt->fence->kref);
149         spin_unlock_irqrestore(&obj->active_list_lock, flags);
151         list_for_each_safe(pos, n, &signaled_pts) {
153                         container_of(pos, struct sync_pt, signaled_list);
156                 sync_fence_signal_pt(pt);
157                 kref_put(&pt->fence->kref, sync_fence_free);
160 EXPORT_SYMBOL(sync_timeline_signal);
/*
 * sync_pt_create() - allocate a sync_pt attached to @parent.
 * @size must be >= sizeof(struct sync_pt) so drivers can embed the pt in a
 * larger private struct.  Takes a reference on the parent timeline (paired
 * with the kref_put() in sync_pt_free()) and adds the pt to the parent's
 * child list.  NOTE(review): the error returns for the size check and
 * failed kzalloc(), and the final return, are on elided lines.
 */
162 struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
166         if (size < sizeof(struct sync_pt))
169         pt = kzalloc(size, GFP_KERNEL);
173         INIT_LIST_HEAD(&pt->active_list);
174         kref_get(&parent->kref);
175         sync_timeline_add_pt(parent, pt);
179 EXPORT_SYMBOL(sync_pt_create);
/*
 * sync_pt_free() - release a sync_pt.
 * Lets the driver free its per-pt state (ops->free_pt), detaches the pt
 * from its parent's lists, and drops the parent-timeline reference taken
 * in sync_pt_create() -- which may free the timeline itself.
 * NOTE(review): the kfree() of pt is presumably on an elided line.
 */
181 void sync_pt_free(struct sync_pt *pt)
183         if (pt->parent->ops->free_pt)
184                 pt->parent->ops->free_pt(pt);
186         sync_timeline_remove_pt(pt);
188         kref_put(&pt->parent->kref, sync_timeline_free);
192 EXPORT_SYMBOL(sync_pt_free);
194 /* call with pt->parent->active_list_lock held */
/*
 * Re-query the driver for this pt's status and cache it in pt->status.
 * A pt on a destroyed timeline that has not signaled is forced to -ENOENT
 * so waiters see an error rather than hanging forever.  The timestamp is
 * refreshed on any status transition (used by the debugfs printout).
 * NOTE(review): the return statement is on an elided line -- presumably it
 * returns pt->status.
 */
195 static int _sync_pt_has_signaled(struct sync_pt *pt)
197         int old_status = pt->status;
200         pt->status = pt->parent->ops->has_signaled(pt);
202         if (!pt->status && pt->parent->destroyed)
203                 pt->status = -ENOENT;
205         if (pt->status != old_status)
206                 pt->timestamp = ktime_get();
/* Thin wrapper: clone a sync_pt via its timeline's dup callback. */
211 static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
213         return pt->parent->ops->dup(pt);
216 /* Adds a sync pt to the active queue. Called when added to a fence */
/*
 * Under active_list_lock: check whether the pt has already signaled; only
 * a still-pending pt is queued on the timeline's active list, where
 * sync_timeline_signal() will find it later.  NOTE(review): the branch
 * using err that guards the list_add_tail() is partly elided here.
 */
217 static void sync_pt_activate(struct sync_pt *pt)
219         struct sync_timeline *obj = pt->parent;
223         spin_lock_irqsave(&obj->active_list_lock, flags);
225         err = _sync_pt_has_signaled(pt);
229         list_add_tail(&pt->active_list, &obj->active_list_head);
232         spin_unlock_irqrestore(&obj->active_list_lock, flags);
/*
 * File operations backing the anonymous inode that represents a fence in
 * userspace (created in sync_fence_alloc()).  The handlers are defined
 * later in this file, hence the forward declarations.
 */
235 static int sync_fence_release(struct inode *inode, struct file *file);
236 static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
237 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
241 static const struct file_operations sync_fence_fops = {
242         .release = sync_fence_release,
243         .poll = sync_fence_poll,
244         .unlocked_ioctl = sync_fence_ioctl,
/*
 * sync_fence_alloc() - allocate a fence and its backing anonymous file.
 * The fence is exposed to userspace through an anon inode wired to
 * sync_fence_fops; its waiter/pt lists and wait queue are initialized, and
 * it is linked onto the global fence list for the debugfs dump.
 * NOTE(review): the error-path cleanup (freeing fence when
 * anon_inode_getfile() fails) and the final return are on elided lines.
 */
247 static struct sync_fence *sync_fence_alloc(const char *name)
249         struct sync_fence *fence;
252         fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
256         fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
258         if (fence->file == NULL)
261         kref_init(&fence->kref);
262         strlcpy(fence->name, name, sizeof(fence->name));
264         INIT_LIST_HEAD(&fence->pt_list_head);
265         INIT_LIST_HEAD(&fence->waiter_list_head);
266         spin_lock_init(&fence->waiter_list_lock);
268         init_waitqueue_head(&fence->wq);
270         spin_lock_irqsave(&sync_fence_list_lock, flags);
271         list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
272         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
281 /* TODO: implement a create which takes more that one sync_pt */
/*
 * sync_fence_create() - build a fence around a single sync_pt.
 * Allocates the fence, adds @pt to its pt list, and activates the pt so it
 * joins its timeline's active queue (or signals immediately).
 * NOTE(review): pt->fence assignment, the NULL check on the alloc, and the
 * return are on elided lines.
 */
282 struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
284         struct sync_fence *fence;
289         fence = sync_fence_alloc(name);
294         list_add(&pt->pt_list, &fence->pt_list_head);
295         sync_pt_activate(pt);
299 EXPORT_SYMBOL(sync_fence_create);
/*
 * sync_fence_copy_pts() - duplicate every sync_pt of @src into @dst.
 * Each copy is made via the driver's dup callback and then activated on
 * its timeline.  Used as the first half of sync_fence_merge().
 * NOTE(review): the dup-failure error return, the new_pt->fence
 * assignment, and the success return are on elided lines.
 */
301 static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
303         struct list_head *pos;
305         list_for_each(pos, &src->pt_list_head) {
306                 struct sync_pt *orig_pt =
307                         container_of(pos, struct sync_pt, pt_list);
308                 struct sync_pt *new_pt = sync_pt_dup(orig_pt);
314                 list_add(&new_pt->pt_list, &dst->pt_list_head);
315                 sync_pt_activate(new_pt);
/*
 * sync_fence_merge_pts() - fold @src's sync_pts into @dst.
 * For each src pt: if @dst already holds a pt on the same timeline, the
 * two are collapsed into a single pt that signals at the later of the two
 * (driver ops->compare decides which; the duplicated pt replaces the old
 * dst entry in-place via list_replace()).  Otherwise the src pt is simply
 * duplicated onto @dst.  Either way the resulting pt is (re)activated.
 * NOTE(review): the dup-failure paths, the "collapsed" flag's use after
 * the inner loop, and the returns are on elided lines.
 */
321 static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
323         struct list_head *src_pos, *dst_pos, *n;
325         list_for_each(src_pos, &src->pt_list_head) {
326                 struct sync_pt *src_pt =
327                         container_of(src_pos, struct sync_pt, pt_list);
328                 bool collapsed = false;
330                 list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
331                         struct sync_pt *dst_pt =
332                                 container_of(dst_pos, struct sync_pt, pt_list);
333                         /* collapse two sync_pts on the same timeline
334                          * to a single sync_pt that will signal at
335                          * the later of the two
337                         if (dst_pt->parent == src_pt->parent) {
338                                 if (dst_pt->parent->ops->compare(dst_pt, src_pt)
340                                         struct sync_pt *new_pt =
346                                         list_replace(&dst_pt->pt_list,
348                                         sync_pt_activate(new_pt);
349                                         sync_pt_free(dst_pt);
/* No dst pt on this timeline: just duplicate the src pt onto dst. */
357                         struct sync_pt *new_pt = sync_pt_dup(src_pt);
363                         list_add(&new_pt->pt_list, &dst->pt_list_head);
364                         sync_pt_activate(new_pt);
/*
 * sync_fence_detach_pts() - unhook every pt of @fence from its timeline.
 * After this, sync_timeline_signal() can no longer reach the fence through
 * these pts (see the comment in sync_fence_release()).  The pts themselves
 * are not freed here.
 */
371 static void sync_fence_detach_pts(struct sync_fence *fence)
373         struct list_head *pos, *n;
375         list_for_each_safe(pos, n, &fence->pt_list_head) {
376                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377                 sync_timeline_remove_pt(pt);
/*
 * sync_fence_free_pts() - free every sync_pt owned by @fence.
 * NOTE(review): the per-pt sync_pt_free() call is on an elided line --
 * TODO confirm against the full source.
 */
381 static void sync_fence_free_pts(struct sync_fence *fence)
383         struct list_head *pos, *n;
385         list_for_each_safe(pos, n, &fence->pt_list_head) {
386                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
/*
 * sync_fence_fdget() - resolve a userspace fd to its sync_fence.
 * Takes a file reference via fget() and validates that the file really is
 * a sync fence by comparing f_op against sync_fence_fops; the fence was
 * stashed in file->private_data at creation.  NOTE(review): the NULL-file
 * check and the fput()-on-mismatch error path are on elided lines.
 */
391 struct sync_fence *sync_fence_fdget(int fd)
393         struct file *file = fget(fd);
398         if (file->f_op != &sync_fence_fops)
401         return file->private_data;
407 EXPORT_SYMBOL(sync_fence_fdget);
/*
 * sync_fence_put() - drop the file reference taken by sync_fence_fdget().
 * NOTE(review): body elided -- presumably fput(fence->file); confirm
 * against the full source.
 */
409 void sync_fence_put(struct sync_fence *fence)
413 EXPORT_SYMBOL(sync_fence_put);
/* Install the fence's backing file into an already-reserved fd slot. */
415 void sync_fence_install(struct sync_fence *fence, int fd)
417         fd_install(fd, fence->file);
419 EXPORT_SYMBOL(sync_fence_install);
/*
 * sync_fence_get_status() - compute a fence's aggregate status from its
 * pts.  Convention visible elsewhere in this file: 1 = signaled, 0 =
 * pending, negative = error.  NOTE(review): the accumulation logic over
 * pt_status and the return are mostly elided; presumably an error status
 * dominates and the fence is signaled only when every pt is.
 */
421 static int sync_fence_get_status(struct sync_fence *fence)
423         struct list_head *pos;
426         list_for_each(pos, &fence->pt_list_head) {
427                 struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428                 int pt_status = pt->status;
433                 } else if (status == 1) {
/*
 * sync_fence_merge() - create a new fence that signals when both @a and @b
 * have.  Copies a's pts into the new fence, then merges b's in (collapsing
 * same-timeline pts), and snapshots the aggregate status.  On a copy/merge
 * failure the partially-built pts are freed.  NOTE(review): the error
 * gotos, the fence-file cleanup, and the returns are on elided lines.
 */
441 struct sync_fence *sync_fence_merge(const char *name,
442                                     struct sync_fence *a, struct sync_fence *b)
444         struct sync_fence *fence;
447         fence = sync_fence_alloc(name);
451         err = sync_fence_copy_pts(fence, a);
455         err = sync_fence_merge_pts(fence, b);
459         fence->status = sync_fence_get_status(fence);
/* Error path: release any pts duplicated before the failure. */
463         sync_fence_free_pts(fence);
467 EXPORT_SYMBOL(sync_fence_merge);
/*
 * sync_fence_signal_pt() - called when one of a fence's pts signals.
 * Recomputes the aggregate status; on the one-time 0 -> nonzero transition
 * (guarded by waiter_list_lock so two racing signalers cannot both fire
 * the callbacks) it moves all registered async waiters onto a private list
 * and invokes their callbacks after dropping the lock.  NOTE(review): the
 * wake_up() of fence->wq for synchronous waiters is presumably on an
 * elided line.
 */
469 static void sync_fence_signal_pt(struct sync_pt *pt)
471         LIST_HEAD(signaled_waiters);
472         struct sync_fence *fence = pt->fence;
473         struct list_head *pos;
478         status = sync_fence_get_status(fence);
480         spin_lock_irqsave(&fence->waiter_list_lock, flags);
482          * this should protect against two threads racing on the signaled
483          * false -> true transition
485         if (status && !fence->status) {
486                 list_for_each_safe(pos, n, &fence->waiter_list_head)
487                         list_move(pos, &signaled_waiters);
489                 fence->status = status;
493         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
/* Callbacks run without the lock held. */
496         list_for_each_safe(pos, n, &signaled_waiters) {
497                 struct sync_fence_waiter *waiter =
498                         container_of(pos, struct sync_fence_waiter,
502                 waiter->callback(fence, waiter);
/*
 * sync_fence_wait_async() - register @waiter for a callback when @fence
 * signals.  Queues the waiter under waiter_list_lock; sync_fence_signal_pt()
 * will dequeue it and invoke waiter->callback.  NOTE(review): the
 * already-signaled fast path (returning the current status instead of
 * queueing) appears to be on elided lines -- confirm against full source.
 */
508 int sync_fence_wait_async(struct sync_fence *fence,
509                           struct sync_fence_waiter *waiter)
514         spin_lock_irqsave(&fence->waiter_list_lock, flags);
521         list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
523         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
527 EXPORT_SYMBOL(sync_fence_wait_async);
/*
 * sync_fence_cancel_async() - withdraw a waiter registered with
 * sync_fence_wait_async().  Scans the waiter list under the lock to verify
 * @waiter is still queued -- it may already have been moved off the list by
 * sync_fence_signal_pt() with its callback pending -- and only then removes
 * it.  NOTE(review): the list_del() and the found/not-found return values
 * are on elided lines.
 */
529 int sync_fence_cancel_async(struct sync_fence *fence,
530                             struct sync_fence_waiter *waiter)
532         struct list_head *pos;
537         spin_lock_irqsave(&fence->waiter_list_lock, flags);
539          * Make sure waiter is still in waiter_list because it is possible for
540          * the waiter to be removed from the list while the callback is still
543         list_for_each_safe(pos, n, &fence->waiter_list_head) {
544                 struct sync_fence_waiter *list_waiter =
545                         container_of(pos, struct sync_fence_waiter,
547                 if (list_waiter == waiter) {
553         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
556 EXPORT_SYMBOL(sync_fence_cancel_async);
/*
 * sync_fence_wait() - block until @fence signals or @timeout (ms) expires.
 * A positive timeout is converted to jiffies for a timed interruptible
 * wait; otherwise an untimed interruptible wait is used.  Returns the
 * fence's negative error status if it errored, and (per the status == 0
 * check) a timeout error if it never signaled.  NOTE(review): the
 * timeout-selection branch, -ERESTARTSYS propagation, and the final
 * returns are on elided lines.
 */
558 int sync_fence_wait(struct sync_fence *fence, long timeout)
563                 timeout = msecs_to_jiffies(timeout);
564                 err = wait_event_interruptible_timeout(fence->wq,
568                 err = wait_event_interruptible(fence->wq, fence->status != 0);
574         if (fence->status < 0)
575                 return fence->status;
577         if (fence->status == 0)
582 EXPORT_SYMBOL(sync_fence_wait);
/*
 * sync_fence_free() - kref release callback for a fence: free its pts.
 * NOTE(review): the kfree() of the fence itself is presumably on an
 * elided line.
 */
584 static void sync_fence_free(struct kref *kref)
586         struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
588         sync_fence_free_pts(fence);
/*
 * sync_fence_release() - file release handler; runs when the last fd/file
 * reference to the fence is closed.  Ordering matters: first remove the
 * fence from the global debugfs list, then detach its pts from their
 * timelines so sync_timeline_signal() cannot reach it, and only then drop
 * the final kref.
 */
593 static int sync_fence_release(struct inode *inode, struct file *file)
595         struct sync_fence *fence = file->private_data;
599          * We need to remove all ways to access this fence before droping
602          * start with its membership in the global fence list
604         spin_lock_irqsave(&sync_fence_list_lock, flags);
605         list_del(&fence->sync_fence_list);
606         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
609          * remove its pts from their parents so that sync_timeline_signal()
610          * can't reference the fence.
612         sync_fence_detach_pts(fence);
614         kref_put(&fence->kref, sync_fence_free);
/*
 * sync_fence_poll() - poll/select support: readable once the fence has
 * signaled (status == 1) or errored (status < 0).  NOTE(review): the
 * returned POLL* masks for each branch are on elided lines.
 */
619 static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
621         struct sync_fence *fence = file->private_data;
623         poll_wait(file, &fence->wq, wait);
625         if (fence->status == 1)
627         else if (fence->status < 0)
/*
 * SYNC_IOC_WAIT handler: copy the millisecond timeout from userspace and
 * block in sync_fence_wait().  NOTE(review): the -EFAULT return for a
 * failed copy_from_user() is on an elided line.
 */
633 static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
637         if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
640         return sync_fence_wait(fence, value);
/*
 * SYNC_IOC_MERGE handler: merge this fence with the fence behind data.fd2
 * into a new fence, returning its fd (and name echo) through @arg.  The
 * second-fence reference is dropped on all paths; the merged fence is
 * dropped only on failure after creation.
 * NOTE(review): the lines between get_unused_fd() and its use are elided,
 * so it is unclear from this listing whether fd < 0 is checked -- upstream
 * versions of this code added that check; confirm against full source.
 * Also note data.name is NUL-terminated defensively before use.
 */
643 static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
645         int fd = get_unused_fd();
647         struct sync_fence *fence2, *fence3;
648         struct sync_merge_data data;
650         if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
653         fence2 = sync_fence_fdget(data.fd2);
654         if (fence2 == NULL) {
659         data.name[sizeof(data.name) - 1] = '\0';
660         fence3 = sync_fence_merge(data.name, fence, fence2);
661         if (fence3 == NULL) {
667         if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
672         sync_fence_install(fence3, fd);
673         sync_fence_put(fence2);
/* Error unwinding: drop the merged fence, then the second fence. */
677         sync_fence_put(fence3);
680         sync_fence_put(fence2);
/*
 * sync_fill_pt_info() - serialize one sync_pt into a sync_pt_info record
 * inside the userspace info buffer.  Rejects buffers too small for the
 * fixed header; the driver may append variable-length private data via
 * ops->fill_driver_data, whose length presumably extends info->len --
 * TODO confirm, the accumulation lines are elided.  Returns info->len on
 * success (return line elided).
 */
687 static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
689         struct sync_pt_info *info = data;
692         if (size < sizeof(struct sync_pt_info))
695         info->len = sizeof(struct sync_pt_info);
697         if (pt->parent->ops->fill_driver_data) {
698                 ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
699                                                         size - sizeof(*info));
706         strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
707         strlcpy(info->driver_name, pt->parent->ops->driver_name,
708                 sizeof(info->driver_name));
709         info->status = pt->status;
710         info->timestamp_ns = ktime_to_ns(pt->timestamp);
/*
 * SYNC_IOC_FENCE_INFO handler: userspace passes a buffer whose first field
 * is its own size; the kernel fills in the fence name/status header and
 * then one sync_pt_info per pt, tracking the running length in @len, and
 * copies @len bytes back.  NOTE(review): the size upper-bound clamp, the
 * per-pt error/space handling after sync_fill_pt_info(), data->len
 * assignment, kfree(data), and the returns are on elided lines.
 */
715 static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
718         struct sync_fence_info_data *data;
719         struct list_head *pos;
724         if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
727         if (size < sizeof(struct sync_fence_info_data))
733         data = kzalloc(size, GFP_KERNEL);
737         strlcpy(data->name, fence->name, sizeof(data->name));
738         data->status = fence->status;
739         len = sizeof(struct sync_fence_info_data);
741         list_for_each(pos, &fence->pt_list_head) {
743                         container_of(pos, struct sync_pt, pt_list);
745                 ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);
755         if (copy_to_user((void __user *)arg, data, len))
/*
 * Top-level ioctl dispatcher for fence files: routes SYNC_IOC_WAIT,
 * SYNC_IOC_MERGE and SYNC_IOC_FENCE_INFO to their handlers.
 * NOTE(review): the switch(cmd), the first two case labels, and the
 * default (presumably -ENOTTY) are on elided lines.
 */
766 static long sync_fence_ioctl(struct file *file, unsigned int cmd,
769         struct sync_fence *fence = file->private_data;
772                 return sync_fence_ioctl_wait(fence, arg);
775                 return sync_fence_ioctl_merge(fence, arg);
777         case SYNC_IOC_FENCE_INFO:
778                 return sync_fence_ioctl_fence_info(fence, arg);
785 #ifdef CONFIG_DEBUG_FS
/*
 * Map a pt/fence status to a short display string for the debugfs dump
 * (statuses: error < 0, active == 0, signaled otherwise; the actual
 * string returns are on elided lines).
 */
786 static const char *sync_status_str(int status)
790         else if (status == 0)
/*
 * Print one sync_pt: timeline name (only in per-fence listings), status,
 * signal timestamp, and any driver-specific detail via ops->print_pt.
 */
796 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
798         int status = pt->status;
799         seq_printf(s, "  %s%spt %s",
800                    fence ? pt->parent->name : "",
802                    sync_status_str(status));
804                 struct timeval tv = ktime_to_timeval(pt->timestamp);
805                 seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
808         if (pt->parent->ops->print_pt) {
810                 pt->parent->ops->print_pt(s, pt);
/*
 * Print one timeline for debugfs: its name, driver name, optional
 * driver-specific state (ops->print_obj), then every child pt, walked
 * under child_list_lock.
 */
816 static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
818         struct list_head *pos;
821         seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
823         if (obj->ops->print_obj) {
825                 obj->ops->print_obj(s, obj);
830         spin_lock_irqsave(&obj->child_list_lock, flags);
831         list_for_each(pos, &obj->child_list_head) {
833                         container_of(pos, struct sync_pt, child_list);
834                 sync_print_pt(s, pt, false);
836         spin_unlock_irqrestore(&obj->child_list_lock, flags);
/*
 * Print one fence for debugfs: name and aggregate status, each member pt
 * (with its timeline name, hence fence=true), and every registered async
 * waiter's callback address, the latter walked under waiter_list_lock.
 */
839 static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
841         struct list_head *pos;
844         seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));
846         list_for_each(pos, &fence->pt_list_head) {
848                         container_of(pos, struct sync_pt, pt_list);
849                 sync_print_pt(s, pt, true);
852         spin_lock_irqsave(&fence->waiter_list_lock, flags);
853         list_for_each(pos, &fence->waiter_list_head) {
854                 struct sync_fence_waiter *waiter =
855                         container_of(pos, struct sync_fence_waiter,
858                 seq_printf(s, "waiter %pF\n", waiter->callback);
860         spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
/*
 * seq_file show handler for the "sync" debugfs entry: dumps every live
 * timeline (with its pts) and every live fence (with its pts and
 * waiters), walking the two global lists under their respective locks.
 */
863 static int sync_debugfs_show(struct seq_file *s, void *unused)
866         struct list_head *pos;
868         seq_printf(s, "objs:\n--------------\n");
870         spin_lock_irqsave(&sync_timeline_list_lock, flags);
871         list_for_each(pos, &sync_timeline_list_head) {
872                 struct sync_timeline *obj =
873                         container_of(pos, struct sync_timeline,
876                 sync_print_obj(s, obj);
879         spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
881         seq_printf(s, "fences:\n--------------\n");
883         spin_lock_irqsave(&sync_fence_list_lock, flags);
884         list_for_each(pos, &sync_fence_list_head) {
885                 struct sync_fence *fence =
886                         container_of(pos, struct sync_fence, sync_fence_list);
888                 sync_print_fence(s, fence);
891         spin_unlock_irqrestore(&sync_fence_list_lock, flags);
/* Standard single_open() boilerplate wiring sync_debugfs_show to the file. */
895 static int sync_debugfs_open(struct inode *inode, struct file *file)
897         return single_open(file, sync_debugfs_show, inode->i_private);
900 static const struct file_operations sync_debugfs_fops = {
901         .open           = sync_debugfs_open,
904         .release        = single_release,
/*
 * Create the read-only "sync" debugfs file at initcall time.  The dentry
 * is intentionally not kept; debugfs creation failures are ignored here.
 */
907 static __init int sync_debugfs_init(void)
909         debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
913 late_initcall(sync_debugfs_init);