/* sync.c revision 92ea915adb5565b522902a7b3f0a33ede16bb797 */
1/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU General Public License for more details.
14 *
15 */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "sync.h"
30
/* forward declarations for helpers used before their definitions */
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);

/* all timelines, for the debugfs dump; guarded by sync_timeline_list_lock */
static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

/* all fences, for the debugfs dump; guarded by sync_fence_list_lock */
static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);
40
41struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
42					   int size, const char *name)
43{
44	struct sync_timeline *obj;
45	unsigned long flags;
46
47	if (size < sizeof(struct sync_timeline))
48		return NULL;
49
50	obj = kzalloc(size, GFP_KERNEL);
51	if (obj == NULL)
52		return NULL;
53
54	kref_init(&obj->kref);
55	obj->ops = ops;
56	strlcpy(obj->name, name, sizeof(obj->name));
57
58	INIT_LIST_HEAD(&obj->child_list_head);
59	spin_lock_init(&obj->child_list_lock);
60
61	INIT_LIST_HEAD(&obj->active_list_head);
62	spin_lock_init(&obj->active_list_lock);
63
64	spin_lock_irqsave(&sync_timeline_list_lock, flags);
65	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
66	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
67
68	return obj;
69}
70EXPORT_SYMBOL(sync_timeline_create);
71
/*
 * kref release handler for a timeline: lets the driver tear down its
 * private state, unlinks the timeline from the global debugfs list, then
 * frees the object.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	/* give the specialization a chance to clean up first */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
87
/*
 * Mark the timeline destroyed and drop the creator's reference.
 * Outstanding sync_pts hold their own references (taken in
 * sync_pt_create()), so the object survives until the last pt is freed.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	/*
	 * NOTE(review): once destroyed is set, _sync_pt_has_signaled()
	 * flips pending pts to -ENOENT, so this signal pass errors out
	 * all remaining waiters.
	 */
	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
101
102static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
103{
104	unsigned long flags;
105
106	pt->parent = obj;
107
108	spin_lock_irqsave(&obj->child_list_lock, flags);
109	list_add_tail(&pt->child_list, &obj->child_list_head);
110	spin_unlock_irqrestore(&obj->child_list_lock, flags);
111}
112
113static void sync_timeline_remove_pt(struct sync_pt *pt)
114{
115	struct sync_timeline *obj = pt->parent;
116	unsigned long flags;
117
118	spin_lock_irqsave(&obj->active_list_lock, flags);
119	if (!list_empty(&pt->active_list))
120		list_del_init(&pt->active_list);
121	spin_unlock_irqrestore(&obj->active_list_lock, flags);
122
123	spin_lock_irqsave(&obj->child_list_lock, flags);
124	if (!list_empty(&pt->child_list)) {
125		list_del_init(&pt->child_list);
126	}
127	spin_unlock_irqrestore(&obj->child_list_lock, flags);
128}
129
/*
 * Called by the timeline implementation when its counter advances.  Under
 * the active-list lock, every pt that now reports signaled is moved onto a
 * private list; the fences are then signaled outside the lock.  A kref on
 * each pt's fence is held across the unlock so the fence cannot disappear
 * before it is signaled.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			/* keep the fence alive until signaled below */
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/* dispatch with no spinlock held: callbacks may take fence locks */
	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
161
162struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
163{
164	struct sync_pt *pt;
165
166	if (size < sizeof(struct sync_pt))
167		return NULL;
168
169	pt = kzalloc(size, GFP_KERNEL);
170	if (pt == NULL)
171		return NULL;
172
173	INIT_LIST_HEAD(&pt->active_list);
174	kref_get(&parent->kref);
175	sync_timeline_add_pt(parent, pt);
176
177	return pt;
178}
179EXPORT_SYMBOL(sync_pt_create);
180
/*
 * Free a sync_pt: let the driver release per-pt state, detach the pt from
 * its timeline's lists, drop the timeline reference taken in
 * sync_pt_create(), then free the memory.
 */
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	/* may run sync_timeline_free() if this was the last reference */
	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);
193
/*
 * Refresh and return @pt's cached status.  Positive means signaled, zero
 * still pending, negative an error.
 *
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	/* only poll the driver while the pt is still pending */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* a pt on a destroyed timeline can never signal: report an error */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	/* timestamp the pending -> signaled/error transition exactly once */
	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
210
211static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
212{
213	return pt->parent->ops->dup(pt);
214}
215
216/* Adds a sync pt to the active queue.  Called when added to a fence */
217static void sync_pt_activate(struct sync_pt *pt)
218{
219	struct sync_timeline *obj = pt->parent;
220	unsigned long flags;
221	int err;
222
223	spin_lock_irqsave(&obj->active_list_lock, flags);
224
225	err = _sync_pt_has_signaled(pt);
226	if (err != 0)
227		goto out;
228
229	list_add_tail(&pt->active_list, &obj->active_list_head);
230
231out:
232	spin_unlock_irqrestore(&obj->active_list_lock, flags);
233}
234
/* file_operations backing the anonymous sync_fence file descriptors */
static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};
246
247static struct sync_fence *sync_fence_alloc(const char *name)
248{
249	struct sync_fence *fence;
250	unsigned long flags;
251
252	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
253	if (fence == NULL)
254		return NULL;
255
256	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
257					 fence, 0);
258	if (fence->file == NULL)
259		goto err;
260
261	kref_init(&fence->kref);
262	strlcpy(fence->name, name, sizeof(fence->name));
263
264	INIT_LIST_HEAD(&fence->pt_list_head);
265	INIT_LIST_HEAD(&fence->waiter_list_head);
266	spin_lock_init(&fence->waiter_list_lock);
267
268	init_waitqueue_head(&fence->wq);
269
270	spin_lock_irqsave(&sync_fence_list_lock, flags);
271	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
272	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
273
274	return fence;
275
276err:
277	kfree(fence);
278	return NULL;
279}
280
281/* TODO: implement a create which takes more that one sync_pt */
282struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
283{
284	struct sync_fence *fence;
285
286	if (pt->fence)
287		return NULL;
288
289	fence = sync_fence_alloc(name);
290	if (fence == NULL)
291		return NULL;
292
293	pt->fence = fence;
294	list_add(&pt->pt_list, &fence->pt_list_head);
295	sync_pt_activate(pt);
296
297	return fence;
298}
299EXPORT_SYMBOL(sync_fence_create);
300
301static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
302{
303	struct list_head *pos;
304
305	list_for_each(pos, &src->pt_list_head) {
306		struct sync_pt *orig_pt =
307			container_of(pos, struct sync_pt, pt_list);
308		struct sync_pt *new_pt = sync_pt_dup(orig_pt);
309
310		if (new_pt == NULL)
311			return -ENOMEM;
312
313		new_pt->fence = dst;
314		list_add(&new_pt->pt_list, &dst->pt_list_head);
315		sync_pt_activate(new_pt);
316	}
317
318	return 0;
319}
320
/*
 * Merge @src's pts into @dst (which may already hold pts copied from
 * another fence).  A src pt on a timeline not yet represented in @dst is
 * duplicated in; where both fences have a pt on the same timeline the two
 * collapse into one pt that signals at the later of the two.  Returns 0 or
 * -ENOMEM; on failure @dst may be partially populated and the caller must
 * free it.
 */
static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				/* compare() == -1 means dst_pt fires first,
				 * so the src copy must replace it */
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		/* timeline not yet represented in dst: just copy the pt in */
		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}
370
371static void sync_fence_detach_pts(struct sync_fence *fence)
372{
373	struct list_head *pos, *n;
374
375	list_for_each_safe(pos, n, &fence->pt_list_head) {
376		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
377		sync_timeline_remove_pt(pt);
378	}
379}
380
381static void sync_fence_free_pts(struct sync_fence *fence)
382{
383	struct list_head *pos, *n;
384
385	list_for_each_safe(pos, n, &fence->pt_list_head) {
386		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
387		sync_pt_free(pt);
388	}
389}
390
391struct sync_fence *sync_fence_fdget(int fd)
392{
393	struct file *file = fget(fd);
394
395	if (file == NULL)
396		return NULL;
397
398	if (file->f_op != &sync_fence_fops)
399		goto err;
400
401	return file->private_data;
402
403err:
404	fput(file);
405	return NULL;
406}
407EXPORT_SYMBOL(sync_fence_fdget);
408
/* Drop a reference on the fence's backing file; the final put releases it. */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
414
/* Install the fence's file into the caller's fd table under @fd. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
420
421static int sync_fence_get_status(struct sync_fence *fence)
422{
423	struct list_head *pos;
424	int status = 1;
425
426	list_for_each(pos, &fence->pt_list_head) {
427		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
428		int pt_status = pt->status;
429
430		if (pt_status < 0) {
431			status = pt_status;
432			break;
433		} else if (status == 1) {
434			status = pt_status;
435		}
436	}
437
438	return status;
439}
440
441struct sync_fence *sync_fence_merge(const char *name,
442				    struct sync_fence *a, struct sync_fence *b)
443{
444	struct sync_fence *fence;
445	int err;
446
447	fence = sync_fence_alloc(name);
448	if (fence == NULL)
449		return NULL;
450
451	err = sync_fence_copy_pts(fence, a);
452	if (err < 0)
453		goto err;
454
455	err = sync_fence_merge_pts(fence, b);
456	if (err < 0)
457		goto err;
458
459	fence->status = sync_fence_get_status(fence);
460
461	return fence;
462err:
463	sync_fence_free_pts(fence);
464	kfree(fence);
465	return NULL;
466}
467EXPORT_SYMBOL(sync_fence_merge);
468
/*
 * Recompute @pt's fence status and, on the fence's pending -> done
 * transition, drain the async waiter list (running each callback) and wake
 * any synchronous waiters.  The check-and-set of fence->status under
 * waiter_list_lock guarantees only one caller performs the transition.
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* we won the transition: claim all queued waiters */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* lost the race (or not done yet): nothing to dispatch */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* callbacks run without the waiter lock held */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}
507
508int sync_fence_wait_async(struct sync_fence *fence,
509			  struct sync_fence_waiter *waiter)
510{
511	unsigned long flags;
512	int err = 0;
513
514	spin_lock_irqsave(&fence->waiter_list_lock, flags);
515
516	if (fence->status) {
517		err = fence->status;
518		goto out;
519	}
520
521	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
522out:
523	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
524
525	return err;
526}
527EXPORT_SYMBOL(sync_fence_wait_async);
528
529int sync_fence_cancel_async(struct sync_fence *fence,
530			     struct sync_fence_waiter *waiter)
531{
532	struct list_head *pos;
533	struct list_head *n;
534	unsigned long flags;
535	int ret = -ENOENT;
536
537	spin_lock_irqsave(&fence->waiter_list_lock, flags);
538	/*
539	 * Make sure waiter is still in waiter_list because it is possible for
540	 * the waiter to be removed from the list while the callback is still
541	 * pending.
542	 */
543	list_for_each_safe(pos, n, &fence->waiter_list_head) {
544		struct sync_fence_waiter *list_waiter =
545			container_of(pos, struct sync_fence_waiter,
546				     waiter_list);
547		if (list_waiter == waiter) {
548			list_del(pos);
549			ret = 0;
550			break;
551		}
552	}
553	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
554	return ret;
555}
556EXPORT_SYMBOL(sync_fence_cancel_async);
557
/*
 * Wait (interruptibly) for @fence to signal.  @timeout is in milliseconds;
 * 0 means wait forever.  Returns 0 on success, the fence's negative error
 * status if it errored, -ETIME on timeout, or a -ERESTARTSYS-style error
 * if interrupted.
 *
 * NOTE(review): a negative @timeout goes into msecs_to_jiffies()
 * unchecked — presumably callers never pass one; verify at the ioctl
 * boundary.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	/* woke up with status still 0: the timeout must have expired */
	if (fence->status == 0)
		return -ETIME;

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
583
/* kref release handler: frees the fence's pts, then the fence itself. */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}
592
/*
 * ->release for the anonymous fence file: runs when the last file
 * reference is dropped.  Unpublishes the fence, detaches its pts from
 * their timelines, then drops the fence kref.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	/* usually the final ref: sync_fence_free() frees pts and fence */
	kref_put(&fence->kref, sync_fence_free);

	return 0;
}
618
619static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
620{
621	struct sync_fence *fence = file->private_data;
622
623	poll_wait(file, &fence->wq, wait);
624
625	if (fence->status == 1)
626		return POLLIN;
627	else if (fence->status < 0)
628		return POLLERR;
629	else
630		return 0;
631}
632
633static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
634{
635	__s32 value;
636
637	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
638		return -EFAULT;
639
640	return sync_fence_wait(fence, value);
641}
642
/*
 * SYNC_IOC_MERGE: merge this fence with the fence behind data.fd2 into a
 * new fence, returning its fd in data.fence.  The fd is reserved first so
 * every failure path can back out without touching the caller's table.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* user-supplied name: force NUL termination before use */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	/* report the fd before publishing the file under it */
	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
691
/*
 * Serialize one sync_pt into @data as a struct sync_pt_info, appending any
 * driver-specific payload after the fixed header.  Returns the total bytes
 * written, -ENOMEM if @size cannot hold the header, or a driver error.
 */
static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		/* driver gets only the space remaining after the header */
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}
719
/*
 * SYNC_IOC_FENCE_INFO: copy a struct sync_fence_info_data (header plus one
 * sync_pt_info per pt) back to userspace.  The user supplies the buffer
 * size in the first __u32 of the buffer; it is clamped to 4096 bytes.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the kernel-side scratch allocation */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	/*
	 * NOTE(review): pt_list is walked without a lock here — presumably
	 * a fence's pt set is immutable once the fd is handed out; confirm.
	 */
	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	/* copy back only the bytes actually filled in */
	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
770
771static long sync_fence_ioctl(struct file *file, unsigned int cmd,
772			     unsigned long arg)
773{
774	struct sync_fence *fence = file->private_data;
775	switch (cmd) {
776	case SYNC_IOC_WAIT:
777		return sync_fence_ioctl_wait(fence, arg);
778
779	case SYNC_IOC_MERGE:
780		return sync_fence_ioctl_merge(fence, arg);
781
782	case SYNC_IOC_FENCE_INFO:
783		return sync_fence_ioctl_fence_info(fence, arg);
784
785	default:
786		return -ENOTTY;
787	}
788}
789
790#ifdef CONFIG_DEBUG_FS
/* Map a sync status value to its human-readable debugfs label. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status < 0)
		return "error";
	return "active";
}
800
/*
 * Print one pt for debugfs.  @fence is true when the pt is shown under a
 * fence (prefix it with its timeline's name) rather than under its own
 * timeline.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	/* a non-zero status has a meaningful signal/error timestamp */
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	/* optional driver-specific detail */
	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
820
/* Print one timeline and all of its child pts for debugfs. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	/* optional driver-specific detail */
	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
843
/* Print one fence: its pts and any registered async waiters, for debugfs. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	/*
	 * NOTE(review): pt_list is walked without a lock — presumably a
	 * fence's pt set is fixed after construction; confirm.
	 */
	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
867
/* seq_file show handler: dump every timeline, then every fence. */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
899
/* debugfs open: wire the file up to the single-shot show handler. */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
904
/* file_operations for the read-only /sys/kernel/debug/sync file */
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
911
/* Create the debugfs entry; failure is non-fatal, so the result is ignored. */
static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);
919
920#endif
921