sync.c revision 1d5db2ce93089db91d7997927b4cfd92a88c5aad
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
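
/*
 * Illustrative sketch, not part of this file: a timeline driver is expected
 * to embed struct sync_timeline as the first member of its own type and pass
 * the enclosing size here.  The my_timeline and my_timeline_ops names below
 * are hypothetical.
 *
 *	struct my_timeline {
 *		struct sync_timeline	obj;
 *		u32			value;
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */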

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
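
/*
 * Illustrative sketch, not part of this file: a hardware driver would
 * typically advance its timeline state from an interrupt handler and then
 * call sync_timeline_signal() to sweep the active list.  The my_irq and
 * my_read_counter names below are hypothetical.
 *
 *	static irqreturn_t my_irq(int irq, void *data)
 *	{
 *		struct my_timeline *tl = data;
 *
 *		tl->value = my_read_counter(tl);
 *		sync_timeline_signal(&tl->obj);
 *		return IRQ_HANDLED;
 *	}
 */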

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
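
/*
 * Illustrative sketch, not part of this file: as with timelines, a driver
 * embeds struct sync_pt at the start of its own type; the timeline's
 * has_signaled()/compare() ops can then cast back to the containing type.
 * The my_pt and next_value names below are hypothetical.
 *
 *	struct my_pt {
 *		struct sync_pt	pt;
 *		u32		value;
 *	};
 *
 *	struct my_pt *p = (struct my_pt *)
 *		sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *	if (p)
 *		p->value = ++tl->next_value;
 */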

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	/* anon_inode_getfile() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
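
/*
 * Illustrative sketch, not part of this file: the usual way to hand work
 * completion to userspace is to wrap a fresh sync_pt in a fence and install
 * it on an unused fd (error handling abbreviated; see
 * sync_fence_ioctl_merge() below for the full fd handling pattern).
 *
 *	struct sync_pt *pt = sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *	struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *	int fd = get_unused_fd();
 *
 *	sync_fence_install(fence, fd);
 */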

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/*
			 * Collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two.
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
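
/*
 * Illustrative sketch, not part of this file: a consumer handed a fence fd
 * by userspace takes a reference with sync_fence_fdget() and drops it with
 * sync_fence_put() when done.
 *
 *	struct sync_fence *fence = sync_fence_fdget(fd);
 *
 *	if (fence == NULL)
 *		return -EINVAL;
 *	err = sync_fence_wait(fence, timeout_ms);
 *	sync_fence_put(fence);
 */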

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
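
/*
 * Illustrative sketch, not part of this file: merging lets a consumer wait
 * on several producers through one fence; per sync_fence_merge_pts() above,
 * points on the same timeline collapse to whichever signals later.
 *
 *	struct sync_fence *merged;
 *
 *	merged = sync_fence_merge("display+gpu", display_fence, gpu_fence);
 *	if (merged == NULL)
 *		return -ENOMEM;
 */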

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * This should protect against two threads racing on the signaled
	 * false -> true transition.
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
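
/*
 * Illustrative sketch, not part of this file, assuming the
 * sync_fence_waiter_init() helper from sync.h: the callback runs in the
 * signaling context, so it should only do minimal work, e.g. kick a
 * workqueue.  A non-zero return here means the fence was already signaled
 * and the callback will never run.  The my_work names are hypothetical.
 *
 *	static void my_fence_cb(struct sync_fence *fence,
 *				struct sync_fence_waiter *waiter)
 *	{
 *		struct my_work *w =
 *			container_of(waiter, struct my_work, waiter);
 *
 *		schedule_work(&w->work);
 *	}
 *
 *	sync_fence_waiter_init(&w->waiter, my_fence_cb);
 *	err = sync_fence_wait_async(fence, &w->waiter);
 *	if (err != 0)
 *		schedule_work(&w->work);
 */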

int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
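
/*
 * Illustrative sketch, not part of this file: before freeing a pending
 * waiter its owner must cancel it successfully; -ENOENT means the callback
 * already fired (or is about to), so the object must not be freed here.
 *
 *	if (sync_fence_cancel_async(fence, &w->waiter) == 0)
 *		kfree(w);
 */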

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %ums\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
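
/*
 * Illustrative sketch, not part of this file: the timeout is in
 * milliseconds, with a negative value meaning wait forever; -ETIME
 * distinguishes a timeout from a fence that signaled with an error status.
 *
 *	err = sync_fence_wait(fence, 4000);
 *	if (err == -ETIME)
 *		pr_warn("fence wait timed out\n");
 *	else if (err < 0)
 *		pr_err("fence signaled with error %d\n", err);
 */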

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * Start with its membership in the global fence list.
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * Remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);

		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);
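
/*
 * Note: with CONFIG_DEBUG_FS enabled, the state dumped by
 * sync_debugfs_show() is readable from userspace, typically at
 * /sys/kernel/debug/sync (the exact path depends on where debugfs is
 * mounted).
 */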

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif
954