sync.c revision 4b5de08a37e8189c039424c92ca76ff605cf1c7f
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
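/*
 * Illustrative use (not part of this file): the @size argument lets a
 * driver allocate the core object and its own timeline state in one
 * block, with the sync_timeline placed first so the pointers are
 * interchangeable.  my_timeline, my_timeline_ops and value are
 * hypothetical names used only for this sketch:
 *
 *	struct my_timeline {
 *		struct sync_timeline obj;	(must be first)
 *		u32 value;			(driver-private state)
 *	};
 *
 *	struct my_timeline *tl = (struct my_timeline *)
 *		sync_timeline_create(&my_timeline_ops,
 *				     sizeof(struct my_timeline), "my_tl");
 */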

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
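/*
 * Illustrative (not part of this file): like sync_timeline_create(),
 * sync_pt_create() takes a @size so the driver can append its own state
 * after the core struct, e.g. the counter value this pt waits for:
 *
 *	struct my_pt {
 *		struct sync_pt pt;	(must be first)
 *		u32 value;		(hypothetical driver field)
 *	};
 *
 * The driver's ops->has_signaled() would then compare that value against
 * the timeline's current position.
 */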

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	/* anon_inode_getfile() returns an ERR_PTR on failure, never NULL */
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt had already signaled by the time
	 * sync_pt_activate(pt) ran, in which case it was never added to
	 * the timeline's active list
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
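/*
 * Illustrative flow (assumed, pieced together from the API in this file;
 * my_pt_create() is a hypothetical driver helper):
 *
 *	struct sync_pt *pt = my_pt_create(timeline, value);
 *	struct sync_fence *fence = sync_fence_create("my_fence", pt);
 *	int fd = get_unused_fd();
 *	sync_fence_install(fence, fd);
 *	(fd can now be handed to userspace and waited on)
 */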

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/* collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);
					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	/*
	 * signal the fence in case one of its pts had already signaled
	 * before it was activated above, in which case it is not on any
	 * timeline's active list
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
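/*
 * Illustrative (not part of this file): merging fences a and b yields a
 * fence that signals only once every timeline referenced by either fence
 * has reached the later of the two points, e.g.:
 *
 *	struct sync_fence *both = sync_fence_merge("gpu+display", a, b);
 *
 * where a and b are hypothetical fences from two different drivers.
 */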

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
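/*
 * Illustrative async-wait usage (assumes the sync_fence_waiter_init()
 * helper declared in sync.h; my_cb is a hypothetical callback):
 *
 *	static void my_cb(struct sync_fence *fence,
 *			  struct sync_fence_waiter *waiter)
 *	{
 *		(runs once fence->status becomes non-zero)
 *	}
 *
 *	sync_fence_waiter_init(&waiter, my_cb);
 *	err = sync_fence_wait_async(fence, &waiter);
 *
 * A non-zero return means the fence had already signaled (or errored)
 * and the callback was not queued.
 */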

int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);
		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
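/*
 * Timeout semantics, restated for reference: @timeout is in milliseconds;
 * a negative value waits indefinitely and zero just polls the current
 * status.  An illustrative call:
 *
 *	err = sync_fence_wait(fence, 1000);	(wait up to one second)
 *
 * err is 0 once the fence has signaled, -ETIME on timeout, and another
 * negative errno if the fence errored or the wait was interrupted.
 */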

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * Start with its membership in the global fence list.
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
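/*
 * Illustrative userspace usage (assumes the SYNC_IOC_* definitions and
 * struct sync_merge_data from the sync.h uapi header; fence_fd and
 * other_fd are hypothetical fence file descriptors):
 *
 *	__s32 timeout_ms = 1000;
 *	ioctl(fence_fd, SYNC_IOC_WAIT, &timeout_ms);
 *
 *	struct sync_merge_data data = { .fd2 = other_fd, .name = "merged" };
 *	ioctl(fence_fd, SYNC_IOC_MERGE, &data);
 *	(data.fence now holds the merged fence's fd)
 */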

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];
			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif