sync.c revision c679212dbfd060513e156133326122bf9f496579
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
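
/*
 * Example usage (illustrative sketch, not part of this revision): a
 * driver embeds struct sync_timeline as the *first* member of a larger
 * struct and passes the total size here, so the kzalloc() above leaves
 * room for driver state after the common part.  my_timeline and
 * my_timeline_ops are hypothetical names; only the ops hooks this file
 * actually calls (has_signaled, dup, compare, free_pt, ...) need to be
 * filled in.
 *
 *	struct my_timeline {
 *		struct sync_timeline	obj;
 *		u32			value;
 *	};
 *
 *	static struct my_timeline *my_timeline_create(void)
 *	{
 *		return (struct my_timeline *)
 *			sync_timeline_create(&my_timeline_ops,
 *					     sizeof(struct my_timeline),
 *					     "my_timeline");
 *	}
 */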

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}

void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;

	/*
	 * If this is not the last reference, signal any children
	 * that their parent is going away.
	 */

	if (!kref_put(&obj->kref, sync_timeline_free))
		sync_timeline_signal(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);
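
/*
 * Example usage (illustrative sketch): a driver typically advances its
 * timeline from a completion path such as an interrupt handler and then
 * calls sync_timeline_signal() so that any sync_pts the new value has
 * passed are retired.  my_timeline and its members are the hypothetical
 * names from the sketch above.
 *
 *	static void my_timeline_advance(struct my_timeline *tl)
 *	{
 *		tl->value++;
 *		sync_timeline_signal(&tl->obj);
 *	}
 */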

struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
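
/*
 * Example usage (illustrative sketch): the usual producer-side flow is
 * to allocate a sync_pt on a timeline, wrap it in a fence and hand the
 * fence to userspace as a file descriptor, mirroring what
 * sync_fence_ioctl_merge() below does with get_unused_fd() and
 * sync_fence_install().  struct my_pt (sync_pt first, plus a target
 * value) is hypothetical.
 *
 *	static int my_fence_fd_create(struct my_timeline *tl, u32 value)
 *	{
 *		struct sync_pt *pt;
 *		struct sync_fence *fence;
 *		int fd = get_unused_fd();
 *
 *		if (fd < 0)
 *			return fd;
 *
 *		pt = sync_pt_create(&tl->obj, sizeof(struct my_pt));
 *		if (pt == NULL) {
 *			put_unused_fd(fd);
 *			return -ENOMEM;
 *		}
 *		((struct my_pt *)pt)->value = value;
 *
 *		fence = sync_fence_create("my_fence", pt);
 *		if (fence == NULL) {
 *			sync_pt_free(pt);
 *			put_unused_fd(fd);
 *			return -ENOMEM;
 *		}
 *
 *		sync_fence_install(fence, fd);
 *		return fd;
 *	}
 */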

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
		sync_pt_activate(new_pt);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/*
			 * collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);

					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_activate(new_pt);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
			sync_pt_activate(new_pt);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	fence->status = sync_fence_get_status(fence);

	return fence;

err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);
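
/*
 * Example usage (illustrative sketch): a consumer that must wait on two
 * fences can merge them and wait on the result; the merged fence only
 * reports signaled once every point copied from both inputs has
 * signaled.  fence_a and fence_b are hypothetical fences the caller
 * already holds references on.
 *
 *	struct sync_fence *merged;
 *	int err;
 *
 *	merged = sync_fence_merge("a+b", fence_a, fence_b);
 *	if (merged == NULL)
 *		return -ENOMEM;
 *
 *	err = sync_fence_wait(merged, 1000);
 *	sync_fence_put(merged);
 */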

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * This should protect against two threads racing on the signaled
	 * false -> true transition.
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);
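
/*
 * Example usage (illustrative sketch): an asynchronous waiter embeds a
 * struct sync_fence_waiter and supplies a callback (set directly here;
 * sync.h also carries an init helper for this).  The callback runs from
 * whatever context signals the timeline, frequently an interrupt
 * handler, so it must not sleep.  A non-zero return from
 * sync_fence_wait_async() means the fence was already signaled (or in
 * error) and the callback will never be invoked.
 *
 *	static DECLARE_COMPLETION(my_done);
 *
 *	static void my_cb(struct sync_fence *fence,
 *			  struct sync_fence_waiter *waiter)
 *	{
 *		complete(&my_done);
 *	}
 *
 *	static struct sync_fence_waiter my_waiter = { .callback = my_cb };
 *
 *	if (sync_fence_wait_async(fence, &my_waiter) != 0)
 *		complete(&my_done);
 */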

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		pr_info("fence timeout on [%p] after %dms\n", fence,
			jiffies_to_msecs(timeout));
		sync_dump();
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);
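
/*
 * Example usage (illustrative sketch): a blocking consumer waits with a
 * timeout in milliseconds; a negative timeout waits forever and a zero
 * timeout just polls the current status.  -ETIME means the fence did
 * not signal in time, other negative returns are wait or fence errors,
 * and 0 means the fence signaled.
 *
 *	int err = sync_fence_wait(fence, 1000);
 *
 *	if (err < 0)
 *		pr_err("fence wait failed: %d\n", err);
 */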

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * Start with its membership in the global fence list.
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * Remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd();
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
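
/*
 * Example usage (illustrative sketch): from userspace, these ioctls
 * drive the fence fd a driver returned.  SYNC_IOC_WAIT takes a pointer
 * to a millisecond timeout, and SYNC_IOC_MERGE fills in sync_merge_data
 * with the fd of the newly created merged fence.  Error handling is
 * elided; the ioctl numbers and structs come from the sync uapi header.
 *
 *	__s32 timeout = 1000;
 *	struct sync_merge_data merge = { .fd2 = fd_b };
 *
 *	ioctl(fd_a, SYNC_IOC_WAIT, &timeout);
 *
 *	strcpy(merge.name, "a+b");
 *	ioctl(fd_a, SYNC_IOC_MERGE, &merge);
 *	merged_fd = merge.fence;
 */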

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);

		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif
