sync.c revision ac5b705b22642208764aa784ccc47f093d0212b5
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>

#include "sync.h"

#define CREATE_TRACE_POINTS
#include "trace/sync.h"

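/*
 * Typical driver usage, as a rough sketch (my_ops, struct my_timeline and
 * struct my_pt are hypothetical driver-specific types wrapping the structs
 * declared in sync.h; error handling is omitted):
 *
 *	struct sync_timeline *tl;
 *	struct sync_pt *pt;
 *	struct sync_fence *fence;
 *	int fd;
 *
 *	tl = sync_timeline_create(&my_ops, sizeof(struct my_timeline),
 *				  "my_timeline");
 *	pt = sync_pt_create(tl, sizeof(struct my_pt));
 *	fence = sync_fence_create("my_fence", pt);
 *	fd = get_unused_fd_flags(O_CLOEXEC);
 *	sync_fence_install(fence, fd);
 *
 * Userspace then waits or polls on fd.  When the hardware completes, the
 * driver calls sync_timeline_signal(tl), which signals any pts that
 * has_signaled() now reports complete and wakes their fences' waiters.
 */
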
static void sync_fence_signal_pt(struct sync_pt *pt);
static int _sync_pt_has_signaled(struct sync_pt *pt);
static void sync_fence_free(struct kref *kref);
static void sync_dump(void);

static LIST_HEAD(sync_timeline_list_head);
static DEFINE_SPINLOCK(sync_timeline_list_lock);

static LIST_HEAD(sync_fence_list_head);
static DEFINE_SPINLOCK(sync_fence_list_lock);

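/*
 * sync_timeline_create() - creates a new sync timeline using the
 * implementation specified by @ops.  @size bytes are allocated, so it must
 * be at least sizeof(struct sync_timeline); any extra space is left for
 * driver-specific data.  Returns NULL on bad @size or allocation failure.
 */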
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;
	unsigned long flags;

	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	spin_lock_init(&obj->child_list_lock);

	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->active_list_lock);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);
	unsigned long flags;

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}

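/*
 * sync_timeline_destroy() - marks the timeline destroyed, signals any
 * remaining active pts (which will report -ENOENT), and drops the
 * creator's reference.  The timeline itself is freed once the last child
 * pt releases its reference via sync_pt_free().
 */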
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);

	kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);

static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
{
	unsigned long flags;

	pt->parent = obj;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;

	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	if (!list_empty(&pt->child_list))
		list_del_init(&pt->child_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

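/*
 * sync_timeline_signal() - called by the driver when the timeline advances.
 * Re-checks every pt on the active list, moves the newly signaled ones to a
 * private list under the lock, then signals their fences outside the lock.
 * A fence reference is held across the second loop so the fence cannot be
 * freed while it is being signaled.
 */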
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	trace_sync_timeline(obj);

	spin_lock_irqsave(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt)) {
			list_del_init(pos);
			list_add(&pt->signaled_list, &signaled_pts);
			kref_get(&pt->fence->kref);
		}
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, signaled_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
		kref_put(&pt->fence->kref, sync_fence_free);
	}
}
EXPORT_SYMBOL(sync_timeline_signal);

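/*
 * sync_pt_create() - creates a sync pt as a child of @parent.  @size must
 * be at least sizeof(struct sync_pt); any extra bytes are left for
 * driver-specific data.  Takes a reference on @parent for the pt's
 * lifetime.  Returns NULL on bad @size or allocation failure.
 */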
struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
{
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	INIT_LIST_HEAD(&pt->active_list);
	kref_get(&parent->kref);
	sync_timeline_add_pt(parent, pt);

	return pt;
}
EXPORT_SYMBOL(sync_pt_create);

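/*
 * sync_pt_free() - calls the driver's free_pt hook, unlinks the pt from
 * its parent's lists, drops the parent reference taken in sync_pt_create()
 * and frees the pt.
 */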
void sync_pt_free(struct sync_pt *pt)
{
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kref_put(&pt->parent->kref, sync_timeline_free);

	kfree(pt);
}
EXPORT_SYMBOL(sync_pt_free);

/* call with pt->parent->active_list_lock held */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}

static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
{
	return pt->parent->ops->dup(pt);
}

/* Adds a sync pt to the active queue.  Called when added to a fence */
static void sync_pt_activate(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	err = _sync_pt_has_signaled(pt);
	if (err != 0)
		goto out;

	list_add_tail(&pt->active_list, &obj->active_list_head);

out:
	spin_unlock_irqrestore(&obj->active_list_lock, flags);
}

static int sync_fence_release(struct inode *inode, struct file *file);
static unsigned int sync_fence_poll(struct file *file, poll_table *wait);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);

static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};

static struct sync_fence *sync_fence_alloc(const char *name)
{
	struct sync_fence *fence;
	unsigned long flags;

	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
					 fence, 0);
	if (IS_ERR(fence->file))
		goto err;

	kref_init(&fence->kref);
	strlcpy(fence->name, name, sizeof(fence->name));

	INIT_LIST_HEAD(&fence->pt_list_head);
	INIT_LIST_HEAD(&fence->waiter_list_head);
	spin_lock_init(&fence->waiter_list_lock);

	init_waitqueue_head(&fence->wq);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return fence;

err:
	kfree(fence);
	return NULL;
}

/* TODO: implement a create which takes more than one sync_pt */
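/*
 * sync_fence_create() - creates a fence containing the single pt @pt and
 * activates it.  @pt must not already belong to a fence.  Returns NULL on
 * failure.
 */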
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	if (pt->fence)
		return NULL;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	pt->fence = fence;
	list_add(&pt->pt_list, &fence->pt_list_head);
	sync_pt_activate(pt);

	/*
	 * signal the fence in case pt was signaled before
	 * sync_pt_activate(pt) was called
	 */
	sync_fence_signal_pt(pt);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);

static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *pos;

	list_for_each(pos, &src->pt_list_head) {
		struct sync_pt *orig_pt =
			container_of(pos, struct sync_pt, pt_list);
		struct sync_pt *new_pt = sync_pt_dup(orig_pt);

		if (new_pt == NULL)
			return -ENOMEM;

		new_pt->fence = dst;
		list_add(&new_pt->pt_list, &dst->pt_list_head);
	}

	return 0;
}

static int sync_fence_merge_pts(struct sync_fence *dst, struct sync_fence *src)
{
	struct list_head *src_pos, *dst_pos, *n;

	list_for_each(src_pos, &src->pt_list_head) {
		struct sync_pt *src_pt =
			container_of(src_pos, struct sync_pt, pt_list);
		bool collapsed = false;

		list_for_each_safe(dst_pos, n, &dst->pt_list_head) {
			struct sync_pt *dst_pt =
				container_of(dst_pos, struct sync_pt, pt_list);
			/*
			 * collapse two sync_pts on the same timeline
			 * to a single sync_pt that will signal at
			 * the later of the two
			 */
			if (dst_pt->parent == src_pt->parent) {
				if (dst_pt->parent->ops->compare(dst_pt, src_pt)
						 == -1) {
					struct sync_pt *new_pt =
						sync_pt_dup(src_pt);

					if (new_pt == NULL)
						return -ENOMEM;

					new_pt->fence = dst;
					list_replace(&dst_pt->pt_list,
						     &new_pt->pt_list);
					sync_pt_free(dst_pt);
				}
				collapsed = true;
				break;
			}
		}

		if (!collapsed) {
			struct sync_pt *new_pt = sync_pt_dup(src_pt);

			if (new_pt == NULL)
				return -ENOMEM;

			new_pt->fence = dst;
			list_add(&new_pt->pt_list, &dst->pt_list_head);
		}
	}

	return 0;
}

static void sync_fence_detach_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_timeline_remove_pt(pt);
	}
}

static void sync_fence_free_pts(struct sync_fence *fence)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);

		sync_pt_free(pt);
	}
}

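/*
 * sync_fence_fdget() - resolves @fd to a sync_fence, taking a file
 * reference that the caller must release with sync_fence_put().  Returns
 * NULL if @fd does not refer to a sync fence.
 */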
struct sync_fence *sync_fence_fdget(int fd)
{
	struct file *file = fget(fd);

	if (file == NULL)
		return NULL;

	if (file->f_op != &sync_fence_fops)
		goto err;

	return file->private_data;

err:
	fput(file);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_fdget);

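/* sync_fence_put() - releases a reference obtained via sync_fence_fdget() */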
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);

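/* sync_fence_install() - installs @fence's file into the fd table at @fd */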
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);

static int sync_fence_get_status(struct sync_fence *fence)
{
	struct list_head *pos;
	int status = 1;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
		int pt_status = pt->status;

		if (pt_status < 0) {
			status = pt_status;
			break;
		} else if (status == 1) {
			status = pt_status;
		}
	}

	return status;
}

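/*
 * sync_fence_merge() - creates a new fence containing copies of the pts in
 * both @a and @b; pts on the same timeline are collapsed to whichever
 * signals later.  Returns NULL on error.
 */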
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	struct sync_fence *fence;
	struct list_head *pos;
	int err;

	fence = sync_fence_alloc(name);
	if (fence == NULL)
		return NULL;

	err = sync_fence_copy_pts(fence, a);
	if (err < 0)
		goto err;

	err = sync_fence_merge_pts(fence, b);
	if (err < 0)
		goto err;

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_pt_activate(pt);
	}

	/*
	 * signal the fence in case one of its pts was signaled before
	 * it was activated
	 */
	sync_fence_signal_pt(list_first_entry(&fence->pt_list_head,
					      struct sync_pt,
					      pt_list));

	return fence;
err:
	sync_fence_free_pts(fence);
	kfree(fence);
	return NULL;
}
EXPORT_SYMBOL(sync_fence_merge);

static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			list_del(pos);
			waiter->callback(fence, waiter);
		}
		wake_up(&fence->wq);
	}
}

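/*
 * sync_fence_wait_async() - registers @waiter to be called back when
 * @fence signals.  Returns 0 if the waiter was queued, or the fence's
 * current status (>0 signaled, <0 error) if it has already signaled, in
 * which case the callback is not queued.
 */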
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);

	if (fence->status) {
		err = fence->status;
		goto out;
	}

	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
out:
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return err;
}
EXPORT_SYMBOL(sync_fence_wait_async);

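/*
 * sync_fence_cancel_async() - removes a previously registered @waiter.
 * Returns 0 on success, or -ENOENT if the waiter was already dequeued,
 * i.e. its callback has fired or is about to.
 */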
int sync_fence_cancel_async(struct sync_fence *fence,
			     struct sync_fence_waiter *waiter)
{
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int ret = -ENOENT;

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * Make sure waiter is still in waiter_list because it is possible for
	 * the waiter to be removed from the list while the callback is still
	 * pending.
	 */
	list_for_each_safe(pos, n, &fence->waiter_list_head) {
		struct sync_fence_waiter *list_waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		if (list_waiter == waiter) {
			list_del(pos);
			ret = 0;
			break;
		}
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	return ret;
}
EXPORT_SYMBOL(sync_fence_cancel_async);

static bool sync_fence_check(struct sync_fence *fence)
{
	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();
	return fence->status != 0;
}

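/*
 * sync_fence_wait() - waits interruptibly for up to @timeout ms for @fence
 * to signal.  A negative @timeout waits indefinitely; a zero @timeout just
 * checks the current status.  Returns 0 once the fence has signaled,
 * -ETIME if it has not (or the wait timed out), the fence's error status,
 * or -ERESTARTSYS if interrupted.
 */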
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err = 0;
	struct sync_pt *pt;

	trace_sync_wait(fence, 1);
	list_for_each_entry(pt, &fence->pt_list_head, pt_list)
		trace_sync_pt(pt);

	if (timeout > 0) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       sync_fence_check(fence),
						       timeout);
	} else if (timeout < 0) {
		err = wait_event_interruptible(fence->wq,
					       sync_fence_check(fence));
	}
	trace_sync_wait(fence, 0);

	if (err < 0)
		return err;

	if (fence->status < 0) {
		pr_info("fence error %d on [%p]\n", fence->status, fence);
		sync_dump();
		return fence->status;
	}

	if (fence->status == 0) {
		if (timeout > 0) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	return 0;
}
EXPORT_SYMBOL(sync_fence_wait);

static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);

	sync_fence_free_pts(fence);

	kfree(fence);
}

static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	/*
	 * We need to remove all ways to access this fence before dropping
	 * our ref.
	 *
	 * start with its membership in the global fence list
	 */
	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	/*
	 * remove its pts from their parents so that sync_timeline_signal()
	 * can't reference the fence.
	 */
	sync_fence_detach_pts(fence);

	kref_put(&fence->kref, sync_fence_free);

	return 0;
}

static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
{
	struct sync_fence *fence = file->private_data;

	poll_wait(file, &fence->wq, wait);

	/*
	 * Make sure that reads to fence->status are ordered with the
	 * wait queue event triggering
	 */
	smp_rmb();

	if (fence->status == 1)
		return POLLIN;
	else if (fence->status < 0)
		return POLLERR;
	else
		return 0;
}

static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}

static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}

static int sync_fill_pt_info(struct sync_pt *pt, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (pt->parent->ops->fill_driver_data) {
		ret = pt->parent->ops->fill_driver_data(pt, info->driver_data,
							size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, pt->parent->name, sizeof(info->obj_name));
	strlcpy(info->driver_name, pt->parent->ops->driver_name,
		sizeof(info->driver_name));
	info->status = pt->status;
	info->timestamp_ns = ktime_to_ns(pt->timestamp);

	return info->len;
}

static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	struct list_head *pos;
	__u32 size;
	__u32 len = 0;
	int ret;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = fence->status;
	len = sizeof(struct sync_fence_info_data);

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}

static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	else if (status == 0)
		return "active";
	else
		return "error";
}

static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;

	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		struct timeval tv = ktime_to_timeval(pt->timestamp);

		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->timeline_value_str &&
	    pt->parent->ops->pt_value_str) {
		char value[64];

		pt->parent->ops->pt_value_str(pt, value, sizeof(value));
		seq_printf(s, ": %s", value);
		if (fence) {
			pt->parent->ops->timeline_value_str(pt->parent, value,
						    sizeof(value));
			seq_printf(s, " / %s", value);
		}
	} else if (pt->parent->ops->print_pt) {
		seq_puts(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_puts(s, "\n");
}

static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->timeline_value_str) {
		char value[64];

		obj->ops->timeline_value_str(obj, value, sizeof(value));
		seq_printf(s, ": %s", value);
	} else if (obj->ops->print_obj) {
		seq_puts(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_puts(s, "\n");

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);

		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}

static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "[%p] %s: %s\n", fence, fence->name,
		   sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);

		sync_print_pt(s, pt, true);
	}

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF\n", waiter->callback);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}

static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_puts(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_puts(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_puts(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	return 0;
}

static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}

static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static __init int sync_debugfs_init(void)
{
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}
late_initcall(sync_debugfs_init);

#define DUMP_CHUNK 256
static char sync_dump_buf[64 * 1024];
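/*
 * sync_dump() - renders the same report as the debugfs file into a static
 * buffer and logs it in DUMP_CHUNK-sized pieces, presumably to stay within
 * printk's per-call message size limit.
 */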
static void sync_dump(void)
{
	struct seq_file s = {
		.buf = sync_dump_buf,
		.size = sizeof(sync_dump_buf) - 1,
	};
	int i;

	sync_debugfs_show(&s, NULL);

	for (i = 0; i < s.count; i += DUMP_CHUNK) {
		if ((s.count - i) > DUMP_CHUNK) {
			char c = s.buf[i + DUMP_CHUNK];

			s.buf[i + DUMP_CHUNK] = 0;
			pr_cont("%s", s.buf + i);
			s.buf[i + DUMP_CHUNK] = c;
		} else {
			s.buf[s.count] = 0;
			pr_cont("%s", s.buf + i);
		}
	}
}
#else
static void sync_dump(void)
{
}
#endif