/* sync.c revision af7582f293cdc29999d05f75b1ec835ffa43cb68 */
/*
 * drivers/base/sync.c
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
16
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include "sync.h"
28
29static void sync_fence_signal_pt(struct sync_pt *pt);
30static int _sync_pt_has_signaled(struct sync_pt *pt);
31
32static LIST_HEAD(sync_timeline_list_head);
33static DEFINE_SPINLOCK(sync_timeline_list_lock);
34
35static LIST_HEAD(sync_fence_list_head);
36static DEFINE_SPINLOCK(sync_fence_list_lock);
37
38struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
39					   int size, const char *name)
40{
41	struct sync_timeline *obj;
42	unsigned long flags;
43
44	if (size < sizeof(struct sync_timeline))
45		return NULL;
46
47	obj = kzalloc(size, GFP_KERNEL);
48	if (obj == NULL)
49		return NULL;
50
51	obj->ops = ops;
52	strlcpy(obj->name, name, sizeof(obj->name));
53
54	INIT_LIST_HEAD(&obj->child_list_head);
55	spin_lock_init(&obj->child_list_lock);
56
57	INIT_LIST_HEAD(&obj->active_list_head);
58	spin_lock_init(&obj->active_list_lock);
59
60	spin_lock_irqsave(&sync_timeline_list_lock, flags);
61	list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
62	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
63
64	return obj;
65}
66
/*
 * Final teardown of a timeline: let the driver release its private
 * state, unlink from the global debugfs list, and free the memory.
 * Only called once the timeline is destroyed and childless (see
 * sync_timeline_destroy() and sync_timeline_remove_pt()).
 */
static void sync_timeline_free(struct sync_timeline *obj)
{
	unsigned long flags;

	/* release_obj is an optional driver hook */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_del(&obj->sync_timeline_list);
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	kfree(obj);
}
80
/*
 * Mark @obj destroyed.  If it has no remaining child pts it is freed
 * immediately; otherwise it is signaled so pending pts error out with
 * -ENOENT (see _sync_pt_has_signaled()), and the removal of the last
 * child frees it (see sync_timeline_remove_pt()).
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	unsigned long flags;
	bool needs_freeing;

	/* decide under child_list_lock so a concurrent pt add/remove
	 * sees a consistent destroyed/child-count pair */
	spin_lock_irqsave(&obj->child_list_lock, flags);
	obj->destroyed = true;
	needs_freeing = list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
	else
		sync_timeline_signal(obj);
}
96
97static void sync_timeline_add_pt(struct sync_timeline *obj, struct sync_pt *pt)
98{
99	unsigned long flags;
100
101	pt->parent = obj;
102
103	spin_lock_irqsave(&obj->child_list_lock, flags);
104	list_add_tail(&pt->child_list, &obj->child_list_head);
105	spin_unlock_irqrestore(&obj->child_list_lock, flags);
106}
107
/*
 * Detach @pt from its parent timeline's active and child lists.  If the
 * parent was already destroyed and this was its last child, the parent
 * is freed here as well.
 */
static void sync_timeline_remove_pt(struct sync_pt *pt)
{
	struct sync_timeline *obj = pt->parent;
	unsigned long flags;
	bool needs_freeing;

	/* a pt may or may not still be queued as active */
	spin_lock_irqsave(&obj->active_list_lock, flags);
	if (!list_empty(&pt->active_list))
		list_del_init(&pt->active_list);
	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_del(&pt->child_list);
	needs_freeing = obj->destroyed && list_empty(&obj->child_list_head);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);

	if (needs_freeing)
		sync_timeline_free(obj);
}
127
/*
 * Called by the driver when the timeline advances: re-poll every active
 * pt and signal the fences of those that have now fired.
 */
void sync_timeline_signal(struct sync_timeline *obj)
{
	unsigned long flags;
	LIST_HEAD(signaled_pts);
	struct list_head *pos, *n;

	spin_lock_irqsave(&obj->active_list_lock, flags);

	/* collect the newly signaled pts onto a private list ... */
	list_for_each_safe(pos, n, &obj->active_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		if (_sync_pt_has_signaled(pt))
			list_move(pos, &signaled_pts);
	}

	spin_unlock_irqrestore(&obj->active_list_lock, flags);

	/* ... and dispatch them with active_list_lock dropped, so fence
	 * waiter callbacks do not run under this lock */
	list_for_each_safe(pos, n, &signaled_pts) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, active_list);

		list_del_init(pos);
		sync_fence_signal_pt(pt);
	}
}
154
155struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size)
156{
157	struct sync_pt *pt;
158
159	if (size < sizeof(struct sync_pt))
160		return NULL;
161
162	pt = kzalloc(size, GFP_KERNEL);
163	if (pt == NULL)
164		return NULL;
165
166	INIT_LIST_HEAD(&pt->active_list);
167	sync_timeline_add_pt(parent, pt);
168
169	return pt;
170}
171
/*
 * Free @pt: let the driver drop its per-pt state, detach the pt from
 * its parent timeline, then release the memory.
 */
void sync_pt_free(struct sync_pt *pt)
{
	/* free_pt is an optional driver hook */
	if (pt->parent->ops->free_pt)
		pt->parent->ops->free_pt(pt);

	sync_timeline_remove_pt(pt);

	kfree(pt);
}
181
/*
 * Refresh and return pt->status: >0 signaled, 0 pending, <0 error.
 * call with pt->parent->active_list_lock held
 */
static int _sync_pt_has_signaled(struct sync_pt *pt)
{
	int old_status = pt->status;

	/* only re-query the driver while the pt is still pending */
	if (!pt->status)
		pt->status = pt->parent->ops->has_signaled(pt);

	/* a pending pt on a destroyed timeline can never fire: error out */
	if (!pt->status && pt->parent->destroyed)
		pt->status = -ENOENT;

	/* stamp the moment of the pending -> signaled/error transition */
	if (pt->status != old_status)
		pt->timestamp = ktime_get();

	return pt->status;
}
198
199static struct sync_pt *sync_pt_dup(struct sync_pt *pt)
200{
201	return pt->parent->ops->dup(pt);
202}
203
204/* Adds a sync pt to the active queue.  Called when added to a fence */
205static void sync_pt_activate(struct sync_pt *pt)
206{
207	struct sync_timeline *obj = pt->parent;
208	unsigned long flags;
209	int err;
210
211	spin_lock_irqsave(&obj->active_list_lock, flags);
212
213	err = _sync_pt_has_signaled(pt);
214	if (err != 0)
215		goto out;
216
217	list_add_tail(&pt->active_list, &obj->active_list_head);
218
219out:
220	spin_unlock_irqrestore(&obj->active_list_lock, flags);
221}
222
/* fence file operations; bodies are defined further down */
static int sync_fence_release(struct inode *inode, struct file *file);
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg);


/* ops backing the anonymous inode handed to userspace for each fence */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.unlocked_ioctl = sync_fence_ioctl,
};
232
233static struct sync_fence *sync_fence_alloc(const char *name)
234{
235	struct sync_fence *fence;
236	unsigned long flags;
237
238	fence = kzalloc(sizeof(struct sync_fence), GFP_KERNEL);
239	if (fence == NULL)
240		return NULL;
241
242	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
243					 fence, 0);
244	if (fence->file == NULL)
245		goto err;
246
247	strlcpy(fence->name, name, sizeof(fence->name));
248
249	INIT_LIST_HEAD(&fence->pt_list_head);
250	INIT_LIST_HEAD(&fence->waiter_list_head);
251	spin_lock_init(&fence->waiter_list_lock);
252
253	init_waitqueue_head(&fence->wq);
254
255	spin_lock_irqsave(&sync_fence_list_lock, flags);
256	list_add_tail(&fence->sync_fence_list, &sync_fence_list_head);
257	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
258
259	return fence;
260
261err:
262	kfree(fence);
263	return NULL;
264}
265
266/* TODO: implement a create which takes more that one sync_pt */
267struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
268{
269	struct sync_fence *fence;
270
271	if (pt->fence)
272		return NULL;
273
274	fence = sync_fence_alloc(name);
275	if (fence == NULL)
276		return NULL;
277
278	pt->fence = fence;
279	list_add(&pt->pt_list, &fence->pt_list_head);
280	sync_pt_activate(pt);
281
282	return fence;
283}
284
285static int sync_fence_copy_pts(struct sync_fence *dst, struct sync_fence *src)
286{
287	struct list_head *pos;
288
289	list_for_each(pos, &src->pt_list_head) {
290		struct sync_pt *orig_pt =
291			container_of(pos, struct sync_pt, pt_list);
292		struct sync_pt *new_pt = sync_pt_dup(orig_pt);
293
294		if (new_pt == NULL)
295			return -ENOMEM;
296
297		new_pt->fence = dst;
298		list_add(&new_pt->pt_list, &dst->pt_list_head);
299		sync_pt_activate(new_pt);
300	}
301
302	return 0;
303}
304
305static void sync_fence_free_pts(struct sync_fence *fence)
306{
307	struct list_head *pos, *n;
308
309	list_for_each_safe(pos, n, &fence->pt_list_head) {
310		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
311		sync_pt_free(pt);
312	}
313}
314
315struct sync_fence *sync_fence_fdget(int fd)
316{
317	struct file *file = fget(fd);
318
319	if (file == NULL)
320		return NULL;
321
322	if (file->f_op != &sync_fence_fops)
323		goto err;
324
325	return file->private_data;
326
327err:
328	fput(file);
329	return NULL;
330}
331
/* Drop a fence reference (e.g. the one taken by sync_fence_fdget()). */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
336
/* Publish @fence to userspace by binding its file to the reserved @fd. */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
341
342static int sync_fence_get_status(struct sync_fence *fence)
343{
344	struct list_head *pos;
345	int status = 1;
346
347	list_for_each(pos, &fence->pt_list_head) {
348		struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
349		int pt_status = pt->status;
350
351		if (pt_status < 0) {
352			status = pt_status;
353			break;
354		} else if (status == 1) {
355			status = pt_status;
356		}
357	}
358
359	return status;
360}
361
362struct sync_fence *sync_fence_merge(const char *name,
363				    struct sync_fence *a, struct sync_fence *b)
364{
365	struct sync_fence *fence;
366	int err;
367
368	fence = sync_fence_alloc(name);
369	if (fence == NULL)
370		return NULL;
371
372	err = sync_fence_copy_pts(fence, a);
373	if (err < 0)
374		goto err;
375
376	err = sync_fence_copy_pts(fence, b);
377	if (err < 0)
378		goto err;
379
380	fence->status = sync_fence_get_status(fence);
381
382	return fence;
383err:
384	sync_fence_free_pts(fence);
385	kfree(fence);
386	return NULL;
387}
388
/*
 * Called when @pt fires: recompute the owning fence's aggregate status
 * and, on the fence's single 0 -> signaled/error transition, run and
 * free all registered async waiters and wake sleepers in
 * sync_fence_wait().
 */
static void sync_fence_signal_pt(struct sync_pt *pt)
{
	LIST_HEAD(signaled_waiters);
	struct sync_fence *fence = pt->fence;
	struct list_head *pos;
	struct list_head *n;
	unsigned long flags;
	int status;

	status = sync_fence_get_status(fence);

	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	/*
	 * this should protect against two threads racing on the signaled
	 * false -> true transition
	 */
	if (status && !fence->status) {
		/* we won the transition: claim all waiters under the lock */
		list_for_each_safe(pos, n, &fence->waiter_list_head)
			list_move(pos, &signaled_waiters);

		fence->status = status;
	} else {
		/* still pending, or another thread signaled first */
		status = 0;
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);

	if (status) {
		/* run callbacks with waiter_list_lock dropped */
		list_for_each_safe(pos, n, &signaled_waiters) {
			struct sync_fence_waiter *waiter =
				container_of(pos, struct sync_fence_waiter,
					     waiter_list);

			waiter->callback(fence, waiter->callback_data);
			list_del(pos);
			kfree(waiter);
		}
		wake_up(&fence->wq);
	}
}
428
429int sync_fence_wait_async(struct sync_fence *fence,
430			  void (*callback)(struct sync_fence *, void *data),
431			  void *callback_data)
432{
433	struct sync_fence_waiter *waiter;
434	unsigned long flags;
435	int err = 0;
436
437	waiter = kzalloc(sizeof(struct sync_fence_waiter), GFP_KERNEL);
438	if (waiter == NULL)
439		return -ENOMEM;
440
441	waiter->callback = callback;
442	waiter->callback_data = callback_data;
443
444	spin_lock_irqsave(&fence->waiter_list_lock, flags);
445
446	if (fence->status) {
447		kfree(waiter);
448		err = fence->status;
449		goto out;
450	}
451
452	list_add_tail(&waiter->waiter_list, &fence->waiter_list_head);
453out:
454	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
455
456	return err;
457}
458
/*
 * Block until @fence signals.  @timeout is in milliseconds; 0 means
 * wait with no time limit.
 *
 * Returns 0 once signaled, a negative pt error (e.g. -ENOENT after a
 * timeline destroy), -ETIME if the timeout expired, or the negative
 * error from an interrupted wait.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	int err;

	if (timeout) {
		timeout = msecs_to_jiffies(timeout);
		err = wait_event_interruptible_timeout(fence->wq,
						       fence->status != 0,
						       timeout);
	} else {
		err = wait_event_interruptible(fence->wq, fence->status != 0);
	}

	if (err < 0)
		return err;

	if (fence->status < 0)
		return fence->status;

	/* woke with the fence still pending: the timeout expired */
	if (fence->status == 0)
		return -ETIME;

	return 0;
}
483
/*
 * File release hook, run when the last reference to the fence's fd is
 * dropped: free its pts, unlink it from the global debugfs list, and
 * free the fence itself.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;
	unsigned long flags;

	sync_fence_free_pts(fence);

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_del(&fence->sync_fence_list);
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);

	kfree(fence);

	return 0;
}
499
500static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
501{
502	__s32 value;
503
504	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
505		return -EFAULT;
506
507	return sync_fence_wait(fence, value);
508}
509
510static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
511{
512	int fd = get_unused_fd();
513	int err;
514	struct sync_fence *fence2, *fence3;
515	struct sync_merge_data data;
516
517	if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
518		return -EFAULT;
519
520	fence2 = sync_fence_fdget(data.fd2);
521	if (fence2 == NULL) {
522		err = -ENOENT;
523		goto err_put_fd;
524	}
525
526	data.name[sizeof(data.name) - 1] = '\0';
527	fence3 = sync_fence_merge(data.name, fence, fence2);
528	if (fence3 == NULL) {
529		err = -ENOMEM;
530		goto err_put_fence2;
531	}
532
533	data.fence = fd;
534	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
535		err = -EFAULT;
536		goto err_put_fence3;
537	}
538
539	sync_fence_install(fence3, fd);
540	sync_fence_put(fence2);
541	return 0;
542
543err_put_fence3:
544	sync_fence_put(fence3);
545
546err_put_fence2:
547	sync_fence_put(fence2);
548
549err_put_fd:
550	put_unused_fd(fd);
551	return err;
552}
553
554
555static long sync_fence_ioctl(struct file *file, unsigned int cmd,
556			     unsigned long arg)
557{
558	struct sync_fence *fence = file->private_data;
559	switch (cmd) {
560	case SYNC_IOC_WAIT:
561		return sync_fence_ioctl_wait(fence, arg);
562
563	case SYNC_IOC_MERGE:
564		return sync_fence_ioctl_merge(fence, arg);
565
566	default:
567		return -ENOTTY;
568	}
569}
570
571#ifdef CONFIG_DEBUG_FS
/* Map a pt/fence status to a human-readable label for debugfs. */
static const char *sync_status_str(int status)
{
	if (status > 0)
		return "signaled";
	if (status == 0)
		return "active";
	return "error";
}
581
/*
 * debugfs helper: print one pt.  @fence indicates the caller's context:
 * true when listed under a fence (prefix with the parent timeline's
 * name), false when listed under the timeline itself.
 */
static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
{
	int status = pt->status;
	seq_printf(s, "  %s%spt %s",
		   fence ? pt->parent->name : "",
		   fence ? "_" : "",
		   sync_status_str(status));
	if (pt->status) {
		/* already signaled/errored: show when that happened */
		struct timeval tv = ktime_to_timeval(pt->timestamp);
		seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
	}

	if (pt->parent->ops->print_pt) {
		/* optional driver-specific detail */
		seq_printf(s, ": ");
		pt->parent->ops->print_pt(s, pt);
	}

	seq_printf(s, "\n");
}
601
/* debugfs helper: print a timeline and every pt on its child list. */
static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);

	if (obj->ops->print_obj) {
		/* optional driver-specific detail */
		seq_printf(s, ": ");
		obj->ops->print_obj(s, obj);
	}

	seq_printf(s, "\n");

	/* hold child_list_lock so pts cannot be removed while printing */
	spin_lock_irqsave(&obj->child_list_lock, flags);
	list_for_each(pos, &obj->child_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, child_list);
		sync_print_pt(s, pt, false);
	}
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
}
624
/* debugfs helper: print a fence, its pts, and any pending waiters. */
static void sync_print_fence(struct seq_file *s, struct sync_fence *fence)
{
	struct list_head *pos;
	unsigned long flags;

	seq_printf(s, "%s: %s\n", fence->name, sync_status_str(fence->status));

	list_for_each(pos, &fence->pt_list_head) {
		struct sync_pt *pt =
			container_of(pos, struct sync_pt, pt_list);
		sync_print_pt(s, pt, true);
	}

	/* waiters are added/removed under waiter_list_lock */
	spin_lock_irqsave(&fence->waiter_list_lock, flags);
	list_for_each(pos, &fence->waiter_list_head) {
		struct sync_fence_waiter *waiter =
			container_of(pos, struct sync_fence_waiter,
				     waiter_list);

		seq_printf(s, "waiter %pF %p\n", waiter->callback,
			   waiter->callback_data);
	}
	spin_unlock_irqrestore(&fence->waiter_list_lock, flags);
}
649
/*
 * seq_file show callback for the "sync" debugfs file: dump every
 * registered timeline followed by every live fence.
 */
static int sync_debugfs_show(struct seq_file *s, void *unused)
{
	unsigned long flags;
	struct list_head *pos;

	seq_printf(s, "objs:\n--------------\n");

	spin_lock_irqsave(&sync_timeline_list_lock, flags);
	list_for_each(pos, &sync_timeline_list_head) {
		struct sync_timeline *obj =
			container_of(pos, struct sync_timeline,
				     sync_timeline_list);

		sync_print_obj(s, obj);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_timeline_list_lock, flags);

	seq_printf(s, "fences:\n--------------\n");

	spin_lock_irqsave(&sync_fence_list_lock, flags);
	list_for_each(pos, &sync_fence_list_head) {
		struct sync_fence *fence =
			container_of(pos, struct sync_fence, sync_fence_list);

		sync_print_fence(s, fence);
		seq_printf(s, "\n");
	}
	spin_unlock_irqrestore(&sync_fence_list_lock, flags);
	return 0;
}
681
/* debugfs open hook: hand reads off to sync_debugfs_show() via seq_file */
static int sync_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, sync_debugfs_show, inode->i_private);
}
686
/* file_operations for the read-only "sync" debugfs dump file */
static const struct file_operations sync_debugfs_fops = {
	.open           = sync_debugfs_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
693
/* Create the debug-only /sys/kernel/debug/sync dump file at boot. */
static __init int sync_debugfs_init(void)
{
	/* creation failure is harmless: the file is purely diagnostic */
	debugfs_create_file("sync", S_IRUGO, NULL, NULL, &sync_debugfs_fops);
	return 0;
}

late_initcall(sync_debugfs_init);
701
702#endif
703