/* sync.c revision 7b1046e040670ee9fb2abe110fb24b33d91ada86 */
1/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/debugfs.h>
18#include <linux/export.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24#include <linux/seq_file.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27#include <linux/anon_inodes.h>
28
29#include "sync.h"
30
31#define CREATE_TRACE_POINTS
32#include "trace/sync.h"
33
34static const struct fence_ops android_fence_ops;
35static const struct file_operations sync_fence_fops;
36
/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specific sync_timeline_ops for the implementation
 * @size:	size to allocate; must be at least sizeof(struct sync_timeline)
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline driven by @ops.  @size bytes are allocated so
 * the implementation can keep private data after the generic struct.
 * Returns the new timeline or NULL on error.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	/* the allocation must be big enough to hold the base struct */
	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	/* each timeline gets its own fence context so seqnos are comparable */
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
63
/* kref release: tears down a timeline once the last reference is dropped */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	/* remove debugfs visibility before freeing */
	sync_timeline_debug_remove(obj);

	/* let the implementation release its own state */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}
76
/* take a reference on the timeline (paired with sync_timeline_put()) */
static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}
81
/* drop a reference; frees the timeline via sync_timeline_free() at zero */
static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}
86
/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	sync_timeline to destroy
 *
 * Marks @obj destroyed, signals any remaining fences, and drops the
 * creator's reference.  The object itself is only freed once all of its
 * sync_pts have released their timeline references.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
103
104void sync_timeline_signal(struct sync_timeline *obj)
105{
106	unsigned long flags;
107	LIST_HEAD(signaled_pts);
108	struct sync_pt *pt, *next;
109
110	trace_sync_timeline(obj);
111
112	spin_lock_irqsave(&obj->child_list_lock, flags);
113
114	list_for_each_entry_safe(pt, next, &obj->active_list_head,
115				 active_list) {
116		if (fence_is_signaled_locked(&pt->base))
117			list_del(&pt->active_list);
118	}
119
120	spin_unlock_irqrestore(&obj->child_list_lock, flags);
121}
122EXPORT_SYMBOL(sync_timeline_signal);
123
/**
 * sync_pt_create() - creates a sync pt
 * @obj:	parent sync_timeline
 * @size:	size to allocate; must be at least sizeof(struct sync_pt)
 *
 * Allocates a point on @obj's fence context with the next sequence number,
 * links it onto the timeline's child list, and takes a timeline reference
 * (dropped in android_fence_release()).  Returns NULL on error.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	sync_timeline_get(obj);
	/* the fence reuses the timeline's child_list_lock as its lock */
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
146
/* drop the caller's fence reference; the point is freed on the last put */
void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);
152
153static struct sync_fence *sync_fence_alloc(int size, const char *name)
154{
155	struct sync_fence *fence;
156
157	fence = kzalloc(size, GFP_KERNEL);
158	if (fence == NULL)
159		return NULL;
160
161	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
162					 fence, 0);
163	if (IS_ERR(fence->file))
164		goto err;
165
166	kref_init(&fence->kref);
167	strlcpy(fence->name, name, sizeof(fence->name));
168
169	init_waitqueue_head(&fence->wq);
170
171	return fence;
172
173err:
174	kfree(fence);
175	return NULL;
176}
177
/*
 * Per-point fence callback.  fence->status counts still-pending points;
 * each signaled point decrements it, and the wait queue is woken when the
 * count reaches zero (all points signaled).
 */
static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
{
	struct sync_fence_cb *check;
	struct sync_fence *fence;

	check = container_of(cb, struct sync_fence_cb, cb);
	fence = check->fence;

	if (atomic_dec_and_test(&fence->status))
		wake_up_all(&fence->wq);
}
189
/* TODO: implement a create which takes more that one sync_pt */
/**
 * sync_fence_create() - creates a sync fence from a single sync_pt
 * @name:	name of fence to create
 * @pt:		sync_pt to wrap; the fence takes its own reference on it
 *
 * Returns the new fence or NULL on allocation failure.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	/* room for exactly one callback slot */
	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	/* status = number of not-yet-signaled points */
	atomic_set(&fence->status, 1);

	fence_get(&pt->base);
	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	/*
	 * fence_add_callback() fails only if the point already signaled,
	 * in which case the pending count drops immediately.
	 */
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
214
215struct sync_fence *sync_fence_fdget(int fd)
216{
217	struct file *file = fget(fd);
218
219	if (file == NULL)
220		return NULL;
221
222	if (file->f_op != &sync_fence_fops)
223		goto err;
224
225	return file->private_data;
226
227err:
228	fput(file);
229	return NULL;
230}
231EXPORT_SYMBOL(sync_fence_fdget);
232
/* drop the file reference backing the fence (pairs with sync_fence_fdget) */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
238
/* publish the fence's file in the caller's fd table under @fd */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
244
/*
 * Add point @pt to merged fence @fence at slot *i.  The slot is consumed
 * (reference taken, *i advanced) only when the callback installs, i.e.
 * the point has not yet signaled; already-signaled points are skipped.
 */
static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}
256
/**
 * sync_fence_merge() - merge two fences
 * @name:	name of new fence
 * @a:		fence a
 * @b:		fence b
 *
 * Creates a new fence containing copies of all the points in both @a and
 * @b; @a and @b remain valid, independent fences.  Returns the merged
 * fence or NULL on allocation failure.
 */
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	/* assume every point is pending; corrected below for skipped ones */
	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			/*
			 * Same context: keep only the later point.  The
			 * unsigned subtraction tolerates seqno wraparound.
			 */
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	/* append whatever remains of the longer input */
	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	/* points skipped as already-signaled do not count as pending */
	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);
315
/*
 * Wait-queue wake function for async waiters: dequeue the waiter and run
 * its callback with the owning fence (stashed in work.private by
 * sync_fence_wait_async()).
 */
int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
				 int wake_flags, void *key)
{
	struct sync_fence_waiter *wait;

	wait = container_of(curr, struct sync_fence_waiter, work);
	/* list_del_init so sync_fence_cancel_async() sees it as fired */
	list_del_init(&wait->work.task_list);

	wait->callback(wait->work.private, wait);
	return 1;
}
327
/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct
 *
 * Returns 1 if @fence has already signaled, a negative error if the fence
 * is in an error state, and 0 if the wait was queued.
 */
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	waiter->work.private = fence;

	/*
	 * Re-check status under the waitqueue lock so a concurrent signal
	 * cannot slip between the check above and the enqueue.
	 */
	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
355
356int sync_fence_cancel_async(struct sync_fence *fence,
357			     struct sync_fence_waiter *waiter)
358{
359	unsigned long flags;
360	int ret = 0;
361
362	spin_lock_irqsave(&fence->wq.lock, flags);
363	if (!list_empty(&waiter->work.task_list))
364		list_del_init(&waiter->work.task_list);
365	else
366		ret = -ENOENT;
367	spin_unlock_irqrestore(&fence->wq.lock, flags);
368	return ret;
369}
370EXPORT_SYMBOL(sync_fence_cancel_async);
371
/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms, or negative to wait indefinitely
 *
 * Returns 0 on success, -ETIME on timeout, -ERESTARTSYS if interrupted,
 * or the fence's negative error status.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	/* status <= 0 means fully signaled (0) or error (<0) */
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0)
		return ret;
	else if (ret == 0) {
		/* timeout == 0 is a poll: fail quietly */
		if (timeout) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%p]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);
409
410static const char *android_fence_get_driver_name(struct fence *fence)
411{
412	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
413	struct sync_timeline *parent = sync_pt_parent(pt);
414
415	return parent->ops->driver_name;
416}
417
418static const char *android_fence_get_timeline_name(struct fence *fence)
419{
420	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
421	struct sync_timeline *parent = sync_pt_parent(pt);
422
423	return parent->name;
424}
425
/* fence_ops .release: last reference to the point is gone */
static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	/* fence->lock is the parent timeline's child_list_lock */
	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	/*
	 * An unsignaled point may still be on the active list; warn and
	 * unlink it.
	 * NOTE(review): this check relies on the signal path leaving the
	 * node re-initialized (list_del_init); a plain list_del there
	 * leaves a poisoned node that reads as non-empty here, and the
	 * second list_del would corrupt memory — verify the signal path.
	 */
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	/* drop the timeline reference taken in sync_pt_create() */
	sync_timeline_put(parent);
	fence_free(&pt->base);
}
444
/* fence_ops .signaled: query the driver; negative result marks an error */
static bool android_fence_signaled(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	int ret;

	ret = parent->ops->has_signaled(pt);
	if (ret < 0)
		/* propagate driver errors through the fence status */
		fence->status = ret;
	return ret;
}
456
/*
 * fence_ops .enable_signaling (called with fence->lock held): queue the
 * point on the timeline's active list so sync_timeline_signal() will
 * check it.  Returns false if the point has already signaled.
 */
static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}
468
469static int android_fence_fill_driver_data(struct fence *fence,
470					  void *data, int size)
471{
472	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
473	struct sync_timeline *parent = sync_pt_parent(pt);
474
475	if (!parent->ops->fill_driver_data)
476		return 0;
477	return parent->ops->fill_driver_data(pt, data, size);
478}
479
480static void android_fence_value_str(struct fence *fence,
481				    char *str, int size)
482{
483	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
484	struct sync_timeline *parent = sync_pt_parent(pt);
485
486	if (!parent->ops->pt_value_str) {
487		if (size)
488			*str = 0;
489		return;
490	}
491	parent->ops->pt_value_str(pt, str, size);
492}
493
494static void android_fence_timeline_value_str(struct fence *fence,
495					     char *str, int size)
496{
497	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
498	struct sync_timeline *parent = sync_pt_parent(pt);
499
500	if (!parent->ops->timeline_value_str) {
501		if (size)
502			*str = 0;
503		return;
504	}
505	parent->ops->timeline_value_str(parent, str, size);
506}
507
/* glue between the generic fence API and driver sync_timeline_ops */
static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};
519
/* kref release for a sync_fence: detach callbacks and drop point refs */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i, status = atomic_read(&fence->status);

	for (i = 0; i < fence->num_fences; ++i) {
		/*
		 * While any point is pending (status != 0) callbacks may
		 * still be registered; fence_remove_callback() is a no-op
		 * for callbacks that have already fired.
		 */
		if (status)
			fence_remove_callback(fence->cbs[i].sync_pt,
					      &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}
534
535static int sync_fence_release(struct inode *inode, struct file *file)
536{
537	struct sync_fence *fence = file->private_data;
538
539	sync_fence_debug_remove(fence);
540
541	kref_put(&fence->kref, sync_fence_free);
542	return 0;
543}
544
545static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
546{
547	struct sync_fence *fence = file->private_data;
548	int status;
549
550	poll_wait(file, &fence->wq, wait);
551
552	status = atomic_read(&fence->status);
553
554	if (!status)
555		return POLLIN;
556	else if (status < 0)
557		return POLLERR;
558	return 0;
559}
560
561static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
562{
563	__s32 value;
564
565	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
566		return -EFAULT;
567
568	return sync_fence_wait(fence, value);
569}
570
/* SYNC_IOC_MERGE: merge this fence with data.fd2 into a new fd */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* the user-supplied name may not be NUL-terminated */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	/* report the fd to userspace before publishing the file */
	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
619
/*
 * Serialize one fence point into @data as a sync_pt_info record.
 * Returns the record length written, or -ENOMEM if @size cannot hold
 * even the fixed-size part, or a driver error.
 */
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	if (fence->ops->fill_driver_data) {
		/* driver data is appended after the fixed struct */
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	/* ABI status: 1 = signaled, 0 = active, <0 = error */
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}
651
/* SYNC_IOC_FENCE_INFO: copy fence state + per-point records to userspace */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	/* cap the userspace-supplied buffer size to one page */
	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	data->status = atomic_read(&fence->status);
	/* ABI status: 1 = signaled, 0 = active, <0 = error */
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		/* appends one sync_pt_info record per point */
		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
703
/* ioctl dispatch for the sync fence file */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
723
/* file operations backing the anonymous sync_fence fd */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};
730
731