1/*
2 * drivers/base/sync.c
3 *
4 * Copyright (C) 2012 Google, Inc.
5 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/debugfs.h>
18#include <linux/export.h>
19#include <linux/file.h>
20#include <linux/fs.h>
21#include <linux/kernel.h>
22#include <linux/poll.h>
23#include <linux/sched.h>
24#include <linux/seq_file.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27#include <linux/anon_inodes.h>
28
29#include "sync.h"
30
31#define CREATE_TRACE_POINTS
32#include "trace/sync.h"
33
34static const struct fence_ops android_fence_ops;
35static const struct file_operations sync_fence_fops;
36
/**
 * sync_timeline_create() - creates a sync object
 * @ops:	specialization ops for the new timeline
 * @size:	bytes to allocate; must be >= sizeof(struct sync_timeline) so
 *		drivers can append implementation-specific data
 * @name:	timeline name (copied, truncated to fit obj->name)
 *
 * Allocates and initializes a refcounted sync_timeline and registers it
 * with debugfs.  Returns the new timeline or NULL on error.
 */
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
					   int size, const char *name)
{
	struct sync_timeline *obj;

	/* caller must leave room for at least the base struct */
	if (size < sizeof(struct sync_timeline))
		return NULL;

	obj = kzalloc(size, GFP_KERNEL);
	if (obj == NULL)
		return NULL;

	kref_init(&obj->kref);
	obj->ops = ops;
	/* one fence context shared by every sync_pt on this timeline */
	obj->context = fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	INIT_LIST_HEAD(&obj->child_list_head);
	INIT_LIST_HEAD(&obj->active_list_head);
	/* child_list_lock also serves as the fence lock (see sync_pt_create) */
	spin_lock_init(&obj->child_list_lock);

	sync_timeline_debug_add(obj);

	return obj;
}
EXPORT_SYMBOL(sync_timeline_create);
63
/*
 * kref release callback: tears a timeline down once the last reference
 * (creator's or a child sync_pt's) is dropped.
 */
static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	/* let the driver release any implementation-specific state */
	if (obj->ops->release_obj)
		obj->ops->release_obj(obj);

	kfree(obj);
}
76
/* take a reference on the timeline; paired with sync_timeline_put() */
static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}
81
/* drop a reference; the last put frees via sync_timeline_free() */
static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}
86
/**
 * sync_timeline_destroy() - destroys a sync object
 * @obj:	timeline to destroy
 *
 * Marks @obj destroyed, signals remaining fences and drops the
 * creator's reference.  The timeline itself is freed only after every
 * child sync_pt has released its own reference.
 */
void sync_timeline_destroy(struct sync_timeline *obj)
{
	obj->destroyed = true;
	/*
	 * Ensure timeline is marked as destroyed before
	 * changing timeline's fences status.
	 */
	smp_wmb();

	/*
	 * signal any children that their parent is going away.
	 */
	sync_timeline_signal(obj);
	sync_timeline_put(obj);
}
EXPORT_SYMBOL(sync_timeline_destroy);
103
104void sync_timeline_signal(struct sync_timeline *obj)
105{
106	unsigned long flags;
107	LIST_HEAD(signaled_pts);
108	struct sync_pt *pt, *next;
109
110	trace_sync_timeline(obj);
111
112	spin_lock_irqsave(&obj->child_list_lock, flags);
113
114	list_for_each_entry_safe(pt, next, &obj->active_list_head,
115				 active_list) {
116		if (fence_is_signaled_locked(&pt->base))
117			list_del_init(&pt->active_list);
118	}
119
120	spin_unlock_irqrestore(&obj->child_list_lock, flags);
121}
122EXPORT_SYMBOL(sync_timeline_signal);
123
/**
 * sync_pt_create() - creates a sync pt
 * @obj:	parent timeline
 * @size:	bytes to allocate; must be >= sizeof(struct sync_pt) so
 *		drivers can append implementation-specific data
 *
 * Creates a sync_pt as a child of @obj and links it onto the child
 * list.  The pt holds a reference on its parent until released.
 * Returns the new pt or NULL on error.
 */
struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size)
{
	unsigned long flags;
	struct sync_pt *pt;

	if (size < sizeof(struct sync_pt))
		return NULL;

	pt = kzalloc(size, GFP_KERNEL);
	if (pt == NULL)
		return NULL;

	spin_lock_irqsave(&obj->child_list_lock, flags);
	/* the pt pins its parent; dropped in android_fence_release() */
	sync_timeline_get(obj);
	/* seqno comes from the timeline's monotonically increasing value;
	 * the timeline's child_list_lock doubles as the fence lock */
	fence_init(&pt->base, &android_fence_ops, &obj->child_list_lock,
		   obj->context, ++obj->value);
	list_add_tail(&pt->child_list, &obj->child_list_head);
	INIT_LIST_HEAD(&pt->active_list);
	spin_unlock_irqrestore(&obj->child_list_lock, flags);
	return pt;
}
EXPORT_SYMBOL(sync_pt_create);
146
/*
 * Drop the creator's reference on a sync_pt; actual teardown happens in
 * the fence release callback (android_fence_release) on the last put.
 */
void sync_pt_free(struct sync_pt *pt)
{
	fence_put(&pt->base);
}
EXPORT_SYMBOL(sync_pt_free);
152
153static struct sync_fence *sync_fence_alloc(int size, const char *name)
154{
155	struct sync_fence *fence;
156
157	fence = kzalloc(size, GFP_KERNEL);
158	if (fence == NULL)
159		return NULL;
160
161	fence->file = anon_inode_getfile("sync_fence", &sync_fence_fops,
162					 fence, 0);
163	if (IS_ERR(fence->file))
164		goto err;
165
166	kref_init(&fence->kref);
167	strlcpy(fence->name, name, sizeof(fence->name));
168
169	init_waitqueue_head(&fence->wq);
170
171	return fence;
172
173err:
174	kfree(fence);
175	return NULL;
176}
177
178static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
179{
180	struct sync_fence_cb *check;
181	struct sync_fence *fence;
182
183	check = container_of(cb, struct sync_fence_cb, cb);
184	fence = check->fence;
185
186	if (atomic_dec_and_test(&fence->status))
187		wake_up_all(&fence->wq);
188}
189
/* TODO: implement a create which takes more that one sync_pt */
/**
 * sync_fence_create() - creates a sync fence
 * @name:	name of the new fence
 * @pt:	sync_pt to wrap; the fence takes over the caller's reference
 *	(sync_fence_free() does the matching fence_put())
 *
 * Returns the new fence or NULL on allocation failure.
 */
struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt)
{
	struct sync_fence *fence;

	fence = sync_fence_alloc(offsetof(struct sync_fence, cbs[1]), name);
	if (fence == NULL)
		return NULL;

	fence->num_fences = 1;
	/* one outstanding pt until its callback fires */
	atomic_set(&fence->status, 1);

	fence->cbs[0].sync_pt = &pt->base;
	fence->cbs[0].fence = fence;
	/* registration fails when pt already signaled: account for it now */
	if (fence_add_callback(&pt->base, &fence->cbs[0].cb,
			       fence_check_cb_func))
		atomic_dec(&fence->status);

	sync_fence_debug_add(fence);

	return fence;
}
EXPORT_SYMBOL(sync_fence_create);
213
214struct sync_fence *sync_fence_fdget(int fd)
215{
216	struct file *file = fget(fd);
217
218	if (file == NULL)
219		return NULL;
220
221	if (file->f_op != &sync_fence_fops)
222		goto err;
223
224	return file->private_data;
225
226err:
227	fput(file);
228	return NULL;
229}
230EXPORT_SYMBOL(sync_fence_fdget);
231
/*
 * Drop the file reference backing @fence; the final fput() releases the
 * fence itself through sync_fence_release().
 */
void sync_fence_put(struct sync_fence *fence)
{
	fput(fence->file);
}
EXPORT_SYMBOL(sync_fence_put);
237
/*
 * Install @fence's file into descriptor @fd, transferring the file
 * reference to the fd table.  @fd should come from
 * get_unused_fd_flags() (see sync_fence_ioctl_merge()).
 */
void sync_fence_install(struct sync_fence *fence, int fd)
{
	fd_install(fd, fence->file);
}
EXPORT_SYMBOL(sync_fence_install);
243
/*
 * Place @pt into @fence's callback slot *i and register the signal
 * callback.  On success a reference is taken on @pt and *i advances;
 * if fence_add_callback() fails (e.g. @pt has already signaled) the
 * slot is left to be reused by the next pt.
 */
static void sync_fence_add_pt(struct sync_fence *fence,
			      int *i, struct fence *pt)
{
	fence->cbs[*i].sync_pt = pt;
	fence->cbs[*i].fence = fence;

	if (!fence_add_callback(pt, &fence->cbs[*i].cb, fence_check_cb_func)) {
		fence_get(pt);
		(*i)++;
	}
}
255
/**
 * sync_fence_merge() - merge two fences
 * @name:	name of the new fence
 * @a:	fence a
 * @b:	fence b
 *
 * Creates a new fence that signals only when all pts of both @a and @b
 * have signaled.  Pts sharing a fence context are deduplicated, keeping
 * the later one.  Returns the merged fence or NULL on error.
 */
struct sync_fence *sync_fence_merge(const char *name,
				    struct sync_fence *a, struct sync_fence *b)
{
	int num_fences = a->num_fences + b->num_fences;
	struct sync_fence *fence;
	int i, i_a, i_b;
	unsigned long size = offsetof(struct sync_fence, cbs[num_fences]);

	fence = sync_fence_alloc(size, name);
	if (fence == NULL)
		return NULL;

	/* worst case: every slot used; corrected after the merge below */
	atomic_set(&fence->status, num_fences);

	/*
	 * Assume sync_fence a and b are both ordered and have no
	 * duplicates with the same context.
	 *
	 * If a sync_fence can only be created with sync_fence_merge
	 * and sync_fence_create, this is a reasonable assumption.
	 */
	/* merge-sort style walk, ordered by fence context */
	for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
		struct fence *pt_a = a->cbs[i_a].sync_pt;
		struct fence *pt_b = b->cbs[i_b].sync_pt;

		if (pt_a->context < pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_a);

			i_a++;
		} else if (pt_a->context > pt_b->context) {
			sync_fence_add_pt(fence, &i, pt_b);

			i_b++;
		} else {
			/*
			 * same context: keep the later pt; the unsigned
			 * subtraction is a wraparound-safe "a >= b"
			 * seqno comparison
			 */
			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
				sync_fence_add_pt(fence, &i, pt_a);
			else
				sync_fence_add_pt(fence, &i, pt_b);

			i_a++;
			i_b++;
		}
	}

	/* append whatever remains of the longer fence */
	for (; i_a < a->num_fences; i_a++)
		sync_fence_add_pt(fence, &i, a->cbs[i_a].sync_pt);

	for (; i_b < b->num_fences; i_b++)
		sync_fence_add_pt(fence, &i, b->cbs[i_b].sync_pt);

	/* slots skipped for already-signaled/duplicate pts don't count */
	if (num_fences > i)
		atomic_sub(num_fences - i, &fence->status);
	fence->num_fences = i;

	sync_fence_debug_add(fence);
	return fence;
}
EXPORT_SYMBOL(sync_fence_merge);
314
315int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
316				 int wake_flags, void *key)
317{
318	struct sync_fence_waiter *wait;
319
320	wait = container_of(curr, struct sync_fence_waiter, work);
321	list_del_init(&wait->work.task_list);
322
323	wait->callback(wait->work.private, wait);
324	return 1;
325}
326
/**
 * sync_fence_wait_async() - registers an async wait on the fence
 * @fence:	fence to wait on
 * @waiter:	waiter callback struct; its callback fires when the fence
 *		signals
 *
 * Returns 1 if @fence has already signaled (callback will NOT fire),
 * a negative value if the fence is in an error state, or 0 if the
 * waiter was queued.
 */
int sync_fence_wait_async(struct sync_fence *fence,
			  struct sync_fence_waiter *waiter)
{
	int err = atomic_read(&fence->status);
	unsigned long flags;

	if (err < 0)
		return err;

	if (!err)
		return 1;

	init_waitqueue_func_entry(&waiter->work, sync_fence_wake_up_wq);
	/* wake function retrieves the fence from work.private */
	waiter->work.private = fence;

	/*
	 * re-check status under the waitqueue lock so a concurrent
	 * signal between the read above and the enqueue can't be missed
	 */
	spin_lock_irqsave(&fence->wq.lock, flags);
	err = atomic_read(&fence->status);
	if (err > 0)
		__add_wait_queue_tail(&fence->wq, &waiter->work);
	spin_unlock_irqrestore(&fence->wq.lock, flags);

	if (err < 0)
		return err;

	/* err > 0: queued (0); err == 0: signaled meanwhile (1) */
	return !err;
}
EXPORT_SYMBOL(sync_fence_wait_async);
354
355int sync_fence_cancel_async(struct sync_fence *fence,
356			     struct sync_fence_waiter *waiter)
357{
358	unsigned long flags;
359	int ret = 0;
360
361	spin_lock_irqsave(&fence->wq.lock, flags);
362	if (!list_empty(&waiter->work.task_list))
363		list_del_init(&waiter->work.task_list);
364	else
365		ret = -ENOENT;
366	spin_unlock_irqrestore(&fence->wq.lock, flags);
367	return ret;
368}
369EXPORT_SYMBOL(sync_fence_cancel_async);
370
/**
 * sync_fence_wait() - wait on fence
 * @fence:	fence to wait on
 * @timeout:	timeout in ms; negative waits indefinitely, 0 polls once
 *
 * Returns 0 once the fence has signaled, -ETIME on timeout,
 * -ERESTARTSYS if interrupted, or the fence's negative error status.
 */
int sync_fence_wait(struct sync_fence *fence, long timeout)
{
	long ret;
	int i;

	if (timeout < 0)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout);

	/* trace begin/end of the wait plus each constituent pt */
	trace_sync_wait(fence, 1);
	for (i = 0; i < fence->num_fences; ++i)
		trace_sync_pt(fence->cbs[i].sync_pt);
	/* status counts unsignaled pts; <= 0 means done (or error) */
	ret = wait_event_interruptible_timeout(fence->wq,
					       atomic_read(&fence->status) <= 0,
					       timeout);
	trace_sync_wait(fence, 0);

	if (ret < 0) {
		return ret;
	} else if (ret == 0) {
		/* only log a real timeout, not a zero-timeout poll */
		if (timeout) {
			pr_info("fence timeout on [%p] after %dms\n", fence,
				jiffies_to_msecs(timeout));
			sync_dump();
		}
		return -ETIME;
	}

	/* woken: 0 on success, negative if the fence holds an error */
	ret = atomic_read(&fence->status);
	if (ret) {
		pr_info("fence error %ld on [%p]\n", ret, fence);
		sync_dump();
	}
	return ret;
}
EXPORT_SYMBOL(sync_fence_wait);
408
409static const char *android_fence_get_driver_name(struct fence *fence)
410{
411	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
412	struct sync_timeline *parent = sync_pt_parent(pt);
413
414	return parent->ops->driver_name;
415}
416
417static const char *android_fence_get_timeline_name(struct fence *fence)
418{
419	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
420	struct sync_timeline *parent = sync_pt_parent(pt);
421
422	return parent->name;
423}
424
/*
 * fence_ops release: final teardown of a sync_pt when its last
 * reference is dropped.  Unlinks the pt from its timeline (fence->lock
 * IS the timeline's child_list_lock, see sync_pt_create()), lets the
 * driver free its private data, and drops the pt's reference on the
 * parent timeline.
 */
static void android_fence_release(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	list_del(&pt->child_list);
	/* still on the active list here is unexpected; warn but recover */
	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
		list_del(&pt->active_list);
	spin_unlock_irqrestore(fence->lock, flags);

	if (parent->ops->free_pt)
		parent->ops->free_pt(pt);

	sync_timeline_put(parent);
	fence_free(&pt->base);
}
443
444static bool android_fence_signaled(struct fence *fence)
445{
446	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
447	struct sync_timeline *parent = sync_pt_parent(pt);
448	int ret;
449
450	ret = parent->ops->has_signaled(pt);
451	if (ret < 0)
452		fence->status = ret;
453	return ret;
454}
455
/*
 * fence_ops enable_signaling: returns false if the pt has already
 * signaled, otherwise puts it on the timeline's active list so
 * sync_timeline_signal() will complete it.  The unlocked list_add
 * relies on the caller holding fence->lock (the timeline's
 * child_list_lock) — standard fence API contract.
 */
static bool android_fence_enable_signaling(struct fence *fence)
{
	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
	struct sync_timeline *parent = sync_pt_parent(pt);

	if (android_fence_signaled(fence))
		return false;

	list_add_tail(&pt->active_list, &parent->active_list_head);
	return true;
}
467
468static int android_fence_fill_driver_data(struct fence *fence,
469					  void *data, int size)
470{
471	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
472	struct sync_timeline *parent = sync_pt_parent(pt);
473
474	if (!parent->ops->fill_driver_data)
475		return 0;
476	return parent->ops->fill_driver_data(pt, data, size);
477}
478
479static void android_fence_value_str(struct fence *fence,
480				    char *str, int size)
481{
482	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
483	struct sync_timeline *parent = sync_pt_parent(pt);
484
485	if (!parent->ops->pt_value_str) {
486		if (size)
487			*str = 0;
488		return;
489	}
490	parent->ops->pt_value_str(pt, str, size);
491}
492
493static void android_fence_timeline_value_str(struct fence *fence,
494					     char *str, int size)
495{
496	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
497	struct sync_timeline *parent = sync_pt_parent(pt);
498
499	if (!parent->ops->timeline_value_str) {
500		if (size)
501			*str = 0;
502		return;
503	}
504	parent->ops->timeline_value_str(parent, str, size);
505}
506
/* fence_ops vtable installed on every sync_pt by sync_pt_create() */
static const struct fence_ops android_fence_ops = {
	.get_driver_name = android_fence_get_driver_name,
	.get_timeline_name = android_fence_get_timeline_name,
	.enable_signaling = android_fence_enable_signaling,
	.signaled = android_fence_signaled,
	.wait = fence_default_wait,
	.release = android_fence_release,
	.fill_driver_data = android_fence_fill_driver_data,
	.fence_value_str = android_fence_value_str,
	.timeline_value_str = android_fence_timeline_value_str,
};
518
/*
 * kref release callback for a sync_fence: if any pts were still pending
 * (status != 0), unregister the signal callbacks first —
 * fence_remove_callback() tolerates callbacks that already fired — then
 * drop the fence's reference on every pt and free the container.
 */
static void sync_fence_free(struct kref *kref)
{
	struct sync_fence *fence = container_of(kref, struct sync_fence, kref);
	int i, status = atomic_read(&fence->status);

	for (i = 0; i < fence->num_fences; ++i) {
		if (status)
			fence_remove_callback(fence->cbs[i].sync_pt,
					      &fence->cbs[i].cb);
		fence_put(fence->cbs[i].sync_pt);
	}

	kfree(fence);
}
533
/*
 * file_operations release: runs on the last fput() of the fence's file;
 * removes it from debugfs and drops the kref that sync_fence_alloc()
 * initialized.
 */
static int sync_fence_release(struct inode *inode, struct file *file)
{
	struct sync_fence *fence = file->private_data;

	sync_fence_debug_remove(fence);

	kref_put(&fence->kref, sync_fence_free);
	return 0;
}
543
544static unsigned int sync_fence_poll(struct file *file, poll_table *wait)
545{
546	struct sync_fence *fence = file->private_data;
547	int status;
548
549	poll_wait(file, &fence->wq, wait);
550
551	status = atomic_read(&fence->status);
552
553	if (!status)
554		return POLLIN;
555	else if (status < 0)
556		return POLLERR;
557	return 0;
558}
559
/*
 * SYNC_IOC_WAIT handler: copy the millisecond timeout (__s32, negative
 * = wait forever) from userspace and block in sync_fence_wait().
 */
static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
{
	__s32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

	return sync_fence_wait(fence, value);
}
569
/*
 * SYNC_IOC_MERGE handler: merge this fence with the one named by
 * data.fd2 and hand the combined fence back as a new fd.  The fd is
 * reserved up front so every later failure can be unwound without a
 * descriptor escaping to userspace.
 */
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_fence *fence2, *fence3;
	struct sync_merge_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fd;
	}

	fence2 = sync_fence_fdget(data.fd2);
	if (fence2 == NULL) {
		err = -ENOENT;
		goto err_put_fd;
	}

	/* userspace-supplied name may lack a terminator */
	data.name[sizeof(data.name) - 1] = '\0';
	fence3 = sync_fence_merge(data.name, fence, fence2);
	if (fence3 == NULL) {
		err = -ENOMEM;
		goto err_put_fence2;
	}

	/* report the new fd before committing it to the fd table */
	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		err = -EFAULT;
		goto err_put_fence3;
	}

	sync_fence_install(fence3, fd);
	sync_fence_put(fence2);
	return 0;

err_put_fence3:
	sync_fence_put(fence3);

err_put_fence2:
	sync_fence_put(fence2);

err_put_fd:
	put_unused_fd(fd);
	return err;
}
618
/*
 * Serialize one pt into a sync_pt_info record at @data.  Returns the
 * record length (header plus driver data) or a negative errno when
 * @size cannot hold the header.
 *
 * NOTE(review): @size is a signed int compared against size_t, so a
 * negative @size would bypass the bounds check via unsigned promotion —
 * confirm sync_fence_ioctl_fence_info() can never pass size - len < 0.
 */
static int sync_fill_pt_info(struct fence *fence, void *data, int size)
{
	struct sync_pt_info *info = data;
	int ret;

	if (size < sizeof(struct sync_pt_info))
		return -ENOMEM;

	info->len = sizeof(struct sync_pt_info);

	/* optional driver payload appended after the fixed header */
	if (fence->ops->fill_driver_data) {
		ret = fence->ops->fill_driver_data(fence, info->driver_data,
						   size - sizeof(*info));
		if (ret < 0)
			return ret;

		info->len += ret;
	}

	strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
		sizeof(info->obj_name));
	strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
		sizeof(info->driver_name));
	/* ABI: 1 = signaled, 0 = active, <0 = error */
	if (fence_is_signaled(fence))
		info->status = fence->status >= 0 ? 1 : fence->status;
	else
		info->status = 0;
	info->timestamp_ns = ktime_to_ns(fence->timestamp);

	return info->len;
}
650
/*
 * SYNC_IOC_FENCE_INFO handler: fill a user buffer with the fence's
 * overall status followed by one sync_pt_info per pt.  The first __u32
 * of the user struct carries the caller's buffer size, which is clamped
 * to 4096 bytes.
 */
static long sync_fence_ioctl_fence_info(struct sync_fence *fence,
					unsigned long arg)
{
	struct sync_fence_info_data *data;
	__u32 size;
	__u32 len = 0;
	int ret, i;

	if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
		return -EFAULT;

	if (size < sizeof(struct sync_fence_info_data))
		return -EINVAL;

	if (size > 4096)
		size = 4096;

	data = kzalloc(size, GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	strlcpy(data->name, fence->name, sizeof(data->name));
	/* map the internal pending-count to the ABI: 1 = signaled,
	 * 0 = active, negative errors pass through unchanged */
	data->status = atomic_read(&fence->status);
	if (data->status >= 0)
		data->status = !data->status;

	len = sizeof(struct sync_fence_info_data);

	for (i = 0; i < fence->num_fences; ++i) {
		struct fence *pt = fence->cbs[i].sync_pt;

		ret = sync_fill_pt_info(pt, (u8 *)data + len, size - len);

		if (ret < 0)
			goto out;

		len += ret;
	}

	data->len = len;

	if (copy_to_user((void __user *)arg, data, len))
		ret = -EFAULT;
	else
		ret = 0;

out:
	kfree(data);

	return ret;
}
702
/*
 * ioctl dispatcher for sync fence files.  Also wired up as
 * compat_ioctl — presumably the argument structs are layout-identical
 * on 32/64-bit; confirm against the uapi definitions.
 */
static long sync_fence_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct sync_fence *fence = file->private_data;

	switch (cmd) {
	case SYNC_IOC_WAIT:
		return sync_fence_ioctl_wait(fence, arg);

	case SYNC_IOC_MERGE:
		return sync_fence_ioctl_merge(fence, arg);

	case SYNC_IOC_FENCE_INFO:
		return sync_fence_ioctl_fence_info(fence, arg);

	default:
		return -ENOTTY;
	}
}
722
/* file_operations backing the anon-inode file of every sync_fence */
static const struct file_operations sync_fence_fops = {
	.release = sync_fence_release,
	.poll = sync_fence_poll,
	.unlocked_ioctl = sync_fence_ioctl,
	.compat_ioctl = sync_fence_ioctl,
};
729
730