1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <asm/cacheflush.h>
19#include <linux/fdtable.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/miscdevice.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/nsproxy.h>
28#include <linux/poll.h>
29#include <linux/debugfs.h>
30#include <linux/rbtree.h>
31#include <linux/sched.h>
32#include <linux/seq_file.h>
33#include <linux/uaccess.h>
34#include <linux/vmalloc.h>
35#include <linux/slab.h>
36
37#include "binder.h"
38#include "binder_trace.h"
39
/* Big driver lock: nearly every operation runs under this mutex,
 * acquired/released via binder_lock()/binder_unlock() below. */
static DEFINE_MUTEX(binder_main_lock);
/* NOTE(review): presumably guards binder_deferred_list and the
 * per-proc deferred_work bits; the users are outside this chunk. */
static DEFINE_MUTEX(binder_deferred_lock);
/* NOTE(review): presumably serializes binder mmap set-up/tear-down;
 * the users are outside this chunk. */
static DEFINE_MUTEX(binder_mmap_lock);

/* Global lists (membership maintained elsewhere in the file):
 * binder_procs: binder_proc instances (linked via proc_node);
 * binder_deferred_list: procs with pending deferred work
 *   (linked via deferred_work_node);
 * binder_dead_nodes: nodes whose owning proc is gone (linked via
 *   node->dead_node; see binder_dec_node()). */
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

/* debugfs directories created at init; per-proc files live under the
 * second one (see binder_proc.debugfs_entry). */
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* Node of the context-manager service; references to it get
 * descriptor 0 (see binder_get_ref_for_node()). */
static struct binder_node *binder_context_mgr_node;
static uid_t binder_context_mgr_uid = -1;	/* -1: no manager registered */
static int binder_last_id;	/* source of unique debug ids for nodes/refs */
static struct workqueue_struct *binder_deferred_workqueue;
54
/*
 * BINDER_DEBUG_ENTRY(name): given a binder_<name>_show(seq_file, priv)
 * routine, generate the single_open() boilerplate plus a matching
 * binder_<name>_fops suitable for a debugfs file.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

/* Per-process state dump; binder_proc_show() is defined later in the file. */
static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
71
72/* This is only defined in include/asm-arm/sizes.h */
73#ifndef SZ_1K
74#define SZ_1K                               0x400
75#endif
76
77#ifndef SZ_4M
78#define SZ_4M                               0x400000
79#endif
80
81#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
82
83#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
84
/* Bit flags for binder_debug_mask; each bit enables one class of
 * binder_debug() messages. */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
/* Default: report user errors and failed/dead transactions only;
 * writable at runtime via the debug_mask module parameter. */
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* NOTE(review): presumably lets the debugfs state dumps run without
 * taking binder_main_lock; the readers are outside this chunk. */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

/* stop_on_user_error: 0 = off, 1 = armed, 2 = stopped — latched to 2
 * by binder_user_error() when a userspace error fires.  NOTE(review):
 * the code that sleeps on binder_user_error_wait while the value is 2
 * is outside this chunk. */
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
112
/*
 * Setter for the stop_on_user_error module parameter.  After storing
 * the new value, wake anything parked on binder_user_error_wait if the
 * value dropped below 2 (i.e. the "stopped" latch was cleared).
 * Returns the param_set_int() result.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
122module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
123	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
124
/* printk the given message when the corresponding bit of
 * binder_debug_mask is enabled. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			printk(KERN_INFO x); \
	} while (0)

/* Report a userspace-caused error and, when stop_on_user_error is
 * armed (non-zero), latch it to 2 so the state can be inspected (see
 * binder_set_stop_on_user_error()). */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
138
/* Object categories counted in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* must stay last: sizes the arrays below */
};

struct binder_stats {
	/* counters indexed by BR_* / BC_* command number (_IOC_NR) */
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* Driver-wide totals; binder_proc and binder_thread embed their own. */
static struct binder_stats binder_stats;
158
159static inline void binder_stats_deleted(enum binder_stat_types type)
160{
161	binder_stats.obj_deleted[type]++;
162}
163
164static inline void binder_stats_created(enum binder_stat_types type)
165{
166	binder_stats.obj_created[type]++;
167}
168
/* One slot of the transaction log ring: identifies both ends of a
 * (possibly failed) transaction plus its payload sizes. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
/* Ring of the most recent 32 entries; next is the slot that will be
 * overwritten next and full is set once the ring has wrapped (see
 * binder_transaction_log_add()). */
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
188
189static struct binder_transaction_log_entry *binder_transaction_log_add(
190	struct binder_transaction_log *log)
191{
192	struct binder_transaction_log_entry *e;
193	e = &log->entry[log->next];
194	memset(e, 0, sizeof(*e));
195	log->next++;
196	if (log->next == ARRAY_SIZE(log->entry)) {
197		log->next = 0;
198		log->full = 1;
199	}
200	return e;
201}
202
/* An item queued on a work list (proc->todo, thread->todo or
 * node->async_todo).  The type tag tells the consumer which containing
 * object the entry is embedded in and how to handle it. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
214
/* Kernel-side state for one binder object exported by a process. */
struct binder_node {
	int debug_id;		/* unique, taken from binder_last_id */
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* in proc->nodes while proc lives */
		struct hlist_node dead_node;	/* in binder_dead_nodes afterwards */
	};
	struct binder_proc *proc;	/* owning process; NULL once it has died */
	struct hlist_head refs;		/* every binder_ref pointing at this node */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;	/* userspace object address; key in proc->nodes */
	void __user *cookie;	/* userspace-supplied companion value */
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	/* NOTE(review): presumably the minimum priority granted to threads
	 * handling transactions on this node — confirm against the
	 * transaction path. */
	unsigned min_priority:8;
	struct list_head async_todo;	/* queued async work for this node */
};
238
/* Pending death notification; cookie is the userspace token to hand
 * back when the notification is delivered. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;			/* unique, taken from binder_last_id */
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc, keyed by desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node, keyed by node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;	/* process that holds this reference */
	struct binder_node *node;	/* referenced node */
	uint32_t desc;		/* userspace handle; 0 is the context manager */
	int strong;
	int weak;
	struct binder_ref_death *death;	/* non-NULL if a death notice is armed */
};

/* Header of one allocation carved out of a proc's mmap'ed region.
 * All buffers (free and in use) sit address-ordered on proc->buffers;
 * the usable payload follows the header in data[]. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	/* must be NULL by the time the buffer is freed (see binder_free_buf) */
	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];	/* payload: data then offsets, pointer-aligned */
};
277
/* Work kinds recorded in binder_proc.deferred_work and executed later
 * via binder_defer_work(). */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/* Per-process (per open binder fd) driver state. */
struct binder_proc {
	struct hlist_node proc_node;	/* in binder_procs */
	struct rb_root threads;		/* binder_thread entries */
	struct rb_root nodes;		/* binder_node, keyed by node->ptr */
	struct rb_root refs_by_desc;	/* binder_ref, keyed by ref->desc */
	struct rb_root refs_by_node;	/* binder_ref, keyed by ref->node */
	int pid;
	struct vm_area_struct *vma;	/* userspace mapping; NULL if unmapped */
	struct mm_struct *vma_vm_mm;	/* mm that vma belongs to (sanity check) */
	struct task_struct *tsk;
	struct files_struct *files;	/* for fd passing; may be NULL */
	struct hlist_node deferred_work_node;	/* in binder_deferred_list */
	int deferred_work;	/* pending BINDER_DEFERRED_* bits */
	void *buffer;		/* kernel-side base of the shared region */
	ptrdiff_t user_buffer_offset;	/* user address minus kernel address */

	struct list_head buffers;	/* every binder_buffer, address order */
	struct rb_root free_buffers;	/* free buffers, keyed by size */
	struct rb_root allocated_buffers;	/* in-use buffers, by address */
	size_t free_async_space;	/* budget left for async transactions */

	struct page **pages;	/* one entry per page of the mapped region */
	size_t buffer_size;	/* total size of the mapped region */
	uint32_t buffer_free;
	struct list_head todo;	/* work any thread of this proc may take */
	wait_queue_head_t wait;	/* threads waiting for todo work */
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;	/* this proc's debugfs file */
};

/* binder_thread.looper state bits. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
328
/* Per-thread state for each userspace thread that has entered the
 * driver; kept in its proc's threads tree. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;		/* in proc->threads */
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;	/* nested in-flight calls */
	struct list_head todo;		/* work targeted at this thread */
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* One in-flight transaction (or reply) between two processes. */
struct binder_transaction {
	int debug_id;		/* unique, taken from binder_last_id */
	struct binder_work work;
	struct binder_thread *from;		/* sending thread */
	struct binder_transaction *from_parent;	/* sender's stacked call */
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;	/* target's stacked call */
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload in the target's region */
	unsigned int	code;		/* transaction code from userspace */
	unsigned int	flags;
	long	priority;
	long	saved_priority;	/* restored after priority inheritance */
	uid_t	sender_euid;
};
362
363static void
364binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
365
366/*
367 * copied from get_unused_fd_flags
368 */
369int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
370{
371	struct files_struct *files = proc->files;
372	int fd, error;
373	struct fdtable *fdt;
374	unsigned long rlim_cur;
375	unsigned long irqs;
376
377	if (files == NULL)
378		return -ESRCH;
379
380	error = -EMFILE;
381	spin_lock(&files->file_lock);
382
383repeat:
384	fdt = files_fdtable(files);
385	fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
386
387	/*
388	 * N.B. For clone tasks sharing a files structure, this test
389	 * will limit the total number of files that can be opened.
390	 */
391	rlim_cur = 0;
392	if (lock_task_sighand(proc->tsk, &irqs)) {
393		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
394		unlock_task_sighand(proc->tsk, &irqs);
395	}
396	if (fd >= rlim_cur)
397		goto out;
398
399	/* Do we need to expand the fd array or fd set?  */
400	error = expand_files(files, fd);
401	if (error < 0)
402		goto out;
403
404	if (error) {
405		/*
406		 * If we needed to expand the fs array we
407		 * might have blocked - try again.
408		 */
409		error = -EMFILE;
410		goto repeat;
411	}
412
413	__set_open_fd(fd, fdt);
414	if (flags & O_CLOEXEC)
415		__set_close_on_exec(fd, fdt);
416	else
417		__clear_close_on_exec(fd, fdt);
418	files->next_fd = fd + 1;
419#if 1
420	/* Sanity check */
421	if (fdt->fd[fd] != NULL) {
422		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
423		fdt->fd[fd] = NULL;
424	}
425#endif
426	error = fd;
427
428out:
429	spin_unlock(&files->file_lock);
430	return error;
431}
432
433/*
434 * copied from fd_install
435 */
436static void task_fd_install(
437	struct binder_proc *proc, unsigned int fd, struct file *file)
438{
439	struct files_struct *files = proc->files;
440	struct fdtable *fdt;
441
442	if (files == NULL)
443		return;
444
445	spin_lock(&files->file_lock);
446	fdt = files_fdtable(files);
447	BUG_ON(fdt->fd[fd] != NULL);
448	rcu_assign_pointer(fdt->fd[fd], file);
449	spin_unlock(&files->file_lock);
450}
451
452/*
453 * copied from __put_unused_fd in open.c
454 */
455static void __put_unused_fd(struct files_struct *files, unsigned int fd)
456{
457	struct fdtable *fdt = files_fdtable(files);
458	__clear_open_fd(fd, fdt);
459	if (fd < files->next_fd)
460		files->next_fd = fd;
461}
462
463/*
464 * copied from sys_close
465 */
466static long task_close_fd(struct binder_proc *proc, unsigned int fd)
467{
468	struct file *filp;
469	struct files_struct *files = proc->files;
470	struct fdtable *fdt;
471	int retval;
472
473	if (files == NULL)
474		return -ESRCH;
475
476	spin_lock(&files->file_lock);
477	fdt = files_fdtable(files);
478	if (fd >= fdt->max_fds)
479		goto out_unlock;
480	filp = fdt->fd[fd];
481	if (!filp)
482		goto out_unlock;
483	rcu_assign_pointer(fdt->fd[fd], NULL);
484	__clear_close_on_exec(fd, fdt);
485	__put_unused_fd(files, fd);
486	spin_unlock(&files->file_lock);
487	retval = filp_close(filp, files);
488
489	/* can't restart close syscall because file table entry was cleared */
490	if (unlikely(retval == -ERESTARTSYS ||
491		     retval == -ERESTARTNOINTR ||
492		     retval == -ERESTARTNOHAND ||
493		     retval == -ERESTART_RESTARTBLOCK))
494		retval = -EINTR;
495
496	return retval;
497
498out_unlock:
499	spin_unlock(&files->file_lock);
500	return -EBADF;
501}
502
/* Take the single global binder mutex; @tag only feeds the trace
 * events emitted before and after acquisition. */
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

/* Drop the global binder mutex (with matching trace event). */
static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}
515
/*
 * Set current's nice value to @nice; if RLIMIT_NICE forbids it, fall
 * back to the most favourable value the limit allows and log the cap.
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	/* lowest (most favourable) nice value the rlimit permits */
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "binder: %d: nice value %ld not allowed use "
		     "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	/* min_nice == 20 means the rlimit allows no boost at all */
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}
532
/*
 * Usable payload size of @buffer: the distance from buffer->data to
 * the next buffer header in the address-ordered proc->buffers list,
 * or to the end of the whole mapping for the last buffer.
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}
542
/*
 * Insert free buffer @new_buffer into proc->free_buffers, which is
 * ordered by payload size (duplicates go right) so binder_alloc_buf()
 * can do a best-fit search.
 */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: add free buffer, size %zd, "
		     "at %p\n", proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
575
/*
 * Insert in-use buffer @new_buffer into proc->allocated_buffers,
 * which is keyed by buffer address (so binder_buffer_lookup() can map
 * a userspace pointer back to its buffer).  Duplicate addresses are
 * impossible, hence the BUG().
 */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
600
601static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
602						  void __user *user_ptr)
603{
604	struct rb_node *n = proc->allocated_buffers.rb_node;
605	struct binder_buffer *buffer;
606	struct binder_buffer *kern_ptr;
607
608	kern_ptr = user_ptr - proc->user_buffer_offset
609		- offsetof(struct binder_buffer, data);
610
611	while (n) {
612		buffer = rb_entry(n, struct binder_buffer, rb_node);
613		BUG_ON(buffer->free);
614
615		if (kern_ptr < buffer)
616			n = n->rb_left;
617		else if (kern_ptr > buffer)
618			n = n->rb_right;
619		else
620			return buffer;
621	}
622	return NULL;
623}
624
/*
 * Map (@allocate != 0) or unmap (@allocate == 0) the kernel and user
 * views of the page range [start, end) within @proc's binder region.
 *
 * @vma may be supplied by the caller (mmap path); otherwise the target
 * mm is pinned with get_task_mm() and proc->vma is used.  Returns 0 on
 * success or -ENOMEM, unwinding any pages mapped so far.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		/* the recorded vma must still belong to the pinned mm */
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("binder: %d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	/* per page: allocate, map into kernel space, then into the vma */
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

	/*
	 * Free path, also the error-unwind path: the err_* labels sit
	 * inside this loop so that a failed page falls through only its
	 * remaining teardown steps, after which the loop walks backwards
	 * over the pages that were fully mapped before it.
	 */
free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
729
730static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
731					      size_t data_size,
732					      size_t offsets_size, int is_async)
733{
734	struct rb_node *n = proc->free_buffers.rb_node;
735	struct binder_buffer *buffer;
736	size_t buffer_size;
737	struct rb_node *best_fit = NULL;
738	void *has_page_addr;
739	void *end_page_addr;
740	size_t size;
741
742	if (proc->vma == NULL) {
743		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
744		       proc->pid);
745		return NULL;
746	}
747
748	size = ALIGN(data_size, sizeof(void *)) +
749		ALIGN(offsets_size, sizeof(void *));
750
751	if (size < data_size || size < offsets_size) {
752		binder_user_error("binder: %d: got transaction with invalid "
753			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
754		return NULL;
755	}
756
757	if (is_async &&
758	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
759		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
760			     "binder: %d: binder_alloc_buf size %zd"
761			     "failed, no async space left\n", proc->pid, size);
762		return NULL;
763	}
764
765	while (n) {
766		buffer = rb_entry(n, struct binder_buffer, rb_node);
767		BUG_ON(!buffer->free);
768		buffer_size = binder_buffer_size(proc, buffer);
769
770		if (size < buffer_size) {
771			best_fit = n;
772			n = n->rb_left;
773		} else if (size > buffer_size)
774			n = n->rb_right;
775		else {
776			best_fit = n;
777			break;
778		}
779	}
780	if (best_fit == NULL) {
781		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
782		       "no address space\n", proc->pid, size);
783		return NULL;
784	}
785	if (n == NULL) {
786		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
787		buffer_size = binder_buffer_size(proc, buffer);
788	}
789
790	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
791		     "binder: %d: binder_alloc_buf size %zd got buff"
792		     "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
793
794	has_page_addr =
795		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
796	if (n == NULL) {
797		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
798			buffer_size = size; /* no room for other buffers */
799		else
800			buffer_size = size + sizeof(struct binder_buffer);
801	}
802	end_page_addr =
803		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
804	if (end_page_addr > has_page_addr)
805		end_page_addr = has_page_addr;
806	if (binder_update_page_range(proc, 1,
807	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
808		return NULL;
809
810	rb_erase(best_fit, &proc->free_buffers);
811	buffer->free = 0;
812	binder_insert_allocated_buffer(proc, buffer);
813	if (buffer_size != size) {
814		struct binder_buffer *new_buffer = (void *)buffer->data + size;
815		list_add(&new_buffer->entry, &buffer->entry);
816		new_buffer->free = 1;
817		binder_insert_free_buffer(proc, new_buffer);
818	}
819	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
820		     "binder: %d: binder_alloc_buf size %zd got "
821		     "%p\n", proc->pid, size, buffer);
822	buffer->data_size = data_size;
823	buffer->offsets_size = offsets_size;
824	buffer->async_transaction = is_async;
825	if (is_async) {
826		proc->free_async_space -= size + sizeof(struct binder_buffer);
827		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
828			     "binder: %d: binder_alloc_buf size %zd "
829			     "async free %zd\n", proc->pid, size,
830			     proc->free_async_space);
831	}
832
833	return buffer;
834}
835
836static void *buffer_start_page(struct binder_buffer *buffer)
837{
838	return (void *)((uintptr_t)buffer & PAGE_MASK);
839}
840
841static void *buffer_end_page(struct binder_buffer *buffer)
842{
843	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
844}
845
846static void binder_delete_free_buffer(struct binder_proc *proc,
847				      struct binder_buffer *buffer)
848{
849	struct binder_buffer *prev, *next = NULL;
850	int free_page_end = 1;
851	int free_page_start = 1;
852
853	BUG_ON(proc->buffers.next == &buffer->entry);
854	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
855	BUG_ON(!prev->free);
856	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
857		free_page_start = 0;
858		if (buffer_end_page(prev) == buffer_end_page(buffer))
859			free_page_end = 0;
860		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
861			     "binder: %d: merge free, buffer %p "
862			     "share page with %p\n", proc->pid, buffer, prev);
863	}
864
865	if (!list_is_last(&buffer->entry, &proc->buffers)) {
866		next = list_entry(buffer->entry.next,
867				  struct binder_buffer, entry);
868		if (buffer_start_page(next) == buffer_end_page(buffer)) {
869			free_page_end = 0;
870			if (buffer_start_page(next) ==
871			    buffer_start_page(buffer))
872				free_page_start = 0;
873			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
874				     "binder: %d: merge free, buffer"
875				     " %p share page with %p\n", proc->pid,
876				     buffer, prev);
877		}
878	}
879	list_del(&buffer->entry);
880	if (free_page_start || free_page_end) {
881		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
882			     "binder: %d: merge free, buffer %p do "
883			     "not share page%s%s with with %p or %p\n",
884			     proc->pid, buffer, free_page_start ? "" : " end",
885			     free_page_end ? "" : " start", prev, next);
886		binder_update_page_range(proc, 0, free_page_start ?
887			buffer_start_page(buffer) : buffer_end_page(buffer),
888			(free_page_end ? buffer_end_page(buffer) :
889			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
890	}
891}
892
/*
 * Return @buffer to @proc's free pool: release its backing pages,
 * refund the async budget when applicable, and coalesce with free
 * neighbours so two adjacent free buffers never coexist.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_free_buf %p size %zd buffer"
		     "_size %zd\n", proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_free_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	/* unmap the whole pages covered only by this buffer's payload */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* absorb a free successor into this buffer */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* merge this buffer into a free predecessor */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
947
948static struct binder_node *binder_get_node(struct binder_proc *proc,
949					   void __user *ptr)
950{
951	struct rb_node *n = proc->nodes.rb_node;
952	struct binder_node *node;
953
954	while (n) {
955		node = rb_entry(n, struct binder_node, rb_node);
956
957		if (ptr < node->ptr)
958			n = n->rb_left;
959		else if (ptr > node->ptr)
960			n = n->rb_right;
961		else
962			return node;
963	}
964	return NULL;
965}
966
/*
 * Allocate a binder_node for (@ptr, @cookie) and insert it into
 * proc->nodes (keyed by ptr).  Returns NULL either when a node with
 * the same ptr already exists or when the allocation fails.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}
1006
/*
 * Take a strong (@strong) or weak reference on @node.
 * @internal: account in internal_strong_refs rather than the local_*
 *   counters (NOTE(review): presumably refs held on behalf of remote
 *   processes; weak internal refs are not counted at all).
 * @target_list: work list to queue node->work on when userspace must
 *   be informed of the first strong/weak ref.
 * Returns 0 on success, -EINVAL on an inconsistent ref state.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/* taking the first internal strong ref requires a
			 * target_list, except for the context manager node
			 * which userspace already holds strongly */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* requeue: a strong notification supersedes any
			 * pending weak one */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
1041
/*
 * Drop a strong (@strong) or weak reference on @node (with @internal
 * matching the binder_inc_node() accounting).  When the last
 * strong/weak ref disappears, queue node->work so userspace learns of
 * it; once nothing references the node at all, unlink it — from
 * proc->nodes, or from binder_dead_nodes if the proc is gone — and
 * free it.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	/* last ref of this kind gone: notify owner if userspace still
	 * believes it holds a ref, otherwise consider destruction */
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
1084
1085
1086static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1087					 uint32_t desc)
1088{
1089	struct rb_node *n = proc->refs_by_desc.rb_node;
1090	struct binder_ref *ref;
1091
1092	while (n) {
1093		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1094
1095		if (desc < ref->desc)
1096			n = n->rb_left;
1097		else if (desc > ref->desc)
1098			n = n->rb_right;
1099		else
1100			return ref;
1101	}
1102	return NULL;
1103}
1104
/*
 * binder_get_ref_for_node() - find or create @proc's ref to @node.
 * @proc: process that needs a reference
 * @node: node being referenced (a NULL node is tolerated; the new ref
 *        is then logged as referring to a dead node)
 *
 * If @proc already holds a binder_ref for @node (keyed by node address
 * in refs_by_node), it is returned.  Otherwise a new zero-count ref is
 * allocated, assigned the lowest unused descriptor (0 is reserved for
 * the context manager node; every other search starts at 1), inserted
 * into both the refs_by_node and refs_by_desc rb-trees, and hooked
 * onto the node's refs list.  Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	/* Search refs_by_node, ordered by node pointer value. */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	/* parent/p still point at the insertion slot found above. */
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Pick the lowest free descriptor: walk refs_by_desc in
	 * ascending desc order and stop at the first gap.
	 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* Insert by descriptor; the desc chosen above must be unique. */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			      new_ref->desc);
	}
	return new_ref;
}
1171
/*
 * binder_delete_ref() - tear down @ref and drop its node references.
 *
 * Unlinks @ref from both of its proc's rb-trees, releases the strong
 * node reference if one is held, removes the ref from the node's refs
 * list, then drops the implicit weak node reference (which may free
 * the node).  A registered death notification is unlinked from its
 * work list and freed.  Finally the ref itself is freed.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc, ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	/* Unhook from the node before dropping the implicit weak ref;
	 * the weak dec below may free the node. */
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
			     ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
1197
1198static int binder_inc_ref(struct binder_ref *ref, int strong,
1199			  struct list_head *target_list)
1200{
1201	int ret;
1202	if (strong) {
1203		if (ref->strong == 0) {
1204			ret = binder_inc_node(ref->node, 1, 1, target_list);
1205			if (ret)
1206				return ret;
1207		}
1208		ref->strong++;
1209	} else {
1210		if (ref->weak == 0) {
1211			ret = binder_inc_node(ref->node, 0, 1, target_list);
1212			if (ret)
1213				return ret;
1214		}
1215		ref->weak++;
1216	}
1217	return 0;
1218}
1219
1220
1221static int binder_dec_ref(struct binder_ref *ref, int strong)
1222{
1223	if (strong) {
1224		if (ref->strong == 0) {
1225			binder_user_error("binder: %d invalid dec strong, "
1226					  "ref %d desc %d s %d w %d\n",
1227					  ref->proc->pid, ref->debug_id,
1228					  ref->desc, ref->strong, ref->weak);
1229			return -EINVAL;
1230		}
1231		ref->strong--;
1232		if (ref->strong == 0) {
1233			int ret;
1234			ret = binder_dec_node(ref->node, strong, 1);
1235			if (ret)
1236				return ret;
1237		}
1238	} else {
1239		if (ref->weak == 0) {
1240			binder_user_error("binder: %d invalid dec weak, "
1241					  "ref %d desc %d s %d w %d\n",
1242					  ref->proc->pid, ref->debug_id,
1243					  ref->desc, ref->strong, ref->weak);
1244			return -EINVAL;
1245		}
1246		ref->weak--;
1247	}
1248	if (ref->strong == 0 && ref->weak == 0)
1249		binder_delete_ref(ref);
1250	return 0;
1251}
1252
1253static void binder_pop_transaction(struct binder_thread *target_thread,
1254				   struct binder_transaction *t)
1255{
1256	if (target_thread) {
1257		BUG_ON(target_thread->transaction_stack != t);
1258		BUG_ON(target_thread->transaction_stack->from != target_thread);
1259		target_thread->transaction_stack =
1260			target_thread->transaction_stack->from_parent;
1261		t->from = NULL;
1262	}
1263	t->need_reply = 0;
1264	if (t->buffer)
1265		t->buffer->transaction = NULL;
1266	kfree(t);
1267	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1268}
1269
/*
 * binder_send_failed_reply() - report a failed transaction to its sender.
 * @t:          transaction that cannot be completed
 * @error_code: BR_* code to deliver to the sending thread
 *
 * Walks the chain of nested transactions starting at @t.  If the
 * sending thread still exists, @t is popped off its stack, the error
 * is stored in its return_error slot and the thread is woken; a
 * return_error already pending is first shifted into return_error2 if
 * that slot is free, and if both slots are occupied the new error is
 * only logged.  If the sender is gone, @t is freed and the walk
 * retries with its parent transaction, until the root of the chain.
 * One-way transactions must never be passed here (BUG_ON).
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* Free up return_error by moving a pending error
			 * into the secondary slot, when possible. */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					      t->debug_id, target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* Sender thread is gone: free @t and move one
			 * level up the call chain. */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}
1324
/*
 * binder_transaction_buffer_release() - release the references pinned
 * by the objects embedded in a transaction buffer.
 * @proc:      process in whose context the objects are resolved
 * @buffer:    transaction buffer being released
 * @failed_at: when non-NULL, the transaction failed mid-translation
 *             and only offsets strictly before this point are walked;
 *             when NULL, the entire offsets array is processed
 *
 * Drops the strong reference on the buffer's target node, then walks
 * the flat_binder_object entries described by the offsets area (which
 * starts after the pointer-aligned data section) and undoes what each
 * one pinned: node refcounts for (weak) binders, ref counts for (weak)
 * handles, and - on the failure path only - closes the file
 * descriptors that were already installed in @proc.  Entries with a
 * bad offset are logged and skipped.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array lives right after the (aligned) data section. */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* Reject offsets outside the buffer or misaligned ones. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			printk(KERN_ERR "binder: transaction release %d bad"
					"offset %zd, size %zd\n", debug_id,
					*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				printk(KERN_ERR "binder: transaction release %d"
				       " bad node %p\n", debug_id, fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%p\n",
				     node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				printk(KERN_ERR "binder: transaction release %d"
				       " bad handle %ld\n", debug_id,
				       fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %ld\n", fp->handle);
			/* Only close fds when aborting a failed
			 * transaction; on normal release they belong to
			 * the receiver. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			printk(KERN_ERR "binder: transaction release %d bad "
			       "object type %lx\n", debug_id, fp->type);
			break;
		}
	}
}
1399
1400static void binder_transaction(struct binder_proc *proc,
1401			       struct binder_thread *thread,
1402			       struct binder_transaction_data *tr, int reply)
1403{
1404	struct binder_transaction *t;
1405	struct binder_work *tcomplete;
1406	size_t *offp, *off_end;
1407	struct binder_proc *target_proc;
1408	struct binder_thread *target_thread = NULL;
1409	struct binder_node *target_node = NULL;
1410	struct list_head *target_list;
1411	wait_queue_head_t *target_wait;
1412	struct binder_transaction *in_reply_to = NULL;
1413	struct binder_transaction_log_entry *e;
1414	uint32_t return_error;
1415
1416	e = binder_transaction_log_add(&binder_transaction_log);
1417	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1418	e->from_proc = proc->pid;
1419	e->from_thread = thread->pid;
1420	e->target_handle = tr->target.handle;
1421	e->data_size = tr->data_size;
1422	e->offsets_size = tr->offsets_size;
1423
1424	if (reply) {
1425		in_reply_to = thread->transaction_stack;
1426		if (in_reply_to == NULL) {
1427			binder_user_error("binder: %d:%d got reply transaction "
1428					  "with no transaction stack\n",
1429					  proc->pid, thread->pid);
1430			return_error = BR_FAILED_REPLY;
1431			goto err_empty_call_stack;
1432		}
1433		binder_set_nice(in_reply_to->saved_priority);
1434		if (in_reply_to->to_thread != thread) {
1435			binder_user_error("binder: %d:%d got reply transaction "
1436				"with bad transaction stack,"
1437				" transaction %d has target %d:%d\n",
1438				proc->pid, thread->pid, in_reply_to->debug_id,
1439				in_reply_to->to_proc ?
1440				in_reply_to->to_proc->pid : 0,
1441				in_reply_to->to_thread ?
1442				in_reply_to->to_thread->pid : 0);
1443			return_error = BR_FAILED_REPLY;
1444			in_reply_to = NULL;
1445			goto err_bad_call_stack;
1446		}
1447		thread->transaction_stack = in_reply_to->to_parent;
1448		target_thread = in_reply_to->from;
1449		if (target_thread == NULL) {
1450			return_error = BR_DEAD_REPLY;
1451			goto err_dead_binder;
1452		}
1453		if (target_thread->transaction_stack != in_reply_to) {
1454			binder_user_error("binder: %d:%d got reply transaction "
1455				"with bad target transaction stack %d, "
1456				"expected %d\n",
1457				proc->pid, thread->pid,
1458				target_thread->transaction_stack ?
1459				target_thread->transaction_stack->debug_id : 0,
1460				in_reply_to->debug_id);
1461			return_error = BR_FAILED_REPLY;
1462			in_reply_to = NULL;
1463			target_thread = NULL;
1464			goto err_dead_binder;
1465		}
1466		target_proc = target_thread->proc;
1467	} else {
1468		if (tr->target.handle) {
1469			struct binder_ref *ref;
1470			ref = binder_get_ref(proc, tr->target.handle);
1471			if (ref == NULL) {
1472				binder_user_error("binder: %d:%d got "
1473					"transaction to invalid handle\n",
1474					proc->pid, thread->pid);
1475				return_error = BR_FAILED_REPLY;
1476				goto err_invalid_target_handle;
1477			}
1478			target_node = ref->node;
1479		} else {
1480			target_node = binder_context_mgr_node;
1481			if (target_node == NULL) {
1482				return_error = BR_DEAD_REPLY;
1483				goto err_no_context_mgr_node;
1484			}
1485		}
1486		e->to_node = target_node->debug_id;
1487		target_proc = target_node->proc;
1488		if (target_proc == NULL) {
1489			return_error = BR_DEAD_REPLY;
1490			goto err_dead_binder;
1491		}
1492		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1493			struct binder_transaction *tmp;
1494			tmp = thread->transaction_stack;
1495			if (tmp->to_thread != thread) {
1496				binder_user_error("binder: %d:%d got new "
1497					"transaction with bad transaction stack"
1498					", transaction %d has target %d:%d\n",
1499					proc->pid, thread->pid, tmp->debug_id,
1500					tmp->to_proc ? tmp->to_proc->pid : 0,
1501					tmp->to_thread ?
1502					tmp->to_thread->pid : 0);
1503				return_error = BR_FAILED_REPLY;
1504				goto err_bad_call_stack;
1505			}
1506			while (tmp) {
1507				if (tmp->from && tmp->from->proc == target_proc)
1508					target_thread = tmp->from;
1509				tmp = tmp->from_parent;
1510			}
1511		}
1512	}
1513	if (target_thread) {
1514		e->to_thread = target_thread->pid;
1515		target_list = &target_thread->todo;
1516		target_wait = &target_thread->wait;
1517	} else {
1518		target_list = &target_proc->todo;
1519		target_wait = &target_proc->wait;
1520	}
1521	e->to_proc = target_proc->pid;
1522
1523	/* TODO: reuse incoming transaction for reply */
1524	t = kzalloc(sizeof(*t), GFP_KERNEL);
1525	if (t == NULL) {
1526		return_error = BR_FAILED_REPLY;
1527		goto err_alloc_t_failed;
1528	}
1529	binder_stats_created(BINDER_STAT_TRANSACTION);
1530
1531	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1532	if (tcomplete == NULL) {
1533		return_error = BR_FAILED_REPLY;
1534		goto err_alloc_tcomplete_failed;
1535	}
1536	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1537
1538	t->debug_id = ++binder_last_id;
1539	e->debug_id = t->debug_id;
1540
1541	if (reply)
1542		binder_debug(BINDER_DEBUG_TRANSACTION,
1543			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
1544			     "data %p-%p size %zd-%zd\n",
1545			     proc->pid, thread->pid, t->debug_id,
1546			     target_proc->pid, target_thread->pid,
1547			     tr->data.ptr.buffer, tr->data.ptr.offsets,
1548			     tr->data_size, tr->offsets_size);
1549	else
1550		binder_debug(BINDER_DEBUG_TRANSACTION,
1551			     "binder: %d:%d BC_TRANSACTION %d -> "
1552			     "%d - node %d, data %p-%p size %zd-%zd\n",
1553			     proc->pid, thread->pid, t->debug_id,
1554			     target_proc->pid, target_node->debug_id,
1555			     tr->data.ptr.buffer, tr->data.ptr.offsets,
1556			     tr->data_size, tr->offsets_size);
1557
1558	if (!reply && !(tr->flags & TF_ONE_WAY))
1559		t->from = thread;
1560	else
1561		t->from = NULL;
1562	t->sender_euid = proc->tsk->cred->euid;
1563	t->to_proc = target_proc;
1564	t->to_thread = target_thread;
1565	t->code = tr->code;
1566	t->flags = tr->flags;
1567	t->priority = task_nice(current);
1568
1569	trace_binder_transaction(reply, t, target_node);
1570
1571	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1572		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1573	if (t->buffer == NULL) {
1574		return_error = BR_FAILED_REPLY;
1575		goto err_binder_alloc_buf_failed;
1576	}
1577	t->buffer->allow_user_free = 0;
1578	t->buffer->debug_id = t->debug_id;
1579	t->buffer->transaction = t;
1580	t->buffer->target_node = target_node;
1581	trace_binder_transaction_alloc_buf(t->buffer);
1582	if (target_node)
1583		binder_inc_node(target_node, 1, 0, NULL);
1584
1585	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1586
1587	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1588		binder_user_error("binder: %d:%d got transaction with invalid "
1589			"data ptr\n", proc->pid, thread->pid);
1590		return_error = BR_FAILED_REPLY;
1591		goto err_copy_data_failed;
1592	}
1593	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1594		binder_user_error("binder: %d:%d got transaction with invalid "
1595			"offsets ptr\n", proc->pid, thread->pid);
1596		return_error = BR_FAILED_REPLY;
1597		goto err_copy_data_failed;
1598	}
1599	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1600		binder_user_error("binder: %d:%d got transaction with "
1601			"invalid offsets size, %zd\n",
1602			proc->pid, thread->pid, tr->offsets_size);
1603		return_error = BR_FAILED_REPLY;
1604		goto err_bad_offset;
1605	}
1606	off_end = (void *)offp + tr->offsets_size;
1607	for (; offp < off_end; offp++) {
1608		struct flat_binder_object *fp;
1609		if (*offp > t->buffer->data_size - sizeof(*fp) ||
1610		    t->buffer->data_size < sizeof(*fp) ||
1611		    !IS_ALIGNED(*offp, sizeof(void *))) {
1612			binder_user_error("binder: %d:%d got transaction with "
1613				"invalid offset, %zd\n",
1614				proc->pid, thread->pid, *offp);
1615			return_error = BR_FAILED_REPLY;
1616			goto err_bad_offset;
1617		}
1618		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1619		switch (fp->type) {
1620		case BINDER_TYPE_BINDER:
1621		case BINDER_TYPE_WEAK_BINDER: {
1622			struct binder_ref *ref;
1623			struct binder_node *node = binder_get_node(proc, fp->binder);
1624			if (node == NULL) {
1625				node = binder_new_node(proc, fp->binder, fp->cookie);
1626				if (node == NULL) {
1627					return_error = BR_FAILED_REPLY;
1628					goto err_binder_new_node_failed;
1629				}
1630				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1631				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1632			}
1633			if (fp->cookie != node->cookie) {
1634				binder_user_error("binder: %d:%d sending u%p "
1635					"node %d, cookie mismatch %p != %p\n",
1636					proc->pid, thread->pid,
1637					fp->binder, node->debug_id,
1638					fp->cookie, node->cookie);
1639				goto err_binder_get_ref_for_node_failed;
1640			}
1641			ref = binder_get_ref_for_node(target_proc, node);
1642			if (ref == NULL) {
1643				return_error = BR_FAILED_REPLY;
1644				goto err_binder_get_ref_for_node_failed;
1645			}
1646			if (fp->type == BINDER_TYPE_BINDER)
1647				fp->type = BINDER_TYPE_HANDLE;
1648			else
1649				fp->type = BINDER_TYPE_WEAK_HANDLE;
1650			fp->handle = ref->desc;
1651			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
1652				       &thread->todo);
1653
1654			trace_binder_transaction_node_to_ref(t, node, ref);
1655			binder_debug(BINDER_DEBUG_TRANSACTION,
1656				     "        node %d u%p -> ref %d desc %d\n",
1657				     node->debug_id, node->ptr, ref->debug_id,
1658				     ref->desc);
1659		} break;
1660		case BINDER_TYPE_HANDLE:
1661		case BINDER_TYPE_WEAK_HANDLE: {
1662			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1663			if (ref == NULL) {
1664				binder_user_error("binder: %d:%d got "
1665					"transaction with invalid "
1666					"handle, %ld\n", proc->pid,
1667					thread->pid, fp->handle);
1668				return_error = BR_FAILED_REPLY;
1669				goto err_binder_get_ref_failed;
1670			}
1671			if (ref->node->proc == target_proc) {
1672				if (fp->type == BINDER_TYPE_HANDLE)
1673					fp->type = BINDER_TYPE_BINDER;
1674				else
1675					fp->type = BINDER_TYPE_WEAK_BINDER;
1676				fp->binder = ref->node->ptr;
1677				fp->cookie = ref->node->cookie;
1678				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1679				trace_binder_transaction_ref_to_node(t, ref);
1680				binder_debug(BINDER_DEBUG_TRANSACTION,
1681					     "        ref %d desc %d -> node %d u%p\n",
1682					     ref->debug_id, ref->desc, ref->node->debug_id,
1683					     ref->node->ptr);
1684			} else {
1685				struct binder_ref *new_ref;
1686				new_ref = binder_get_ref_for_node(target_proc, ref->node);
1687				if (new_ref == NULL) {
1688					return_error = BR_FAILED_REPLY;
1689					goto err_binder_get_ref_for_node_failed;
1690				}
1691				fp->handle = new_ref->desc;
1692				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1693				trace_binder_transaction_ref_to_ref(t, ref,
1694								    new_ref);
1695				binder_debug(BINDER_DEBUG_TRANSACTION,
1696					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1697					     ref->debug_id, ref->desc, new_ref->debug_id,
1698					     new_ref->desc, ref->node->debug_id);
1699			}
1700		} break;
1701
1702		case BINDER_TYPE_FD: {
1703			int target_fd;
1704			struct file *file;
1705
1706			if (reply) {
1707				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1708					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
1709						proc->pid, thread->pid, fp->handle);
1710					return_error = BR_FAILED_REPLY;
1711					goto err_fd_not_allowed;
1712				}
1713			} else if (!target_node->accept_fds) {
1714				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
1715					proc->pid, thread->pid, fp->handle);
1716				return_error = BR_FAILED_REPLY;
1717				goto err_fd_not_allowed;
1718			}
1719
1720			file = fget(fp->handle);
1721			if (file == NULL) {
1722				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
1723					proc->pid, thread->pid, fp->handle);
1724				return_error = BR_FAILED_REPLY;
1725				goto err_fget_failed;
1726			}
1727			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1728			if (target_fd < 0) {
1729				fput(file);
1730				return_error = BR_FAILED_REPLY;
1731				goto err_get_unused_fd_failed;
1732			}
1733			task_fd_install(target_proc, target_fd, file);
1734			trace_binder_transaction_fd(t, fp->handle, target_fd);
1735			binder_debug(BINDER_DEBUG_TRANSACTION,
1736				     "        fd %ld -> %d\n", fp->handle, target_fd);
1737			/* TODO: fput? */
1738			fp->handle = target_fd;
1739		} break;
1740
1741		default:
1742			binder_user_error("binder: %d:%d got transactio"
1743				"n with invalid object type, %lx\n",
1744				proc->pid, thread->pid, fp->type);
1745			return_error = BR_FAILED_REPLY;
1746			goto err_bad_object_type;
1747		}
1748	}
1749	if (reply) {
1750		BUG_ON(t->buffer->async_transaction != 0);
1751		binder_pop_transaction(target_thread, in_reply_to);
1752	} else if (!(t->flags & TF_ONE_WAY)) {
1753		BUG_ON(t->buffer->async_transaction != 0);
1754		t->need_reply = 1;
1755		t->from_parent = thread->transaction_stack;
1756		thread->transaction_stack = t;
1757	} else {
1758		BUG_ON(target_node == NULL);
1759		BUG_ON(t->buffer->async_transaction != 1);
1760		if (target_node->has_async_transaction) {
1761			target_list = &target_node->async_todo;
1762			target_wait = NULL;
1763		} else
1764			target_node->has_async_transaction = 1;
1765	}
1766	t->work.type = BINDER_WORK_TRANSACTION;
1767	list_add_tail(&t->work.entry, target_list);
1768	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1769	list_add_tail(&tcomplete->entry, &thread->todo);
1770	if (target_wait)
1771		wake_up_interruptible(target_wait);
1772	return;
1773
1774err_get_unused_fd_failed:
1775err_fget_failed:
1776err_fd_not_allowed:
1777err_binder_get_ref_for_node_failed:
1778err_binder_get_ref_failed:
1779err_binder_new_node_failed:
1780err_bad_object_type:
1781err_bad_offset:
1782err_copy_data_failed:
1783	trace_binder_transaction_failed_buffer_release(t->buffer);
1784	binder_transaction_buffer_release(target_proc, t->buffer, offp);
1785	t->buffer->transaction = NULL;
1786	binder_free_buf(target_proc, t->buffer);
1787err_binder_alloc_buf_failed:
1788	kfree(tcomplete);
1789	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
1790err_alloc_tcomplete_failed:
1791	kfree(t);
1792	binder_stats_deleted(BINDER_STAT_TRANSACTION);
1793err_alloc_t_failed:
1794err_bad_call_stack:
1795err_empty_call_stack:
1796err_dead_binder:
1797err_invalid_target_handle:
1798err_no_context_mgr_node:
1799	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1800		     "binder: %d:%d transaction failed %d, size %zd-%zd\n",
1801		     proc->pid, thread->pid, return_error,
1802		     tr->data_size, tr->offsets_size);
1803
1804	{
1805		struct binder_transaction_log_entry *fe;
1806		fe = binder_transaction_log_add(&binder_transaction_log_failed);
1807		*fe = *e;
1808	}
1809
1810	BUG_ON(thread->return_error != BR_OK);
1811	if (in_reply_to) {
1812		thread->return_error = BR_TRANSACTION_COMPLETE;
1813		binder_send_failed_reply(in_reply_to, return_error);
1814	} else
1815		thread->return_error = return_error;
1816}
1817
1818int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1819			void __user *buffer, int size, signed long *consumed)
1820{
1821	uint32_t cmd;
1822	void __user *ptr = buffer + *consumed;
1823	void __user *end = buffer + size;
1824
1825	while (ptr < end && thread->return_error == BR_OK) {
1826		if (get_user(cmd, (uint32_t __user *)ptr))
1827			return -EFAULT;
1828		ptr += sizeof(uint32_t);
1829		trace_binder_command(cmd);
1830		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1831			binder_stats.bc[_IOC_NR(cmd)]++;
1832			proc->stats.bc[_IOC_NR(cmd)]++;
1833			thread->stats.bc[_IOC_NR(cmd)]++;
1834		}
1835		switch (cmd) {
1836		case BC_INCREFS:
1837		case BC_ACQUIRE:
1838		case BC_RELEASE:
1839		case BC_DECREFS: {
1840			uint32_t target;
1841			struct binder_ref *ref;
1842			const char *debug_string;
1843
1844			if (get_user(target, (uint32_t __user *)ptr))
1845				return -EFAULT;
1846			ptr += sizeof(uint32_t);
1847			if (target == 0 && binder_context_mgr_node &&
1848			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1849				ref = binder_get_ref_for_node(proc,
1850					       binder_context_mgr_node);
1851				if (ref->desc != target) {
1852					binder_user_error("binder: %d:"
1853						"%d tried to acquire "
1854						"reference to desc 0, "
1855						"got %d instead\n",
1856						proc->pid, thread->pid,
1857						ref->desc);
1858				}
1859			} else
1860				ref = binder_get_ref(proc, target);
1861			if (ref == NULL) {
1862				binder_user_error("binder: %d:%d refcou"
1863					"nt change on invalid ref %d\n",
1864					proc->pid, thread->pid, target);
1865				break;
1866			}
1867			switch (cmd) {
1868			case BC_INCREFS:
1869				debug_string = "IncRefs";
1870				binder_inc_ref(ref, 0, NULL);
1871				break;
1872			case BC_ACQUIRE:
1873				debug_string = "Acquire";
1874				binder_inc_ref(ref, 1, NULL);
1875				break;
1876			case BC_RELEASE:
1877				debug_string = "Release";
1878				binder_dec_ref(ref, 1);
1879				break;
1880			case BC_DECREFS:
1881			default:
1882				debug_string = "DecRefs";
1883				binder_dec_ref(ref, 0);
1884				break;
1885			}
1886			binder_debug(BINDER_DEBUG_USER_REFS,
1887				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
1888				     proc->pid, thread->pid, debug_string, ref->debug_id,
1889				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1890			break;
1891		}
1892		case BC_INCREFS_DONE:
1893		case BC_ACQUIRE_DONE: {
1894			void __user *node_ptr;
1895			void *cookie;
1896			struct binder_node *node;
1897
1898			if (get_user(node_ptr, (void * __user *)ptr))
1899				return -EFAULT;
1900			ptr += sizeof(void *);
1901			if (get_user(cookie, (void * __user *)ptr))
1902				return -EFAULT;
1903			ptr += sizeof(void *);
1904			node = binder_get_node(proc, node_ptr);
1905			if (node == NULL) {
1906				binder_user_error("binder: %d:%d "
1907					"%s u%p no match\n",
1908					proc->pid, thread->pid,
1909					cmd == BC_INCREFS_DONE ?
1910					"BC_INCREFS_DONE" :
1911					"BC_ACQUIRE_DONE",
1912					node_ptr);
1913				break;
1914			}
1915			if (cookie != node->cookie) {
1916				binder_user_error("binder: %d:%d %s u%p node %d"
1917					" cookie mismatch %p != %p\n",
1918					proc->pid, thread->pid,
1919					cmd == BC_INCREFS_DONE ?
1920					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1921					node_ptr, node->debug_id,
1922					cookie, node->cookie);
1923				break;
1924			}
1925			if (cmd == BC_ACQUIRE_DONE) {
1926				if (node->pending_strong_ref == 0) {
1927					binder_user_error("binder: %d:%d "
1928						"BC_ACQUIRE_DONE node %d has "
1929						"no pending acquire request\n",
1930						proc->pid, thread->pid,
1931						node->debug_id);
1932					break;
1933				}
1934				node->pending_strong_ref = 0;
1935			} else {
1936				if (node->pending_weak_ref == 0) {
1937					binder_user_error("binder: %d:%d "
1938						"BC_INCREFS_DONE node %d has "
1939						"no pending increfs request\n",
1940						proc->pid, thread->pid,
1941						node->debug_id);
1942					break;
1943				}
1944				node->pending_weak_ref = 0;
1945			}
1946			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1947			binder_debug(BINDER_DEBUG_USER_REFS,
1948				     "binder: %d:%d %s node %d ls %d lw %d\n",
1949				     proc->pid, thread->pid,
1950				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1951				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
1952			break;
1953		}
1954		case BC_ATTEMPT_ACQUIRE:
1955			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
1956			return -EINVAL;
1957		case BC_ACQUIRE_RESULT:
1958			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
1959			return -EINVAL;
1960
1961		case BC_FREE_BUFFER: {
1962			void __user *data_ptr;
1963			struct binder_buffer *buffer;
1964
1965			if (get_user(data_ptr, (void * __user *)ptr))
1966				return -EFAULT;
1967			ptr += sizeof(void *);
1968
1969			buffer = binder_buffer_lookup(proc, data_ptr);
1970			if (buffer == NULL) {
1971				binder_user_error("binder: %d:%d "
1972					"BC_FREE_BUFFER u%p no match\n",
1973					proc->pid, thread->pid, data_ptr);
1974				break;
1975			}
1976			if (!buffer->allow_user_free) {
1977				binder_user_error("binder: %d:%d "
1978					"BC_FREE_BUFFER u%p matched "
1979					"unreturned buffer\n",
1980					proc->pid, thread->pid, data_ptr);
1981				break;
1982			}
1983			binder_debug(BINDER_DEBUG_FREE_BUFFER,
1984				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
1985				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
1986				     buffer->transaction ? "active" : "finished");
1987
1988			if (buffer->transaction) {
1989				buffer->transaction->buffer = NULL;
1990				buffer->transaction = NULL;
1991			}
1992			if (buffer->async_transaction && buffer->target_node) {
1993				BUG_ON(!buffer->target_node->has_async_transaction);
1994				if (list_empty(&buffer->target_node->async_todo))
1995					buffer->target_node->has_async_transaction = 0;
1996				else
1997					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1998			}
1999			trace_binder_transaction_buffer_release(buffer);
2000			binder_transaction_buffer_release(proc, buffer, NULL);
2001			binder_free_buf(proc, buffer);
2002			break;
2003		}
2004
2005		case BC_TRANSACTION:
2006		case BC_REPLY: {
2007			struct binder_transaction_data tr;
2008
2009			if (copy_from_user(&tr, ptr, sizeof(tr)))
2010				return -EFAULT;
2011			ptr += sizeof(tr);
2012			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
2013			break;
2014		}
2015
2016		case BC_REGISTER_LOOPER:
2017			binder_debug(BINDER_DEBUG_THREADS,
2018				     "binder: %d:%d BC_REGISTER_LOOPER\n",
2019				     proc->pid, thread->pid);
2020			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2021				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2022				binder_user_error("binder: %d:%d ERROR:"
2023					" BC_REGISTER_LOOPER called "
2024					"after BC_ENTER_LOOPER\n",
2025					proc->pid, thread->pid);
2026			} else if (proc->requested_threads == 0) {
2027				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2028				binder_user_error("binder: %d:%d ERROR:"
2029					" BC_REGISTER_LOOPER called "
2030					"without request\n",
2031					proc->pid, thread->pid);
2032			} else {
2033				proc->requested_threads--;
2034				proc->requested_threads_started++;
2035			}
2036			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2037			break;
2038		case BC_ENTER_LOOPER:
2039			binder_debug(BINDER_DEBUG_THREADS,
2040				     "binder: %d:%d BC_ENTER_LOOPER\n",
2041				     proc->pid, thread->pid);
2042			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2043				thread->looper |= BINDER_LOOPER_STATE_INVALID;
2044				binder_user_error("binder: %d:%d ERROR:"
2045					" BC_ENTER_LOOPER called after "
2046					"BC_REGISTER_LOOPER\n",
2047					proc->pid, thread->pid);
2048			}
2049			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2050			break;
2051		case BC_EXIT_LOOPER:
2052			binder_debug(BINDER_DEBUG_THREADS,
2053				     "binder: %d:%d BC_EXIT_LOOPER\n",
2054				     proc->pid, thread->pid);
2055			thread->looper |= BINDER_LOOPER_STATE_EXITED;
2056			break;
2057
2058		case BC_REQUEST_DEATH_NOTIFICATION:
2059		case BC_CLEAR_DEATH_NOTIFICATION: {
2060			uint32_t target;
2061			void __user *cookie;
2062			struct binder_ref *ref;
2063			struct binder_ref_death *death;
2064
2065			if (get_user(target, (uint32_t __user *)ptr))
2066				return -EFAULT;
2067			ptr += sizeof(uint32_t);
2068			if (get_user(cookie, (void __user * __user *)ptr))
2069				return -EFAULT;
2070			ptr += sizeof(void *);
2071			ref = binder_get_ref(proc, target);
2072			if (ref == NULL) {
2073				binder_user_error("binder: %d:%d %s "
2074					"invalid ref %d\n",
2075					proc->pid, thread->pid,
2076					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2077					"BC_REQUEST_DEATH_NOTIFICATION" :
2078					"BC_CLEAR_DEATH_NOTIFICATION",
2079					target);
2080				break;
2081			}
2082
2083			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2084				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
2085				     proc->pid, thread->pid,
2086				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2087				     "BC_REQUEST_DEATH_NOTIFICATION" :
2088				     "BC_CLEAR_DEATH_NOTIFICATION",
2089				     cookie, ref->debug_id, ref->desc,
2090				     ref->strong, ref->weak, ref->node->debug_id);
2091
2092			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2093				if (ref->death) {
2094					binder_user_error("binder: %d:%"
2095						"d BC_REQUEST_DEATH_NOTI"
2096						"FICATION death notific"
2097						"ation already set\n",
2098						proc->pid, thread->pid);
2099					break;
2100				}
2101				death = kzalloc(sizeof(*death), GFP_KERNEL);
2102				if (death == NULL) {
2103					thread->return_error = BR_ERROR;
2104					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2105						     "binder: %d:%d "
2106						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
2107						     proc->pid, thread->pid);
2108					break;
2109				}
2110				binder_stats_created(BINDER_STAT_DEATH);
2111				INIT_LIST_HEAD(&death->work.entry);
2112				death->cookie = cookie;
2113				ref->death = death;
2114				if (ref->node->proc == NULL) {
2115					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2116					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2117						list_add_tail(&ref->death->work.entry, &thread->todo);
2118					} else {
2119						list_add_tail(&ref->death->work.entry, &proc->todo);
2120						wake_up_interruptible(&proc->wait);
2121					}
2122				}
2123			} else {
2124				if (ref->death == NULL) {
2125					binder_user_error("binder: %d:%"
2126						"d BC_CLEAR_DEATH_NOTIFI"
2127						"CATION death notificat"
2128						"ion not active\n",
2129						proc->pid, thread->pid);
2130					break;
2131				}
2132				death = ref->death;
2133				if (death->cookie != cookie) {
2134					binder_user_error("binder: %d:%"
2135						"d BC_CLEAR_DEATH_NOTIFI"
2136						"CATION death notificat"
2137						"ion cookie mismatch "
2138						"%p != %p\n",
2139						proc->pid, thread->pid,
2140						death->cookie, cookie);
2141					break;
2142				}
2143				ref->death = NULL;
2144				if (list_empty(&death->work.entry)) {
2145					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2146					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2147						list_add_tail(&death->work.entry, &thread->todo);
2148					} else {
2149						list_add_tail(&death->work.entry, &proc->todo);
2150						wake_up_interruptible(&proc->wait);
2151					}
2152				} else {
2153					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2154					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2155				}
2156			}
2157		} break;
2158		case BC_DEAD_BINDER_DONE: {
2159			struct binder_work *w;
2160			void __user *cookie;
2161			struct binder_ref_death *death = NULL;
2162			if (get_user(cookie, (void __user * __user *)ptr))
2163				return -EFAULT;
2164
2165			ptr += sizeof(void *);
2166			list_for_each_entry(w, &proc->delivered_death, entry) {
2167				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2168				if (tmp_death->cookie == cookie) {
2169					death = tmp_death;
2170					break;
2171				}
2172			}
2173			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2174				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
2175				     proc->pid, thread->pid, cookie, death);
2176			if (death == NULL) {
2177				binder_user_error("binder: %d:%d BC_DEAD"
2178					"_BINDER_DONE %p not found\n",
2179					proc->pid, thread->pid, cookie);
2180				break;
2181			}
2182
2183			list_del_init(&death->work.entry);
2184			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2185				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2186				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2187					list_add_tail(&death->work.entry, &thread->todo);
2188				} else {
2189					list_add_tail(&death->work.entry, &proc->todo);
2190					wake_up_interruptible(&proc->wait);
2191				}
2192			}
2193		} break;
2194
2195		default:
2196			printk(KERN_ERR "binder: %d:%d unknown command %d\n",
2197			       proc->pid, thread->pid, cmd);
2198			return -EINVAL;
2199		}
2200		*consumed = ptr - buffer;
2201	}
2202	return 0;
2203}
2204
2205void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
2206		    uint32_t cmd)
2207{
2208	trace_binder_return(cmd);
2209	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2210		binder_stats.br[_IOC_NR(cmd)]++;
2211		proc->stats.br[_IOC_NR(cmd)]++;
2212		thread->stats.br[_IOC_NR(cmd)]++;
2213	}
2214}
2215
2216static int binder_has_proc_work(struct binder_proc *proc,
2217				struct binder_thread *thread)
2218{
2219	return !list_empty(&proc->todo) ||
2220		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2221}
2222
2223static int binder_has_thread_work(struct binder_thread *thread)
2224{
2225	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2226		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2227}
2228
/*
 * binder_thread_read() - deliver pending work to user space as BR_* commands.
 * @proc:      process that owns @thread
 * @thread:    per-thread binder state of the caller
 * @buffer:    user-space buffer that receives the command stream
 * @size:      size of @buffer in bytes
 * @consumed:  in: bytes already written to @buffer; out: total bytes written
 * @non_block: non-zero when the file is in non-blocking mode
 *
 * Waits (unless @non_block) for work on the thread's todo list — or on
 * the process todo list when the thread has no transaction stack and no
 * thread-local work — then encodes queued binder_work items as BR_*
 * commands in @buffer.  Returns 0 on success, -EAGAIN when non-blocking
 * and idle, -EFAULT on user-copy failure, or the (negative) result of an
 * interrupted wait.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      void  __user *buffer, int size,
			      signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Start every fresh read with BR_NOOP so the buffer always begins
	 * with a valid command, even if nothing else fits. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* A thread may only service process-wide work while it has no
	 * transaction of its own in flight and an empty todo list. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Pending error codes take priority over queued work.  return_error2
	 * (if set) is delivered first; if the buffer fills up in between,
	 * the remaining error is kept for the next read. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* Drop the global binder lock for the duration of the sleep. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		/* Threads must register/enter the looper before waiting
		 * for process work; flag the protocol violation. */
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items until the buffer is full or a transaction has
	 * been copied out (the loop breaks after one transaction). */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		/* Thread-local work takes precedence over process work. */
		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			/* Only the initial BR_NOOP (4 bytes) was written; go
			 * back to sleep unless user space asked us to return. */
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		/* Stop if even the largest reply (command word plus a full
		 * binder_transaction_data) no longer fits. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Copied out below, after the switch. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			/* Compute the desired user-space reference state and
			 * emit at most one inc/dec command per pass. */
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				/* Commands carry the node's user cookie pair. */
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* Nothing to tell user space; if the node has
				 * no references left at all, destroy it. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid, node->debug_id,
						     node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				/* Clear is terminal: free the death record. */
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep it until user space acknowledges with
				 * BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Translate the transaction into a binder_transaction_data
		 * visible to user space. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			/* Temporarily inherit priority constraints from the
			 * target node; restored from saved_priority later. */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Report the sender pid as seen from the reader's
			 * pid namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		/* The payload stays in the shared mmap area; user space gets
		 * pointers offset into its own mapping, not a copy. */
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d %s %d %d:%d, cmd %d"
			     "size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* A reply is expected: push the transaction onto this
			 * thread's stack until BC_REPLY arrives. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: the transaction is finished. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* If no thread is left ready and we are still under max_threads,
	 * ask user space to spawn another looper thread. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* user space only honors
	     BR_SPAWN_LOOPER from a registered/entered looper thread;
	     without this check it fails to spawn a new thread */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "binder: %d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrite the leading BR_NOOP with BR_SPAWN_LOOPER. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
2539
2540static void binder_release_work(struct list_head *list)
2541{
2542	struct binder_work *w;
2543	while (!list_empty(list)) {
2544		w = list_first_entry(list, struct binder_work, entry);
2545		list_del_init(&w->entry);
2546		switch (w->type) {
2547		case BINDER_WORK_TRANSACTION: {
2548			struct binder_transaction *t;
2549
2550			t = container_of(w, struct binder_transaction, work);
2551			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
2552				binder_send_failed_reply(t, BR_DEAD_REPLY);
2553		} break;
2554		case BINDER_WORK_TRANSACTION_COMPLETE: {
2555			kfree(w);
2556			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2557		} break;
2558		default:
2559			break;
2560		}
2561	}
2562
2563}
2564
2565static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2566{
2567	struct binder_thread *thread = NULL;
2568	struct rb_node *parent = NULL;
2569	struct rb_node **p = &proc->threads.rb_node;
2570
2571	while (*p) {
2572		parent = *p;
2573		thread = rb_entry(parent, struct binder_thread, rb_node);
2574
2575		if (current->pid < thread->pid)
2576			p = &(*p)->rb_left;
2577		else if (current->pid > thread->pid)
2578			p = &(*p)->rb_right;
2579		else
2580			break;
2581	}
2582	if (*p == NULL) {
2583		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2584		if (thread == NULL)
2585			return NULL;
2586		binder_stats_created(BINDER_STAT_THREAD);
2587		thread->proc = proc;
2588		thread->pid = current->pid;
2589		init_waitqueue_head(&thread->wait);
2590		INIT_LIST_HEAD(&thread->todo);
2591		rb_link_node(&thread->rb_node, parent, p);
2592		rb_insert_color(&thread->rb_node, &proc->threads);
2593		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2594		thread->return_error = BR_OK;
2595		thread->return_error2 = BR_OK;
2596	}
2597	return thread;
2598}
2599
/*
 * binder_free_thread() - tear down one binder thread.
 * @proc:   owning process
 * @thread: thread being destroyed
 *
 * Unlinks @thread from the process rbtree, walks its transaction stack
 * detaching every transaction that still references it, fails the
 * inbound transaction awaiting a reply (if any) with BR_DEAD_REPLY,
 * releases queued work, and frees the thread.  Returns the number of
 * transactions that were still active.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* If the top of the stack was sent TO this thread, its sender is
	 * waiting for a reply; remember it so it can be failed after the
	 * stack has been unwound. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "binder: release %d:%d transaction %d "
			     "%s, still active\n", proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Inbound: detach the target side and its buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outbound: the sender is going away; any reply has
			 * nowhere to be delivered. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
2640
2641static unsigned int binder_poll(struct file *filp,
2642				struct poll_table_struct *wait)
2643{
2644	struct binder_proc *proc = filp->private_data;
2645	struct binder_thread *thread = NULL;
2646	int wait_for_proc_work;
2647
2648	binder_lock(__func__);
2649
2650	thread = binder_get_thread(proc);
2651
2652	wait_for_proc_work = thread->transaction_stack == NULL &&
2653		list_empty(&thread->todo) && thread->return_error == BR_OK;
2654
2655	binder_unlock(__func__);
2656
2657	if (wait_for_proc_work) {
2658		if (binder_has_proc_work(proc, thread))
2659			return POLLIN;
2660		poll_wait(filp, &proc->wait, wait);
2661		if (binder_has_proc_work(proc, thread))
2662			return POLLIN;
2663	} else {
2664		if (binder_has_thread_work(thread))
2665			return POLLIN;
2666		poll_wait(filp, &thread->wait, wait);
2667		if (binder_has_thread_work(thread))
2668			return POLLIN;
2669	}
2670	return 0;
2671}
2672
/*
 * binder_ioctl() - dispatch user-space binder commands.
 * @filp: binder file; private_data is the binder_proc
 * @cmd:  BINDER_* ioctl command
 * @arg:  user pointer to the command-specific argument block
 *
 * Handles BINDER_WRITE_READ (the main transaction path) and the
 * configuration ioctls.  All command handling runs under the global
 * binder lock.  Returns 0 on success or a negative errno.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* If a user error froze the driver (binder_stop_on_user_error >= 2),
	 * block here until it drops below 2. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
			     bwr.read_size, bwr.read_buffer);

		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			trace_binder_write_done(ret);
			/* On write failure, report the consumed counts back
			 * so user space can see how far processing got. */
			if (ret < 0) {
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			trace_binder_read_done(ret);
			/* This thread consumed work; wake another waiter if
			 * process-level work remains. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
			     bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): copy_from_user failure yields -EINVAL here
		 * but -EFAULT elsewhere in this function — confirm intended. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Only one context manager may exist; once a euid has
		 * claimed the role, only that euid may claim it again. */
		if (binder_context_mgr_node != NULL) {
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {
			if (binder_context_mgr_uid != current->cred->euid) {
				printk(KERN_ERR "binder: BINDER_SET_"
				       "CONTEXT_MGR bad uid %d != %d\n",
				       current->cred->euid,
				       binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the context manager node with permanent refs so it
		 * is never released by normal refcounting. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread was freed above; clear it so the err path below
		 * does not touch it. */
		thread = NULL;
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		/* NOTE(review): put_user failure reports -EINVAL rather
		 * than -EFAULT — confirm intended. */
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
2808
2809static void binder_vma_open(struct vm_area_struct *vma)
2810{
2811	struct binder_proc *proc = vma->vm_private_data;
2812	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2813		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2814		     proc->pid, vma->vm_start, vma->vm_end,
2815		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2816		     (unsigned long)pgprot_val(vma->vm_page_prot));
2817}
2818
2819static void binder_vma_close(struct vm_area_struct *vma)
2820{
2821	struct binder_proc *proc = vma->vm_private_data;
2822	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2823		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2824		     proc->pid, vma->vm_start, vma->vm_end,
2825		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2826		     (unsigned long)pgprot_val(vma->vm_page_prot));
2827	proc->vma = NULL;
2828	proc->vma_vm_mm = NULL;
2829	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2830}
2831
2832static struct vm_operations_struct binder_vm_ops = {
2833	.open = binder_vma_open,
2834	.close = binder_vma_close,
2835};
2836
/*
 * binder_mmap() - create the shared buffer area for a process.
 * @filp: binder file; private_data is the binder_proc
 * @vma:  user VMA to back with binder buffers (silently clamped to 4MB)
 *
 * Reserves a matching kernel vm area, allocates the page-pointer array,
 * maps the first page, and seeds the buffer lists with one free buffer
 * covering the whole area.  Only one mapping per binder_proc is
 * allowed, and it must not be writable by user space.  Returns 0 on
 * success or a negative errno.
 *
 * NOTE(review): there is no check that the caller is the task that
 * opened the fd (proc->tsk) — confirm mapping from another process is
 * intended to be allowed.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Clamp oversized requests to 4MB rather than failing. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	/* Reject writable mappings of the binder buffer area. */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* Clearing VM_MAYWRITE also blocks a later mprotect() to
	 * writable. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve a kernel-side virtual area the same size as the user
	 * mapping; pages are shared between the two views. */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Constant offset translating kernel buffer addresses to the
	 * user-space addresses handed out in transactions. */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On VIPT aliasing caches, both views of a page must share the
	 * same cache colour; nudge the user start until the colours of
	 * the two mappings match.
	 * NOTE(review): this shrinks the usable user range without
	 * adjusting sizes below — confirm this is the intended hack. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	/* One page-pointer slot per page of the mapping. */
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Physically back only the first page up front; the rest is
	 * populated on demand by the buffer allocator. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	/* Seed the allocator with one free buffer spanning the area. */
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Half of the space is reserved for async transactions. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(proc->tsk);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
2932
/*
 * binder_open() - allocate per-process binder state on open(2).
 * @nodp: inode of the binder device (unused)
 * @filp: file whose private_data will point at the new binder_proc
 *
 * Creates the binder_proc, pins the opening task, links the proc onto
 * the global binder_procs list, and exposes a per-pid debugfs file.
 * Returns 0 on success or -ENOMEM.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a task reference for the lifetime of the proc. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	/* The proc is identified by the thread-group leader's pid. */
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];	/* fits a decimal 32-bit pid + NUL */
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}
2968
2969static int binder_flush(struct file *filp, fl_owner_t id)
2970{
2971	struct binder_proc *proc = filp->private_data;
2972
2973	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2974
2975	return 0;
2976}
2977
/*
 * binder_deferred_flush() - force @proc's blocked reader threads back
 * to userspace.
 *
 * Marks every thread with BINDER_LOOPER_STATE_NEED_RETURN, wakes each
 * thread that was waiting on its own queue, and finally wakes anything
 * sleeping on the proc-wide wait queue.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
2996
2997static int binder_release(struct inode *nodp, struct file *filp)
2998{
2999	struct binder_proc *proc = filp->private_data;
3000	debugfs_remove(proc->debugfs_entry);
3001	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3002
3003	return 0;
3004}
3005
/*
 * binder_deferred_release() - final teardown of a binder_proc.
 *
 * Runs from the deferred workqueue after the fd has been closed (the
 * vma and files must already be gone).  Frees every thread, node, ref
 * and buffer still owned by @proc, queues death notifications to peers
 * holding refs to this process's nodes, releases the mapped pages, and
 * finally frees @proc itself.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct hlist_node *pos;
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);
	/* If this process was the context manager, clear the global node. */
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder_release: %d context_mgr_node gone\n",
			     proc->pid);
		binder_context_mgr_node = NULL;
	}

	/* Free all threads; the counters feed the summary debug print. */
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		list_del_init(&node->work.entry);
		if (hlist_empty(&node->refs)) {
			/* Nobody references this node; free it outright. */
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		} else {
			struct binder_ref *ref;
			int death = 0;

			/* Other processes still hold refs: convert the node
			 * into a dead node and queue BINDER_WORK_DEAD_BINDER
			 * for every ref that registered a death notification.
			 */
			node->proc = NULL;
			node->local_strong_refs = 0;
			node->local_weak_refs = 0;
			hlist_add_head(&node->dead_node, &binder_dead_nodes);

			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
				incoming_refs++;
				if (ref->death) {
					death++;
					if (list_empty(&ref->death->work.entry)) {
						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
						wake_up_interruptible(&ref->proc->wait);
					} else
						BUG();
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: node %d now dead, "
				     "refs %d, death %d\n", node->debug_id,
				     incoming_refs, death);
		}
	}
	/* Drop every reference this process holds on other nodes. */
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}
	binder_release_work(&proc->todo);
	buffers = 0;

	/* Free still-allocated transaction buffers, detaching them from
	 * any transaction that has not been freed yet.
	 */
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
							rb_node);
		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			printk(KERN_ERR "binder: release proc %d, "
			       "transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}
		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	/* Unmap and free the physical pages backing the buffer area. */
	page_count = 0;
	if (proc->pages) {
		int i;
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			if (proc->pages[i]) {
				void *page_addr = proc->buffer + i * PAGE_SIZE;
				binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
					     "binder_release: %d: "
					     "page %d at %p not freed\n",
					     proc->pid, i,
					     page_addr);
				unmap_kernel_range((unsigned long)page_addr,
					PAGE_SIZE);
				__free_page(proc->pages[i]);
				page_count++;
			}
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	/* Drop the task reference taken in binder_open(). */
	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_release: %d threads %d, nodes %d (ref %d), "
		     "refs %d, active transactions %d, buffers %d, "
		     "pages %d\n",
		     proc->pid, threads, nodes, incoming_refs, outgoing_refs,
		     active_transactions, buffers, page_count);

	kfree(proc);
}
3129
/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list.
 *
 * Pops one proc at a time off the deferred list, snapshots and clears
 * its pending deferred-work bits, and performs the requested actions
 * (put files / flush / release) under the main driver lock.  The
 * files_struct reference is dropped only after the driver lock is
 * released.  Loops until the deferred list is empty.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		/* Lock order: binder_main_lock, then binder_deferred_lock. */
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		if (files)
			put_files_struct(files);
	} while (proc);
}
/* Work item executed on binder_deferred_workqueue; see binder_defer_work(). */
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3170
/*
 * binder_defer_work() - request deferred work for @proc.
 *
 * ORs @defer into the proc's pending-work mask and, if the proc is not
 * already linked on binder_deferred_list, adds it and kicks the
 * workqueue.  Serialized by binder_deferred_lock so multiple requests
 * coalesce into a single queued work item per proc.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
3183
3184static void print_binder_transaction(struct seq_file *m, const char *prefix,
3185				     struct binder_transaction *t)
3186{
3187	seq_printf(m,
3188		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3189		   prefix, t->debug_id, t,
3190		   t->from ? t->from->proc->pid : 0,
3191		   t->from ? t->from->pid : 0,
3192		   t->to_proc ? t->to_proc->pid : 0,
3193		   t->to_thread ? t->to_thread->pid : 0,
3194		   t->code, t->flags, t->priority, t->need_reply);
3195	if (t->buffer == NULL) {
3196		seq_puts(m, " buffer free\n");
3197		return;
3198	}
3199	if (t->buffer->target_node)
3200		seq_printf(m, " node %d",
3201			   t->buffer->target_node->debug_id);
3202	seq_printf(m, " size %zd:%zd data %p\n",
3203		   t->buffer->data_size, t->buffer->offsets_size,
3204		   t->buffer->data);
3205}
3206
3207static void print_binder_buffer(struct seq_file *m, const char *prefix,
3208				struct binder_buffer *buffer)
3209{
3210	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
3211		   prefix, buffer->debug_id, buffer->data,
3212		   buffer->data_size, buffer->offsets_size,
3213		   buffer->transaction ? "active" : "delivered");
3214}
3215
3216static void print_binder_work(struct seq_file *m, const char *prefix,
3217			      const char *transaction_prefix,
3218			      struct binder_work *w)
3219{
3220	struct binder_node *node;
3221	struct binder_transaction *t;
3222
3223	switch (w->type) {
3224	case BINDER_WORK_TRANSACTION:
3225		t = container_of(w, struct binder_transaction, work);
3226		print_binder_transaction(m, transaction_prefix, t);
3227		break;
3228	case BINDER_WORK_TRANSACTION_COMPLETE:
3229		seq_printf(m, "%stransaction complete\n", prefix);
3230		break;
3231	case BINDER_WORK_NODE:
3232		node = container_of(w, struct binder_node, work);
3233		seq_printf(m, "%snode work %d: u%p c%p\n",
3234			   prefix, node->debug_id, node->ptr, node->cookie);
3235		break;
3236	case BINDER_WORK_DEAD_BINDER:
3237		seq_printf(m, "%shas dead binder\n", prefix);
3238		break;
3239	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3240		seq_printf(m, "%shas cleared dead binder\n", prefix);
3241		break;
3242	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3243		seq_printf(m, "%shas cleared death notification\n", prefix);
3244		break;
3245	default:
3246		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
3247		break;
3248	}
3249}
3250
/*
 * print_binder_thread() - dump a thread's transaction stack and todo list.
 *
 * Walks the transaction stack, labelling each entry as outgoing (this
 * thread initiated it), incoming (targeted at this thread), or bad.
 * When @print_always is zero and nothing followed the header line, the
 * header is discarded by rewinding m->count, so idle threads produce no
 * output.
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* Entry belongs to neither side; stop walking. */
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
3283
3284static void print_binder_node(struct seq_file *m, struct binder_node *node)
3285{
3286	struct binder_ref *ref;
3287	struct hlist_node *pos;
3288	struct binder_work *w;
3289	int count;
3290
3291	count = 0;
3292	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
3293		count++;
3294
3295	seq_printf(m, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
3296		   node->debug_id, node->ptr, node->cookie,
3297		   node->has_strong_ref, node->has_weak_ref,
3298		   node->local_strong_refs, node->local_weak_refs,
3299		   node->internal_strong_refs, count);
3300	if (count) {
3301		seq_puts(m, " proc");
3302		hlist_for_each_entry(ref, pos, &node->refs, node_entry)
3303			seq_printf(m, " %d", ref->proc->pid);
3304	}
3305	seq_puts(m, "\n");
3306	list_for_each_entry(w, &node->async_todo, entry)
3307		print_binder_work(m, "    ",
3308				  "    pending async transaction", w);
3309}
3310
3311static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
3312{
3313	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3314		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3315		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
3316}
3317
/*
 * print_binder_proc() - dump one process's binder state.
 *
 * Prints the proc header followed by its threads, nodes (all of them
 * when @print_all, otherwise only nodes with a pending async
 * transaction), refs (only when @print_all), allocated buffers, pending
 * work, and a delivered-death marker.  If @print_all is zero and no
 * content followed the header, the header is dropped by rewinding
 * m->count.
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		/* Note the presence of delivered death work only once. */
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
3357
/* Human-readable names for BR_* return codes.  Indexed positionally by
 * print_binder_stats() against stats->br, so the order must match the
 * BR_* command definitions.
 */
static const char *binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

/* Names for BC_* commands.  Indexed positionally by print_binder_stats()
 * against stats->bc, so the order must match the BC_* definitions.
 */
static const char *binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

/* Names for the object-statistics classes tracked by
 * binder_stats_created()/binder_stats_deleted(); order must match the
 * BINDER_STAT_* enum used to index obj_created/obj_deleted.
 */
static const char *binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3408
3409static void print_binder_stats(struct seq_file *m, const char *prefix,
3410			       struct binder_stats *stats)
3411{
3412	int i;
3413
3414	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
3415		     ARRAY_SIZE(binder_command_strings));
3416	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
3417		if (stats->bc[i])
3418			seq_printf(m, "%s%s: %d\n", prefix,
3419				   binder_command_strings[i], stats->bc[i]);
3420	}
3421
3422	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
3423		     ARRAY_SIZE(binder_return_strings));
3424	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
3425		if (stats->br[i])
3426			seq_printf(m, "%s%s: %d\n", prefix,
3427				   binder_return_strings[i], stats->br[i]);
3428	}
3429
3430	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3431		     ARRAY_SIZE(binder_objstat_strings));
3432	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
3433		     ARRAY_SIZE(stats->obj_deleted));
3434	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
3435		if (stats->obj_created[i] || stats->obj_deleted[i])
3436			seq_printf(m, "%s%s: active %d total %d\n", prefix,
3437				binder_objstat_strings[i],
3438				stats->obj_created[i] - stats->obj_deleted[i],
3439				stats->obj_created[i]);
3440	}
3441}
3442
/*
 * print_binder_proc_stats() - per-process summary for the "stats" file.
 *
 * Prints thread/node/ref/buffer counts, thread-pool state, the number
 * of pending transactions on the proc todo list, then the proc's own
 * command/return counters via print_binder_stats().
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	/* NOTE(review): %zd prints free_async_space as signed; if the field
	 * is size_t, %zu would be the exact specifier -- confirm.
	 */
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	/* Sum strong/weak counts across all of this proc's refs. */
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	/* Only BINDER_WORK_TRANSACTION entries count as pending here. */
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
3495
3496
/*
 * binder_state_show() - debugfs "state" file: full dump of every proc
 * plus any nodes whose owning process exited but are still referenced.
 *
 * binder_debug_no_lock is sampled once into do_lock so the lock and
 * unlock decisions stay paired even if the flag changes mid-dump.
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3520
/*
 * binder_stats_show() - debugfs "stats" file: global counters followed
 * by per-process statistics.
 *
 * binder_debug_no_lock is sampled once so lock/unlock stay paired.
 */
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3540
/*
 * binder_transactions_show() - debugfs "transactions" file: dump each
 * proc with print_all == 0, so idle threads/nodes are suppressed and
 * only active transaction state appears.
 *
 * binder_debug_no_lock is sampled once so lock/unlock stay paired.
 */
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3557
/*
 * binder_proc_show() - debugfs proc/<pid> file: full dump of a single
 * binder_proc, passed in via the seq_file private pointer.
 *
 * binder_debug_no_lock is sampled once so lock/unlock stay paired.
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);
	seq_puts(m, "binder proc state:\n");
	print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
3571
3572static void print_binder_transaction_log_entry(struct seq_file *m,
3573					struct binder_transaction_log_entry *e)
3574{
3575	seq_printf(m,
3576		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3577		   e->debug_id, (e->call_type == 2) ? "reply" :
3578		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
3579		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
3580		   e->target_handle, e->data_size, e->offsets_size);
3581}
3582
3583static int binder_transaction_log_show(struct seq_file *m, void *unused)
3584{
3585	struct binder_transaction_log *log = m->private;
3586	int i;
3587
3588	if (log->full) {
3589		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
3590			print_binder_transaction_log_entry(m, &log->entry[i]);
3591	}
3592	for (i = 0; i < log->next; i++)
3593		print_binder_transaction_log_entry(m, &log->entry[i]);
3594	return 0;
3595}
3596
/* File operations for /dev/binder.  No .read/.write: all data transfer
 * goes through the ioctl interface and the mmap'ed buffer area.
 */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
3606
/* /dev/binder misc device with a dynamically allocated minor number. */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3612
/* Instantiate the debugfs open handlers and fops (binder_*_fops) for
 * each of the *_show functions above; see BINDER_DEBUG_ENTRY at the
 * top of this file.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
3617
/*
 * binder_init() - module init: create the deferred workqueue, register
 * the /dev/binder misc device, and populate the debugfs tree.
 *
 * debugfs failures are tolerated (the entries are optional diagnostics);
 * the function's return value is the misc_register() result.
 *
 * NOTE(review): if misc_register() fails, the workqueue and any debugfs
 * entries created above are not torn down before the error is returned
 * -- confirm whether cleanup is needed here.
 */
static int __init binder_init(void)
{
	int ret;

	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);
	ret = misc_register(&binder_miscdev);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
	return ret;
}
3660
3661device_initcall(binder_init);
3662
3663#define CREATE_TRACE_POINTS
3664#include "binder_trace.h"
3665
3666MODULE_LICENSE("GPL v2");
3667