binder.c revision fd0a2f07d9b37fa52ece5bdec4f1130e080c1f51
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <asm/cacheflush.h>
19#include <linux/fdtable.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/miscdevice.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/nsproxy.h>
28#include <linux/poll.h>
29#include <linux/proc_fs.h>
30#include <linux/rbtree.h>
31#include <linux/sched.h>
32#include <linux/uaccess.h>
33#include <linux/vmalloc.h>
34#include "binder.h"
35
/* Global driver state; binder_lock serializes access to most of it. */
static DEFINE_MUTEX(binder_lock);
static HLIST_HEAD(binder_procs);	/* all live binder_proc instances */
/* the single context-manager node (refs to it get descriptor 0) */
static struct binder_node *binder_context_mgr_node;
static uid_t binder_context_mgr_uid = -1;	/* -1: no manager registered yet */
static int binder_last_id;	/* sequence source for debug_id values */
static struct proc_dir_entry *binder_proc_dir_entry_root;
static struct proc_dir_entry *binder_proc_dir_entry_proc;
/* nodes whose owning process has exited but that still have refs */
static struct hlist_head binder_dead_nodes;
static HLIST_HEAD(binder_deferred_list);	/* procs with pending deferred work */
static DEFINE_MUTEX(binder_deferred_lock);	/* protects binder_deferred_list */
46
/* Forward declaration: /proc read handler for per-process binder state. */
static int binder_read_proc_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data);
49
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

/* userspace must never get a writable mapping of the binder buffer */
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

/* presumably a threshold for small-mapping handling — confirm at use site */
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
62
/* Categories selectable through the debug_mask module parameter. */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
/* default: only errors and failed/dead transactions are reported */
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* NOTE(review): declared int but exposed as a "bool" module param; newer
 * kernels require the backing variable to be bool — confirm against the
 * target kernel version. */
static int binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
87
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 0 = off, 1 = armed; binder_user_error() latches it to 2 on an error */
static int binder_stop_on_user_error;

/*
 * Setter for the stop_on_user_error module parameter.  Writing a value
 * below 2 releases any threads waiting on binder_user_error_wait.
 * Returns the param_set_int() result.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
102
/*
 * binder_debug() - printk gated on binder_debug_mask.  The mask
 * argument is parenthesized in the expansion so callers may pass a
 * compound expression (e.g. FLAG_A | FLAG_B) without operator-
 * precedence surprises against the surrounding '&'.
 */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & (mask)) \
			printk(KERN_INFO x); \
	} while (0)

/*
 * binder_user_error() - report a misbehaving user process.  Also
 * latches binder_stop_on_user_error to 2 when the stop-on-user-error
 * facility is armed (see binder_set_stop_on_user_error above).
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
116
/* Object classes counted in binder_stats.obj_created / obj_deleted. */
enum {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* must be last */
};
127
/*
 * Debug counters.  br[]/bc[] count protocol return/command codes,
 * indexed by _IOC_NR of the code; the obj_* arrays count object
 * creations/deletions per BINDER_STAT_* class.  Embedded copies exist
 * in binder_proc and binder_thread for per-proc/per-thread stats.
 */
struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* driver-wide counters */
static struct binder_stats binder_stats;
136
/* One debug snapshot of a transaction's endpoints and payload sizes. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
/* Fixed-size ring buffer of recent transaction log entries. */
struct binder_transaction_log {
	int next;	/* next slot to overwrite */
	int full;	/* set once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;
156
157static struct binder_transaction_log_entry *binder_transaction_log_add(
158	struct binder_transaction_log *log)
159{
160	struct binder_transaction_log_entry *e;
161	e = &log->entry[log->next];
162	memset(e, 0, sizeof(*e));
163	log->next++;
164	if (log->next == ARRAY_SIZE(log->entry)) {
165		log->next = 0;
166		log->full = 1;
167	}
168	return e;
169}
170
/*
 * A work item queueable on a todo list; the type tag tells the consumer
 * which containing object the item is embedded in (e.g. a transaction
 * or a node) and how to process it.
 */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
182
/*
 * One binder object, owned by the process that published it.  Kept in
 * proc->nodes (keyed by userspace ptr) while the owner lives; after the
 * owner dies, dead_node links it on a dead-node list instead.
 */
struct binder_node {
	int debug_id;			/* unique id for debug output */
	struct binder_work work;	/* queued to report ref-count changes */
	union {
		struct rb_node rb_node;		/* in proc->nodes while proc lives */
		struct hlist_node dead_node;	/* after the owning proc exits */
	};
	struct binder_proc *proc;	/* owning process; NULL once dead */
	struct hlist_head refs;		/* all binder_refs targeting this node */
	int internal_strong_refs;	/* strong refs held for remote refs */
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;		/* userspace object address (tree key) */
	void __user *cookie;		/* opaque userspace cookie */
	unsigned has_strong_ref:1;	/* userspace told about a strong ref */
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;	/* userspace told about a weak ref */
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;		/* may transactions carry fds? */
	unsigned min_priority:8;	/* presumably a priority floor for callers — confirm at use site */
	struct list_head async_todo;	/* queued async transactions */
};
206
/* Death-notification registration; cookie identifies it to userspace. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};
211
/* One process's handle (descriptor) to a node in another process. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;	/* process holding this ref */
	struct binder_node *node;	/* target node */
	uint32_t desc;			/* userspace handle value */
	int strong;			/* strong refcount */
	int weak;			/* weak refcount */
	struct binder_ref_death *death;	/* non-NULL if death notification requested */
};
228
/*
 * Header preceding each allocation carved out of a proc's mmap'd
 * region; payload follows in the flexible data[] array.
 */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;	/* owning transaction, if any */

	struct binder_node *target_node;
	size_t data_size;	/* payload bytes */
	size_t offsets_size;	/* object-offset array bytes, after data */
	uint8_t data[0];	/* payload starts here */
};
245
/* Work kinds postponed via binder_defer_work(); OR-able flag bits. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
251
/* Per-process binder state, created when a process opens the device. */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_threads, by pid */
	struct rb_root nodes;		/* published nodes, by userspace ptr */
	struct rb_root refs_by_desc;	/* refs held, by descriptor */
	struct rb_root refs_by_node;	/* refs held, by target node */
	int pid;
	struct vm_area_struct *vma;	/* the userspace buffer mapping */
	struct task_struct *tsk;
	struct files_struct *files;	/* target for fd install/close */
	struct hlist_node deferred_work_node;	/* entry in binder_deferred_list */
	int deferred_work;		/* pending BINDER_DEFERRED_* flags */
	void *buffer;			/* kernel address of the mapped region */
	/* delta between userspace and kernel addresses of the region */
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;	/* all buffer headers, address order */
	struct rb_root free_buffers;	/* free buffers, keyed by size */
	struct rb_root allocated_buffers;	/* in-use buffers, by address */
	size_t free_async_space;	/* budget for async allocations */

	struct page **pages;		/* backing pages, one per PAGE_SIZE */
	size_t buffer_size;		/* size of the mapped region */
	uint32_t buffer_free;
	struct list_head todo;		/* process-wide work queue */
	wait_queue_head_t wait;		/* threads waiting for todo work */
	struct binder_stats stats;	/* per-process counters */
	struct list_head delivered_death;
	int max_threads;		/* looper-thread cap set by userspace */
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
};
285
/* Flag bits tracked in binder_thread.looper. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
294
/* Per-thread state; one per userspace thread using this proc's fd. */
struct binder_thread {
	struct binder_proc *proc;	/* owning process */
	struct rb_node rb_node;		/* entry in proc->threads, by pid */
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* flags */
	/* chain of transactions this thread is currently nested in */
	struct binder_transaction *transaction_stack;
	struct list_head todo;		/* thread-private work queue */
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;	/* per-thread counters */
};
309
/* An in-flight transaction (or reply) between two processes. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;	/* queues this transaction on a todo list */
	struct binder_thread *from;	/* sender; NULL once it need not reply */
	struct binder_transaction *from_parent;	/* sender's stacked parent */
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;	/* receiver's stacked parent */
	unsigned need_reply:1;		/* synchronous call awaiting a reply */
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;	/* payload in the target's mapping */
	unsigned int	code;		/* userspace method code */
	unsigned int	flags;
	long	priority;		/* sender priority to propagate */
	long	saved_priority;		/* receiver priority to restore */
	uid_t	sender_euid;
};
328
/* Forward declaration: queue deferred work bits for @proc. */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
331
332/*
333 * copied from get_unused_fd_flags
334 */
/*
 * Reserve an unused file descriptor in @proc's file table, honouring
 * that task's RLIMIT_NOFILE.  Copied from get_unused_fd_flags, but
 * operates on an arbitrary task's files_struct so binder can install
 * fds into a transaction's target process.  Returns the fd on success,
 * -ESRCH if the proc's files are gone, or -EMFILE on exhaustion.
 */
int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	/* rlim_cur stays 0 if the sighand lock fails (task exiting),
	 * which forces the -EMFILE path below */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}
399
400/*
401 * copied from fd_install
402 */
/*
 * Install @file at slot @fd in @proc's file table (the slot must have
 * been reserved via task_get_unused_fd_flags).  Copied from fd_install,
 * but takes the target task's files_struct instead of current's.
 * Silently does nothing if the proc's files are already torn down.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	struct files_struct *files = proc->files;
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
418
419/*
420 * copied from __put_unused_fd in open.c
421 */
/*
 * Mark @fd free again in @files and allow next_fd to reuse the low
 * slot.  Caller must hold files->file_lock.
 * copied from __put_unused_fd in open.c
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
429
430/*
431 * copied from sys_close
432 */
/*
 * Close descriptor @fd in the target @proc (copied from sys_close, but
 * operating on proc->files instead of current's).  Returns the
 * filp_close() result, -EBADF for an invalid/empty slot, or -ESRCH if
 * the proc's file table is already gone.  -ERESTART* results are
 * rewritten to -EINTR because the close cannot be restarted once the
 * table entry has been cleared.
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = proc->files;
	struct fdtable *fdt;
	int retval;

	if (files == NULL)
		return -ESRCH;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
469
/*
 * Set the current task's nice level to @nice, clamping to the weakest
 * level RLIMIT_NICE permits when the requested one is not allowed.
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	/* RLIMIT_NICE is encoded as 20 - nice, so invert it */
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "binder: %d: nice value %ld not allowed use "
		     "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	/* the rlimit permits nothing below nice 20: effectively unset */
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}
486
/*
 * Return the usable payload size of @buffer: the distance from the
 * start of its data[] to the next buffer header in address order (or
 * to the end of the whole mapping for the last buffer).  Relies on
 * proc->buffers being sorted by address with contiguous extents.
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}
496
/*
 * Insert @new_buffer (which must be marked free) into @proc's
 * free_buffers tree, keyed by usable payload size.  Equal sizes go
 * right, so duplicates are permitted.
 */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: add free buffer, size %zd, "
		     "at %p\n", proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
529
530static void binder_insert_allocated_buffer(struct binder_proc *proc,
531					   struct binder_buffer *new_buffer)
532{
533	struct rb_node **p = &proc->allocated_buffers.rb_node;
534	struct rb_node *parent = NULL;
535	struct binder_buffer *buffer;
536
537	BUG_ON(new_buffer->free);
538
539	while (*p) {
540		parent = *p;
541		buffer = rb_entry(parent, struct binder_buffer, rb_node);
542		BUG_ON(buffer->free);
543
544		if (new_buffer < buffer)
545			p = &parent->rb_left;
546		else if (new_buffer > buffer)
547			p = &parent->rb_right;
548		else
549			BUG();
550	}
551	rb_link_node(&new_buffer->rb_node, parent, p);
552	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
553}
554
/*
 * Translate a userspace data pointer back into its binder_buffer
 * header: undo the user/kernel mapping offset and the data[] offset,
 * then require the result to be present in the allocated-buffers tree.
 * Returns NULL if the pointer does not name an allocated buffer.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}
578
/*
 * Allocate (@allocate != 0) or free (@allocate == 0) the physical pages
 * backing [start, end) of @proc's binder region, keeping the kernel
 * mapping and the userspace mapping (start + user_buffer_offset) in
 * sync.  @vma is passed on the mmap path; otherwise the mm is taken
 * from proc->tsk and proc->vma is used under mmap_sem.  Returns 0 on
 * success or -ENOMEM.
 *
 * Note the unusual shape: the err_* labels sit inside the free loop so
 * that a partially-completed allocation is unwound page by page.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	/* with an explicit vma the caller already holds the mm */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		/* map the page at its fixed kernel address */
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		/* and at the corresponding userspace address */
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* walk backwards; entering at a label below unwinds a partial page */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
676
/*
 * Carve a buffer of at least data_size + offsets_size (each rounded to
 * pointer alignment) out of @proc's mapped region, using best-fit on
 * the size-keyed free_buffers tree and faulting in backing pages as
 * needed.  Async allocations are additionally charged against
 * free_async_space.  Returns the buffer, or NULL on any failure.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* detect overflow of the aligned sum */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: binder_alloc_buf size %zd"
			     "failed, no async space left\n", proc->pid, size);
		return NULL;
	}

	/* best-fit search; on exact match n is left non-NULL */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	/* n == NULL: no exact fit, take the smallest larger buffer */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got buff"
		     "er %p size %zd\n", proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/* decide whether the remainder is worth splitting off */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* split: the tail becomes a new free buffer */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got "
		     "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
		             "binder: %d: binder_alloc_buf size %zd "
		             "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	return buffer;
}
782
783static void *buffer_start_page(struct binder_buffer *buffer)
784{
785	return (void *)((uintptr_t)buffer & PAGE_MASK);
786}
787
788static void *buffer_end_page(struct binder_buffer *buffer)
789{
790	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
791}
792
793static void binder_delete_free_buffer(struct binder_proc *proc,
794				      struct binder_buffer *buffer)
795{
796	struct binder_buffer *prev, *next = NULL;
797	int free_page_end = 1;
798	int free_page_start = 1;
799
800	BUG_ON(proc->buffers.next == &buffer->entry);
801	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
802	BUG_ON(!prev->free);
803	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
804		free_page_start = 0;
805		if (buffer_end_page(prev) == buffer_end_page(buffer))
806			free_page_end = 0;
807		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
808			     "binder: %d: merge free, buffer %p "
809			     "share page with %p\n", proc->pid, buffer, prev);
810	}
811
812	if (!list_is_last(&buffer->entry, &proc->buffers)) {
813		next = list_entry(buffer->entry.next,
814				  struct binder_buffer, entry);
815		if (buffer_start_page(next) == buffer_end_page(buffer)) {
816			free_page_end = 0;
817			if (buffer_start_page(next) ==
818			    buffer_start_page(buffer))
819				free_page_start = 0;
820			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
821				     "binder: %d: merge free, buffer"
822				     " %p share page with %p\n", proc->pid,
823				     buffer, prev);
824		}
825	}
826	list_del(&buffer->entry);
827	if (free_page_start || free_page_end) {
828		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
829			     "binder: %d: merge free, buffer %p do "
830			     "not share page%s%s with with %p or %p\n",
831			     proc->pid, buffer, free_page_start ? "" : " end",
832			     free_page_end ? "" : " start", prev, next);
833		binder_update_page_range(proc, 0, free_page_start ?
834			buffer_start_page(buffer) : buffer_end_page(buffer),
835			(free_page_end ? buffer_end_page(buffer) :
836			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
837	}
838}
839
/*
 * Return @buffer to @proc's free pool: release its data pages, move it
 * from the allocated tree to the free tree, and coalesce with a free
 * neighbour on either side so free space stays in maximal runs.  Async
 * buffers also refund their free_async_space charge.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_free_buf %p size %zd buffer"
		     "_size %zd\n", proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_free_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	/* free only whole pages fully inside the payload area */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* merge with a free successor: absorb it into this buffer */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* merge with a free predecessor: absorb this buffer into it */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
894
895static struct binder_node *binder_get_node(struct binder_proc *proc,
896					   void __user *ptr)
897{
898	struct rb_node *n = proc->nodes.rb_node;
899	struct binder_node *node;
900
901	while (n) {
902		node = rb_entry(n, struct binder_node, rb_node);
903
904		if (ptr < node->ptr)
905			n = n->rb_left;
906		else if (ptr > node->ptr)
907			n = n->rb_right;
908		else
909			return node;
910	}
911	return NULL;
912}
913
/*
 * Allocate a node for (@ptr, @cookie) and insert it into @proc's node
 * tree.  Returns NULL on allocation failure or if a node with the same
 * ptr already exists.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;	/* duplicate ptr */
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_NODE]++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}
953
/*
 * Take one strong or weak reference on @node.  @internal marks refs
 * held on behalf of remote binder_refs (as opposed to local refs).
 * When the change must be reported to the owning process (userspace
 * does not yet hold the matching has_*_ref), the node's work item is
 * queued on @target_list.  Returns 0, or -EINVAL for an increment that
 * requires a report but has no target list to queue it on.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* requeue at the tail of the target list */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
988
/*
 * Drop one strong or weak reference on @node (see binder_inc_node for
 * the strong/internal semantics).  While any reference of the relevant
 * class remains, nothing else happens.  Otherwise: if userspace still
 * holds the object (has_strong_ref/has_weak_ref) and the owner is
 * alive, queue node work so the owner is told about the drop; if
 * nothing at all references the node any more, unlink it from its tree
 * (or the dead-node list) and free it.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
		}
	}

	return 0;
}
1031
1032
1033static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1034					 uint32_t desc)
1035{
1036	struct rb_node *n = proc->refs_by_desc.rb_node;
1037	struct binder_ref *ref;
1038
1039	while (n) {
1040		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1041
1042		if (desc < ref->desc)
1043			n = n->rb_left;
1044		else if (desc > ref->desc)
1045			n = n->rb_right;
1046		else
1047			return ref;
1048	}
1049	return NULL;
1050}
1051
/*
 * Find @proc's ref to @node, creating one if needed.  A new ref is
 * assigned the lowest unused descriptor (0 is reserved for refs to the
 * context-manager node), linked into both per-proc lookup trees and
 * into the node's refs list.  Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;	/* already have a ref to this node */
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_REF]++;
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* pick the smallest free descriptor by scanning in desc order */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;	/* found a gap */
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();	/* descriptor chosen above must be unused */
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			      new_ref->desc);
	}
	return new_ref;
}
1118
/*
 * Tear down and free @ref: unlink it from both of the owning process'
 * rbtrees and from the node's ref list, drop the node counts it held,
 * and cancel any pending death notification.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc, ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	/* A ref with a nonzero strong count holds one strong node ref. */
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	/*
	 * NOTE(review): unconditional weak drop — each ref appears to hold
	 * one internal weak count on its node (taken at creation; the
	 * matching inc is outside this view).
	 */
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
		             ref->debug_id, ref->desc);
		/* The death work may still sit on a todo list; unlink it. */
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
	}
	kfree(ref);
	binder_stats.obj_deleted[BINDER_STAT_REF]++;
}
1144
1145static int binder_inc_ref(struct binder_ref *ref, int strong,
1146			  struct list_head *target_list)
1147{
1148	int ret;
1149	if (strong) {
1150		if (ref->strong == 0) {
1151			ret = binder_inc_node(ref->node, 1, 1, target_list);
1152			if (ret)
1153				return ret;
1154		}
1155		ref->strong++;
1156	} else {
1157		if (ref->weak == 0) {
1158			ret = binder_inc_node(ref->node, 0, 1, target_list);
1159			if (ret)
1160				return ret;
1161		}
1162		ref->weak++;
1163	}
1164	return 0;
1165}
1166
1167
/*
 * Drop one strong (@strong != 0) or weak count on @ref.  Underflow is
 * reported to userspace and rejected with -EINVAL.  When both counts
 * reach zero the ref is deleted, so the caller must not touch @ref
 * after a 0 return unless it knows a count remains.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("binder: %d invalid dec strong, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			/* last strong count: release the node's strong ref */
			int ret;
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("binder: %d invalid dec weak, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	/* Both counts gone: the ref frees itself. */
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}
1199
1200static void binder_pop_transaction(struct binder_thread *target_thread,
1201				   struct binder_transaction *t)
1202{
1203	if (target_thread) {
1204		BUG_ON(target_thread->transaction_stack != t);
1205		BUG_ON(target_thread->transaction_stack->from != target_thread);
1206		target_thread->transaction_stack =
1207			target_thread->transaction_stack->from_parent;
1208		t->from = NULL;
1209	}
1210	t->need_reply = 0;
1211	if (t->buffer)
1212		t->buffer->transaction = NULL;
1213	kfree(t);
1214	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
1215}
1216
/*
 * Deliver @error_code for transaction @t, which cannot be completed.
 * Walks up the from_parent chain until a live originating thread is
 * found; each transaction whose sender is already gone is popped and
 * freed along the way.  Must not be called for one-way transactions.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/*
			 * If an error is already pending, stash it in the
			 * secondary slot so the new one is not lost.
			 */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					      t->debug_id, target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* Sender is gone; free t and retry its parent. */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}
1271
1272static void binder_transaction_buffer_release(struct binder_proc *proc,
1273					      struct binder_buffer *buffer,
1274					      size_t *failed_at)
1275{
1276	size_t *offp, *off_end;
1277	int debug_id = buffer->debug_id;
1278
1279	binder_debug(BINDER_DEBUG_TRANSACTION,
1280		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
1281		     proc->pid, buffer->debug_id,
1282		     buffer->data_size, buffer->offsets_size, failed_at);
1283
1284	if (buffer->target_node)
1285		binder_dec_node(buffer->target_node, 1, 0);
1286
1287	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
1288	if (failed_at)
1289		off_end = failed_at;
1290	else
1291		off_end = (void *)offp + buffer->offsets_size;
1292	for (; offp < off_end; offp++) {
1293		struct flat_binder_object *fp;
1294		if (*offp > buffer->data_size - sizeof(*fp) ||
1295		    buffer->data_size < sizeof(*fp) ||
1296		    !IS_ALIGNED(*offp, sizeof(void *))) {
1297			printk(KERN_ERR "binder: transaction release %d bad"
1298					"offset %zd, size %zd\n", debug_id, *offp, buffer->data_size);
1299			continue;
1300		}
1301		fp = (struct flat_binder_object *)(buffer->data + *offp);
1302		switch (fp->type) {
1303		case BINDER_TYPE_BINDER:
1304		case BINDER_TYPE_WEAK_BINDER: {
1305			struct binder_node *node = binder_get_node(proc, fp->binder);
1306			if (node == NULL) {
1307				printk(KERN_ERR "binder: transaction release %d bad node %p\n", debug_id, fp->binder);
1308				break;
1309			}
1310			binder_debug(BINDER_DEBUG_TRANSACTION,
1311				     "        node %d u%p\n",
1312				     node->debug_id, node->ptr);
1313			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
1314		} break;
1315		case BINDER_TYPE_HANDLE:
1316		case BINDER_TYPE_WEAK_HANDLE: {
1317			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1318			if (ref == NULL) {
1319				printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle);
1320				break;
1321			}
1322			binder_debug(BINDER_DEBUG_TRANSACTION,
1323				     "        ref %d desc %d (node %d)\n",
1324				     ref->debug_id, ref->desc, ref->node->debug_id);
1325			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
1326		} break;
1327
1328		case BINDER_TYPE_FD:
1329			binder_debug(BINDER_DEBUG_TRANSACTION,
1330				     "        fd %ld\n", fp->handle);
1331			if (failed_at)
1332				task_close_fd(proc, fp->handle);
1333			break;
1334
1335		default:
1336			printk(KERN_ERR "binder: transaction release %d bad object type %lx\n", debug_id, fp->type);
1337			break;
1338		}
1339	}
1340}
1341
1342static void binder_transaction(struct binder_proc *proc,
1343			       struct binder_thread *thread,
1344			       struct binder_transaction_data *tr, int reply)
1345{
1346	struct binder_transaction *t;
1347	struct binder_work *tcomplete;
1348	size_t *offp, *off_end;
1349	struct binder_proc *target_proc;
1350	struct binder_thread *target_thread = NULL;
1351	struct binder_node *target_node = NULL;
1352	struct list_head *target_list;
1353	wait_queue_head_t *target_wait;
1354	struct binder_transaction *in_reply_to = NULL;
1355	struct binder_transaction_log_entry *e;
1356	uint32_t return_error;
1357
1358	e = binder_transaction_log_add(&binder_transaction_log);
1359	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1360	e->from_proc = proc->pid;
1361	e->from_thread = thread->pid;
1362	e->target_handle = tr->target.handle;
1363	e->data_size = tr->data_size;
1364	e->offsets_size = tr->offsets_size;
1365
1366	if (reply) {
1367		in_reply_to = thread->transaction_stack;
1368		if (in_reply_to == NULL) {
1369			binder_user_error("binder: %d:%d got reply transaction "
1370					  "with no transaction stack\n",
1371					  proc->pid, thread->pid);
1372			return_error = BR_FAILED_REPLY;
1373			goto err_empty_call_stack;
1374		}
1375		binder_set_nice(in_reply_to->saved_priority);
1376		if (in_reply_to->to_thread != thread) {
1377			binder_user_error("binder: %d:%d got reply transaction "
1378				"with bad transaction stack,"
1379				" transaction %d has target %d:%d\n",
1380				proc->pid, thread->pid, in_reply_to->debug_id,
1381				in_reply_to->to_proc ?
1382				in_reply_to->to_proc->pid : 0,
1383				in_reply_to->to_thread ?
1384				in_reply_to->to_thread->pid : 0);
1385			return_error = BR_FAILED_REPLY;
1386			in_reply_to = NULL;
1387			goto err_bad_call_stack;
1388		}
1389		thread->transaction_stack = in_reply_to->to_parent;
1390		target_thread = in_reply_to->from;
1391		if (target_thread == NULL) {
1392			return_error = BR_DEAD_REPLY;
1393			goto err_dead_binder;
1394		}
1395		if (target_thread->transaction_stack != in_reply_to) {
1396			binder_user_error("binder: %d:%d got reply transaction "
1397				"with bad target transaction stack %d, "
1398				"expected %d\n",
1399				proc->pid, thread->pid,
1400				target_thread->transaction_stack ?
1401				target_thread->transaction_stack->debug_id : 0,
1402				in_reply_to->debug_id);
1403			return_error = BR_FAILED_REPLY;
1404			in_reply_to = NULL;
1405			target_thread = NULL;
1406			goto err_dead_binder;
1407		}
1408		target_proc = target_thread->proc;
1409	} else {
1410		if (tr->target.handle) {
1411			struct binder_ref *ref;
1412			ref = binder_get_ref(proc, tr->target.handle);
1413			if (ref == NULL) {
1414				binder_user_error("binder: %d:%d got "
1415					"transaction to invalid handle\n",
1416					proc->pid, thread->pid);
1417				return_error = BR_FAILED_REPLY;
1418				goto err_invalid_target_handle;
1419			}
1420			target_node = ref->node;
1421		} else {
1422			target_node = binder_context_mgr_node;
1423			if (target_node == NULL) {
1424				return_error = BR_DEAD_REPLY;
1425				goto err_no_context_mgr_node;
1426			}
1427		}
1428		e->to_node = target_node->debug_id;
1429		target_proc = target_node->proc;
1430		if (target_proc == NULL) {
1431			return_error = BR_DEAD_REPLY;
1432			goto err_dead_binder;
1433		}
1434		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1435			struct binder_transaction *tmp;
1436			tmp = thread->transaction_stack;
1437			if (tmp->to_thread != thread) {
1438				binder_user_error("binder: %d:%d got new "
1439					"transaction with bad transaction stack"
1440					", transaction %d has target %d:%d\n",
1441					proc->pid, thread->pid, tmp->debug_id,
1442					tmp->to_proc ? tmp->to_proc->pid : 0,
1443					tmp->to_thread ?
1444					tmp->to_thread->pid : 0);
1445				return_error = BR_FAILED_REPLY;
1446				goto err_bad_call_stack;
1447			}
1448			while (tmp) {
1449				if (tmp->from && tmp->from->proc == target_proc)
1450					target_thread = tmp->from;
1451				tmp = tmp->from_parent;
1452			}
1453		}
1454	}
1455	if (target_thread) {
1456		e->to_thread = target_thread->pid;
1457		target_list = &target_thread->todo;
1458		target_wait = &target_thread->wait;
1459	} else {
1460		target_list = &target_proc->todo;
1461		target_wait = &target_proc->wait;
1462	}
1463	e->to_proc = target_proc->pid;
1464
1465	/* TODO: reuse incoming transaction for reply */
1466	t = kzalloc(sizeof(*t), GFP_KERNEL);
1467	if (t == NULL) {
1468		return_error = BR_FAILED_REPLY;
1469		goto err_alloc_t_failed;
1470	}
1471	binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;
1472
1473	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1474	if (tcomplete == NULL) {
1475		return_error = BR_FAILED_REPLY;
1476		goto err_alloc_tcomplete_failed;
1477	}
1478	binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;
1479
1480	t->debug_id = ++binder_last_id;
1481	e->debug_id = t->debug_id;
1482
1483	if (reply)
1484		binder_debug(BINDER_DEBUG_TRANSACTION,
1485			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
1486			     "data %p-%p size %zd-%zd\n",
1487			     proc->pid, thread->pid, t->debug_id,
1488			     target_proc->pid, target_thread->pid,
1489			     tr->data.ptr.buffer, tr->data.ptr.offsets,
1490			     tr->data_size, tr->offsets_size);
1491	else
1492		binder_debug(BINDER_DEBUG_TRANSACTION,
1493			     "binder: %d:%d BC_TRANSACTION %d -> "
1494			     "%d - node %d, data %p-%p size %zd-%zd\n",
1495			     proc->pid, thread->pid, t->debug_id,
1496			     target_proc->pid, target_node->debug_id,
1497			     tr->data.ptr.buffer, tr->data.ptr.offsets,
1498			     tr->data_size, tr->offsets_size);
1499
1500	if (!reply && !(tr->flags & TF_ONE_WAY))
1501		t->from = thread;
1502	else
1503		t->from = NULL;
1504	t->sender_euid = proc->tsk->cred->euid;
1505	t->to_proc = target_proc;
1506	t->to_thread = target_thread;
1507	t->code = tr->code;
1508	t->flags = tr->flags;
1509	t->priority = task_nice(current);
1510	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1511		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1512	if (t->buffer == NULL) {
1513		return_error = BR_FAILED_REPLY;
1514		goto err_binder_alloc_buf_failed;
1515	}
1516	t->buffer->allow_user_free = 0;
1517	t->buffer->debug_id = t->debug_id;
1518	t->buffer->transaction = t;
1519	t->buffer->target_node = target_node;
1520	if (target_node)
1521		binder_inc_node(target_node, 1, 0, NULL);
1522
1523	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1524
1525	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1526		binder_user_error("binder: %d:%d got transaction with invalid "
1527			"data ptr\n", proc->pid, thread->pid);
1528		return_error = BR_FAILED_REPLY;
1529		goto err_copy_data_failed;
1530	}
1531	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1532		binder_user_error("binder: %d:%d got transaction with invalid "
1533			"offsets ptr\n", proc->pid, thread->pid);
1534		return_error = BR_FAILED_REPLY;
1535		goto err_copy_data_failed;
1536	}
1537	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1538		binder_user_error("binder: %d:%d got transaction with "
1539			"invalid offsets size, %zd\n",
1540			proc->pid, thread->pid, tr->offsets_size);
1541		return_error = BR_FAILED_REPLY;
1542		goto err_bad_offset;
1543	}
1544	off_end = (void *)offp + tr->offsets_size;
1545	for (; offp < off_end; offp++) {
1546		struct flat_binder_object *fp;
1547		if (*offp > t->buffer->data_size - sizeof(*fp) ||
1548		    t->buffer->data_size < sizeof(*fp) ||
1549		    !IS_ALIGNED(*offp, sizeof(void *))) {
1550			binder_user_error("binder: %d:%d got transaction with "
1551				"invalid offset, %zd\n",
1552				proc->pid, thread->pid, *offp);
1553			return_error = BR_FAILED_REPLY;
1554			goto err_bad_offset;
1555		}
1556		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1557		switch (fp->type) {
1558		case BINDER_TYPE_BINDER:
1559		case BINDER_TYPE_WEAK_BINDER: {
1560			struct binder_ref *ref;
1561			struct binder_node *node = binder_get_node(proc, fp->binder);
1562			if (node == NULL) {
1563				node = binder_new_node(proc, fp->binder, fp->cookie);
1564				if (node == NULL) {
1565					return_error = BR_FAILED_REPLY;
1566					goto err_binder_new_node_failed;
1567				}
1568				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1569				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1570			}
1571			if (fp->cookie != node->cookie) {
1572				binder_user_error("binder: %d:%d sending u%p "
1573					"node %d, cookie mismatch %p != %p\n",
1574					proc->pid, thread->pid,
1575					fp->binder, node->debug_id,
1576					fp->cookie, node->cookie);
1577				goto err_binder_get_ref_for_node_failed;
1578			}
1579			ref = binder_get_ref_for_node(target_proc, node);
1580			if (ref == NULL) {
1581				return_error = BR_FAILED_REPLY;
1582				goto err_binder_get_ref_for_node_failed;
1583			}
1584			if (fp->type == BINDER_TYPE_BINDER)
1585				fp->type = BINDER_TYPE_HANDLE;
1586			else
1587				fp->type = BINDER_TYPE_WEAK_HANDLE;
1588			fp->handle = ref->desc;
1589			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
1590
1591			binder_debug(BINDER_DEBUG_TRANSACTION,
1592				     "        node %d u%p -> ref %d desc %d\n",
1593				     node->debug_id, node->ptr, ref->debug_id,
1594				     ref->desc);
1595		} break;
1596		case BINDER_TYPE_HANDLE:
1597		case BINDER_TYPE_WEAK_HANDLE: {
1598			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1599			if (ref == NULL) {
1600				binder_user_error("binder: %d:%d got "
1601					"transaction with invalid "
1602					"handle, %ld\n", proc->pid,
1603					thread->pid, fp->handle);
1604				return_error = BR_FAILED_REPLY;
1605				goto err_binder_get_ref_failed;
1606			}
1607			if (ref->node->proc == target_proc) {
1608				if (fp->type == BINDER_TYPE_HANDLE)
1609					fp->type = BINDER_TYPE_BINDER;
1610				else
1611					fp->type = BINDER_TYPE_WEAK_BINDER;
1612				fp->binder = ref->node->ptr;
1613				fp->cookie = ref->node->cookie;
1614				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1615				binder_debug(BINDER_DEBUG_TRANSACTION,
1616					     "        ref %d desc %d -> node %d u%p\n",
1617					     ref->debug_id, ref->desc, ref->node->debug_id,
1618					     ref->node->ptr);
1619			} else {
1620				struct binder_ref *new_ref;
1621				new_ref = binder_get_ref_for_node(target_proc, ref->node);
1622				if (new_ref == NULL) {
1623					return_error = BR_FAILED_REPLY;
1624					goto err_binder_get_ref_for_node_failed;
1625				}
1626				fp->handle = new_ref->desc;
1627				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1628				binder_debug(BINDER_DEBUG_TRANSACTION,
1629					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1630					     ref->debug_id, ref->desc, new_ref->debug_id,
1631					     new_ref->desc, ref->node->debug_id);
1632			}
1633		} break;
1634
1635		case BINDER_TYPE_FD: {
1636			int target_fd;
1637			struct file *file;
1638
1639			if (reply) {
1640				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1641					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
1642						proc->pid, thread->pid, fp->handle);
1643					return_error = BR_FAILED_REPLY;
1644					goto err_fd_not_allowed;
1645				}
1646			} else if (!target_node->accept_fds) {
1647				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
1648					proc->pid, thread->pid, fp->handle);
1649				return_error = BR_FAILED_REPLY;
1650				goto err_fd_not_allowed;
1651			}
1652
1653			file = fget(fp->handle);
1654			if (file == NULL) {
1655				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
1656					proc->pid, thread->pid, fp->handle);
1657				return_error = BR_FAILED_REPLY;
1658				goto err_fget_failed;
1659			}
1660			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1661			if (target_fd < 0) {
1662				fput(file);
1663				return_error = BR_FAILED_REPLY;
1664				goto err_get_unused_fd_failed;
1665			}
1666			task_fd_install(target_proc, target_fd, file);
1667			binder_debug(BINDER_DEBUG_TRANSACTION,
1668				     "        fd %ld -> %d\n", fp->handle, target_fd);
1669			/* TODO: fput? */
1670			fp->handle = target_fd;
1671		} break;
1672
1673		default:
1674			binder_user_error("binder: %d:%d got transactio"
1675				"n with invalid object type, %lx\n",
1676				proc->pid, thread->pid, fp->type);
1677			return_error = BR_FAILED_REPLY;
1678			goto err_bad_object_type;
1679		}
1680	}
1681	if (reply) {
1682		BUG_ON(t->buffer->async_transaction != 0);
1683		binder_pop_transaction(target_thread, in_reply_to);
1684	} else if (!(t->flags & TF_ONE_WAY)) {
1685		BUG_ON(t->buffer->async_transaction != 0);
1686		t->need_reply = 1;
1687		t->from_parent = thread->transaction_stack;
1688		thread->transaction_stack = t;
1689	} else {
1690		BUG_ON(target_node == NULL);
1691		BUG_ON(t->buffer->async_transaction != 1);
1692		if (target_node->has_async_transaction) {
1693			target_list = &target_node->async_todo;
1694			target_wait = NULL;
1695		} else
1696			target_node->has_async_transaction = 1;
1697	}
1698	t->work.type = BINDER_WORK_TRANSACTION;
1699	list_add_tail(&t->work.entry, target_list);
1700	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1701	list_add_tail(&tcomplete->entry, &thread->todo);
1702	if (target_wait)
1703		wake_up_interruptible(target_wait);
1704	return;
1705
1706err_get_unused_fd_failed:
1707err_fget_failed:
1708err_fd_not_allowed:
1709err_binder_get_ref_for_node_failed:
1710err_binder_get_ref_failed:
1711err_binder_new_node_failed:
1712err_bad_object_type:
1713err_bad_offset:
1714err_copy_data_failed:
1715	binder_transaction_buffer_release(target_proc, t->buffer, offp);
1716	t->buffer->transaction = NULL;
1717	binder_free_buf(target_proc, t->buffer);
1718err_binder_alloc_buf_failed:
1719	kfree(tcomplete);
1720	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
1721err_alloc_tcomplete_failed:
1722	kfree(t);
1723	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
1724err_alloc_t_failed:
1725err_bad_call_stack:
1726err_empty_call_stack:
1727err_dead_binder:
1728err_invalid_target_handle:
1729err_no_context_mgr_node:
1730	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1731		     "binder: %d:%d transaction failed %d, size %zd-%zd\n",
1732		     proc->pid, thread->pid, return_error,
1733		     tr->data_size, tr->offsets_size);
1734
1735	{
1736		struct binder_transaction_log_entry *fe;
1737		fe = binder_transaction_log_add(&binder_transaction_log_failed);
1738		*fe = *e;
1739	}
1740
1741	BUG_ON(thread->return_error != BR_OK);
1742	if (in_reply_to) {
1743		thread->return_error = BR_TRANSACTION_COMPLETE;
1744		binder_send_failed_reply(in_reply_to, return_error);
1745	} else
1746		thread->return_error = return_error;
1747}
1748
1749int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1750			void __user *buffer, int size, signed long *consumed)
1751{
1752	uint32_t cmd;
1753	void __user *ptr = buffer + *consumed;
1754	void __user *end = buffer + size;
1755
1756	while (ptr < end && thread->return_error == BR_OK) {
1757		if (get_user(cmd, (uint32_t __user *)ptr))
1758			return -EFAULT;
1759		ptr += sizeof(uint32_t);
1760		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1761			binder_stats.bc[_IOC_NR(cmd)]++;
1762			proc->stats.bc[_IOC_NR(cmd)]++;
1763			thread->stats.bc[_IOC_NR(cmd)]++;
1764		}
1765		switch (cmd) {
1766		case BC_INCREFS:
1767		case BC_ACQUIRE:
1768		case BC_RELEASE:
1769		case BC_DECREFS: {
1770			uint32_t target;
1771			struct binder_ref *ref;
1772			const char *debug_string;
1773
1774			if (get_user(target, (uint32_t __user *)ptr))
1775				return -EFAULT;
1776			ptr += sizeof(uint32_t);
1777			if (target == 0 && binder_context_mgr_node &&
1778			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1779				ref = binder_get_ref_for_node(proc,
1780					       binder_context_mgr_node);
1781				if (ref->desc != target) {
1782					binder_user_error("binder: %d:"
1783						"%d tried to acquire "
1784						"reference to desc 0, "
1785						"got %d instead\n",
1786						proc->pid, thread->pid,
1787						ref->desc);
1788				}
1789			} else
1790				ref = binder_get_ref(proc, target);
1791			if (ref == NULL) {
1792				binder_user_error("binder: %d:%d refcou"
1793					"nt change on invalid ref %d\n",
1794					proc->pid, thread->pid, target);
1795				break;
1796			}
1797			switch (cmd) {
1798			case BC_INCREFS:
1799				debug_string = "IncRefs";
1800				binder_inc_ref(ref, 0, NULL);
1801				break;
1802			case BC_ACQUIRE:
1803				debug_string = "Acquire";
1804				binder_inc_ref(ref, 1, NULL);
1805				break;
1806			case BC_RELEASE:
1807				debug_string = "Release";
1808				binder_dec_ref(ref, 1);
1809				break;
1810			case BC_DECREFS:
1811			default:
1812				debug_string = "DecRefs";
1813				binder_dec_ref(ref, 0);
1814				break;
1815			}
1816			binder_debug(BINDER_DEBUG_USER_REFS,
1817				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
1818				     proc->pid, thread->pid, debug_string, ref->debug_id,
1819				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1820			break;
1821		}
1822		case BC_INCREFS_DONE:
1823		case BC_ACQUIRE_DONE: {
1824			void __user *node_ptr;
1825			void *cookie;
1826			struct binder_node *node;
1827
1828			if (get_user(node_ptr, (void * __user *)ptr))
1829				return -EFAULT;
1830			ptr += sizeof(void *);
1831			if (get_user(cookie, (void * __user *)ptr))
1832				return -EFAULT;
1833			ptr += sizeof(void *);
1834			node = binder_get_node(proc, node_ptr);
1835			if (node == NULL) {
1836				binder_user_error("binder: %d:%d "
1837					"%s u%p no match\n",
1838					proc->pid, thread->pid,
1839					cmd == BC_INCREFS_DONE ?
1840					"BC_INCREFS_DONE" :
1841					"BC_ACQUIRE_DONE",
1842					node_ptr);
1843				break;
1844			}
1845			if (cookie != node->cookie) {
1846				binder_user_error("binder: %d:%d %s u%p node %d"
1847					" cookie mismatch %p != %p\n",
1848					proc->pid, thread->pid,
1849					cmd == BC_INCREFS_DONE ?
1850					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1851					node_ptr, node->debug_id,
1852					cookie, node->cookie);
1853				break;
1854			}
1855			if (cmd == BC_ACQUIRE_DONE) {
1856				if (node->pending_strong_ref == 0) {
1857					binder_user_error("binder: %d:%d "
1858						"BC_ACQUIRE_DONE node %d has "
1859						"no pending acquire request\n",
1860						proc->pid, thread->pid,
1861						node->debug_id);
1862					break;
1863				}
1864				node->pending_strong_ref = 0;
1865			} else {
1866				if (node->pending_weak_ref == 0) {
1867					binder_user_error("binder: %d:%d "
1868						"BC_INCREFS_DONE node %d has "
1869						"no pending increfs request\n",
1870						proc->pid, thread->pid,
1871						node->debug_id);
1872					break;
1873				}
1874				node->pending_weak_ref = 0;
1875			}
1876			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1877			binder_debug(BINDER_DEBUG_USER_REFS,
1878				     "binder: %d:%d %s node %d ls %d lw %d\n",
1879				     proc->pid, thread->pid,
1880				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1881				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
1882			break;
1883		}
1884		case BC_ATTEMPT_ACQUIRE:
1885			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
1886			return -EINVAL;
1887		case BC_ACQUIRE_RESULT:
1888			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
1889			return -EINVAL;
1890
1891		case BC_FREE_BUFFER: {
1892			void __user *data_ptr;
1893			struct binder_buffer *buffer;
1894
1895			if (get_user(data_ptr, (void * __user *)ptr))
1896				return -EFAULT;
1897			ptr += sizeof(void *);
1898
1899			buffer = binder_buffer_lookup(proc, data_ptr);
1900			if (buffer == NULL) {
1901				binder_user_error("binder: %d:%d "
1902					"BC_FREE_BUFFER u%p no match\n",
1903					proc->pid, thread->pid, data_ptr);
1904				break;
1905			}
1906			if (!buffer->allow_user_free) {
1907				binder_user_error("binder: %d:%d "
1908					"BC_FREE_BUFFER u%p matched "
1909					"unreturned buffer\n",
1910					proc->pid, thread->pid, data_ptr);
1911				break;
1912			}
1913			binder_debug(BINDER_DEBUG_FREE_BUFFER,
1914				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
1915				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
1916				     buffer->transaction ? "active" : "finished");
1917
1918			if (buffer->transaction) {
1919				buffer->transaction->buffer = NULL;
1920				buffer->transaction = NULL;
1921			}
1922			if (buffer->async_transaction && buffer->target_node) {
1923				BUG_ON(!buffer->target_node->has_async_transaction);
1924				if (list_empty(&buffer->target_node->async_todo))
1925					buffer->target_node->has_async_transaction = 0;
1926				else
1927					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1928			}
1929			binder_transaction_buffer_release(proc, buffer, NULL);
1930			binder_free_buf(proc, buffer);
1931			break;
1932		}
1933
1934		case BC_TRANSACTION:
1935		case BC_REPLY: {
1936			struct binder_transaction_data tr;
1937
1938			if (copy_from_user(&tr, ptr, sizeof(tr)))
1939				return -EFAULT;
1940			ptr += sizeof(tr);
1941			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1942			break;
1943		}
1944
1945		case BC_REGISTER_LOOPER:
1946			binder_debug(BINDER_DEBUG_THREADS,
1947				     "binder: %d:%d BC_REGISTER_LOOPER\n",
1948				     proc->pid, thread->pid);
1949			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1950				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1951				binder_user_error("binder: %d:%d ERROR:"
1952					" BC_REGISTER_LOOPER called "
1953					"after BC_ENTER_LOOPER\n",
1954					proc->pid, thread->pid);
1955			} else if (proc->requested_threads == 0) {
1956				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1957				binder_user_error("binder: %d:%d ERROR:"
1958					" BC_REGISTER_LOOPER called "
1959					"without request\n",
1960					proc->pid, thread->pid);
1961			} else {
1962				proc->requested_threads--;
1963				proc->requested_threads_started++;
1964			}
1965			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1966			break;
1967		case BC_ENTER_LOOPER:
1968			binder_debug(BINDER_DEBUG_THREADS,
1969				     "binder: %d:%d BC_ENTER_LOOPER\n",
1970				     proc->pid, thread->pid);
1971			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1972				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1973				binder_user_error("binder: %d:%d ERROR:"
1974					" BC_ENTER_LOOPER called after "
1975					"BC_REGISTER_LOOPER\n",
1976					proc->pid, thread->pid);
1977			}
1978			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1979			break;
1980		case BC_EXIT_LOOPER:
1981			binder_debug(BINDER_DEBUG_THREADS,
1982				     "binder: %d:%d BC_EXIT_LOOPER\n",
1983				     proc->pid, thread->pid);
1984			thread->looper |= BINDER_LOOPER_STATE_EXITED;
1985			break;
1986
1987		case BC_REQUEST_DEATH_NOTIFICATION:
1988		case BC_CLEAR_DEATH_NOTIFICATION: {
1989			uint32_t target;
1990			void __user *cookie;
1991			struct binder_ref *ref;
1992			struct binder_ref_death *death;
1993
1994			if (get_user(target, (uint32_t __user *)ptr))
1995				return -EFAULT;
1996			ptr += sizeof(uint32_t);
1997			if (get_user(cookie, (void __user * __user *)ptr))
1998				return -EFAULT;
1999			ptr += sizeof(void *);
2000			ref = binder_get_ref(proc, target);
2001			if (ref == NULL) {
2002				binder_user_error("binder: %d:%d %s "
2003					"invalid ref %d\n",
2004					proc->pid, thread->pid,
2005					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2006					"BC_REQUEST_DEATH_NOTIFICATION" :
2007					"BC_CLEAR_DEATH_NOTIFICATION",
2008					target);
2009				break;
2010			}
2011
2012			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
2013				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
2014				     proc->pid, thread->pid,
2015				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2016				     "BC_REQUEST_DEATH_NOTIFICATION" :
2017				     "BC_CLEAR_DEATH_NOTIFICATION",
2018				     cookie, ref->debug_id, ref->desc,
2019				     ref->strong, ref->weak, ref->node->debug_id);
2020
2021			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2022				if (ref->death) {
2023					binder_user_error("binder: %d:%"
2024						"d BC_REQUEST_DEATH_NOTI"
2025						"FICATION death notific"
2026						"ation already set\n",
2027						proc->pid, thread->pid);
2028					break;
2029				}
2030				death = kzalloc(sizeof(*death), GFP_KERNEL);
2031				if (death == NULL) {
2032					thread->return_error = BR_ERROR;
2033					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2034						     "binder: %d:%d "
2035						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
2036						     proc->pid, thread->pid);
2037					break;
2038				}
2039				binder_stats.obj_created[BINDER_STAT_DEATH]++;
2040				INIT_LIST_HEAD(&death->work.entry);
2041				death->cookie = cookie;
2042				ref->death = death;
2043				if (ref->node->proc == NULL) {
2044					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2045					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2046						list_add_tail(&ref->death->work.entry, &thread->todo);
2047					} else {
2048						list_add_tail(&ref->death->work.entry, &proc->todo);
2049						wake_up_interruptible(&proc->wait);
2050					}
2051				}
2052			} else {
2053				if (ref->death == NULL) {
2054					binder_user_error("binder: %d:%"
2055						"d BC_CLEAR_DEATH_NOTIFI"
2056						"CATION death notificat"
2057						"ion not active\n",
2058						proc->pid, thread->pid);
2059					break;
2060				}
2061				death = ref->death;
2062				if (death->cookie != cookie) {
2063					binder_user_error("binder: %d:%"
2064						"d BC_CLEAR_DEATH_NOTIFI"
2065						"CATION death notificat"
2066						"ion cookie mismatch "
2067						"%p != %p\n",
2068						proc->pid, thread->pid,
2069						death->cookie, cookie);
2070					break;
2071				}
2072				ref->death = NULL;
2073				if (list_empty(&death->work.entry)) {
2074					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2075					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2076						list_add_tail(&death->work.entry, &thread->todo);
2077					} else {
2078						list_add_tail(&death->work.entry, &proc->todo);
2079						wake_up_interruptible(&proc->wait);
2080					}
2081				} else {
2082					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2083					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2084				}
2085			}
2086		} break;
2087		case BC_DEAD_BINDER_DONE: {
2088			struct binder_work *w;
2089			void __user *cookie;
2090			struct binder_ref_death *death = NULL;
2091			if (get_user(cookie, (void __user * __user *)ptr))
2092				return -EFAULT;
2093
2094			ptr += sizeof(void *);
2095			list_for_each_entry(w, &proc->delivered_death, entry) {
2096				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2097				if (tmp_death->cookie == cookie) {
2098					death = tmp_death;
2099					break;
2100				}
2101			}
2102			binder_debug(BINDER_DEBUG_DEAD_BINDER,
2103				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
2104				     proc->pid, thread->pid, cookie, death);
2105			if (death == NULL) {
2106				binder_user_error("binder: %d:%d BC_DEAD"
2107					"_BINDER_DONE %p not found\n",
2108					proc->pid, thread->pid, cookie);
2109				break;
2110			}
2111
2112			list_del_init(&death->work.entry);
2113			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2114				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2115				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2116					list_add_tail(&death->work.entry, &thread->todo);
2117				} else {
2118					list_add_tail(&death->work.entry, &proc->todo);
2119					wake_up_interruptible(&proc->wait);
2120				}
2121			}
2122		} break;
2123
2124		default:
2125			printk(KERN_ERR "binder: %d:%d unknown command %d\n",
2126			       proc->pid, thread->pid, cmd);
2127			return -EINVAL;
2128		}
2129		*consumed = ptr - buffer;
2130	}
2131	return 0;
2132}
2133
2134void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
2135		    uint32_t cmd)
2136{
2137	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2138		binder_stats.br[_IOC_NR(cmd)]++;
2139		proc->stats.br[_IOC_NR(cmd)]++;
2140		thread->stats.br[_IOC_NR(cmd)]++;
2141	}
2142}
2143
2144static int binder_has_proc_work(struct binder_proc *proc,
2145				struct binder_thread *thread)
2146{
2147	return !list_empty(&proc->todo) ||
2148		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2149}
2150
2151static int binder_has_thread_work(struct binder_thread *thread)
2152{
2153	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2154		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2155}
2156
/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * @proc:	process issuing the read
 * @thread:	calling thread's binder state
 * @buffer:	user-space destination buffer
 * @size:	total size of @buffer in bytes
 * @consumed:	in: bytes already written; out: total bytes written
 * @non_block:	nonzero if the file was opened O_NONBLOCK
 *
 * Delivers pending error codes, reference-count commands, death
 * notifications and transactions to user space.  Sleeps (unless
 * @non_block) until work arrives; a thread with no private work and
 * no transaction stack waits on the process-wide queue instead of its
 * own.  Called with binder_lock held; the lock is dropped and
 * retaken around the wait.  Returns 0 on success or -errno.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      void  __user *buffer, int size,
			      signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Every fresh read starts with a BR_NOOP so user space always
	 * has at least one word to consume. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* Idle thread: no transaction in flight and no private work, so
	 * it may service the process-wide todo list. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Pending error codes are delivered before any other work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	/* binder_lock is dropped for the (possibly blocking) wait. */
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: only one ready thread is woken
			 * per queued work item. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items until the buffer fills or the lists run dry. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Stop once a full transaction record no longer fits. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Handled below, after the switch. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			/* Translate the node's current reference state into
			 * at most one user-space ref command per pass. */
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* User space already agrees with the node
				 * state; drop the node if fully unreferenced. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p deleted\n",
					    	     proc->pid, thread->pid, node->debug_id,
						     node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats.obj_deleted[BINDER_STAT_NODE]++;
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
			} else
				/* Parked until BC_DEAD_BINDER_DONE confirms. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Deliver the transaction (t set only for
		 * BINDER_WORK_TRANSACTION). */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			/* Inherit the sender's priority, bounded by the
			 * node's minimum, for synchronous calls. */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Report the sender pid as seen from the reader's
			 * pid namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Translate kernel buffer addresses into the reader's
		 * mmap'ed view via the fixed per-proc offset. */
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d %s %d %d:%d, cmd %d"
			     "size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* A reply is expected: keep the transaction on
			 * this thread's stack until it arrives. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask user space to spawn another looper thread when the pool
	 * has run dry (overwrites the leading BR_NOOP). */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "binder: %d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}
2455
/*
 * Discard all work items still queued on @list (a thread or process
 * todo list) when its owner goes away.  Transactions that still owe
 * the sender a reply get BR_DEAD_REPLY; TRANSACTION_COMPLETE entries
 * are freed outright.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
				binder_send_failed_reply(t, BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		default:
			/* NOTE(review): other work types (e.g. queued death
			 * notifications) are only unlinked here, not freed —
			 * looks like a potential leak; confirm against later
			 * binder revisions. */
			break;
		}
	}

}
2480
2481static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2482{
2483	struct binder_thread *thread = NULL;
2484	struct rb_node *parent = NULL;
2485	struct rb_node **p = &proc->threads.rb_node;
2486
2487	while (*p) {
2488		parent = *p;
2489		thread = rb_entry(parent, struct binder_thread, rb_node);
2490
2491		if (current->pid < thread->pid)
2492			p = &(*p)->rb_left;
2493		else if (current->pid > thread->pid)
2494			p = &(*p)->rb_right;
2495		else
2496			break;
2497	}
2498	if (*p == NULL) {
2499		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2500		if (thread == NULL)
2501			return NULL;
2502		binder_stats.obj_created[BINDER_STAT_THREAD]++;
2503		thread->proc = proc;
2504		thread->pid = current->pid;
2505		init_waitqueue_head(&thread->wait);
2506		INIT_LIST_HEAD(&thread->todo);
2507		rb_link_node(&thread->rb_node, parent, p);
2508		rb_insert_color(&thread->rb_node, &proc->threads);
2509		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2510		thread->return_error = BR_OK;
2511		thread->return_error2 = BR_OK;
2512	}
2513	return thread;
2514}
2515
/*
 * Tear down a thread's binder state (BINDER_THREAD_EXIT or process
 * release).  Removes the thread from proc->threads, unhooks it from
 * every transaction on its stack, sends BR_DEAD_REPLY for an incoming
 * transaction still awaiting a reply, and releases any queued work.
 * Returns the number of transactions that were still active.  Called
 * with binder_lock held.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the top of the stack can be an incoming transaction that
	 * still owes its sender a reply. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "binder: release %d:%d transaction %d "
			     "%s, still active\n", proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming transaction: detach the target side and
			 * orphan its buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing transaction: its reply can no longer be
			 * delivered to this thread. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats.obj_deleted[BINDER_STAT_THREAD]++;
	return active_transactions;
}
2556
2557static unsigned int binder_poll(struct file *filp,
2558				struct poll_table_struct *wait)
2559{
2560	struct binder_proc *proc = filp->private_data;
2561	struct binder_thread *thread = NULL;
2562	int wait_for_proc_work;
2563
2564	mutex_lock(&binder_lock);
2565	thread = binder_get_thread(proc);
2566
2567	wait_for_proc_work = thread->transaction_stack == NULL &&
2568		list_empty(&thread->todo) && thread->return_error == BR_OK;
2569	mutex_unlock(&binder_lock);
2570
2571	if (wait_for_proc_work) {
2572		if (binder_has_proc_work(proc, thread))
2573			return POLLIN;
2574		poll_wait(filp, &proc->wait, wait);
2575		if (binder_has_proc_work(proc, thread))
2576			return POLLIN;
2577	} else {
2578		if (binder_has_thread_work(thread))
2579			return POLLIN;
2580		poll_wait(filp, &thread->wait, wait);
2581		if (binder_has_thread_work(thread))
2582			return POLLIN;
2583	}
2584	return 0;
2585}
2586
/*
 * Main ioctl entry point: dispatches BINDER_WRITE_READ plus the
 * configuration commands.  All driver state is protected by the
 * global binder_lock, held for the duration of the command
 * (binder_thread_read drops it internally while sleeping).
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	/* Park here while a debugging stop-on-user-error is in force. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
			     bwr.read_size, bwr.read_buffer);

		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				/* Report write progress even on failure so
				 * user space knows what was consumed. */
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			/* This thread took proc work; wake another waiter
			 * if the queue is still non-empty. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
			     bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): returns -EINVAL on a faulting copy where
		 * -EFAULT would be conventional; the error code is
		 * user-visible ABI, so left as is. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Only one context manager may exist, and only a single
		 * euid may ever claim the role. */
		if (binder_context_mgr_node != NULL) {
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {
			if (binder_context_mgr_uid != current->cred->euid) {
				printk(KERN_ERR "binder: BINDER_SET_"
				       "CONTEXT_MGR bad uid %d != %d\n",
				       current->cred->euid,
				       binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the context manager node with permanent refs. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread is freed; NULL stops the looper update below. */
		thread = NULL;
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}
2716
2717static void binder_vma_open(struct vm_area_struct *vma)
2718{
2719	struct binder_proc *proc = vma->vm_private_data;
2720	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2721		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2722		     proc->pid, vma->vm_start, vma->vm_end,
2723		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2724		     (unsigned long)pgprot_val(vma->vm_page_prot));
2725	dump_stack();
2726}
2727
2728static void binder_vma_close(struct vm_area_struct *vma)
2729{
2730	struct binder_proc *proc = vma->vm_private_data;
2731	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2732		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2733		     proc->pid, vma->vm_start, vma->vm_end,
2734		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2735		     (unsigned long)pgprot_val(vma->vm_page_prot));
2736	proc->vma = NULL;
2737	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2738}
2739
/* Callbacks for the binder buffer mapping installed by binder_mmap(). */
static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};
2744
/*
 * Map the binder transaction buffer into the caller.  The kernel keeps
 * a parallel vmalloc-space view (proc->buffer); user_buffer_offset is
 * the constant delta between the two views so addresses translate with
 * simple arithmetic.  Writable or inheritable mappings are rejected,
 * at most one mapping per proc is allowed, and the size is capped at
 * 4 MB.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Silently clamp oversized requests to the 4 MB maximum. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	/* Buffer is read-only to user space; only the kernel writes it. */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve matching kernel virtual space; physical pages are
	 * populated on demand by binder_update_page_range(). */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On aliasing VIPT caches the two views must share a cache
	 * colour; advance the user start until the colours match. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back only the first page now; the rest arrives as buffers are
	 * allocated. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Half the space is held back for synchronous transactions. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
2835
/*
 * open() handler: allocate and register a binder_proc for the opening
 * process (keyed by group leader pid) and publish its
 * /proc/binder/proc/<pid> statistics entry.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a task reference for the lifetime of the proc record. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	mutex_lock(&binder_lock);
	binder_stats.obj_created[BINDER_STAT_PROC]++;
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/* A stale entry from a dying process with a recycled pid
		 * may still exist; remove it before creating ours. */
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
		create_proc_read_entry(strbuf, S_IRUGO,
				       binder_proc_dir_entry_proc,
				       binder_read_proc_proc, proc);
	}

	return 0;
}
2870
2871static int binder_flush(struct file *filp, fl_owner_t id)
2872{
2873	struct binder_proc *proc = filp->private_data;
2874
2875	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2876
2877	return 0;
2878}
2879
2880static void binder_deferred_flush(struct binder_proc *proc)
2881{
2882	struct rb_node *n;
2883	int wake_count = 0;
2884	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2885		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2886		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2887		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2888			wake_up_interruptible(&thread->wait);
2889			wake_count++;
2890		}
2891	}
2892	wake_up_interruptible_all(&proc->wait);
2893
2894	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
2895		     "binder_flush: %d woke %d threads\n", proc->pid,
2896		     wake_count);
2897}
2898
2899static int binder_release(struct inode *nodp, struct file *filp)
2900{
2901	struct binder_proc *proc = filp->private_data;
2902	if (binder_proc_dir_entry_proc) {
2903		char strbuf[11];
2904		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2905		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
2906	}
2907
2908	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
2909
2910	return 0;
2911}
2912
/*
 * Tear down a binder_proc after its file has been released.  Called
 * from binder_deferred_func() with binder_lock held, after the vma
 * and files have already been dropped (enforced by the BUG_ONs).
 * Frees every thread, node, ref and buffer still owned by the proc,
 * releases the backing pages and finally frees the proc itself.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct hlist_node *pos;
	struct binder_transaction *t;
	struct rb_node *n;
	/* per-category counters, reported in the debug message at the end */
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);
	/* the context manager node dies with the proc that published it */
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder_release: %d context_mgr_node gone\n",
			     proc->pid);
		binder_context_mgr_node = NULL;
	}

	/* free all threads; binder_free_thread() reports how many active
	 * transactions each one still had */
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		list_del_init(&node->work.entry);
		if (hlist_empty(&node->refs)) {
			/* no remote references left: drop the node outright */
			kfree(node);
			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
		} else {
			struct binder_ref *ref;
			int death = 0;

			/* other procs still hold refs: detach the node from
			 * this proc, park it on binder_dead_nodes, and queue
			 * a dead-binder notification for every ref that
			 * registered one */
			node->proc = NULL;
			node->local_strong_refs = 0;
			node->local_weak_refs = 0;
			hlist_add_head(&node->dead_node, &binder_dead_nodes);

			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
				incoming_refs++;
				if (ref->death) {
					death++;
					if (list_empty(&ref->death->work.entry)) {
						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
						wake_up_interruptible(&ref->proc->wait);
					} else
						BUG();
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: node %d now dead, "
				     "refs %d, death %d\n", node->debug_id,
				     incoming_refs, death);
		}
	}
	/* drop every reference this proc held to other procs' nodes */
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}
	binder_release_work(&proc->todo);
	buffers = 0;

	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
							rb_node);
		t = buffer->transaction;
		if (t) {
			/* a transaction still owns this buffer; detach the
			 * two so the buffer can be freed anyway */
			t->buffer = NULL;
			buffer->transaction = NULL;
			printk(KERN_ERR "binder: release proc %d, "
			       "transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}
		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats.obj_deleted[BINDER_STAT_PROC]++;

	/* release any pages still backing the buffer area, then the page
	 * array and the buffer space itself */
	page_count = 0;
	if (proc->pages) {
		int i;
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			if (proc->pages[i]) {
				binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
					     "binder_release: %d: "
					     "page %d at %p not freed\n",
					     proc->pid, i,
					     proc->buffer + i * PAGE_SIZE);
				__free_page(proc->pages[i]);
				page_count++;
			}
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_release: %d threads %d, nodes %d (ref %d), "
		     "refs %d, active transactions %d, buffers %d, "
		     "pages %d\n",
		     proc->pid, threads, nodes, incoming_refs, outgoing_refs,
		     active_transactions, buffers, page_count);

	kfree(proc);
}
3033
/*
 * Workqueue handler that drains binder_deferred_list.  Each pass pops
 * one proc, snapshots and clears its pending deferred-work bits, then
 * performs them; the loop repeats until the list is empty.
 * binder_lock is taken before binder_deferred_lock and is held across
 * the flush/release work; the files_struct is put only after
 * binder_lock has been dropped.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		mutex_lock(&binder_lock);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			/* list empty: defer == 0 keeps the checks below from
			 * dereferencing the NULL proc, and the loop exits */
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		mutex_unlock(&binder_lock);
		if (files)
			put_files_struct(files);
	} while (proc);
}
/* single shared work item; binder_defer_work() schedules it whenever a
 * proc is first added to binder_deferred_list */
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3074
3075static void
3076binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3077{
3078	mutex_lock(&binder_deferred_lock);
3079	proc->deferred_work |= defer;
3080	if (hlist_unhashed(&proc->deferred_work_node)) {
3081		hlist_add_head(&proc->deferred_work_node,
3082				&binder_deferred_list);
3083		schedule_work(&binder_deferred_work);
3084	}
3085	mutex_unlock(&binder_deferred_lock);
3086}
3087
/*
 * Append a one-transaction dump to buf and return the advanced
 * position.  Output may be truncated at end; truncation is detected
 * with buf >= end between the snprintf calls, both here and by
 * callers.
 */
static char *print_binder_transaction(char *buf, char *end, const char *prefix,
				      struct binder_transaction *t)
{
	buf += snprintf(buf, end - buf,
			"%s %d: %p from %d:%d to %d:%d code %x "
			"flags %x pri %ld r%d",
			prefix, t->debug_id, t,
			t->from ? t->from->proc->pid : 0,
			t->from ? t->from->pid : 0,
			t->to_proc ? t->to_proc->pid : 0,
			t->to_thread ? t->to_thread->pid : 0,
			t->code, t->flags, t->priority, t->need_reply);
	if (buf >= end)
		return buf;
	if (t->buffer == NULL) {
		/* buffer already released; nothing more to show */
		buf += snprintf(buf, end - buf, " buffer free\n");
		return buf;
	}
	if (t->buffer->target_node) {
		buf += snprintf(buf, end - buf, " node %d",
				t->buffer->target_node->debug_id);
		if (buf >= end)
			return buf;
	}
	buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n",
			t->buffer->data_size, t->buffer->offsets_size,
			t->buffer->data);
	return buf;
}
3117
3118static char *print_binder_buffer(char *buf, char *end, const char *prefix,
3119				 struct binder_buffer *buffer)
3120{
3121	buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n",
3122			prefix, buffer->debug_id, buffer->data,
3123			buffer->data_size, buffer->offsets_size,
3124			buffer->transaction ? "active" : "delivered");
3125	return buf;
3126}
3127
3128static char *print_binder_work(char *buf, char *end, const char *prefix,
3129			       const char *transaction_prefix,
3130			       struct binder_work *w)
3131{
3132	struct binder_node *node;
3133	struct binder_transaction *t;
3134
3135	switch (w->type) {
3136	case BINDER_WORK_TRANSACTION:
3137		t = container_of(w, struct binder_transaction, work);
3138		buf = print_binder_transaction(buf, end, transaction_prefix, t);
3139		break;
3140	case BINDER_WORK_TRANSACTION_COMPLETE:
3141		buf += snprintf(buf, end - buf,
3142				"%stransaction complete\n", prefix);
3143		break;
3144	case BINDER_WORK_NODE:
3145		node = container_of(w, struct binder_node, work);
3146		buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
3147				prefix, node->debug_id, node->ptr,
3148				node->cookie);
3149		break;
3150	case BINDER_WORK_DEAD_BINDER:
3151		buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
3152		break;
3153	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3154		buf += snprintf(buf, end - buf,
3155				"%shas cleared dead binder\n", prefix);
3156		break;
3157	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3158		buf += snprintf(buf, end - buf,
3159				"%shas cleared death notification\n", prefix);
3160		break;
3161	default:
3162		buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
3163				prefix, w->type);
3164		break;
3165	}
3166	return buf;
3167}
3168
/*
 * Dump one thread: its looper state, its transaction stack and any
 * pending work.  If print_always is zero and nothing beyond the
 * header line was produced, the output is rolled back so idle
 * threads are omitted entirely.
 */
static char *print_binder_thread(char *buf, char *end,
				 struct binder_thread *thread,
				 int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	char *start_buf = buf;	/* position before the header, for rollback */
	char *header_buf;	/* position right after the header line */

	buf += snprintf(buf, end - buf, "  thread %d: l %02x\n",
			thread->pid, thread->looper);
	header_buf = buf;
	/* walk the transaction stack: outgoing entries continue via
	 * from_parent, incoming ones via to_parent; anything else is
	 * inconsistent and ends the walk */
	t = thread->transaction_stack;
	while (t) {
		if (buf >= end)
			break;
		if (t->from == thread) {
			buf = print_binder_transaction(buf, end,
						"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			buf = print_binder_transaction(buf, end,
						"    incoming transaction", t);
			t = t->to_parent;
		} else {
			buf = print_binder_transaction(buf, end,
						"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending transaction", w);
	}
	if (!print_always && buf == header_buf)
		buf = start_buf;
	return buf;
}
3209
/*
 * Dump one node: its ids and reference counters, the pid of every
 * proc holding a ref to it, and any queued async work.
 */
static char *print_binder_node(char *buf, char *end, struct binder_node *node)
{
	struct binder_ref *ref;
	struct hlist_node *pos;
	struct binder_work *w;
	int count;

	/* count the refs first so the header line can include the total */
	count = 0;
	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
		count++;

	buf += snprintf(buf, end - buf,
			"  node %d: u%p c%p hs %d hw %d ls %d lw %d "
			"is %d iw %d",
			node->debug_id, node->ptr, node->cookie,
			node->has_strong_ref, node->has_weak_ref,
			node->local_strong_refs, node->local_weak_refs,
			node->internal_strong_refs, count);
	if (buf >= end)
		return buf;
	if (count) {
		/* list the owning proc's pid for each ref */
		buf += snprintf(buf, end - buf, " proc");
		if (buf >= end)
			return buf;
		hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
			buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
			if (buf >= end)
				return buf;
		}
	}
	buf += snprintf(buf, end - buf, "\n");
	list_for_each_entry(w, &node->async_todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending async transaction", w);
	}
	return buf;
}
3249
3250static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
3251{
3252	buf += snprintf(buf, end - buf,
3253			"  ref %d: desc %d %snode %d s %d w %d d %p\n",
3254			ref->debug_id, ref->desc,
3255			ref->node->proc ? "" : "dead ", ref->node->debug_id,
3256			ref->strong, ref->weak, ref->death);
3257	return buf;
3258}
3259
/*
 * Dump one proc: its threads, nodes (all of them when print_all is
 * set, otherwise only those with a pending async transaction), refs
 * (only when print_all), allocated buffers, todo work and delivered
 * death notifications.  When print_all is zero and nothing beyond
 * the header was emitted, the output is rolled back so quiescent
 * procs are skipped.
 */
static char *print_binder_proc(char *buf, char *end,
			       struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	char *start_buf = buf;	/* position before the header, for rollback */
	char *header_buf;	/* position right after the header line */

	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
	header_buf = buf;

	for (n = rb_first(&proc->threads);
	     n != NULL && buf < end;
	     n = rb_next(n))
		buf = print_binder_thread(buf, end,
					  rb_entry(n, struct binder_thread,
						   rb_node), print_all);
	for (n = rb_first(&proc->nodes);
	     n != NULL && buf < end;
	     n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			buf = print_binder_node(buf, end, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL && buf < end;
		     n = rb_next(n))
			buf = print_binder_ref(buf, end,
					       rb_entry(n, struct binder_ref,
							rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers);
	     n != NULL && buf < end;
	     n = rb_next(n))
		buf = print_binder_buffer(buf, end, "  buffer",
					  rb_entry(n, struct binder_buffer,
						   rb_node));
	list_for_each_entry(w, &proc->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "  ",
					"  pending transaction", w);
	}
	/* prints at most one line no matter how many are queued */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		if (buf >= end)
			break;
		buf += snprintf(buf, end - buf,
				"  has delivered dead binder\n");
		break;
	}
	if (!print_all && buf == header_buf)
		buf = start_buf;
	return buf;
}
3316
/* names for the BR_* return codes, indexed by code order; the size is
 * cross-checked against binder_stats.br by a BUILD_BUG_ON in
 * print_binder_stats() */
static const char *binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

/* names for the BC_* command codes, indexed by code order; size is
 * cross-checked against binder_stats.bc in print_binder_stats() */
static const char *binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

/* names for the BINDER_STAT_* object-lifetime counters */
static const char *binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3367
/*
 * Dump the non-zero command (bc), return (br) and object-lifetime
 * counters of a binder_stats, one per line with the given prefix.
 * The BUILD_BUG_ONs pin the string tables to the stats array sizes.
 */
static char *print_binder_stats(char *buf, char *end, const char *prefix,
				struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
			ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
					binder_command_strings[i],
					stats->bc[i]);
		if (buf >= end)
			return buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
			ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
					binder_return_strings[i], stats->br[i]);
		if (buf >= end)
			return buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
			ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
			ARRAY_SIZE(stats->obj_deleted));
	/* active = created - deleted, total = created */
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			buf += snprintf(buf, end - buf,
					"%s%s: active %d total %d\n", prefix,
					binder_objstat_strings[i],
					stats->obj_created[i] -
						stats->obj_deleted[i],
					stats->obj_created[i]);
		if (buf >= end)
			return buf;
	}
	return buf;
}
3411
/*
 * Append summary statistics for one proc: thread, node, ref and
 * buffer counts, thread-pool status and the per-proc binder_stats.
 */
static char *print_binder_proc_stats(char *buf, char *end,
				     struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
	if (buf >= end)
		return buf;
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  threads: %d\n", count);
	if (buf >= end)
		return buf;
	buf += snprintf(buf, end - buf, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	if (buf >= end)
		return buf;
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  nodes: %d\n", count);
	if (buf >= end)
		return buf;
	/* total refs plus the sums of their strong/weak counts */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	buf += snprintf(buf, end - buf, "  refs: %d s %d w %d\n",
			count, strong, weak);
	if (buf >= end)
		return buf;

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  buffers: %d\n", count);
	if (buf >= end)
		return buf;

	/* only BINDER_WORK_TRANSACTION items count as pending transactions */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	buf += snprintf(buf, end - buf, "  pending transactions: %d\n", count);
	if (buf >= end)
		return buf;

	buf = print_binder_stats(buf, end, "  ", &proc->stats);

	return buf;
}
3481
3482
/*
 * /proc/binder/state read handler: dumps all dead nodes followed by
 * the full state of every proc.  Output is single-shot (any non-zero
 * offset returns 0) and truncated at one page.  binder_lock is taken
 * unless the binder_debug_no_lock override is set.
 */
static int binder_read_proc_state(char *page, char **start, off_t off,
				  int count, int *eof, void *data)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	struct binder_node *node;
	int len = 0;
	char *buf = page;
	char *end = page + PAGE_SIZE;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);

	buf += snprintf(buf, end - buf, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		buf += snprintf(buf, end - buf, "dead nodes:\n");
	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) {
		if (buf >= end)
			break;
		buf = print_binder_node(buf, end, node);
	}

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
		if (buf >= end)
			break;
		buf = print_binder_proc(buf, end, proc, 1);
	}
	if (do_lock)
		mutex_unlock(&binder_lock);
	/* snprintf may have advanced past end on truncation; clamp */
	if (buf > page + PAGE_SIZE)
		buf = page + PAGE_SIZE;

	*start = page + off;

	len = buf - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3530
/*
 * /proc/binder/stats read handler: dumps the global binder_stats
 * followed by per-proc statistics.  Single-shot output, truncated at
 * one page.  binder_lock is taken unless binder_debug_no_lock is set.
 */
static int binder_read_proc_stats(char *page, char **start, off_t off,
				  int count, int *eof, void *data)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int len = 0;
	char *p = page;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);

	/* p == page here, so PAGE_SIZE is the full remaining space */
	p += snprintf(p, PAGE_SIZE, "binder stats:\n");

	p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
		if (p >= page + PAGE_SIZE)
			break;
		p = print_binder_proc_stats(p, page + PAGE_SIZE, proc);
	}
	if (do_lock)
		mutex_unlock(&binder_lock);
	/* clamp in case snprintf advanced past the page on truncation */
	if (p > page + PAGE_SIZE)
		p = page + PAGE_SIZE;

	*start = page + off;

	len = p - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3570
3571static int binder_read_proc_transactions(char *page, char **start, off_t off,
3572					 int count, int *eof, void *data)
3573{
3574	struct binder_proc *proc;
3575	struct hlist_node *pos;
3576	int len = 0;
3577	char *buf = page;
3578	char *end = page + PAGE_SIZE;
3579	int do_lock = !binder_debug_no_lock;
3580
3581	if (off)
3582		return 0;
3583
3584	if (do_lock)
3585		mutex_lock(&binder_lock);
3586
3587	buf += snprintf(buf, end - buf, "binder transactions:\n");
3588	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
3589		if (buf >= end)
3590			break;
3591		buf = print_binder_proc(buf, end, proc, 0);
3592	}
3593	if (do_lock)
3594		mutex_unlock(&binder_lock);
3595	if (buf > page + PAGE_SIZE)
3596		buf = page + PAGE_SIZE;
3597
3598	*start = page + off;
3599
3600	len = buf - page;
3601	if (len > off)
3602		len -= off;
3603	else
3604		len = 0;
3605
3606	return len < count ? len  : count;
3607}
3608
/*
 * Per-process /proc/binder/proc/<pid> read handler: dumps the full
 * state of the single proc passed in via @data.  Single-shot output,
 * truncated at one page.
 */
static int binder_read_proc_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	struct binder_proc *proc = data;
	int len = 0;
	char *p = page;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);
	/* p == page here, so PAGE_SIZE is the full remaining space */
	p += snprintf(p, PAGE_SIZE, "binder proc state:\n");
	p = print_binder_proc(p, page + PAGE_SIZE, proc, 1);
	if (do_lock)
		mutex_unlock(&binder_lock);

	/* clamp in case snprintf advanced past the page on truncation */
	if (p > page + PAGE_SIZE)
		p = page + PAGE_SIZE;
	*start = page + off;

	len = p - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3639
3640static char *print_binder_transaction_log_entry(char *buf, char *end,
3641					struct binder_transaction_log_entry *e)
3642{
3643	buf += snprintf(buf, end - buf,
3644			"%d: %s from %d:%d to %d:%d node %d handle %d "
3645			"size %d:%d\n",
3646			e->debug_id, (e->call_type == 2) ? "reply" :
3647			((e->call_type == 1) ? "async" : "call "), e->from_proc,
3648			e->from_thread, e->to_proc, e->to_thread, e->to_node,
3649			e->target_handle, e->data_size, e->offsets_size);
3650	return buf;
3651}
3652
/*
 * Read handler for /proc/binder/transaction_log and
 * failed_transaction_log (@data selects which log).  The log is a
 * ring buffer: once full, entries from next to the end are the
 * oldest and are printed first, followed by entries 0..next-1.
 * Note: the log is read without taking binder_lock.
 */
static int binder_read_proc_transaction_log(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_transaction_log *log = data;
	int len = 0;
	int i;
	char *buf = page;
	char *end = page + PAGE_SIZE;

	if (off)
		return 0;

	if (log->full) {
		/* oldest entries first: from next to the end of the ring */
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++) {
			if (buf >= end)
				break;
			buf = print_binder_transaction_log_entry(buf, end,
								&log->entry[i]);
		}
	}
	for (i = 0; i < log->next; i++) {
		if (buf >= end)
			break;
		buf = print_binder_transaction_log_entry(buf, end,
							 &log->entry[i]);
	}

	*start = page + off;

	len = buf - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3690
/* file operations for the binder character device, wired to the
 * binder_* handlers defined earlier in this file */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

/* registered as a dynamic-minor misc device named "binder" */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3706
3707static int __init binder_init(void)
3708{
3709	int ret;
3710
3711	binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
3712	if (binder_proc_dir_entry_root)
3713		binder_proc_dir_entry_proc = proc_mkdir("proc",
3714						binder_proc_dir_entry_root);
3715	ret = misc_register(&binder_miscdev);
3716	if (binder_proc_dir_entry_root) {
3717		create_proc_read_entry("state",
3718				       S_IRUGO,
3719				       binder_proc_dir_entry_root,
3720				       binder_read_proc_state,
3721				       NULL);
3722		create_proc_read_entry("stats",
3723				       S_IRUGO,
3724				       binder_proc_dir_entry_root,
3725				       binder_read_proc_stats,
3726				       NULL);
3727		create_proc_read_entry("transactions",
3728				       S_IRUGO,
3729				       binder_proc_dir_entry_root,
3730				       binder_read_proc_transactions,
3731				       NULL);
3732		create_proc_read_entry("transaction_log",
3733				       S_IRUGO,
3734				       binder_proc_dir_entry_root,
3735				       binder_read_proc_transaction_log,
3736				       &binder_transaction_log);
3737		create_proc_read_entry("failed_transaction_log",
3738				       S_IRUGO,
3739				       binder_proc_dir_entry_root,
3740				       binder_read_proc_transaction_log,
3741				       &binder_transaction_log_failed);
3742	}
3743	return ret;
3744}
3745
/* run binder_init during the device initcall phase */
device_initcall(binder_init);

MODULE_LICENSE("GPL v2");
3749