binder.c revision 0cf24a7dc9123ddf63c413b6d4b38017b19db713
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <asm/cacheflush.h>
19#include <linux/fdtable.h>
20#include <linux/file.h>
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/miscdevice.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/mutex.h>
27#include <linux/nsproxy.h>
28#include <linux/poll.h>
29#include <linux/proc_fs.h>
30#include <linux/rbtree.h>
31#include <linux/sched.h>
32#include <linux/uaccess.h>
33#include <linux/vmalloc.h>
34#include "binder.h"
35
/* Global driver state; binder_lock serializes essentially all of it. */
static DEFINE_MUTEX(binder_lock);
static HLIST_HEAD(binder_procs);
/* The single context-manager node (handle 0). */
static struct binder_node *binder_context_mgr_node;
/* uid allowed to register as context manager; -1 until first claimed. */
static uid_t binder_context_mgr_uid = -1;
static int binder_last_id;	/* monotonically increasing debug id source */
static struct proc_dir_entry *binder_proc_dir_entry_root;
static struct proc_dir_entry *binder_proc_dir_entry_proc;
/* Nodes whose owning process has died but that are still referenced. */
static struct hlist_head binder_dead_nodes;
/* NOTE(review): presumably procs whose files_struct release is deferred;
 * confirm against the release path (not in view). */
static HLIST_HEAD(binder_release_files_list);
static DEFINE_MUTEX(binder_release_files_lock);

static int binder_read_proc_proc(
	char *page, char **start, off_t off, int count, int *eof, void *data);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K                               0x400
#endif

#ifndef SZ_4M
#define SZ_4M                               0x400000
#endif

/* The shared buffer mapping must not be writable from userspace. */
#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

/* NOTE(review): appears to bound small mappings -- confirm against
 * binder_mmap (not in view). */
#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
62
/* Debug categories: each bit enables one class of printk output and is
 * runtime-tunable through the "debug_mask" module parameter. */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
/* NOTE(review): int variable registered with the "bool" param type --
 * tolerated by old module_param; confirm for the target kernel. */
static int binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 0: normal; 1: stop on next user error; 2: stopped (see the macro
 * below, which latches the value to 2). */
static int binder_stop_on_user_error;
/*
 * Module-parameter setter for "stop_on_user_error": store the new value,
 * then wake threads parked on binder_user_error_wait whenever the driver
 * is no longer in the stopped (== 2) state.
 */
static int binder_set_stop_on_user_error(
	const char *val, struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* Report a userspace-caused error; if stop_on_user_error is armed,
 * latch the driver into the stopped state. */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
107
/* Indices into binder_stats.obj_created / obj_deleted. */
enum {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT	/* must be last */
};

/* Counters kept globally and per proc/thread: one slot per BR_ return
 * code, one per BC_ command, plus object create/delete counts. */
struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

/* One record of the fixed-size circular transaction log (debug aid). */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;	/* slot the next entry will occupy */
	int full;	/* set once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;
147
148static struct binder_transaction_log_entry *binder_transaction_log_add(
149	struct binder_transaction_log *log)
150{
151	struct binder_transaction_log_entry *e;
152	e = &log->entry[log->next];
153	memset(e, 0, sizeof(*e));
154	log->next++;
155	if (log->next == ARRAY_SIZE(log->entry)) {
156		log->next = 0;
157		log->full = 1;
158	}
159	return e;
160}
161
/* An entry on a proc/thread todo list; type tells the consumer how to
 * interpret the containing object. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* Kernel-side identity of one userspace binder object. */
struct binder_node {
	int debug_id;			/* unique id, for debug output */
	struct binder_work work;	/* queued to notify the owner of ref changes */
	union {
		struct rb_node rb_node;		/* in proc->nodes while the owner lives */
		struct hlist_node dead_node;	/* in binder_dead_nodes afterwards */
	};
	struct binder_proc *proc;	/* owning proc; NULL once it has died */
	struct hlist_head refs;		/* every binder_ref pointing at this node */
	int internal_strong_refs;	/* strong refs held on behalf of remote procs */
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;		/* userspace address; lookup key in proc->nodes */
	void __user *cookie;		/* opaque userspace cookie */
	unsigned has_strong_ref : 1;
	unsigned pending_strong_ref : 1;
	unsigned has_weak_ref : 1;
	unsigned pending_weak_ref : 1;
	unsigned has_async_transaction : 1;
	unsigned accept_fds : 1;
	int min_priority : 8;
	struct list_head async_todo;	/* async transactions queued for this node */
};

/* Pending death notification; cookie identifies it to userspace. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc, keyed by desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node, keyed by node */
	struct hlist_node node_entry;	/* in node->refs */
	struct binder_proc *proc;	/* proc that owns this handle */
	struct binder_node *node;	/* target node */
	uint32_t desc;			/* userspace handle value */
	int strong;			/* strong ref count held on node */
	int weak;			/* weak ref count held on node */
	struct binder_ref_death *death;	/* non-NULL if a death notice is armed */
};

/* Header preceding each chunk carved out of the proc's mmap area. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free : 1;
	unsigned allow_user_free : 1;
	unsigned async_transaction : 1;
	unsigned debug_id : 29;	/* packed to keep the header one word */

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];	/* payload starts here, pointer-aligned */
};
236
/* Per-process binder state, one per opener of the binder device. */
struct binder_proc {
	struct hlist_node proc_node;	/* entry in binder_procs */
	struct rb_root threads;		/* binder_thread tree */
	struct rb_root nodes;		/* binder_node tree, keyed by node->ptr */
	struct rb_root refs_by_desc;	/* binder_ref tree, keyed by desc */
	struct rb_root refs_by_node;	/* binder_ref tree, keyed by node address */
	int pid;
	struct vm_area_struct *vma;	/* userspace half of the buffer mapping */
	struct task_struct *tsk;
	struct files_struct *files;	/* target fd table for fd passing; may be NULL */
	struct hlist_node release_files_node;
	void *buffer;			/* kernel base of the double-mapped area */
	ptrdiff_t user_buffer_offset;	/* add to a kernel buffer address to get the user address */

	struct list_head buffers;	/* every binder_buffer, address order */
	struct rb_root free_buffers;	/* free buffers, keyed by size */
	struct rb_root allocated_buffers;	/* in-use buffers, keyed by address */
	size_t free_async_space;	/* budget remaining for async transactions */

	struct page **pages;		/* backing pages, allocated on demand */
	size_t buffer_size;		/* total size of the mapped area */
	uint32_t buffer_free;
	struct list_head todo;		/* work any of this proc's threads may take */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
};

/* Bits of binder_thread.looper describing the thread's loop state. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread state within a binder_proc. */
struct binder_thread {
	struct binder_proc *proc;	/* owning proc */
	struct rb_node rb_node;		/* entry in proc->threads */
	int pid;
	int looper;			/* BINDER_LOOPER_STATE_* bits */
	struct binder_transaction *transaction_stack;	/* in-flight nested transactions */
	struct list_head todo;		/* work targeted at this thread only */
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* One in-flight transaction (or reply). */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;	/* sender; NULL for one-way/dead senders */
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply : 1;
	/*unsigned is_dead : 1;*/ /* not used at the moment */

	struct binder_buffer *buffer;	/* payload in the target's mapping */
	unsigned int	code;
	unsigned int	flags;
	long	priority;		/* priority the target thread should run at */
	long	saved_priority;		/* target's own priority, restored on reply */
	uid_t	sender_euid;
};
312
313/*
314 * copied from get_unused_fd_flags
315 */
/*
 * copied from get_unused_fd_flags
 *
 * Allocate the lowest available file descriptor in @proc's fd table
 * (the *target* process of an fd-passing transaction, not current),
 * honouring that task's RLIMIT_NOFILE and the O_CLOEXEC flag.
 * Returns the new fd, or -ESRCH if the proc has no files_struct,
 * -EMFILE when over limit, or the error from expand_files().
 */
int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	/* If the sighand lock failed (task exiting), rlim_cur stays 0 and
	 * we fail below with -EMFILE. */
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}
380
381/*
382 * copied from fd_install
383 */
/*
 * copied from fd_install
 *
 * Install @file into slot @fd of @proc's fd table.  @fd must have been
 * reserved with task_get_unused_fd_flags (slot must be empty).  Silently
 * does nothing if the proc has no files_struct.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	struct files_struct *files = proc->files;
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}
399
400/*
401 * copied from __put_unused_fd in open.c
402 */
/*
 * copied from __put_unused_fd in open.c
 *
 * Mark @fd free in @files and lower next_fd so the lowest-fd-first
 * search starts no higher than the released slot.  Caller must hold
 * files->file_lock.
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}
410
411/*
412 * copied from sys_close
413 */
/*
 * copied from sys_close
 *
 * Close descriptor @fd in @proc's fd table: clear the slot under
 * file_lock, return it to the free pool, then flush/close the file.
 * Returns 0 or filp_close()'s result, -ESRCH with no files_struct,
 * -EBADF for an invalid fd, and maps restart errors to -EINTR because
 * the slot is already gone and the call cannot be restarted.
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = proc->files;
	struct fdtable *fdt;
	int retval;

	if (files == NULL)
		return -ESRCH;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}
450
/*
 * Set current's nice value to @nice; when the requested value is not
 * permitted, clamp to the most favourable value RLIMIT_NICE allows and
 * log the cap.  If the rlimit grants no range at all (min_nice >= 20),
 * report it through binder_user_error().
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	/* RLIMIT_NICE encodes the limit as 20 - rlim_cur. */
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP)
		printk(KERN_INFO "binder: %d: nice value %ld not allowed use "
		       "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}
467
/*
 * Size in bytes of @buffer's data area: the span from buffer->data up
 * to the next binder_buffer header in the proc's address-ordered list,
 * or up to the end of the whole mapping for the last buffer.
 */
static size_t binder_buffer_size(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}
477
478static void binder_insert_free_buffer(
479	struct binder_proc *proc, struct binder_buffer *new_buffer)
480{
481	struct rb_node **p = &proc->free_buffers.rb_node;
482	struct rb_node *parent = NULL;
483	struct binder_buffer *buffer;
484	size_t buffer_size;
485	size_t new_buffer_size;
486
487	BUG_ON(!new_buffer->free);
488
489	new_buffer_size = binder_buffer_size(proc, new_buffer);
490
491	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
492		printk(KERN_INFO "binder: %d: add free buffer, size %zd, "
493		       "at %p\n", proc->pid, new_buffer_size, new_buffer);
494
495	while (*p) {
496		parent = *p;
497		buffer = rb_entry(parent, struct binder_buffer, rb_node);
498		BUG_ON(!buffer->free);
499
500		buffer_size = binder_buffer_size(proc, buffer);
501
502		if (new_buffer_size < buffer_size)
503			p = &parent->rb_left;
504		else
505			p = &parent->rb_right;
506	}
507	rb_link_node(&new_buffer->rb_node, parent, p);
508	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
509}
510
511static void binder_insert_allocated_buffer(
512	struct binder_proc *proc, struct binder_buffer *new_buffer)
513{
514	struct rb_node **p = &proc->allocated_buffers.rb_node;
515	struct rb_node *parent = NULL;
516	struct binder_buffer *buffer;
517
518	BUG_ON(new_buffer->free);
519
520	while (*p) {
521		parent = *p;
522		buffer = rb_entry(parent, struct binder_buffer, rb_node);
523		BUG_ON(buffer->free);
524
525		if (new_buffer < buffer)
526			p = &parent->rb_left;
527		else if (new_buffer > buffer)
528			p = &parent->rb_right;
529		else
530			BUG();
531	}
532	rb_link_node(&new_buffer->rb_node, parent, p);
533	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
534}
535
536static struct binder_buffer *binder_buffer_lookup(
537	struct binder_proc *proc, void __user *user_ptr)
538{
539	struct rb_node *n = proc->allocated_buffers.rb_node;
540	struct binder_buffer *buffer;
541	struct binder_buffer *kern_ptr;
542
543	kern_ptr = user_ptr - proc->user_buffer_offset
544		- offsetof(struct binder_buffer, data);
545
546	while (n) {
547		buffer = rb_entry(n, struct binder_buffer, rb_node);
548		BUG_ON(buffer->free);
549
550		if (kern_ptr < buffer)
551			n = n->rb_left;
552		else if (kern_ptr > buffer)
553			n = n->rb_right;
554		else
555			return buffer;
556	}
557	return NULL;
558}
559
/*
 * Allocate (@allocate != 0) or free the backing pages for the
 * kernel+user double mapping over [start, end).  If no @vma is passed,
 * the target mm is looked up, mmap_sem taken, and proc->vma used.
 *
 * Each page is (1) allocated, (2) mapped into the kernel via
 * map_vm_area, and (3) inserted into the user vma.  The free loop's
 * interior labels double as the error-unwind path: a failure in the
 * allocate loop jumps into the middle of the free loop at the step
 * matching how far the failing page got.  Do not reorder.
 *
 * Returns 0 on success, -ENOMEM on any failure (already-mapped pages
 * from this call are torn down first).
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
	void *start, void *end, struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: %s pages %p-%p\n",
		       proc->pid, allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	/* With an explicit vma the caller already holds mmap_sem. */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* Walk backwards so the error path below unwinds in the exact
	 * reverse order of the allocate loop above. */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}
656
/*
 * Carve a buffer of ALIGN(data_size) + ALIGN(offsets_size) bytes out of
 * the proc's mapping.  Best-fit search over the size-ordered free tree;
 * the chosen chunk is split when the remainder can hold another header
 * plus at least 4 bytes.  Backing pages are faulted in on demand.
 * @is_async buffers also consume proc->free_async_space.
 * Returns NULL on overflow, exhaustion, or page-allocation failure.
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
	size_t data_size, size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* Detect unsigned overflow of the combined, aligned size. */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
			printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd f"
			       "ailed, no async space left\n", proc->pid, size);
		return NULL;
	}

	/* Best-fit: smallest free buffer >= size.  n == NULL afterwards
	 * means best_fit is larger than size (inexact fit). */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got buff"
		       "er %p size %zd\n", proc->pid, size, buffer, buffer_size);

	/* Last page already (partially) backed by the following buffer. */
	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	/* Split: the remainder becomes a new free buffer right after the
	 * allocated data area. */
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got "
		       "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd "
			       "async free %zd\n", proc->pid, size,
			       proc->free_async_space);
	}

	return buffer;
}
760
761static void *buffer_start_page(struct binder_buffer *buffer)
762{
763	return (void *)((uintptr_t)buffer & PAGE_MASK);
764}
765
766static void *buffer_end_page(struct binder_buffer *buffer)
767{
768	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
769}
770
771static void binder_delete_free_buffer(
772	struct binder_proc *proc, struct binder_buffer *buffer)
773{
774	struct binder_buffer *prev, *next = NULL;
775	int free_page_end = 1;
776	int free_page_start = 1;
777
778	BUG_ON(proc->buffers.next == &buffer->entry);
779	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
780	BUG_ON(!prev->free);
781	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
782		free_page_start = 0;
783		if (buffer_end_page(prev) == buffer_end_page(buffer))
784			free_page_end = 0;
785		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
786			printk(KERN_INFO "binder: %d: merge free, buffer %p "
787			       "share page with %p\n", proc->pid, buffer, prev);
788	}
789
790	if (!list_is_last(&buffer->entry, &proc->buffers)) {
791		next = list_entry(buffer->entry.next,
792				  struct binder_buffer, entry);
793		if (buffer_start_page(next) == buffer_end_page(buffer)) {
794			free_page_end = 0;
795			if (buffer_start_page(next) ==
796			    buffer_start_page(buffer))
797				free_page_start = 0;
798			if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
799				printk(KERN_INFO "binder: %d: merge free, "
800				       "buffer %p share page with %p\n",
801				       proc->pid, buffer, prev);
802		}
803	}
804	list_del(&buffer->entry);
805	if (free_page_start || free_page_end) {
806		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
807			printk(KERN_INFO "binder: %d: merge free, buffer %p do "
808			       "not share page%s%s with with %p or %p\n",
809			       proc->pid, buffer, free_page_start ? "" : " end",
810			       free_page_end ? "" : " start", prev, next);
811		binder_update_page_range(proc, 0, free_page_start ?
812			buffer_start_page(buffer) : buffer_end_page(buffer),
813			(free_page_end ? buffer_end_page(buffer) :
814			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
815	}
816}
817
/*
 * Return @buffer to the free pool: release the whole pages that backed
 * its data area, then coalesce with a free successor and/or predecessor
 * in the address-ordered list before reinserting into free_buffers.
 * Async buffers credit their space back to free_async_space.
 */
static void binder_free_buf(
	struct binder_proc *proc, struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));
	if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
		printk(KERN_INFO "binder: %d: binder_free_buf %p size %zd buffer"
		       "_size %zd\n", proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);
		if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC)
			printk(KERN_INFO "binder: %d: binder_free_buf size %zd "
			       "async free %zd\n", proc->pid, size,
			       proc->free_async_space);
	}

	/* Release only the fully-contained pages; boundary pages may
	 * still back the neighbouring buffers' headers. */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* Merge a free successor into this buffer... */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* ...then merge this buffer into a free predecessor. */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
870
871static struct binder_node *
872binder_get_node(struct binder_proc *proc, void __user *ptr)
873{
874	struct rb_node *n = proc->nodes.rb_node;
875	struct binder_node *node;
876
877	while (n) {
878		node = rb_entry(n, struct binder_node, rb_node);
879
880		if (ptr < node->ptr)
881			n = n->rb_left;
882		else if (ptr > node->ptr)
883			n = n->rb_right;
884		else
885			return node;
886	}
887	return NULL;
888}
889
890static struct binder_node *
891binder_new_node(struct binder_proc *proc, void __user *ptr, void __user *cookie)
892{
893	struct rb_node **p = &proc->nodes.rb_node;
894	struct rb_node *parent = NULL;
895	struct binder_node *node;
896
897	while (*p) {
898		parent = *p;
899		node = rb_entry(parent, struct binder_node, rb_node);
900
901		if (ptr < node->ptr)
902			p = &(*p)->rb_left;
903		else if (ptr > node->ptr)
904			p = &(*p)->rb_right;
905		else
906			return NULL;
907	}
908
909	node = kzalloc(sizeof(*node), GFP_KERNEL);
910	if (node == NULL)
911		return NULL;
912	binder_stats.obj_created[BINDER_STAT_NODE]++;
913	rb_link_node(&node->rb_node, parent, p);
914	rb_insert_color(&node->rb_node, &proc->nodes);
915	node->debug_id = ++binder_last_id;
916	node->proc = proc;
917	node->ptr = ptr;
918	node->cookie = cookie;
919	node->work.type = BINDER_WORK_NODE;
920	INIT_LIST_HEAD(&node->work.entry);
921	INIT_LIST_HEAD(&node->async_todo);
922	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
923		printk(KERN_INFO "binder: %d:%d node %d u%p c%p created\n",
924		       proc->pid, current->pid, node->debug_id,
925		       node->ptr, node->cookie);
926	return node;
927}
928
/*
 * Take a reference on @node.
 * @strong:      strong vs weak reference
 * @internal:    ref held on behalf of a remote proc (a binder_ref)
 *               rather than locally by the owner
 * @target_list: work list on which to queue the node so the owning
 *               process is told about the ref-state change; required
 *               when the node does not yet have a userspace ref of the
 *               requested strength.
 * Returns 0, or -EINVAL on an inconsistent increment.
 */
static int
binder_inc_node(struct binder_node *node, int strong, int internal,
		struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/* First remote strong ref must come with a list to
			 * notify on (context manager is pre-referenced). */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* Requeue so userspace learns it needs a strong ref. */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
964
/*
 * Drop a reference previously taken with binder_inc_node.  When the
 * last ref of a kind goes away and userspace still holds one, queue
 * work so the owner can release it; once nothing references the node
 * at all, unlink it (from proc->nodes, or binder_dead_nodes if the
 * owner already died) and free it.  Always returns 0.
 */
static int
binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* Userspace still holds a ref: ask it to drop it. */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
					printk(KERN_INFO "binder: refless node %d deleted\n", node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
					printk(KERN_INFO "binder: dead node %d deleted\n", node->debug_id);
			}
			kfree(node);
			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
		}
	}

	return 0;
}
1006
1007
1008static struct binder_ref *
1009binder_get_ref(struct binder_proc *proc, uint32_t desc)
1010{
1011	struct rb_node *n = proc->refs_by_desc.rb_node;
1012	struct binder_ref *ref;
1013
1014	while (n) {
1015		ref = rb_entry(n, struct binder_ref, rb_node_desc);
1016
1017		if (desc < ref->desc)
1018			n = n->rb_left;
1019		else if (desc > ref->desc)
1020			n = n->rb_right;
1021		else
1022			return ref;
1023	}
1024	return NULL;
1025}
1026
/*
 * Return @proc's ref to @node, creating a new one if none exists yet.
 *
 * A ref is indexed twice: in proc->refs_by_node (keyed by the node
 * pointer) and in proc->refs_by_desc (keyed by the userspace handle
 * value).  A freshly created ref gets the lowest unused descriptor,
 * except that descriptor 0 is reserved for the context manager node.
 * Returns NULL only on allocation failure.
 */
static struct binder_ref *
binder_get_ref_for_node(struct binder_proc *proc, struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	/* Fast path: an existing ref to this node is returned as-is. */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats.obj_created[BINDER_STAT_REF]++;
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Pick the descriptor: start at 0 for the context manager, 1
	 * otherwise, then walk refs_by_desc in ascending order bumping
	 * past every descriptor already in use; the first gap wins.
	 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* Insert under the chosen descriptor; a duplicate is impossible. */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	/* Link the ref onto the node's list so node death can find it. */
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);
		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
			printk(KERN_INFO "binder: %d new ref %d desc %d for "
				"node %d\n", proc->pid, new_ref->debug_id,
				new_ref->desc, node->debug_id);
	} else {
		if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
			printk(KERN_INFO "binder: %d new ref %d desc %d for "
				"dead node\n", proc->pid, new_ref->debug_id,
				new_ref->desc);
	}
	return new_ref;
}
1092
/*
 * Tear down @ref: remove it from both of its process's rbtrees and from
 * the node's ref list, drop the node references it held (one strong if
 * ref->strong is set, plus the weak reference every ref holds), cancel
 * any pending death notification, and free the ref.
 */
static void
binder_delete_ref(struct binder_ref *ref)
{
	if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
		printk(KERN_INFO "binder: %d delete ref %d desc %d for "
			"node %d\n", ref->proc->pid, ref->debug_id,
			ref->desc, ref->node->debug_id);
	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	/*
	 * Unlink from the node's ref list before dropping the weak
	 * reference; NOTE(review): the final weak dec may free the node,
	 * so ref->node must not be touched after this call.
	 */
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
			printk(KERN_INFO "binder: %d delete ref %d desc %d "
				"has death notification\n", ref->proc->pid,
				ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
	}
	kfree(ref);
	binder_stats.obj_deleted[BINDER_STAT_REF]++;
}
1118
1119static int
1120binder_inc_ref(
1121	struct binder_ref *ref, int strong, struct list_head *target_list)
1122{
1123	int ret;
1124	if (strong) {
1125		if (ref->strong == 0) {
1126			ret = binder_inc_node(ref->node, 1, 1, target_list);
1127			if (ret)
1128				return ret;
1129		}
1130		ref->strong++;
1131	} else {
1132		if (ref->weak == 0) {
1133			ret = binder_inc_node(ref->node, 0, 1, target_list);
1134			if (ret)
1135				return ret;
1136		}
1137		ref->weak++;
1138	}
1139	return 0;
1140}
1141
1142
1143static int
1144binder_dec_ref(struct binder_ref *ref, int strong)
1145{
1146	if (strong) {
1147		if (ref->strong == 0) {
1148			binder_user_error("binder: %d invalid dec strong, "
1149					  "ref %d desc %d s %d w %d\n",
1150					  ref->proc->pid, ref->debug_id,
1151					  ref->desc, ref->strong, ref->weak);
1152			return -EINVAL;
1153		}
1154		ref->strong--;
1155		if (ref->strong == 0) {
1156			int ret;
1157			ret = binder_dec_node(ref->node, strong, 1);
1158			if (ret)
1159				return ret;
1160		}
1161	} else {
1162		if (ref->weak == 0) {
1163			binder_user_error("binder: %d invalid dec weak, "
1164					  "ref %d desc %d s %d w %d\n",
1165					  ref->proc->pid, ref->debug_id,
1166					  ref->desc, ref->strong, ref->weak);
1167			return -EINVAL;
1168		}
1169		ref->weak--;
1170	}
1171	if (ref->strong == 0 && ref->weak == 0)
1172		binder_delete_ref(ref);
1173	return 0;
1174}
1175
1176static void
1177binder_pop_transaction(
1178	struct binder_thread *target_thread, struct binder_transaction *t)
1179{
1180	if (target_thread) {
1181		BUG_ON(target_thread->transaction_stack != t);
1182		BUG_ON(target_thread->transaction_stack->from != target_thread);
1183		target_thread->transaction_stack =
1184			target_thread->transaction_stack->from_parent;
1185		t->from = NULL;
1186	}
1187	t->need_reply = 0;
1188	if (t->buffer)
1189		t->buffer->transaction = NULL;
1190	kfree(t);
1191	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
1192}
1193
/*
 * Deliver @error_code for failed transaction @t back toward the original
 * caller.  Walks down the chain of nested transactions (via from_parent)
 * until a live sender thread is found; that thread gets the error in
 * return_error and is woken.  Transactions visited along the way are
 * freed with binder_pop_transaction().  Never called for one-way
 * transactions (they have no sender waiting).
 */
static void
binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/*
			 * If an undelivered error is already queued, move it
			 * into the secondary slot so the new code can take
			 * the primary one.
			 */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
					printk(KERN_INFO "binder: send failed reply for transaction %d to %d:%d\n",
					       t->debug_id, target_thread->proc->pid, target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* Both error slots occupied: drop this one. */
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* Sender thread is gone; retry its parent caller. */
			struct binder_transaction *next = t->from_parent;

			if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
				printk(KERN_INFO "binder: send failed reply "
					"for transaction %d, target dead\n",
					t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
					printk(KERN_INFO "binder: reply failed,"
						" no target thread at root\n");
				return;
			}
			t = next;
			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
				printk(KERN_INFO "binder: reply failed, no targ"
					"et thread -- retry %d\n", t->debug_id);
		}
	}
}
1246
1247static void
1248binder_transaction_buffer_release(struct binder_proc *proc,
1249			struct binder_buffer *buffer, size_t *failed_at);
1250
1251static void
1252binder_transaction(struct binder_proc *proc, struct binder_thread *thread,
1253	struct binder_transaction_data *tr, int reply)
1254{
1255	struct binder_transaction *t;
1256	struct binder_work *tcomplete;
1257	size_t *offp, *off_end;
1258	struct binder_proc *target_proc;
1259	struct binder_thread *target_thread = NULL;
1260	struct binder_node *target_node = NULL;
1261	struct list_head *target_list;
1262	wait_queue_head_t *target_wait;
1263	struct binder_transaction *in_reply_to = NULL;
1264	struct binder_transaction_log_entry *e;
1265	uint32_t return_error;
1266
1267	e = binder_transaction_log_add(&binder_transaction_log);
1268	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1269	e->from_proc = proc->pid;
1270	e->from_thread = thread->pid;
1271	e->target_handle = tr->target.handle;
1272	e->data_size = tr->data_size;
1273	e->offsets_size = tr->offsets_size;
1274
1275	if (reply) {
1276		in_reply_to = thread->transaction_stack;
1277		if (in_reply_to == NULL) {
1278			binder_user_error("binder: %d:%d got reply transaction "
1279					  "with no transaction stack\n",
1280					  proc->pid, thread->pid);
1281			return_error = BR_FAILED_REPLY;
1282			goto err_empty_call_stack;
1283		}
1284		binder_set_nice(in_reply_to->saved_priority);
1285		if (in_reply_to->to_thread != thread) {
1286			binder_user_error("binder: %d:%d got reply transaction "
1287				"with bad transaction stack,"
1288				" transaction %d has target %d:%d\n",
1289				proc->pid, thread->pid, in_reply_to->debug_id,
1290				in_reply_to->to_proc ?
1291				in_reply_to->to_proc->pid : 0,
1292				in_reply_to->to_thread ?
1293				in_reply_to->to_thread->pid : 0);
1294			return_error = BR_FAILED_REPLY;
1295			in_reply_to = NULL;
1296			goto err_bad_call_stack;
1297		}
1298		thread->transaction_stack = in_reply_to->to_parent;
1299		target_thread = in_reply_to->from;
1300		if (target_thread == NULL) {
1301			return_error = BR_DEAD_REPLY;
1302			goto err_dead_binder;
1303		}
1304		if (target_thread->transaction_stack != in_reply_to) {
1305			binder_user_error("binder: %d:%d got reply transaction "
1306				"with bad target transaction stack %d, "
1307				"expected %d\n",
1308				proc->pid, thread->pid,
1309				target_thread->transaction_stack ?
1310				target_thread->transaction_stack->debug_id : 0,
1311				in_reply_to->debug_id);
1312			return_error = BR_FAILED_REPLY;
1313			in_reply_to = NULL;
1314			target_thread = NULL;
1315			goto err_dead_binder;
1316		}
1317		target_proc = target_thread->proc;
1318	} else {
1319		if (tr->target.handle) {
1320			struct binder_ref *ref;
1321			ref = binder_get_ref(proc, tr->target.handle);
1322			if (ref == NULL) {
1323				binder_user_error("binder: %d:%d got "
1324					"transaction to invalid handle\n",
1325					proc->pid, thread->pid);
1326				return_error = BR_FAILED_REPLY;
1327				goto err_invalid_target_handle;
1328			}
1329			target_node = ref->node;
1330		} else {
1331			target_node = binder_context_mgr_node;
1332			if (target_node == NULL) {
1333				return_error = BR_DEAD_REPLY;
1334				goto err_no_context_mgr_node;
1335			}
1336		}
1337		e->to_node = target_node->debug_id;
1338		target_proc = target_node->proc;
1339		if (target_proc == NULL) {
1340			return_error = BR_DEAD_REPLY;
1341			goto err_dead_binder;
1342		}
1343		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1344			struct binder_transaction *tmp;
1345			tmp = thread->transaction_stack;
1346			if (tmp->to_thread != thread) {
1347				binder_user_error("binder: %d:%d got new "
1348					"transaction with bad transaction stack"
1349					", transaction %d has target %d:%d\n",
1350					proc->pid, thread->pid, tmp->debug_id,
1351					tmp->to_proc ? tmp->to_proc->pid : 0,
1352					tmp->to_thread ?
1353					tmp->to_thread->pid : 0);
1354				return_error = BR_FAILED_REPLY;
1355				goto err_bad_call_stack;
1356			}
1357			while (tmp) {
1358				if (tmp->from && tmp->from->proc == target_proc)
1359					target_thread = tmp->from;
1360				tmp = tmp->from_parent;
1361			}
1362		}
1363	}
1364	if (target_thread) {
1365		e->to_thread = target_thread->pid;
1366		target_list = &target_thread->todo;
1367		target_wait = &target_thread->wait;
1368	} else {
1369		target_list = &target_proc->todo;
1370		target_wait = &target_proc->wait;
1371	}
1372	e->to_proc = target_proc->pid;
1373
1374	/* TODO: reuse incoming transaction for reply */
1375	t = kzalloc(sizeof(*t), GFP_KERNEL);
1376	if (t == NULL) {
1377		return_error = BR_FAILED_REPLY;
1378		goto err_alloc_t_failed;
1379	}
1380	binder_stats.obj_created[BINDER_STAT_TRANSACTION]++;
1381
1382	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1383	if (tcomplete == NULL) {
1384		return_error = BR_FAILED_REPLY;
1385		goto err_alloc_tcomplete_failed;
1386	}
1387	binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++;
1388
1389	t->debug_id = ++binder_last_id;
1390	e->debug_id = t->debug_id;
1391
1392	if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) {
1393		if (reply)
1394			printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, "
1395			       "data %p-%p size %zd-%zd\n",
1396			       proc->pid, thread->pid, t->debug_id,
1397			       target_proc->pid, target_thread->pid,
1398			       tr->data.ptr.buffer, tr->data.ptr.offsets,
1399			       tr->data_size, tr->offsets_size);
1400		else
1401			printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> "
1402			       "%d - node %d, data %p-%p size %zd-%zd\n",
1403			       proc->pid, thread->pid, t->debug_id,
1404			       target_proc->pid, target_node->debug_id,
1405			       tr->data.ptr.buffer, tr->data.ptr.offsets,
1406			       tr->data_size, tr->offsets_size);
1407	}
1408
1409	if (!reply && !(tr->flags & TF_ONE_WAY))
1410		t->from = thread;
1411	else
1412		t->from = NULL;
1413	t->sender_euid = proc->tsk->cred->euid;
1414	t->to_proc = target_proc;
1415	t->to_thread = target_thread;
1416	t->code = tr->code;
1417	t->flags = tr->flags;
1418	t->priority = task_nice(current);
1419	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
1420		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
1421	if (t->buffer == NULL) {
1422		return_error = BR_FAILED_REPLY;
1423		goto err_binder_alloc_buf_failed;
1424	}
1425	t->buffer->allow_user_free = 0;
1426	t->buffer->debug_id = t->debug_id;
1427	t->buffer->transaction = t;
1428	t->buffer->target_node = target_node;
1429	if (target_node)
1430		binder_inc_node(target_node, 1, 0, NULL);
1431
1432	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
1433
1434	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
1435		binder_user_error("binder: %d:%d got transaction with invalid "
1436			"data ptr\n", proc->pid, thread->pid);
1437		return_error = BR_FAILED_REPLY;
1438		goto err_copy_data_failed;
1439	}
1440	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
1441		binder_user_error("binder: %d:%d got transaction with invalid "
1442			"offsets ptr\n", proc->pid, thread->pid);
1443		return_error = BR_FAILED_REPLY;
1444		goto err_copy_data_failed;
1445	}
1446	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
1447		binder_user_error("binder: %d:%d got transaction with "
1448			"invalid offsets size, %zd\n",
1449			proc->pid, thread->pid, tr->offsets_size);
1450		return_error = BR_FAILED_REPLY;
1451		goto err_bad_offset;
1452	}
1453	off_end = (void *)offp + tr->offsets_size;
1454	for (; offp < off_end; offp++) {
1455		struct flat_binder_object *fp;
1456		if (*offp > t->buffer->data_size - sizeof(*fp) ||
1457		    t->buffer->data_size < sizeof(*fp) ||
1458		    !IS_ALIGNED(*offp, sizeof(void *))) {
1459			binder_user_error("binder: %d:%d got transaction with "
1460				"invalid offset, %zd\n",
1461				proc->pid, thread->pid, *offp);
1462			return_error = BR_FAILED_REPLY;
1463			goto err_bad_offset;
1464		}
1465		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
1466		switch (fp->type) {
1467		case BINDER_TYPE_BINDER:
1468		case BINDER_TYPE_WEAK_BINDER: {
1469			struct binder_ref *ref;
1470			struct binder_node *node = binder_get_node(proc, fp->binder);
1471			if (node == NULL) {
1472				node = binder_new_node(proc, fp->binder, fp->cookie);
1473				if (node == NULL) {
1474					return_error = BR_FAILED_REPLY;
1475					goto err_binder_new_node_failed;
1476				}
1477				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1478				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1479			}
1480			if (fp->cookie != node->cookie) {
1481				binder_user_error("binder: %d:%d sending u%p "
1482					"node %d, cookie mismatch %p != %p\n",
1483					proc->pid, thread->pid,
1484					fp->binder, node->debug_id,
1485					fp->cookie, node->cookie);
1486				goto err_binder_get_ref_for_node_failed;
1487			}
1488			ref = binder_get_ref_for_node(target_proc, node);
1489			if (ref == NULL) {
1490				return_error = BR_FAILED_REPLY;
1491				goto err_binder_get_ref_for_node_failed;
1492			}
1493			if (fp->type == BINDER_TYPE_BINDER)
1494				fp->type = BINDER_TYPE_HANDLE;
1495			else
1496				fp->type = BINDER_TYPE_WEAK_HANDLE;
1497			fp->handle = ref->desc;
1498			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo);
1499			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
1500				printk(KERN_INFO "        node %d u%p -> ref %d desc %d\n",
1501				       node->debug_id, node->ptr, ref->debug_id, ref->desc);
1502		} break;
1503		case BINDER_TYPE_HANDLE:
1504		case BINDER_TYPE_WEAK_HANDLE: {
1505			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1506			if (ref == NULL) {
1507				binder_user_error("binder: %d:%d got "
1508					"transaction with invalid "
1509					"handle, %ld\n", proc->pid,
1510					thread->pid, fp->handle);
1511				return_error = BR_FAILED_REPLY;
1512				goto err_binder_get_ref_failed;
1513			}
1514			if (ref->node->proc == target_proc) {
1515				if (fp->type == BINDER_TYPE_HANDLE)
1516					fp->type = BINDER_TYPE_BINDER;
1517				else
1518					fp->type = BINDER_TYPE_WEAK_BINDER;
1519				fp->binder = ref->node->ptr;
1520				fp->cookie = ref->node->cookie;
1521				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
1522				if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
1523					printk(KERN_INFO "        ref %d desc %d -> node %d u%p\n",
1524					       ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr);
1525			} else {
1526				struct binder_ref *new_ref;
1527				new_ref = binder_get_ref_for_node(target_proc, ref->node);
1528				if (new_ref == NULL) {
1529					return_error = BR_FAILED_REPLY;
1530					goto err_binder_get_ref_for_node_failed;
1531				}
1532				fp->handle = new_ref->desc;
1533				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
1534				if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
1535					printk(KERN_INFO "        ref %d desc %d -> ref %d desc %d (node %d)\n",
1536					       ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id);
1537			}
1538		} break;
1539
1540		case BINDER_TYPE_FD: {
1541			int target_fd;
1542			struct file *file;
1543
1544			if (reply) {
1545				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
1546					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
1547						proc->pid, thread->pid, fp->handle);
1548					return_error = BR_FAILED_REPLY;
1549					goto err_fd_not_allowed;
1550				}
1551			} else if (!target_node->accept_fds) {
1552				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
1553					proc->pid, thread->pid, fp->handle);
1554				return_error = BR_FAILED_REPLY;
1555				goto err_fd_not_allowed;
1556			}
1557
1558			file = fget(fp->handle);
1559			if (file == NULL) {
1560				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
1561					proc->pid, thread->pid, fp->handle);
1562				return_error = BR_FAILED_REPLY;
1563				goto err_fget_failed;
1564			}
1565			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1566			if (target_fd < 0) {
1567				fput(file);
1568				return_error = BR_FAILED_REPLY;
1569				goto err_get_unused_fd_failed;
1570			}
1571			task_fd_install(target_proc, target_fd, file);
1572			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
1573				printk(KERN_INFO "        fd %ld -> %d\n", fp->handle, target_fd);
1574			/* TODO: fput? */
1575			fp->handle = target_fd;
1576		} break;
1577
1578		default:
1579			binder_user_error("binder: %d:%d got transactio"
1580				"n with invalid object type, %lx\n",
1581				proc->pid, thread->pid, fp->type);
1582			return_error = BR_FAILED_REPLY;
1583			goto err_bad_object_type;
1584		}
1585	}
1586	if (reply) {
1587		BUG_ON(t->buffer->async_transaction != 0);
1588		binder_pop_transaction(target_thread, in_reply_to);
1589	} else if (!(t->flags & TF_ONE_WAY)) {
1590		BUG_ON(t->buffer->async_transaction != 0);
1591		t->need_reply = 1;
1592		t->from_parent = thread->transaction_stack;
1593		thread->transaction_stack = t;
1594	} else {
1595		BUG_ON(target_node == NULL);
1596		BUG_ON(t->buffer->async_transaction != 1);
1597		if (target_node->has_async_transaction) {
1598			target_list = &target_node->async_todo;
1599			target_wait = NULL;
1600		} else
1601			target_node->has_async_transaction = 1;
1602	}
1603	t->work.type = BINDER_WORK_TRANSACTION;
1604	list_add_tail(&t->work.entry, target_list);
1605	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
1606	list_add_tail(&tcomplete->entry, &thread->todo);
1607	if (target_wait)
1608		wake_up_interruptible(target_wait);
1609	return;
1610
1611err_get_unused_fd_failed:
1612err_fget_failed:
1613err_fd_not_allowed:
1614err_binder_get_ref_for_node_failed:
1615err_binder_get_ref_failed:
1616err_binder_new_node_failed:
1617err_bad_object_type:
1618err_bad_offset:
1619err_copy_data_failed:
1620	binder_transaction_buffer_release(target_proc, t->buffer, offp);
1621	t->buffer->transaction = NULL;
1622	binder_free_buf(target_proc, t->buffer);
1623err_binder_alloc_buf_failed:
1624	kfree(tcomplete);
1625	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
1626err_alloc_tcomplete_failed:
1627	kfree(t);
1628	binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
1629err_alloc_t_failed:
1630err_bad_call_stack:
1631err_empty_call_stack:
1632err_dead_binder:
1633err_invalid_target_handle:
1634err_no_context_mgr_node:
1635	if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
1636		printk(KERN_INFO "binder: %d:%d transaction failed %d, size"
1637				"%zd-%zd\n",
1638			   proc->pid, thread->pid, return_error,
1639			   tr->data_size, tr->offsets_size);
1640
1641	{
1642		struct binder_transaction_log_entry *fe;
1643		fe = binder_transaction_log_add(&binder_transaction_log_failed);
1644		*fe = *e;
1645	}
1646
1647	BUG_ON(thread->return_error != BR_OK);
1648	if (in_reply_to) {
1649		thread->return_error = BR_TRANSACTION_COMPLETE;
1650		binder_send_failed_reply(in_reply_to, return_error);
1651	} else
1652		thread->return_error = return_error;
1653}
1654
/*
 * Release every object reference held by @buffer: walk its offsets
 * array and drop the node/ref counts taken by binder_transaction().
 * When @failed_at is non-NULL the transaction was aborted while being
 * translated, so only objects before that offset are released; fds are
 * closed only in that case (on a completed transaction they belong to
 * the receiver).  Bad entries are logged and skipped, never fatal.
 */
static void
binder_transaction_buffer_release(struct binder_proc *proc, struct binder_buffer *buffer, size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
		printk(KERN_INFO "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
			   proc->pid, buffer->debug_id,
			   buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* Offsets array sits right after the pointer-aligned data. */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* Same validity check as when the buffer was built. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			printk(KERN_ERR "binder: transaction release %d bad"
					"offset %zd, size %zd\n", debug_id, *offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* Drop the node ref binder_transaction() took. */
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				printk(KERN_ERR "binder: transaction release %d bad node %p\n", debug_id, fp->binder);
				break;
			}
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
				printk(KERN_INFO "        node %d u%p\n",
				       node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			/* Drop the ref count taken for the receiver. */
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle);
				break;
			}
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
				printk(KERN_INFO "        ref %d desc %d (node %d)\n",
				       ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
				printk(KERN_INFO "        fd %ld\n", fp->handle);
			/* Only close fds on the abort path; otherwise the
			 * receiver owns them. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			printk(KERN_ERR "binder: transaction release %d bad object type %lx\n", debug_id, fp->type);
			break;
		}
	}
}
1723
1724int
1725binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1726		    void __user *buffer, int size, signed long *consumed)
1727{
1728	uint32_t cmd;
1729	void __user *ptr = buffer + *consumed;
1730	void __user *end = buffer + size;
1731
1732	while (ptr < end && thread->return_error == BR_OK) {
1733		if (get_user(cmd, (uint32_t __user *)ptr))
1734			return -EFAULT;
1735		ptr += sizeof(uint32_t);
1736		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
1737			binder_stats.bc[_IOC_NR(cmd)]++;
1738			proc->stats.bc[_IOC_NR(cmd)]++;
1739			thread->stats.bc[_IOC_NR(cmd)]++;
1740		}
1741		switch (cmd) {
1742		case BC_INCREFS:
1743		case BC_ACQUIRE:
1744		case BC_RELEASE:
1745		case BC_DECREFS: {
1746			uint32_t target;
1747			struct binder_ref *ref;
1748			const char *debug_string;
1749
1750			if (get_user(target, (uint32_t __user *)ptr))
1751				return -EFAULT;
1752			ptr += sizeof(uint32_t);
1753			if (target == 0 && binder_context_mgr_node &&
1754			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
1755				ref = binder_get_ref_for_node(proc,
1756					       binder_context_mgr_node);
1757				if (ref->desc != target) {
1758					binder_user_error("binder: %d:"
1759						"%d tried to acquire "
1760						"reference to desc 0, "
1761						"got %d instead\n",
1762						proc->pid, thread->pid,
1763						ref->desc);
1764				}
1765			} else
1766				ref = binder_get_ref(proc, target);
1767			if (ref == NULL) {
1768				binder_user_error("binder: %d:%d refcou"
1769					"nt change on invalid ref %d\n",
1770					proc->pid, thread->pid, target);
1771				break;
1772			}
1773			switch (cmd) {
1774			case BC_INCREFS:
1775				debug_string = "IncRefs";
1776				binder_inc_ref(ref, 0, NULL);
1777				break;
1778			case BC_ACQUIRE:
1779				debug_string = "Acquire";
1780				binder_inc_ref(ref, 1, NULL);
1781				break;
1782			case BC_RELEASE:
1783				debug_string = "Release";
1784				binder_dec_ref(ref, 1);
1785				break;
1786			case BC_DECREFS:
1787			default:
1788				debug_string = "DecRefs";
1789				binder_dec_ref(ref, 0);
1790				break;
1791			}
1792			if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
1793				printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
1794				       proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id);
1795			break;
1796		}
1797		case BC_INCREFS_DONE:
1798		case BC_ACQUIRE_DONE: {
1799			void __user *node_ptr;
1800			void *cookie;
1801			struct binder_node *node;
1802
1803			if (get_user(node_ptr, (void * __user *)ptr))
1804				return -EFAULT;
1805			ptr += sizeof(void *);
1806			if (get_user(cookie, (void * __user *)ptr))
1807				return -EFAULT;
1808			ptr += sizeof(void *);
1809			node = binder_get_node(proc, node_ptr);
1810			if (node == NULL) {
1811				binder_user_error("binder: %d:%d "
1812					"%s u%p no match\n",
1813					proc->pid, thread->pid,
1814					cmd == BC_INCREFS_DONE ?
1815					"BC_INCREFS_DONE" :
1816					"BC_ACQUIRE_DONE",
1817					node_ptr);
1818				break;
1819			}
1820			if (cookie != node->cookie) {
1821				binder_user_error("binder: %d:%d %s u%p node %d"
1822					" cookie mismatch %p != %p\n",
1823					proc->pid, thread->pid,
1824					cmd == BC_INCREFS_DONE ?
1825					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
1826					node_ptr, node->debug_id,
1827					cookie, node->cookie);
1828				break;
1829			}
1830			if (cmd == BC_ACQUIRE_DONE) {
1831				if (node->pending_strong_ref == 0) {
1832					binder_user_error("binder: %d:%d "
1833						"BC_ACQUIRE_DONE node %d has "
1834						"no pending acquire request\n",
1835						proc->pid, thread->pid,
1836						node->debug_id);
1837					break;
1838				}
1839				node->pending_strong_ref = 0;
1840			} else {
1841				if (node->pending_weak_ref == 0) {
1842					binder_user_error("binder: %d:%d "
1843						"BC_INCREFS_DONE node %d has "
1844						"no pending increfs request\n",
1845						proc->pid, thread->pid,
1846						node->debug_id);
1847					break;
1848				}
1849				node->pending_weak_ref = 0;
1850			}
1851			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
1852			if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
1853				printk(KERN_INFO "binder: %d:%d %s node %d ls %d lw %d\n",
1854				       proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs);
1855			break;
1856		}
1857		case BC_ATTEMPT_ACQUIRE:
1858			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
1859			return -EINVAL;
1860		case BC_ACQUIRE_RESULT:
1861			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
1862			return -EINVAL;
1863
1864		case BC_FREE_BUFFER: {
1865			void __user *data_ptr;
1866			struct binder_buffer *buffer;
1867
1868			if (get_user(data_ptr, (void * __user *)ptr))
1869				return -EFAULT;
1870			ptr += sizeof(void *);
1871
1872			buffer = binder_buffer_lookup(proc, data_ptr);
1873			if (buffer == NULL) {
1874				binder_user_error("binder: %d:%d "
1875					"BC_FREE_BUFFER u%p no match\n",
1876					proc->pid, thread->pid, data_ptr);
1877				break;
1878			}
1879			if (!buffer->allow_user_free) {
1880				binder_user_error("binder: %d:%d "
1881					"BC_FREE_BUFFER u%p matched "
1882					"unreturned buffer\n",
1883					proc->pid, thread->pid, data_ptr);
1884				break;
1885			}
1886			if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER)
1887				printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
1888				       proc->pid, thread->pid, data_ptr, buffer->debug_id,
1889				       buffer->transaction ? "active" : "finished");
1890
1891			if (buffer->transaction) {
1892				buffer->transaction->buffer = NULL;
1893				buffer->transaction = NULL;
1894			}
1895			if (buffer->async_transaction && buffer->target_node) {
1896				BUG_ON(!buffer->target_node->has_async_transaction);
1897				if (list_empty(&buffer->target_node->async_todo))
1898					buffer->target_node->has_async_transaction = 0;
1899				else
1900					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
1901			}
1902			binder_transaction_buffer_release(proc, buffer, NULL);
1903			binder_free_buf(proc, buffer);
1904			break;
1905		}
1906
1907		case BC_TRANSACTION:
1908		case BC_REPLY: {
1909			struct binder_transaction_data tr;
1910
1911			if (copy_from_user(&tr, ptr, sizeof(tr)))
1912				return -EFAULT;
1913			ptr += sizeof(tr);
1914			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
1915			break;
1916		}
1917
1918		case BC_REGISTER_LOOPER:
1919			if (binder_debug_mask & BINDER_DEBUG_THREADS)
1920				printk(KERN_INFO "binder: %d:%d BC_REGISTER_LOOPER\n",
1921				       proc->pid, thread->pid);
1922			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
1923				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1924				binder_user_error("binder: %d:%d ERROR:"
1925					" BC_REGISTER_LOOPER called "
1926					"after BC_ENTER_LOOPER\n",
1927					proc->pid, thread->pid);
1928			} else if (proc->requested_threads == 0) {
1929				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1930				binder_user_error("binder: %d:%d ERROR:"
1931					" BC_REGISTER_LOOPER called "
1932					"without request\n",
1933					proc->pid, thread->pid);
1934			} else {
1935				proc->requested_threads--;
1936				proc->requested_threads_started++;
1937			}
1938			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
1939			break;
1940		case BC_ENTER_LOOPER:
1941			if (binder_debug_mask & BINDER_DEBUG_THREADS)
1942				printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n",
1943				       proc->pid, thread->pid);
1944			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
1945				thread->looper |= BINDER_LOOPER_STATE_INVALID;
1946				binder_user_error("binder: %d:%d ERROR:"
1947					" BC_ENTER_LOOPER called after "
1948					"BC_REGISTER_LOOPER\n",
1949					proc->pid, thread->pid);
1950			}
1951			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
1952			break;
1953		case BC_EXIT_LOOPER:
1954			if (binder_debug_mask & BINDER_DEBUG_THREADS)
1955				printk(KERN_INFO "binder: %d:%d BC_EXIT_LOOPER\n",
1956				       proc->pid, thread->pid);
1957			thread->looper |= BINDER_LOOPER_STATE_EXITED;
1958			break;
1959
1960		case BC_REQUEST_DEATH_NOTIFICATION:
1961		case BC_CLEAR_DEATH_NOTIFICATION: {
1962			uint32_t target;
1963			void __user *cookie;
1964			struct binder_ref *ref;
1965			struct binder_ref_death *death;
1966
1967			if (get_user(target, (uint32_t __user *)ptr))
1968				return -EFAULT;
1969			ptr += sizeof(uint32_t);
1970			if (get_user(cookie, (void __user * __user *)ptr))
1971				return -EFAULT;
1972			ptr += sizeof(void *);
1973			ref = binder_get_ref(proc, target);
1974			if (ref == NULL) {
1975				binder_user_error("binder: %d:%d %s "
1976					"invalid ref %d\n",
1977					proc->pid, thread->pid,
1978					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1979					"BC_REQUEST_DEATH_NOTIFICATION" :
1980					"BC_CLEAR_DEATH_NOTIFICATION",
1981					target);
1982				break;
1983			}
1984
1985			if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
1986				printk(KERN_INFO "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
1987				       proc->pid, thread->pid,
1988				       cmd == BC_REQUEST_DEATH_NOTIFICATION ?
1989				       "BC_REQUEST_DEATH_NOTIFICATION" :
1990				       "BC_CLEAR_DEATH_NOTIFICATION",
1991				       cookie, ref->debug_id, ref->desc,
1992				       ref->strong, ref->weak, ref->node->debug_id);
1993
1994			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
1995				if (ref->death) {
1996					binder_user_error("binder: %d:%"
1997						"d BC_REQUEST_DEATH_NOTI"
1998						"FICATION death notific"
1999						"ation already set\n",
2000						proc->pid, thread->pid);
2001					break;
2002				}
2003				death = kzalloc(sizeof(*death), GFP_KERNEL);
2004				if (death == NULL) {
2005					thread->return_error = BR_ERROR;
2006					if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION)
2007						printk(KERN_INFO "binder: %d:%d "
2008							"BC_REQUEST_DEATH_NOTIFICATION failed\n",
2009							proc->pid, thread->pid);
2010					break;
2011				}
2012				binder_stats.obj_created[BINDER_STAT_DEATH]++;
2013				INIT_LIST_HEAD(&death->work.entry);
2014				death->cookie = cookie;
2015				ref->death = death;
2016				if (ref->node->proc == NULL) {
2017					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2018					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2019						list_add_tail(&ref->death->work.entry, &thread->todo);
2020					} else {
2021						list_add_tail(&ref->death->work.entry, &proc->todo);
2022						wake_up_interruptible(&proc->wait);
2023					}
2024				}
2025			} else {
2026				if (ref->death == NULL) {
2027					binder_user_error("binder: %d:%"
2028						"d BC_CLEAR_DEATH_NOTIFI"
2029						"CATION death notificat"
2030						"ion not active\n",
2031						proc->pid, thread->pid);
2032					break;
2033				}
2034				death = ref->death;
2035				if (death->cookie != cookie) {
2036					binder_user_error("binder: %d:%"
2037						"d BC_CLEAR_DEATH_NOTIFI"
2038						"CATION death notificat"
2039						"ion cookie mismatch "
2040						"%p != %p\n",
2041						proc->pid, thread->pid,
2042						death->cookie, cookie);
2043					break;
2044				}
2045				ref->death = NULL;
2046				if (list_empty(&death->work.entry)) {
2047					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2048					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2049						list_add_tail(&death->work.entry, &thread->todo);
2050					} else {
2051						list_add_tail(&death->work.entry, &proc->todo);
2052						wake_up_interruptible(&proc->wait);
2053					}
2054				} else {
2055					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2056					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2057				}
2058			}
2059		} break;
2060		case BC_DEAD_BINDER_DONE: {
2061			struct binder_work *w;
2062			void __user *cookie;
2063			struct binder_ref_death *death = NULL;
2064			if (get_user(cookie, (void __user * __user *)ptr))
2065				return -EFAULT;
2066
2067			ptr += sizeof(void *);
2068			list_for_each_entry(w, &proc->delivered_death, entry) {
2069				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2070				if (tmp_death->cookie == cookie) {
2071					death = tmp_death;
2072					break;
2073				}
2074			}
2075			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
2076				printk(KERN_INFO "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
2077				       proc->pid, thread->pid, cookie, death);
2078			if (death == NULL) {
2079				binder_user_error("binder: %d:%d BC_DEAD"
2080					"_BINDER_DONE %p not found\n",
2081					proc->pid, thread->pid, cookie);
2082				break;
2083			}
2084
2085			list_del_init(&death->work.entry);
2086			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2087				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2088				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2089					list_add_tail(&death->work.entry, &thread->todo);
2090				} else {
2091					list_add_tail(&death->work.entry, &proc->todo);
2092					wake_up_interruptible(&proc->wait);
2093				}
2094			}
2095		} break;
2096
2097		default:
2098			printk(KERN_ERR "binder: %d:%d unknown command %d\n", proc->pid, thread->pid, cmd);
2099			return -EINVAL;
2100		}
2101		*consumed = ptr - buffer;
2102	}
2103	return 0;
2104}
2105
2106void
2107binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd)
2108{
2109	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
2110		binder_stats.br[_IOC_NR(cmd)]++;
2111		proc->stats.br[_IOC_NR(cmd)]++;
2112		thread->stats.br[_IOC_NR(cmd)]++;
2113	}
2114}
2115
2116static int
2117binder_has_proc_work(struct binder_proc *proc, struct binder_thread *thread)
2118{
2119	return !list_empty(&proc->todo) || (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2120}
2121
2122static int
2123binder_has_thread_work(struct binder_thread *thread)
2124{
2125	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
2126		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
2127}
2128
/*
 * Fill the user-supplied read buffer with BR_* return commands for this
 * thread: pending error codes, incoming transactions/replies, node
 * reference-count commands and death notifications.  Called with
 * binder_lock held; the lock is dropped only while blocking for work.
 * Returns 0 on success or a negative errno; *consumed is advanced past
 * the bytes written.
 */
static int
binder_thread_read(struct binder_proc *proc, struct binder_thread *thread,
	void  __user *buffer, int size, signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Start every fresh read with a BR_NOOP so user space can always
	 * consume a leading 32-bit command word. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* A thread with no transaction in flight and an empty private todo
	 * list may service the process-wide work queue. */
	wait_for_proc_work = thread->transaction_stack == NULL && list_empty(&thread->todo);

	/* Deliver queued error codes (up to two) before anything else. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	/* Drop the global lock while (possibly) sleeping for work. */
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: only one idle thread is woken per
			 * piece of process work. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items until the buffer fills or a transaction is
	 * delivered (each iteration handles one binder_work). */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		/* Thread-private work takes priority over process work. */
		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			/* Only the initial BR_NOOP was written: wait again
			 * rather than returning an empty read. */
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Need room for a command word plus a transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Delivered below, after the switch. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE)
				printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				       proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
		} break;
		case BINDER_WORK_NODE: {
			/* Reconcile the node's actual ref counts with what
			 * user space has been told, emitting at most one
			 * BR_INCREFS/ACQUIRE/RELEASE/DECREFS per pass. */
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				if (binder_debug_mask & BINDER_DEBUG_USER_REFS)
					printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n",
					       proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* Nothing to tell user space; drop the node
				 * entirely if no references remain. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
						printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n",
						       proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats.obj_deleted[BINDER_STAT_NODE]++;
				} else {
					if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS)
						printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged\n",
						       proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death = container_of(w, struct binder_ref_death, work);
			uint32_t cmd;
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION)
				printk(KERN_INFO "binder: %d:%d %s %p\n",
				       proc->pid, thread->pid,
				       cmd == BR_DEAD_BINDER ?
				       "BR_DEAD_BINDER" :
				       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				       death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats.obj_deleted[BINDER_STAT_DEATH]++;
			} else
				/* Keep the death pending until user space
				 * acknowledges with BC_DEAD_BINDER_DONE. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Deliver the transaction (or reply) to user space. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			/* Apply priority inheritance for synchronous calls. */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Report the sender's pid as seen from the reader's
			 * pid namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender, current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Translate the kernel buffer address into the receiver's
		 * user-space mapping of the same pages. */
		tr.data.ptr.buffer = (void *)t->buffer->data + proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		if (binder_debug_mask & BINDER_DEBUG_TRANSACTION)
			printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d"
				"size %zd-%zd ptr %p-%p\n",
			       proc->pid, thread->pid,
			       (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : "BR_REPLY",
			       t->debug_id, t->from ? t->from->proc->pid : 0,
			       t->from ? t->from->pid : 0, cmd,
			       t->buffer->data_size, t->buffer->offsets_size,
			       tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: keep it on our stack until the
			 * reply is sent. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: the transaction is finished. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++;
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* If no spare threads are left and the limit allows it, ask user
	 * space to spawn another looper (overwrites the leading BR_NOOP). */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		if (binder_debug_mask & BINDER_DEBUG_THREADS)
			printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n",
			       proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}
2415
2416static void binder_release_work(struct list_head *list)
2417{
2418	struct binder_work *w;
2419	while (!list_empty(list)) {
2420		w = list_first_entry(list, struct binder_work, entry);
2421		list_del_init(&w->entry);
2422		switch (w->type) {
2423		case BINDER_WORK_TRANSACTION: {
2424			struct binder_transaction *t = container_of(w, struct binder_transaction, work);
2425			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
2426				binder_send_failed_reply(t, BR_DEAD_REPLY);
2427		} break;
2428		case BINDER_WORK_TRANSACTION_COMPLETE: {
2429			kfree(w);
2430			binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++;
2431		} break;
2432		default:
2433			break;
2434		}
2435	}
2436
2437}
2438
2439static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2440{
2441	struct binder_thread *thread = NULL;
2442	struct rb_node *parent = NULL;
2443	struct rb_node **p = &proc->threads.rb_node;
2444
2445	while (*p) {
2446		parent = *p;
2447		thread = rb_entry(parent, struct binder_thread, rb_node);
2448
2449		if (current->pid < thread->pid)
2450			p = &(*p)->rb_left;
2451		else if (current->pid > thread->pid)
2452			p = &(*p)->rb_right;
2453		else
2454			break;
2455	}
2456	if (*p == NULL) {
2457		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
2458		if (thread == NULL)
2459			return NULL;
2460		binder_stats.obj_created[BINDER_STAT_THREAD]++;
2461		thread->proc = proc;
2462		thread->pid = current->pid;
2463		init_waitqueue_head(&thread->wait);
2464		INIT_LIST_HEAD(&thread->todo);
2465		rb_link_node(&thread->rb_node, parent, p);
2466		rb_insert_color(&thread->rb_node, &proc->threads);
2467		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2468		thread->return_error = BR_OK;
2469		thread->return_error2 = BR_OK;
2470	}
2471	return thread;
2472}
2473
/*
 * Remove a thread from its process and unwind its transaction stack.
 * Incoming transactions (to_thread == thread) are detached from this
 * thread and from their buffer; outgoing ones just lose their sender.
 * If the top-of-stack transaction was addressed to this thread, its
 * sender receives a BR_DEAD_REPLY.  Returns the number of transactions
 * that were still active.  Called with binder_lock held.
 */
static int binder_free_thread(struct binder_proc *proc, struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the top of the stack can be an in-flight incoming call
	 * whose sender is still blocked on our reply. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		if (binder_debug_mask & BINDER_DEBUG_DEAD_TRANSACTION)
			printk(KERN_INFO "binder: release %d:%d transaction %d %s, still active\n",
			       proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out");
		if (t->to_thread == thread) {
			/* Incoming: sever the target side and the buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing: sever the sender side. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();	/* stack entry not linked to this thread */
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats.obj_deleted[BINDER_STAT_THREAD]++;
	return active_transactions;
}
2510
2511static unsigned int binder_poll(struct file *filp, struct poll_table_struct *wait)
2512{
2513	struct binder_proc *proc = filp->private_data;
2514	struct binder_thread *thread = NULL;
2515	int wait_for_proc_work;
2516
2517	mutex_lock(&binder_lock);
2518	thread = binder_get_thread(proc);
2519
2520	wait_for_proc_work = thread->transaction_stack == NULL &&
2521		list_empty(&thread->todo) && thread->return_error == BR_OK;
2522	mutex_unlock(&binder_lock);
2523
2524	if (wait_for_proc_work) {
2525		if (binder_has_proc_work(proc, thread))
2526			return POLLIN;
2527		poll_wait(filp, &proc->wait, wait);
2528		if (binder_has_proc_work(proc, thread))
2529			return POLLIN;
2530	} else {
2531		if (binder_has_thread_work(thread))
2532			return POLLIN;
2533		poll_wait(filp, &thread->wait, wait);
2534		if (binder_has_thread_work(thread))
2535			return POLLIN;
2536	}
2537	return 0;
2538}
2539
/*
 * ioctl() entry point.  All commands execute under the single global
 * binder_lock.  While user-space error handling is suspended
 * (binder_stop_on_user_error >= 2) the caller blocks both before and
 * after the command.  Returns 0 on success or a negative errno.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	/* Find or create the per-thread state for the caller. */
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		/* Combined write-then-read of the binder command streams. */
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
			printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			       proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer);
		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				/* Tell user space how much of the write was
				 * consumed before failing. */
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			/* Work may remain on the process queue; wake another
			 * idle thread to pick it up. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (binder_debug_mask & BINDER_DEBUG_READ_WRITE)
			printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			       proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): returns -EINVAL rather than -EFAULT on a bad
		 * user pointer; presumably kept for historical user-space
		 * compatibility — confirm before changing. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Register the caller as the context manager.  Only one node
		 * may ever hold the role, and once an euid has claimed it,
		 * only that euid may claim it again. */
		if (binder_context_mgr_node != NULL) {
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {
			if (binder_context_mgr_uid != current->cred->euid) {
				printk(KERN_ERR "binder: BINDER_SET_"
				       "CONTEXT_MGR bad uid %d != %d\n",
				       current->cred->euid,
				       binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the node with local refs so it cannot go away. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		if (binder_debug_mask & BINDER_DEBUG_THREADS)
			printk(KERN_INFO "binder: %d:%d exit\n",
			       proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;	/* freed; must not be touched at err: */
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		/* NOTE(review): -EINVAL on put_user failure, not -EFAULT. */
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}
2667
2668static void binder_vma_open(struct vm_area_struct *vma)
2669{
2670	struct binder_proc *proc = vma->vm_private_data;
2671	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2672		printk(KERN_INFO
2673			"binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2674			proc->pid, vma->vm_start, vma->vm_end,
2675			(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2676			(unsigned long)pgprot_val(vma->vm_page_prot));
2677	dump_stack();
2678}
2679
2680static void binder_release_files(struct work_struct *work)
2681{
2682	struct binder_proc *proc;
2683	struct files_struct *files;
2684	do {
2685		mutex_lock(&binder_lock);
2686		mutex_lock(&binder_release_files_lock);
2687		if (!hlist_empty(&binder_release_files_list)) {
2688			proc = hlist_entry(binder_release_files_list.first,
2689					struct binder_proc, release_files_node);
2690			hlist_del_init(&proc->release_files_node);
2691			files = proc->files;
2692			if (files)
2693				proc->files = NULL;
2694		} else {
2695			proc = NULL;
2696			files = NULL;
2697		}
2698		mutex_unlock(&binder_release_files_lock);
2699		mutex_unlock(&binder_lock);
2700		if (files)
2701			put_files_struct(files);
2702	} while (proc);
2703}
2704
/* Work item running binder_release_files(), scheduled from
 * binder_vma_close() to drop files_struct references asynchronously. */
static DECLARE_WORK(binder_release_files_work, binder_release_files);
2706
2707static void binder_vma_close(struct vm_area_struct *vma)
2708{
2709	struct binder_proc *proc = vma->vm_private_data;
2710	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2711		printk(KERN_INFO
2712			"binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
2713			proc->pid, vma->vm_start, vma->vm_end,
2714			(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
2715			(unsigned long)pgprot_val(vma->vm_page_prot));
2716	proc->vma = NULL;
2717	mutex_lock(&binder_release_files_lock);
2718	if (proc->files) {
2719		hlist_add_head(&proc->release_files_node,
2720				&binder_release_files_list);
2721		schedule_work(&binder_release_files_work);
2722	}
2723	mutex_unlock(&binder_release_files_lock);
2724}
2725
/* VMA callbacks for the binder buffer mapping.  No fault handler is
 * installed; pages are populated explicitly via
 * binder_update_page_range(). */
static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};
2730
/*
 * mmap() handler: set up the per-process binder buffer.  A kernel
 * vmalloc-space area of the same size aliases the user mapping;
 * user_buffer_offset converts between the two address ranges.  Only
 * the first page is populated here — the rest is filled in by
 * binder_update_page_range() as buffers are allocated.  One mapping
 * per process; user space gets it read-only.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	/* Cap the mapping size at 4MB. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
		printk(KERN_INFO
			"binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
			proc->pid, vma->vm_start, vma->vm_end,
			(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
			(unsigned long)pgprot_val(vma->vm_page_prot));

	/* Reject writable mappings; the buffer is kernel-written only. */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/* Reserve the matching kernel-side virtual range. */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On VIPT aliasing caches the user and kernel mappings must share
	 * a cache colour; shift the user start until they line up. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	/* One struct page pointer per mappable page. */
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Populate just the first page and seed the free-buffer list with
	 * one buffer spanning the whole area. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Asynchronous transactions may use at most half the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
2820
/*
 * open() handler: allocate a binder_proc for the opening process,
 * register it on the global binder_procs list, and publish a /proc
 * read entry for it.  The proc is keyed by the group leader's pid,
 * so every open is attributed to the whole process.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
		printk(KERN_INFO "binder_open: %d:%d\n", current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a reference on the task for as long as proc->tsk lives. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	mutex_lock(&binder_lock);
	binder_stats.obj_created[BINDER_STAT_PROC]++;
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/* An earlier process with a recycled pid may have left a
		 * stale entry behind; remove it before creating ours. */
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
		create_proc_read_entry(strbuf, S_IRUGO, binder_proc_dir_entry_proc, binder_read_proc_proc, proc);
	}

	return 0;
}
2853
2854static int binder_flush(struct file *filp, fl_owner_t id)
2855{
2856	struct rb_node *n;
2857	struct binder_proc *proc = filp->private_data;
2858	int wake_count = 0;
2859
2860	mutex_lock(&binder_lock);
2861	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2862		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
2863		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
2864		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
2865			wake_up_interruptible(&thread->wait);
2866			wake_count++;
2867		}
2868	}
2869	wake_up_interruptible_all(&proc->wait);
2870	mutex_unlock(&binder_lock);
2871
2872	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
2873		printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count);
2874
2875	return 0;
2876}
2877
/*
 * binder_release() - tear down a binder_proc on final close of its file.
 *
 * Removes the per-process /proc entry, unhooks the proc from the global
 * lists, frees every thread, node, ref and buffer it still owns, then
 * releases the backing pages and the proc itself.  Nodes that still have
 * remote references become "dead nodes": they move to binder_dead_nodes
 * and a BINDER_WORK_DEAD_BINDER item is queued for every ref that
 * registered a death notification.
 */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct hlist_node *pos;
	struct binder_transaction *t;
	struct rb_node *n;
	struct files_struct *files;
	struct binder_proc *proc = filp->private_data;
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	/* Drop the /proc/binder/proc/<pid> entry before tearing down state. */
	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
	}
	mutex_lock(&binder_lock);
	/*
	 * Detach from the deferred-release list and take ownership of the
	 * files_struct; it is only put at the very end of this function,
	 * after binder_lock has been dropped.
	 */
	mutex_lock(&binder_release_files_lock);
	if (!hlist_unhashed(&proc->release_files_node))
		hlist_del(&proc->release_files_node);
	files = proc->files;
	if (files)
		proc->files = NULL;
	mutex_unlock(&binder_release_files_lock);

	hlist_del(&proc->proc_node);
	/* If this proc hosted the context manager, the manager node is gone. */
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
			printk(KERN_INFO "binder_release: %d context_mgr_node gone\n", proc->pid);
		binder_context_mgr_node = NULL;
	}

	/* Free all threads; count transactions that were still in flight. */
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		list_del_init(&node->work.entry);
		if (hlist_empty(&node->refs)) {
			/* No remote references: the node can be freed outright. */
			kfree(node);
			binder_stats.obj_deleted[BINDER_STAT_NODE]++;
		} else {
			struct binder_ref *ref;
			int death = 0;

			/*
			 * Remote refs remain: orphan the node onto the global
			 * dead-node list and queue a death notification for
			 * every ref that asked for one.
			 */
			node->proc = NULL;
			node->local_strong_refs = 0;
			node->local_weak_refs = 0;
			hlist_add_head(&node->dead_node, &binder_dead_nodes);

			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
				incoming_refs++;
				if (ref->death) {
					death++;
					if (list_empty(&ref->death->work.entry)) {
						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
						wake_up_interruptible(&ref->proc->wait);
					} else
						BUG();
				}
			}
			if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER)
				printk(KERN_INFO "binder: node %d now dead, refs %d, death %d\n", node->debug_id, incoming_refs, death);
		}
	}
	/* Drop every outgoing reference this proc held on other nodes. */
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}
	binder_release_work(&proc->todo);
	buffers = 0;

	/* Free buffers that were never returned via a free-buffer command. */
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, rb_node);
		t = buffer->transaction;
		if (t) {
			/* Unlink the transaction so it cannot touch the freed buffer. */
			t->buffer = NULL;
			buffer->transaction = NULL;
			printk(KERN_ERR "binder: release proc %d, transaction %d, not freed\n", proc->pid, t->debug_id);
			/*BUG();*/
		}
		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats.obj_deleted[BINDER_STAT_PROC]++;
	mutex_unlock(&binder_lock);

	/* Release the pages backing the mmap'ed buffer area, if mapped. */
	page_count = 0;
	if (proc->pages) {
		int i;
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			if (proc->pages[i]) {
				if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC)
					printk(KERN_INFO "binder_release: %d: page %d at %p not freed\n", proc->pid, i, proc->buffer + i * PAGE_SIZE);
				__free_page(proc->pages[i]);
				page_count++;
			}
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE)
		printk(KERN_INFO "binder_release: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		       proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
	/* files_struct is released only after binder_lock was dropped. */
	if (files)
		put_files_struct(files);
	return 0;
}
3002
3003static char *print_binder_transaction(char *buf, char *end, const char *prefix, struct binder_transaction *t)
3004{
3005	buf += snprintf(buf, end - buf, "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3006			prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0,
3007			t->from ? t->from->pid : 0,
3008			t->to_proc ? t->to_proc->pid : 0,
3009			t->to_thread ? t->to_thread->pid : 0,
3010			t->code, t->flags, t->priority, t->need_reply);
3011	if (buf >= end)
3012		return buf;
3013	if (t->buffer == NULL) {
3014		buf += snprintf(buf, end - buf, " buffer free\n");
3015		return buf;
3016	}
3017	if (t->buffer->target_node) {
3018		buf += snprintf(buf, end - buf, " node %d",
3019				t->buffer->target_node->debug_id);
3020		if (buf >= end)
3021			return buf;
3022	}
3023	buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n",
3024			t->buffer->data_size, t->buffer->offsets_size,
3025			t->buffer->data);
3026	return buf;
3027}
3028
3029static char *print_binder_buffer(char *buf, char *end, const char *prefix, struct binder_buffer *buffer)
3030{
3031	buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n",
3032			prefix, buffer->debug_id, buffer->data,
3033			buffer->data_size, buffer->offsets_size,
3034			buffer->transaction ? "active" : "delivered");
3035	return buf;
3036}
3037
3038static char *print_binder_work(char *buf, char *end, const char *prefix,
3039	const char *transaction_prefix, struct binder_work *w)
3040{
3041	struct binder_node *node;
3042	struct binder_transaction *t;
3043
3044	switch (w->type) {
3045	case BINDER_WORK_TRANSACTION:
3046		t = container_of(w, struct binder_transaction, work);
3047		buf = print_binder_transaction(buf, end, transaction_prefix, t);
3048		break;
3049	case BINDER_WORK_TRANSACTION_COMPLETE:
3050		buf += snprintf(buf, end - buf,
3051				"%stransaction complete\n", prefix);
3052		break;
3053	case BINDER_WORK_NODE:
3054		node = container_of(w, struct binder_node, work);
3055		buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
3056				prefix, node->debug_id, node->ptr, node->cookie);
3057		break;
3058	case BINDER_WORK_DEAD_BINDER:
3059		buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
3060		break;
3061	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3062		buf += snprintf(buf, end - buf,
3063				"%shas cleared dead binder\n", prefix);
3064		break;
3065	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
3066		buf += snprintf(buf, end - buf,
3067				"%shas cleared death notification\n", prefix);
3068		break;
3069	default:
3070		buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
3071				prefix, w->type);
3072		break;
3073	}
3074	return buf;
3075}
3076
/*
 * Append a description of one thread to buf: a header line, the thread's
 * transaction stack (labelled outgoing/incoming/bad depending on which
 * side of each transaction the thread is on) and its pending todo work.
 * If print_always is zero and nothing was printed beyond the header,
 * buf is rewound to start_buf so the empty thread is omitted entirely.
 */
static char *print_binder_thread(char *buf, char *end, struct binder_thread *thread, int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	char *start_buf = buf;	/* rewind point if the thread prints nothing */
	char *header_buf;	/* position right after the header line */

	buf += snprintf(buf, end - buf, "  thread %d: l %02x\n", thread->pid, thread->looper);
	header_buf = buf;
	t = thread->transaction_stack;
	while (t) {
		if (buf >= end)
			break;
		if (t->from == thread) {
			buf = print_binder_transaction(buf, end, "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			buf = print_binder_transaction(buf, end, "    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* Thread on neither side: stack inconsistent, stop walking. */
			buf = print_binder_transaction(buf, end, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending transaction", w);
	}
	if (!print_always && buf == header_buf)
		buf = start_buf;
	return buf;
}
3111
/*
 * Append one node's state to buf: debug id, user-space pointers, the
 * strong/weak reference bookkeeping, the pid of every proc holding a ref
 * on it, and any queued async work.
 */
static char *print_binder_node(char *buf, char *end, struct binder_node *node)
{
	struct binder_ref *ref;
	struct hlist_node *pos;
	struct binder_work *w;
	int count;
	/* Count remote references up front; printed as the "iw" field. */
	count = 0;
	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
		count++;

	buf += snprintf(buf, end - buf, "  node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
			node->debug_id, node->ptr, node->cookie,
			node->has_strong_ref, node->has_weak_ref,
			node->local_strong_refs, node->local_weak_refs,
			node->internal_strong_refs, count);
	if (buf >= end)
		return buf;
	if (count) {
		/* Second pass: list the pid of each proc referencing the node. */
		buf += snprintf(buf, end - buf, " proc");
		if (buf >= end)
			return buf;
		hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
			buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
			if (buf >= end)
				return buf;
		}
	}
	buf += snprintf(buf, end - buf, "\n");
	list_for_each_entry(w, &node->async_todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending async transaction", w);
	}
	return buf;
}
3148
3149static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
3150{
3151	buf += snprintf(buf, end - buf, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
3152			ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3153			ref->node->debug_id, ref->strong, ref->weak, ref->death);
3154	return buf;
3155}
3156
/*
 * Append a description of one proc to buf: its threads, nodes (all of
 * them when print_all, otherwise only those with async transactions),
 * optionally its refs, its allocated buffers, pending todo work, and a
 * marker if any delivered death notification is outstanding.  If
 * print_all is zero and nothing was printed beyond the "proc %d" header,
 * buf is rewound to start_buf so the proc is omitted entirely.
 */
static char *print_binder_proc(char *buf, char *end, struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	char *start_buf = buf;	/* rewind point if the proc prints nothing */
	char *header_buf;	/* position right after the header line */

	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
	header_buf = buf;

	for (n = rb_first(&proc->threads); n != NULL && buf < end; n = rb_next(n))
		buf = print_binder_thread(buf, end, rb_entry(n, struct binder_thread, rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL && buf < end; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
		if (print_all || node->has_async_transaction)
			buf = print_binder_node(buf, end, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc); n != NULL && buf < end; n = rb_next(n))
			buf = print_binder_ref(buf, end, rb_entry(n, struct binder_ref, rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL && buf < end; n = rb_next(n))
		buf = print_binder_buffer(buf, end, "  buffer", rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "  ",
					"  pending transaction", w);
	}
	/* Only one summary line regardless of how many deaths are queued. */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		if (buf >= end)
			break;
		buf += snprintf(buf, end - buf, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && buf == header_buf)
		buf = start_buf;
	return buf;
}
3196
/*
 * Names for the BR_* return commands, indexed by command ordinal.  The
 * order must match the stats->br array; print_binder_stats() enforces
 * matching sizes with a BUILD_BUG_ON.
 */
static const char *binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};
3217
/*
 * Names for the BC_* user-space commands, indexed by command ordinal.
 * The order must match the stats->bc array; print_binder_stats()
 * enforces matching sizes with a BUILD_BUG_ON.
 */
static const char *binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};
3237
/*
 * Names for the BINDER_STAT_* object classes, indexed to match the
 * stats->obj_created / stats->obj_deleted arrays; print_binder_stats()
 * enforces matching sizes with BUILD_BUG_ONs.
 */
static const char *binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
3247
/*
 * Append non-zero BC_/BR_ command counters and object created/deleted
 * statistics from *stats to buf, each line prefixed by "prefix".  The
 * BUILD_BUG_ONs keep the name tables in sync with the stats array sizes
 * at compile time.  Returns the advanced buffer position; stops early
 * when buf reaches end.
 */
static char *print_binder_stats(char *buf, char *end, const char *prefix, struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
					binder_command_strings[i], stats->bc[i]);
		if (buf >= end)
			return buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
					binder_return_strings[i], stats->br[i]);
		if (buf >= end)
			return buf;
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		/* "active" is created minus deleted; "total" is ever created. */
		if (stats->obj_created[i] || stats->obj_deleted[i])
			buf += snprintf(buf, end - buf, "%s%s: active %d total %d\n", prefix,
					binder_objstat_strings[i],
					stats->obj_created[i] - stats->obj_deleted[i],
					stats->obj_created[i]);
		if (buf >= end)
			return buf;
	}
	return buf;
}
3283
/*
 * Append per-proc summary statistics to buf: thread/node/ref/buffer
 * counts, thread-pool configuration, pending transaction count and the
 * proc's own command/object counters (via print_binder_stats).  Returns
 * early whenever buf reaches end.
 */
static char *print_binder_proc_stats(char *buf, char *end, struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
	if (buf >= end)
		return buf;
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  threads: %d\n", count);
	if (buf >= end)
		return buf;
	buf += snprintf(buf, end - buf, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	if (buf >= end)
		return buf;
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  nodes: %d\n", count);
	if (buf >= end)
		return buf;
	/* Sum strong/weak counts across all outgoing references. */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	buf += snprintf(buf, end - buf, "  refs: %d s %d w %d\n", count, strong, weak);
	if (buf >= end)
		return buf;

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	buf += snprintf(buf, end - buf, "  buffers: %d\n", count);
	if (buf >= end)
		return buf;

	/* Only BINDER_WORK_TRANSACTION items count as pending transactions. */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	buf += snprintf(buf, end - buf, "  pending transactions: %d\n", count);
	if (buf >= end)
		return buf;

	buf = print_binder_stats(buf, end, "  ", &proc->stats);

	return buf;
}
3350
3351
/*
 * /proc/binder/state read_proc handler: dumps all dead nodes followed by
 * the full state of every binder_proc.  Only the first read (off == 0)
 * produces data; binder_lock is skipped when the binder_debug_no_lock
 * knob is set.
 */
static int binder_read_proc_state(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	struct binder_node *node;
	int len = 0;
	char *buf = page;
	char *end = page + PAGE_SIZE;
	int do_lock = !binder_debug_no_lock;

	/* Output fits in one page; any follow-up read gets EOF. */
	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);

	buf += snprintf(buf, end - buf, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		buf += snprintf(buf, end - buf, "dead nodes:\n");
	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) {
		if (buf >= end)
			break;
		buf = print_binder_node(buf, end, node);
	}

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
		if (buf >= end)
			break;
		buf = print_binder_proc(buf, end, proc, 1);
	}
	if (do_lock)
		mutex_unlock(&binder_lock);
	/* The printers may advance buf past end on truncation; clamp it. */
	if (buf > page + PAGE_SIZE)
		buf = page + PAGE_SIZE;

	*start = page + off;

	len = buf - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3399
/*
 * /proc/binder/stats read_proc handler: dumps the global binder_stats
 * counters followed by per-proc statistics.  Only the first read
 * (off == 0) produces data.
 */
static int binder_read_proc_stats(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int len = 0;
	char *p = page;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);

	/* p == page here, so PAGE_SIZE is the full space available. */
	p += snprintf(p, PAGE_SIZE, "binder stats:\n");

	p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
		if (p >= page + PAGE_SIZE)
			break;
		p = print_binder_proc_stats(p, page + PAGE_SIZE, proc);
	}
	if (do_lock)
		mutex_unlock(&binder_lock);
	/* The printers may advance p past the end on truncation; clamp it. */
	if (p > page + PAGE_SIZE)
		p = page + PAGE_SIZE;

	*start = page + off;

	len = p - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3439
/*
 * /proc/binder/transactions read_proc handler: dumps every proc with
 * print_all == 0, so only threads/nodes with actual activity are shown.
 * Only the first read (off == 0) produces data.
 */
static int binder_read_proc_transactions(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int len = 0;
	char *buf = page;
	char *end = page + PAGE_SIZE;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);

	buf += snprintf(buf, end - buf, "binder transactions:\n");
	hlist_for_each_entry(proc, pos, &binder_procs, proc_node) {
		if (buf >= end)
			break;
		buf = print_binder_proc(buf, end, proc, 0);
	}
	if (do_lock)
		mutex_unlock(&binder_lock);
	/* Clamp any overshoot from truncated snprintf arithmetic. */
	if (buf > page + PAGE_SIZE)
		buf = page + PAGE_SIZE;

	*start = page + off;

	len = buf - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3477
/*
 * /proc/binder/proc/<pid> read_proc handler: dumps the full state of the
 * single binder_proc passed in via *data (registered from binder_open).
 * Only the first read (off == 0) produces data.
 */
static int binder_read_proc_proc(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_proc *proc = data;
	int len = 0;
	char *p = page;
	int do_lock = !binder_debug_no_lock;

	if (off)
		return 0;

	if (do_lock)
		mutex_lock(&binder_lock);
	/* p == page here, so PAGE_SIZE is the full space available. */
	p += snprintf(p, PAGE_SIZE, "binder proc state:\n");
	p = print_binder_proc(p, page + PAGE_SIZE, proc, 1);
	if (do_lock)
		mutex_unlock(&binder_lock);

	/* Clamp any overshoot from truncated snprintf arithmetic. */
	if (p > page + PAGE_SIZE)
		p = page + PAGE_SIZE;
	*start = page + off;

	len = p - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3508
3509static char *print_binder_transaction_log_entry(char *buf, char *end, struct binder_transaction_log_entry *e)
3510{
3511	buf += snprintf(buf, end - buf, "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
3512			e->debug_id, (e->call_type == 2) ? "reply" :
3513			((e->call_type == 1) ? "async" : "call "), e->from_proc,
3514			e->from_thread, e->to_proc, e->to_thread, e->to_node,
3515			e->target_handle, e->data_size, e->offsets_size);
3516	return buf;
3517}
3518
/*
 * read_proc handler for the transaction_log and failed_transaction_log
 * entries; *data selects which binder_transaction_log to dump.  The log
 * is a circular buffer: when it has wrapped (log->full), the oldest
 * entries (log->next .. end) are printed first, then 0 .. log->next.
 * Only the first read (off == 0) produces data.
 */
static int binder_read_proc_transaction_log(
	char *page, char **start, off_t off, int count, int *eof, void *data)
{
	struct binder_transaction_log *log = data;
	int len = 0;
	int i;
	char *buf = page;
	char *end = page + PAGE_SIZE;

	if (off)
		return 0;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++) {
			if (buf >= end)
				break;
			buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]);
		}
	}
	for (i = 0; i < log->next; i++) {
		if (buf >= end)
			break;
		buf = print_binder_transaction_log_entry(buf, end, &log->entry[i]);
	}

	*start = page + off;

	len = buf - page;
	if (len > off)
		len -= off;
	else
		len = 0;

	return len < count ? len  : count;
}
3554
3555static struct file_operations binder_fops = {
3556	.owner = THIS_MODULE,
3557	.poll = binder_poll,
3558	.unlocked_ioctl = binder_ioctl,
3559	.mmap = binder_mmap,
3560	.open = binder_open,
3561	.flush = binder_flush,
3562	.release = binder_release,
3563};
3564
/* Misc character device: dynamic minor, appears as /dev/binder. */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};
3570
3571static int __init binder_init(void)
3572{
3573	int ret;
3574
3575	binder_proc_dir_entry_root = proc_mkdir("binder", NULL);
3576	if (binder_proc_dir_entry_root)
3577		binder_proc_dir_entry_proc = proc_mkdir("proc", binder_proc_dir_entry_root);
3578	ret = misc_register(&binder_miscdev);
3579	if (binder_proc_dir_entry_root) {
3580		create_proc_read_entry("state", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_state, NULL);
3581		create_proc_read_entry("stats", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_stats, NULL);
3582		create_proc_read_entry("transactions", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transactions, NULL);
3583		create_proc_read_entry("transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log);
3584		create_proc_read_entry("failed_transaction_log", S_IRUGO, binder_proc_dir_entry_root, binder_read_proc_transaction_log, &binder_transaction_log_failed);
3585	}
3586	return ret;
3587}
3588
/* Registered as a plain initcall; no module_exit is provided in this
 * file, so the driver is not designed to be unloaded. */
device_initcall(binder_init);

MODULE_LICENSE("GPL v2");
3592