binder.c revision 16b665543864904714f028b1d349f5d905f39afb
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "binder.h"

/* Single big lock serializing all binder state; deferred work has its own. */
static DEFINE_MUTEX(binder_lock);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_procs);         /* all live binder_proc instances */
static HLIST_HEAD(binder_deferred_list); /* procs with pending deferred work */
static HLIST_HEAD(binder_dead_nodes);    /* nodes whose owning proc has died */

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* The context manager (servicemanager): only one process may claim it. */
static struct binder_node *binder_context_mgr_node;
static uid_t binder_context_mgr_uid = -1;
/* Monotonic debug id counter shared by nodes, refs and transactions. */
static int binder_last_id;

/*
 * Generate the open() handler and file_operations for a single-record
 * debugfs file backed by a binder_##name##_show() seq_file callback.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* The binder buffer is mapped read-only into userspace. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bit flags selecting which debug messages binder_debug() emits. */
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* When set, the debugfs "state"/"stats" dumps skip taking binder_lock. */
static int binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 1 = halt threads on next user error; 2 = currently halted (see macro). */
static int binder_stop_on_user_error;

/*
 * Module-param setter for stop_on_user_error: stores the value via
 * param_set_int and wakes threads parked on binder_user_error_wait
 * when the new value no longer requests a halt (< 2).
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* printk gated on binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			printk(KERN_INFO x); \
	} while (0)

/* Report a userspace protocol error; optionally latch the stop flag. */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Object classes whose create/delete counts are tracked in binder_stats. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	/* per-command counters, indexed by the ioctl nr of BR_*/BC_* codes */
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

/* One entry of the in-memory transaction trace ring. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;  /* next slot to overwrite */
	int full;  /* set once the ring has wrapped */
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/*
 * Claim the next slot in @log (zeroed), advancing and wrapping the
 * cursor.  Caller fills in the returned entry.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

/* Work item queued on a proc/thread todo list; type says how to handle it. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* Kernel-side identity of a binder object living in one process. */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;      /* linked in proc->nodes while alive */
		struct hlist_node dead_node; /* on binder_dead_nodes after proc exit */
	};
	struct binder_proc *proc;
	struct hlist_head refs;              /* all binder_refs pointing at us */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;                    /* userspace object pointer (key) */
	void __user *cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;         /* queued one-way transactions */
};

/* Pending death notification tied to a ref; cookie identifies it to user. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;  /* in proc->refs_by_desc, keyed by desc */
	struct rb_node rb_node_node;  /* in proc->refs_by_node, keyed by node */
	struct hlist_node node_entry; /* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;                /* userspace handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;
};

/* A sub-range of the proc's mmapped transaction buffer. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0]; /* payload; userspace sees it at user_buffer_offset */
};

/* Work postponed to binder_deferred_func (cannot run in caller context). */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/* Per-process binder state, created on open(). */
struct binder_proc {
	struct hlist_node proc_node;     /* in binder_procs */
	struct rb_root threads;          /* binder_thread, keyed by pid */
	struct rb_root nodes;            /* binder_node, keyed by user ptr */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;      /* the userspace mapping, if any */
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;                    /* kernel address of mapped area */
	/* add to a kernel address to get the matching user address */
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;        /* all buffers, ordered by address */
	struct rb_root free_buffers;     /* keyed by size (best fit) */
	struct rb_root allocated_buffers;/* keyed by address */
	size_t free_async_space;         /* budget for one-way transactions */

	struct page **pages;             /* physical pages backing buffer */
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

/* binder_thread->looper state bits. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread binder state, keyed by pid within a binder_proc. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;   /* in proc->threads */
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* One in-flight transaction (call or reply). */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */       /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	uid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/*
 * copied from get_unused_fd_flags
 *
 * Reserve an unused fd slot in the *target* proc's file table (not
 * current's), honoring its RLIMIT_NOFILE.  Returns the fd or -errno.
 */
int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set?  */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

/*
 * copied from fd_install
 *
 * Install @file at the already-reserved @fd in the target proc's table.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	struct files_struct *files = proc->files;
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

/*
 * copied from __put_unused_fd in open.c
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

/*
 * copied from sys_close
 *
 * Close @fd in the target proc's file table.  Returns filp_close()'s
 * result (restart errors collapsed to -EINTR), or -EBADF/-ESRCH.
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = proc->files;
	struct fdtable *fdt;
	int retval;

	if (files == NULL)
		return -ESRCH;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

/*
 * Set current's nice value for the duration of a transaction, clamping
 * to the most-favorable value RLIMIT_NICE allows when @nice itself is
 * not permitted.
 */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "binder: %d: nice value %ld not allowed use "
		     "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}

/*
 * Usable size of @buffer: distance from its data[] to the next buffer
 * in the proc's address-ordered list (or to the end of the mapping).
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

/* Insert a free buffer into proc->free_buffers, ordered by size. */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: add free buffer, size %zd, "
		     "at %p\n", proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* Insert an in-use buffer into proc->allocated_buffers, keyed by address. */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/*
 * Translate a userspace data pointer back to its binder_buffer (via
 * user_buffer_offset and the data[] offset) and look it up in the
 * allocated tree.  Returns NULL if the pointer names no live buffer.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/*
 * Allocate (@allocate != 0) or free physical pages for [start, end) of
 * the proc's buffer, mapping each page into both the kernel and the
 * user vma.  @vma may be NULL, in which case proc->vma is used under
 * the task's mmap_sem.  On allocation failure, already-mapped pages in
 * the range are torn down via the fall-through error labels inside the
 * loop.  Returns 0 or -ENOMEM.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/*
	 * Walk backwards so that, on the error path, the labels below
	 * undo exactly the steps that succeeded for each page.
	 */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/*
 * Best-fit allocate a buffer of @data_size + @offsets_size (each
 * pointer-aligned) from the proc's free tree; async allocations are
 * additionally charged against free_async_space.  Returns NULL on any
 * failure (no vma, overflow, no space).
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* alignment overflow check */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: binder_alloc_buf size %zd"
			     "failed, no async space left\n", proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	/* n == NULL means no exact fit; recompute size of the best fit */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}
binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 768 "binder: %d: binder_alloc_buf size %zd got buff" 769 "er %p size %zd\n", proc->pid, size, buffer, buffer_size); 770 771 has_page_addr = 772 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 773 if (n == NULL) { 774 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 775 buffer_size = size; /* no room for other buffers */ 776 else 777 buffer_size = size + sizeof(struct binder_buffer); 778 } 779 end_page_addr = 780 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 781 if (end_page_addr > has_page_addr) 782 end_page_addr = has_page_addr; 783 if (binder_update_page_range(proc, 1, 784 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 785 return NULL; 786 787 rb_erase(best_fit, &proc->free_buffers); 788 buffer->free = 0; 789 binder_insert_allocated_buffer(proc, buffer); 790 if (buffer_size != size) { 791 struct binder_buffer *new_buffer = (void *)buffer->data + size; 792 list_add(&new_buffer->entry, &buffer->entry); 793 new_buffer->free = 1; 794 binder_insert_free_buffer(proc, new_buffer); 795 } 796 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 797 "binder: %d: binder_alloc_buf size %zd got " 798 "%p\n", proc->pid, size, buffer); 799 buffer->data_size = data_size; 800 buffer->offsets_size = offsets_size; 801 buffer->async_transaction = is_async; 802 if (is_async) { 803 proc->free_async_space -= size + sizeof(struct binder_buffer); 804 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 805 "binder: %d: binder_alloc_buf size %zd " 806 "async free %zd\n", proc->pid, size, 807 proc->free_async_space); 808 } 809 810 return buffer; 811} 812 813static void *buffer_start_page(struct binder_buffer *buffer) 814{ 815 return (void *)((uintptr_t)buffer & PAGE_MASK); 816} 817 818static void *buffer_end_page(struct binder_buffer *buffer) 819{ 820 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 821} 822 823static void binder_delete_free_buffer(struct binder_proc *proc, 824 struct binder_buffer 
*buffer) 825{ 826 struct binder_buffer *prev, *next = NULL; 827 int free_page_end = 1; 828 int free_page_start = 1; 829 830 BUG_ON(proc->buffers.next == &buffer->entry); 831 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 832 BUG_ON(!prev->free); 833 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 834 free_page_start = 0; 835 if (buffer_end_page(prev) == buffer_end_page(buffer)) 836 free_page_end = 0; 837 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 838 "binder: %d: merge free, buffer %p " 839 "share page with %p\n", proc->pid, buffer, prev); 840 } 841 842 if (!list_is_last(&buffer->entry, &proc->buffers)) { 843 next = list_entry(buffer->entry.next, 844 struct binder_buffer, entry); 845 if (buffer_start_page(next) == buffer_end_page(buffer)) { 846 free_page_end = 0; 847 if (buffer_start_page(next) == 848 buffer_start_page(buffer)) 849 free_page_start = 0; 850 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 851 "binder: %d: merge free, buffer" 852 " %p share page with %p\n", proc->pid, 853 buffer, prev); 854 } 855 } 856 list_del(&buffer->entry); 857 if (free_page_start || free_page_end) { 858 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 859 "binder: %d: merge free, buffer %p do " 860 "not share page%s%s with with %p or %p\n", 861 proc->pid, buffer, free_page_start ? "" : " end", 862 free_page_end ? "" : " start", prev, next); 863 binder_update_page_range(proc, 0, free_page_start ? 864 buffer_start_page(buffer) : buffer_end_page(buffer), 865 (free_page_end ? 
buffer_end_page(buffer) : 866 buffer_start_page(buffer)) + PAGE_SIZE, NULL); 867 } 868} 869 870static void binder_free_buf(struct binder_proc *proc, 871 struct binder_buffer *buffer) 872{ 873 size_t size, buffer_size; 874 875 buffer_size = binder_buffer_size(proc, buffer); 876 877 size = ALIGN(buffer->data_size, sizeof(void *)) + 878 ALIGN(buffer->offsets_size, sizeof(void *)); 879 880 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 881 "binder: %d: binder_free_buf %p size %zd buffer" 882 "_size %zd\n", proc->pid, buffer, size, buffer_size); 883 884 BUG_ON(buffer->free); 885 BUG_ON(size > buffer_size); 886 BUG_ON(buffer->transaction != NULL); 887 BUG_ON((void *)buffer < proc->buffer); 888 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); 889 890 if (buffer->async_transaction) { 891 proc->free_async_space += size + sizeof(struct binder_buffer); 892 893 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 894 "binder: %d: binder_free_buf size %zd " 895 "async free %zd\n", proc->pid, size, 896 proc->free_async_space); 897 } 898 899 binder_update_page_range(proc, 0, 900 (void *)PAGE_ALIGN((uintptr_t)buffer->data), 901 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), 902 NULL); 903 rb_erase(&buffer->rb_node, &proc->allocated_buffers); 904 buffer->free = 1; 905 if (!list_is_last(&buffer->entry, &proc->buffers)) { 906 struct binder_buffer *next = list_entry(buffer->entry.next, 907 struct binder_buffer, entry); 908 if (next->free) { 909 rb_erase(&next->rb_node, &proc->free_buffers); 910 binder_delete_free_buffer(proc, next); 911 } 912 } 913 if (proc->buffers.next != &buffer->entry) { 914 struct binder_buffer *prev = list_entry(buffer->entry.prev, 915 struct binder_buffer, entry); 916 if (prev->free) { 917 binder_delete_free_buffer(proc, buffer); 918 rb_erase(&prev->rb_node, &proc->free_buffers); 919 buffer = prev; 920 } 921 } 922 binder_insert_free_buffer(proc, buffer); 923} 924 925static struct binder_node *binder_get_node(struct binder_proc *proc, 926 void __user 
					   *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create a node for (@ptr, @cookie) and insert it into proc->nodes.
 * Returns NULL if a node with the same ptr already exists or on OOM.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}

/*
 * Take a strong or weak reference on @node.  @internal distinguishes
 * ref-held counts from local (transaction) counts.  If the owning
 * process has not yet been told about the ref, queue the node's work
 * item on @target_list.  Returns 0 or -EINVAL on an invalid increment.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* re-queue even if already pending elsewhere */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * Drop a strong or weak reference on @node.  When the last reference
 * goes away, either notify the owning process (so it can release its
 * userspace refs) or, if the process already has no refs, free the
 * node outright (removing it from the proc tree or dead-nodes list).
 * Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


/* Look up a ref in @proc by its userspace handle value @desc. */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * Find @proc's ref for @node, creating one if none exists.  A new ref
 * gets the smallest unused desc (0 is reserved for the context
 * manager) and is linked into both per-proc trees and the node's ref
 * list.  Returns NULL only on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* Scan ascending descs for the first gap. */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			     new_ref->desc);
	}
	return new_ref;
}

/*
 * Unlink and free @ref, dropping the node references it held and
 * discarding any pending death notification.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc, ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
			     ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

/*
 * Take a strong or weak count on @ref.  On the 0 -> 1 transition the
 * matching node count is taken too; @target_list (if non-NULL) is where
 * binder_inc_node queues work for the node's owner.  Returns 0 or the
 * error from binder_inc_node.
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;
	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}


/*
 * Drop a strong or weak count on @ref.  Underflow is a userspace error
 * and returns -EINVAL.  When both counts reach zero the ref itself is
 * destroyed, so the caller must not touch @ref after this returns.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("binder: %d invalid dec strong, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;
			/* Last strong ref: release the node's strong count. */
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("binder: %d invalid dec weak, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

/*
 * Remove @t from @target_thread's transaction stack (it must be on
 * top) and free it.  @target_thread may be NULL when the transaction
 * is no longer linked to a live thread.
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

/*
 * Deliver @error_code to the sender of @t.  If the immediate sender is
 * gone, walk up the from_parent chain popping dead frames until a live
 * thread (or the chain's root) is found.  Never used for one-way
 * transactions, which have no sender to reply to.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/*
			 * A second pending error displaces the first into
			 * return_error2 so neither is lost.
			 */
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					     t->debug_id, target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}

/*
 * Undo the reference/fd effects of the objects embedded in @buffer.
 * @failed_at: NULL to release every object, or a pointer into the
 * offsets array marking how far binder_transaction() got before
 * failing (objects past it were never processed).
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
binder_dec_node(buffer->target_node, 1, 0); 1316 1317 offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); 1318 if (failed_at) 1319 off_end = failed_at; 1320 else 1321 off_end = (void *)offp + buffer->offsets_size; 1322 for (; offp < off_end; offp++) { 1323 struct flat_binder_object *fp; 1324 if (*offp > buffer->data_size - sizeof(*fp) || 1325 buffer->data_size < sizeof(*fp) || 1326 !IS_ALIGNED(*offp, sizeof(void *))) { 1327 printk(KERN_ERR "binder: transaction release %d bad" 1328 "offset %zd, size %zd\n", debug_id, 1329 *offp, buffer->data_size); 1330 continue; 1331 } 1332 fp = (struct flat_binder_object *)(buffer->data + *offp); 1333 switch (fp->type) { 1334 case BINDER_TYPE_BINDER: 1335 case BINDER_TYPE_WEAK_BINDER: { 1336 struct binder_node *node = binder_get_node(proc, fp->binder); 1337 if (node == NULL) { 1338 printk(KERN_ERR "binder: transaction release %d" 1339 " bad node %p\n", debug_id, fp->binder); 1340 break; 1341 } 1342 binder_debug(BINDER_DEBUG_TRANSACTION, 1343 " node %d u%p\n", 1344 node->debug_id, node->ptr); 1345 binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); 1346 } break; 1347 case BINDER_TYPE_HANDLE: 1348 case BINDER_TYPE_WEAK_HANDLE: { 1349 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1350 if (ref == NULL) { 1351 printk(KERN_ERR "binder: transaction release %d" 1352 " bad handle %ld\n", debug_id, 1353 fp->handle); 1354 break; 1355 } 1356 binder_debug(BINDER_DEBUG_TRANSACTION, 1357 " ref %d desc %d (node %d)\n", 1358 ref->debug_id, ref->desc, ref->node->debug_id); 1359 binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); 1360 } break; 1361 1362 case BINDER_TYPE_FD: 1363 binder_debug(BINDER_DEBUG_TRANSACTION, 1364 " fd %ld\n", fp->handle); 1365 if (failed_at) 1366 task_close_fd(proc, fp->handle); 1367 break; 1368 1369 default: 1370 printk(KERN_ERR "binder: transaction release %d bad " 1371 "object type %lx\n", debug_id, fp->type); 1372 break; 1373 } 1374 } 1375} 1376 1377static void 
binder_transaction(struct binder_proc *proc,
		   struct binder_thread *thread,
		   struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	/* Record every transaction (call type 0/1, reply 2) in the log. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		/* A reply targets the transaction on top of our stack. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("binder: %d:%d got reply transaction "
					  "with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		/* Restore the priority we had before servicing the call. */
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad transaction stack,"
				" transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad target transaction stack %d, "
				"expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		/* Resolve target: handle -> ref -> node, 0 -> context mgr. */
		if (tr->target.handle) {
			struct binder_ref *ref;
			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d got "
					"transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("binder: %d:%d got new "
					"transaction with bad transaction stack"
					", transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			/*
			 * If the target proc already has a frame on our call
			 * stack, send the new work to that thread so nested
			 * calls don't deadlock waiting for a free thread.
			 */
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
			     "data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_TRANSACTION %d -> "
			     "%d - node %d, data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);

	/* Only two-way calls record a sender to reply to. */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);
	/* Async (one-way) buffers come from the async pool half. */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	/* Offsets array follows the data, pointer-aligned. */
	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
		binder_user_error("binder: %d:%d got transaction with "
			"invalid offsets size, %zd\n",
			proc->pid, thread->pid, tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	off_end = (void *)offp + tr->offsets_size;
	/* Translate each embedded object for the target process. */
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			binder_user_error("binder: %d:%d got transaction with "
				"invalid offset, %zd\n",
				proc->pid, thread->pid, *offp);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case
BINDER_TYPE_WEAK_BINDER: { 1595 struct binder_ref *ref; 1596 struct binder_node *node = binder_get_node(proc, fp->binder); 1597 if (node == NULL) { 1598 node = binder_new_node(proc, fp->binder, fp->cookie); 1599 if (node == NULL) { 1600 return_error = BR_FAILED_REPLY; 1601 goto err_binder_new_node_failed; 1602 } 1603 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1604 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1605 } 1606 if (fp->cookie != node->cookie) { 1607 binder_user_error("binder: %d:%d sending u%p " 1608 "node %d, cookie mismatch %p != %p\n", 1609 proc->pid, thread->pid, 1610 fp->binder, node->debug_id, 1611 fp->cookie, node->cookie); 1612 goto err_binder_get_ref_for_node_failed; 1613 } 1614 ref = binder_get_ref_for_node(target_proc, node); 1615 if (ref == NULL) { 1616 return_error = BR_FAILED_REPLY; 1617 goto err_binder_get_ref_for_node_failed; 1618 } 1619 if (fp->type == BINDER_TYPE_BINDER) 1620 fp->type = BINDER_TYPE_HANDLE; 1621 else 1622 fp->type = BINDER_TYPE_WEAK_HANDLE; 1623 fp->handle = ref->desc; 1624 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1625 &thread->todo); 1626 1627 binder_debug(BINDER_DEBUG_TRANSACTION, 1628 " node %d u%p -> ref %d desc %d\n", 1629 node->debug_id, node->ptr, ref->debug_id, 1630 ref->desc); 1631 } break; 1632 case BINDER_TYPE_HANDLE: 1633 case BINDER_TYPE_WEAK_HANDLE: { 1634 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1635 if (ref == NULL) { 1636 binder_user_error("binder: %d:%d got " 1637 "transaction with invalid " 1638 "handle, %ld\n", proc->pid, 1639 thread->pid, fp->handle); 1640 return_error = BR_FAILED_REPLY; 1641 goto err_binder_get_ref_failed; 1642 } 1643 if (ref->node->proc == target_proc) { 1644 if (fp->type == BINDER_TYPE_HANDLE) 1645 fp->type = BINDER_TYPE_BINDER; 1646 else 1647 fp->type = BINDER_TYPE_WEAK_BINDER; 1648 fp->binder = ref->node->ptr; 1649 fp->cookie = ref->node->cookie; 1650 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 
						0, NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%p\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     ref->node->ptr);
			} else {
				/* Object lives elsewhere: give target its own ref. */
				struct binder_ref *new_ref;
				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			int target_fd;
			struct file *file;

			/* The receiver must have opted in to receiving fds. */
			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			/* Duplicate the fd into the target's fd table. */
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %ld -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("binder: %d:%d got transactio"
				"n with invalid object type, %lx\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		/* Reply consumed: pop the original call off the target. */
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Two-way call: push onto our call stack pending the reply. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/*
		 * One-way: serialize per node.  If one is already in
		 * flight, queue on the node's async_todo and don't wake
		 * anyone yet.
		 */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

/* Error ladder: each label unwinds everything set up before its goto. */
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	/* offp marks how far object translation got. */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"binder: %d:%d transaction failed %d, size %zd-%zd\n", 1768 proc->pid, thread->pid, return_error, 1769 tr->data_size, tr->offsets_size); 1770 1771 { 1772 struct binder_transaction_log_entry *fe; 1773 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1774 *fe = *e; 1775 } 1776 1777 BUG_ON(thread->return_error != BR_OK); 1778 if (in_reply_to) { 1779 thread->return_error = BR_TRANSACTION_COMPLETE; 1780 binder_send_failed_reply(in_reply_to, return_error); 1781 } else 1782 thread->return_error = return_error; 1783} 1784 1785int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, 1786 void __user *buffer, int size, signed long *consumed) 1787{ 1788 uint32_t cmd; 1789 void __user *ptr = buffer + *consumed; 1790 void __user *end = buffer + size; 1791 1792 while (ptr < end && thread->return_error == BR_OK) { 1793 if (get_user(cmd, (uint32_t __user *)ptr)) 1794 return -EFAULT; 1795 ptr += sizeof(uint32_t); 1796 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1797 binder_stats.bc[_IOC_NR(cmd)]++; 1798 proc->stats.bc[_IOC_NR(cmd)]++; 1799 thread->stats.bc[_IOC_NR(cmd)]++; 1800 } 1801 switch (cmd) { 1802 case BC_INCREFS: 1803 case BC_ACQUIRE: 1804 case BC_RELEASE: 1805 case BC_DECREFS: { 1806 uint32_t target; 1807 struct binder_ref *ref; 1808 const char *debug_string; 1809 1810 if (get_user(target, (uint32_t __user *)ptr)) 1811 return -EFAULT; 1812 ptr += sizeof(uint32_t); 1813 if (target == 0 && binder_context_mgr_node && 1814 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1815 ref = binder_get_ref_for_node(proc, 1816 binder_context_mgr_node); 1817 if (ref->desc != target) { 1818 binder_user_error("binder: %d:" 1819 "%d tried to acquire " 1820 "reference to desc 0, " 1821 "got %d instead\n", 1822 proc->pid, thread->pid, 1823 ref->desc); 1824 } 1825 } else 1826 ref = binder_get_ref(proc, target); 1827 if (ref == NULL) { 1828 binder_user_error("binder: %d:%d refcou" 1829 "nt change on invalid ref %d\n", 1830 proc->pid, thread->pid, target); 
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			/* Userspace acks a BR_INCREFS/BR_ACQUIRE we sent. */
			void __user *node_ptr;
			void *cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (get_user(cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("binder: %d:%d "
					"%s u%p no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("binder: %d:%d %s u%p node %d"
					" cookie mismatch %p != %p\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					node_ptr, node->debug_id,
					cookie, node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_ACQUIRE_DONE node %d has "
						"no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_INCREFS_DONE node %d has "
						"no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			/* Drop the temp count held while the ack was pending. */
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			/* Userspace returns a transaction buffer to the pool. */
			void __user *data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p no match\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p matched "
					"unreturned buffer\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				/* Freed async buffer: dispatch the next queued one. */
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			/* Thread spawned in response to BR_SPAWN_LOOPER. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			/* Thread that userspace created on its own. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_ENTER_LOOPER called after "
					"BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			void __user *cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d %s "
					"invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("binder: %d:%"
						"d BC_REQUEST_DEATH_NOTI"
						"FICATION death notific"
						"ation already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "binder: %d:%d "
						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				/* Node owner already dead: notify immediately. */
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion cookie mismatch "
						"%p != %p\n",
						proc->pid, thread->pid,
						death->cookie, cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Death already queued: deliver, then clear. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			/* Userspace acks a BR_DEAD_BINDER we delivered. */
			struct binder_work *w;
			void __user *cookie;
			struct binder_ref_death *death = NULL;
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;

			ptr += sizeof(void *);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
				     proc->pid, thread->pid, cookie, death);
			if (death == NULL) {
				binder_user_error("binder: %d:%d BC_DEAD"
					"_BINDER_DONE %p not found\n",
					proc->pid, thread->pid, cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			printk(KERN_ERR "binder: %d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

/*
 * Bump the per-driver, per-proc, and per-thread counters for a BR_*
 * return code about to be delivered to userspace.
 */
void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
		    uint32_t cmd)
{
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
} 2178} 2179 2180static int binder_has_proc_work(struct binder_proc *proc, 2181 struct binder_thread *thread) 2182{ 2183 return !list_empty(&proc->todo) || 2184 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2185} 2186 2187static int binder_has_thread_work(struct binder_thread *thread) 2188{ 2189 return !list_empty(&thread->todo) || thread->return_error != BR_OK || 2190 (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); 2191} 2192 2193static int binder_thread_read(struct binder_proc *proc, 2194 struct binder_thread *thread, 2195 void __user *buffer, int size, 2196 signed long *consumed, int non_block) 2197{ 2198 void __user *ptr = buffer + *consumed; 2199 void __user *end = buffer + size; 2200 2201 int ret = 0; 2202 int wait_for_proc_work; 2203 2204 if (*consumed == 0) { 2205 if (put_user(BR_NOOP, (uint32_t __user *)ptr)) 2206 return -EFAULT; 2207 ptr += sizeof(uint32_t); 2208 } 2209 2210retry: 2211 wait_for_proc_work = thread->transaction_stack == NULL && 2212 list_empty(&thread->todo); 2213 2214 if (thread->return_error != BR_OK && ptr < end) { 2215 if (thread->return_error2 != BR_OK) { 2216 if (put_user(thread->return_error2, (uint32_t __user *)ptr)) 2217 return -EFAULT; 2218 ptr += sizeof(uint32_t); 2219 if (ptr == end) 2220 goto done; 2221 thread->return_error2 = BR_OK; 2222 } 2223 if (put_user(thread->return_error, (uint32_t __user *)ptr)) 2224 return -EFAULT; 2225 ptr += sizeof(uint32_t); 2226 thread->return_error = BR_OK; 2227 goto done; 2228 } 2229 2230 2231 thread->looper |= BINDER_LOOPER_STATE_WAITING; 2232 if (wait_for_proc_work) 2233 proc->ready_threads++; 2234 mutex_unlock(&binder_lock); 2235 if (wait_for_proc_work) { 2236 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | 2237 BINDER_LOOPER_STATE_ENTERED))) { 2238 binder_user_error("binder: %d:%d ERROR: Thread waiting " 2239 "for process work before calling BC_REGISTER_" 2240 "LOOPER or BC_ENTER_LOOPER (state %x)\n", 2241 proc->pid, thread->pid, thread->looper); 2242 
			/* Debug aid: stall misbehaving userspace when the
			 * stop_on_user_error knob is >= 2. */
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: only one looper is woken per work item. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items into the user buffer until it is full. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			/* Only the leading BR_NOOP was written: go back to
			 * sleep instead of returning an empty read. */
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Worst-case item is a command word plus a transaction_data. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			/* Translated to BR_TRANSACTION/BR_REPLY below. */
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			/* Reconcile the node's kernel-side ref counts with the
			 * strong/weak refs userspace holds, emitting at most
			 * one BR_INCREFS/ACQUIRE/RELEASE/DECREFS per pass. */
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				/* Payload is the userspace node pointer + cookie. */
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* Nothing to tell userspace; free the node if
				 * no references of any kind remain. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid, node->debug_id,
						     node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Parked until BC_DEAD_BINDER_DONE acks it. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Marshal the transaction into a binder_transaction_data. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			/* Inherit the caller's priority for synchronous calls,
			 * bounded below by the node's min_priority. */
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Report the sender's tgid in the reader's pid ns. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		/* The payload is not copied: userspace sees the kernel buffer
		 * through its own mapping, offset by user_buffer_offset. */
		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d %s %d %d:%d, cmd %d"
			     "size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: keep it on our stack until the
			 * reply comes back through this thread. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way call or reply: the transaction is done. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper when none are ready and the
	 * configured max has not been reached. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					      spawn a new thread if we leave
					      this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "binder: %d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrites the BR_NOOP written at the buffer start. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}

/*
 * Drain a todo list during teardown.  Queued synchronous transactions get
 * a BR_DEAD_REPLY back to their sender; TRANSACTION_COMPLETE items are
 * freed.
 * NOTE(review): other work types (node work, death notifications) are
 * unlinked but not freed here — confirm they are owned/freed elsewhere.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
				binder_send_failed_reply(t, BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		default:
			break;
		}
	}

}

/*
 * Find the binder_thread for current in proc's rb-tree (keyed by pid),
 * creating and inserting it on first use.  Returns NULL only on allocation
 * failure.  Callers in this file hold binder_lock around the call.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		/* New threads unwind to userspace before doing real work. */
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * Tear down a binder_thread: detach it from every transaction on its
 * stack, fail any reply the peer is still waiting for, drain its todo
 * list and free it.  Returns the number of transactions still active.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply =
		NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the top of the stack can be an incoming call awaiting our
	 * reply; that one gets BR_DEAD_REPLY sent back below. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "binder: release %d:%d transaction %d "
			     "%s, still active\n", proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			/* Incoming: detach target side and its buffer. */
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			/* Outgoing: the reply will find no sender. */
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}

/*
 * poll() support.  Takes binder_lock only to look up the thread and
 * compute the wait mode; the actual poll_wait is lockless.
 * NOTE(review): binder_get_thread() can return NULL on allocation failure
 * and `thread` is dereferenced below without a check — confirm.
 */
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;
	mutex_unlock(&binder_lock);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

/*
 * Main ioctl entry point.  BINDER_WRITE_READ carries the BC_*/BR_* command
 * streams; the remaining commands configure the proc or the calling
 * thread.  All work is done under binder_lock (binder_thread_read drops
 * it internally while sleeping).
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	/* Debug knob: park callers while stop_on_user_error >= 2. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
			     bwr.read_size, bwr.read_buffer);

		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				/* Copy consumed counts back even on error so
				 * userspace knows how far the write got. */
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			/* This thread consumed an item; wake another looper
			 * if process work remains. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
			     bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): failure reports -EINVAL rather than -EFAULT —
		 * long-standing behavior userspace may rely on; confirm before
		 * changing. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Register this proc as the context manager (handle 0).
		 * Only one node, and only one euid, may ever claim it. */
		if (binder_context_mgr_node != NULL) {
			printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (binder_context_mgr_uid != -1) {
			if (binder_context_mgr_uid != current->cred->euid) {
				printk(KERN_ERR "binder: BINDER_SET_"
				       "CONTEXT_MGR bad uid %d != %d\n",
				       current->cred->euid,
				       binder_context_mgr_uid);
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the node so it can never be released. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		/* thread is freed: must not touch it at the err label. */
		thread = NULL;
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}

static void
/* vm_ops->open: trace duplication of the binder mapping (debug only). */
binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	dump_stack();
}

/*
 * vm_ops->close: the userspace mapping is going away.  Clear proc->vma
 * and defer dropping the files_struct to the workqueue.
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};

/*
 * Set up the shared transaction buffer: a kernel vmalloc area mirrored by
 * the userspace mapping (offset recorded in user_buffer_offset), with
 * physical pages allocated lazily.  Size is clamped to 4 MB and only one
 * mapping per proc is allowed.
 * NOTE(review): the proc->buffer test/set is not serialized against a
 * concurrent mmap on the same fd — confirm upper layers prevent that.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* The mapping is read-only for userspace and not inherited on fork. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* user address = kernel address + user_buffer_offset */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On aliasing VIPT caches both mappings must have the same cache
	 * colour; shrink the vma from the front until they line up. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back just the first page now; the rest is populated on demand. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may use at most half the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

/*
 * open(): allocate a binder_proc for this client, pin its task_struct,
 * link it into the global proc list and expose a per-pid debugfs file.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	mutex_lock(&binder_lock);
	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}

/* flush(): kick all of this proc's threads back to userspace (deferred). */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

/*
 * Deferred flush: set NEED_RETURN on every thread and wake all waiters so
 * blocked readers unwind out of the driver.  Runs under binder_lock (via
 * binder_deferred_func).
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

/* release(): tear down the debugfs entry and defer the real cleanup. */
static int binder_release(struct inode *nodp, struct file
*filp) 2934{ 2935 struct binder_proc *proc = filp->private_data; 2936 debugfs_remove(proc->debugfs_entry); 2937 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 2938 2939 return 0; 2940} 2941 2942static void binder_deferred_release(struct binder_proc *proc) 2943{ 2944 struct hlist_node *pos; 2945 struct binder_transaction *t; 2946 struct rb_node *n; 2947 int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; 2948 2949 BUG_ON(proc->vma); 2950 BUG_ON(proc->files); 2951 2952 hlist_del(&proc->proc_node); 2953 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { 2954 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2955 "binder_release: %d context_mgr_node gone\n", 2956 proc->pid); 2957 binder_context_mgr_node = NULL; 2958 } 2959 2960 threads = 0; 2961 active_transactions = 0; 2962 while ((n = rb_first(&proc->threads))) { 2963 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2964 threads++; 2965 active_transactions += binder_free_thread(proc, thread); 2966 } 2967 nodes = 0; 2968 incoming_refs = 0; 2969 while ((n = rb_first(&proc->nodes))) { 2970 struct binder_node *node = rb_entry(n, struct binder_node, rb_node); 2971 2972 nodes++; 2973 rb_erase(&node->rb_node, &proc->nodes); 2974 list_del_init(&node->work.entry); 2975 if (hlist_empty(&node->refs)) { 2976 kfree(node); 2977 binder_stats_deleted(BINDER_STAT_NODE); 2978 } else { 2979 struct binder_ref *ref; 2980 int death = 0; 2981 2982 node->proc = NULL; 2983 node->local_strong_refs = 0; 2984 node->local_weak_refs = 0; 2985 hlist_add_head(&node->dead_node, &binder_dead_nodes); 2986 2987 hlist_for_each_entry(ref, pos, &node->refs, node_entry) { 2988 incoming_refs++; 2989 if (ref->death) { 2990 death++; 2991 if (list_empty(&ref->death->work.entry)) { 2992 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2993 list_add_tail(&ref->death->work.entry, &ref->proc->todo); 2994 wake_up_interruptible(&ref->proc->wait); 2995 } else 2996 BUG(); 2997 } 2998 } 2999 
binder_debug(BINDER_DEBUG_DEAD_BINDER, 3000 "binder: node %d now dead, " 3001 "refs %d, death %d\n", node->debug_id, 3002 incoming_refs, death); 3003 } 3004 } 3005 outgoing_refs = 0; 3006 while ((n = rb_first(&proc->refs_by_desc))) { 3007 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3008 rb_node_desc); 3009 outgoing_refs++; 3010 binder_delete_ref(ref); 3011 } 3012 binder_release_work(&proc->todo); 3013 buffers = 0; 3014 3015 while ((n = rb_first(&proc->allocated_buffers))) { 3016 struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, 3017 rb_node); 3018 t = buffer->transaction; 3019 if (t) { 3020 t->buffer = NULL; 3021 buffer->transaction = NULL; 3022 printk(KERN_ERR "binder: release proc %d, " 3023 "transaction %d, not freed\n", 3024 proc->pid, t->debug_id); 3025 /*BUG();*/ 3026 } 3027 binder_free_buf(proc, buffer); 3028 buffers++; 3029 } 3030 3031 binder_stats_deleted(BINDER_STAT_PROC); 3032 3033 page_count = 0; 3034 if (proc->pages) { 3035 int i; 3036 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { 3037 if (proc->pages[i]) { 3038 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 3039 "binder_release: %d: " 3040 "page %d at %p not freed\n", 3041 proc->pid, i, 3042 proc->buffer + i * PAGE_SIZE); 3043 __free_page(proc->pages[i]); 3044 page_count++; 3045 } 3046 } 3047 kfree(proc->pages); 3048 vfree(proc->buffer); 3049 } 3050 3051 put_task_struct(proc->tsk); 3052 3053 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3054 "binder_release: %d threads %d, nodes %d (ref %d), " 3055 "refs %d, active transactions %d, buffers %d, " 3056 "pages %d\n", 3057 proc->pid, threads, nodes, incoming_refs, outgoing_refs, 3058 active_transactions, buffers, page_count); 3059 3060 kfree(proc); 3061} 3062 3063static void binder_deferred_func(struct work_struct *work) 3064{ 3065 struct binder_proc *proc; 3066 struct files_struct *files; 3067 3068 int defer; 3069 do { 3070 mutex_lock(&binder_lock); 3071 mutex_lock(&binder_deferred_lock); 3072 if (!hlist_empty(&binder_deferred_list)) 
{ 3073 proc = hlist_entry(binder_deferred_list.first, 3074 struct binder_proc, deferred_work_node); 3075 hlist_del_init(&proc->deferred_work_node); 3076 defer = proc->deferred_work; 3077 proc->deferred_work = 0; 3078 } else { 3079 proc = NULL; 3080 defer = 0; 3081 } 3082 mutex_unlock(&binder_deferred_lock); 3083 3084 files = NULL; 3085 if (defer & BINDER_DEFERRED_PUT_FILES) { 3086 files = proc->files; 3087 if (files) 3088 proc->files = NULL; 3089 } 3090 3091 if (defer & BINDER_DEFERRED_FLUSH) 3092 binder_deferred_flush(proc); 3093 3094 if (defer & BINDER_DEFERRED_RELEASE) 3095 binder_deferred_release(proc); /* frees proc */ 3096 3097 mutex_unlock(&binder_lock); 3098 if (files) 3099 put_files_struct(files); 3100 } while (proc); 3101} 3102static DECLARE_WORK(binder_deferred_work, binder_deferred_func); 3103 3104static void 3105binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) 3106{ 3107 mutex_lock(&binder_deferred_lock); 3108 proc->deferred_work |= defer; 3109 if (hlist_unhashed(&proc->deferred_work_node)) { 3110 hlist_add_head(&proc->deferred_work_node, 3111 &binder_deferred_list); 3112 schedule_work(&binder_deferred_work); 3113 } 3114 mutex_unlock(&binder_deferred_lock); 3115} 3116 3117static void print_binder_transaction(struct seq_file *m, const char *prefix, 3118 struct binder_transaction *t) 3119{ 3120 seq_printf(m, 3121 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", 3122 prefix, t->debug_id, t, 3123 t->from ? t->from->proc->pid : 0, 3124 t->from ? t->from->pid : 0, 3125 t->to_proc ? t->to_proc->pid : 0, 3126 t->to_thread ? 
t->to_thread->pid : 0, 3127 t->code, t->flags, t->priority, t->need_reply); 3128 if (t->buffer == NULL) { 3129 seq_puts(m, " buffer free\n"); 3130 return; 3131 } 3132 if (t->buffer->target_node) 3133 seq_printf(m, " node %d", 3134 t->buffer->target_node->debug_id); 3135 seq_printf(m, " size %zd:%zd data %p\n", 3136 t->buffer->data_size, t->buffer->offsets_size, 3137 t->buffer->data); 3138} 3139 3140static void print_binder_buffer(struct seq_file *m, const char *prefix, 3141 struct binder_buffer *buffer) 3142{ 3143 seq_printf(m, "%s %d: %p size %zd:%zd %s\n", 3144 prefix, buffer->debug_id, buffer->data, 3145 buffer->data_size, buffer->offsets_size, 3146 buffer->transaction ? "active" : "delivered"); 3147} 3148 3149static void print_binder_work(struct seq_file *m, const char *prefix, 3150 const char *transaction_prefix, 3151 struct binder_work *w) 3152{ 3153 struct binder_node *node; 3154 struct binder_transaction *t; 3155 3156 switch (w->type) { 3157 case BINDER_WORK_TRANSACTION: 3158 t = container_of(w, struct binder_transaction, work); 3159 print_binder_transaction(m, transaction_prefix, t); 3160 break; 3161 case BINDER_WORK_TRANSACTION_COMPLETE: 3162 seq_printf(m, "%stransaction complete\n", prefix); 3163 break; 3164 case BINDER_WORK_NODE: 3165 node = container_of(w, struct binder_node, work); 3166 seq_printf(m, "%snode work %d: u%p c%p\n", 3167 prefix, node->debug_id, node->ptr, node->cookie); 3168 break; 3169 case BINDER_WORK_DEAD_BINDER: 3170 seq_printf(m, "%shas dead binder\n", prefix); 3171 break; 3172 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 3173 seq_printf(m, "%shas cleared dead binder\n", prefix); 3174 break; 3175 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: 3176 seq_printf(m, "%shas cleared death notification\n", prefix); 3177 break; 3178 default: 3179 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); 3180 break; 3181 } 3182} 3183 3184static void print_binder_thread(struct seq_file *m, 3185 struct binder_thread *thread, 3186 int print_always) 
3187{ 3188 struct binder_transaction *t; 3189 struct binder_work *w; 3190 size_t start_pos = m->count; 3191 size_t header_pos; 3192 3193 seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); 3194 header_pos = m->count; 3195 t = thread->transaction_stack; 3196 while (t) { 3197 if (t->from == thread) { 3198 print_binder_transaction(m, 3199 " outgoing transaction", t); 3200 t = t->from_parent; 3201 } else if (t->to_thread == thread) { 3202 print_binder_transaction(m, 3203 " incoming transaction", t); 3204 t = t->to_parent; 3205 } else { 3206 print_binder_transaction(m, " bad transaction", t); 3207 t = NULL; 3208 } 3209 } 3210 list_for_each_entry(w, &thread->todo, entry) { 3211 print_binder_work(m, " ", " pending transaction", w); 3212 } 3213 if (!print_always && m->count == header_pos) 3214 m->count = start_pos; 3215} 3216 3217static void print_binder_node(struct seq_file *m, struct binder_node *node) 3218{ 3219 struct binder_ref *ref; 3220 struct hlist_node *pos; 3221 struct binder_work *w; 3222 int count; 3223 3224 count = 0; 3225 hlist_for_each_entry(ref, pos, &node->refs, node_entry) 3226 count++; 3227 3228 seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", 3229 node->debug_id, node->ptr, node->cookie, 3230 node->has_strong_ref, node->has_weak_ref, 3231 node->local_strong_refs, node->local_weak_refs, 3232 node->internal_strong_refs, count); 3233 if (count) { 3234 seq_puts(m, " proc"); 3235 hlist_for_each_entry(ref, pos, &node->refs, node_entry) 3236 seq_printf(m, " %d", ref->proc->pid); 3237 } 3238 seq_puts(m, "\n"); 3239 list_for_each_entry(w, &node->async_todo, entry) 3240 print_binder_work(m, " ", 3241 " pending async transaction", w); 3242} 3243 3244static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) 3245{ 3246 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", 3247 ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}

/*
 * Dump the full state of one binder_proc: every thread, node, ref (only when
 * print_all), allocated buffer, pending todo work and delivered death
 * notifications.  When print_all is zero and nothing was emitted past the
 * "proc" header, the header is rolled back via m->count, same trick as
 * print_binder_thread().
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/* nodes with queued async work are shown even without print_all */
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, " ", " pending transaction", w);
	/* only report whether any delivered death exists, not each one */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

/*
 * Human-readable names for the BR_* return codes, indexed to match
 * binder_stats.br[] (checked by BUILD_BUG_ON in print_binder_stats()).
 * NOTE(review): could be "static const char * const" for full const-ness.
 */
static const char *binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

/*
 * Human-readable names for the BC_* command codes, indexed to match
 * binder_stats.bc[] (checked by BUILD_BUG_ON in print_binder_stats()).
 */
static const char *binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

/*
 * Names for the object-lifetime counters, indexed to match
 * binder_stats.obj_created[]/obj_deleted[].
 */
static const char *binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

/*
 * Dump a binder_stats block: per-command (BC_*) counts, per-return (BR_*)
 * counts, and created/deleted object counts.  Zero entries are skipped.
 * The BUILD_BUG_ONs keep the string tables in lock-step with the counter
 * arrays at compile time.
 */
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			/* "active" = still-live objects of this kind */
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}

/*
 * Dump summary statistics for one binder_proc: thread/node/ref/buffer counts,
 * looper-thread bookkeeping, pending transactions, then its per-proc stats.
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	count = 0;
	for (n = rb_first(&proc->threads); n
!= NULL; n = rb_next(n))
		count++;
	seq_printf(m, " threads: %d\n", count);
	seq_printf(m, " requested threads: %d+%d/%d\n"
		   " ready threads %d\n"
		   " free async space %zd\n", proc->requested_threads,
		   proc->requested_threads_started, proc->max_threads,
		   proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " nodes: %d\n", count);
	/* count refs and sum their strong/weak counts in one walk */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " buffers: %d\n", count);

	/* only BINDER_WORK_TRANSACTION entries count as pending transactions */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, " pending transactions: %d\n", count);

	print_binder_stats(m, " ", &proc->stats);
}

/*
 * debugfs "state" file: dump every dead node and the full state of every
 * binder_proc.  Takes binder_lock unless binder_debug_no_lock is set
 * (lockless mode risks racing with the driver but lets a wedged system
 * still be dumped).
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;	/* cursor required by hlist_for_each_entry() */
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		mutex_lock(&binder_lock);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		mutex_unlock(&binder_lock);
	return 0;
}

/*
 * debugfs "stats" file: global binder_stats followed by per-process summary
 * statistics.  Same binder_lock / binder_debug_no_lock convention as
 * binder_state_show().
 */
static int binder_stats_show(struct
seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;	/* cursor required by hlist_for_each_entry() */
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		mutex_lock(&binder_lock);

	seq_puts(m, "binder stats:\n");

	/* global counters first, then per-process breakdown */
	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		mutex_unlock(&binder_lock);
	return 0;
}

/*
 * debugfs "transactions" file: dump every process with print_all == 0, so
 * only threads/nodes with live transaction activity appear (empty headers
 * are rolled back by print_binder_proc/print_binder_thread).
 */
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct hlist_node *pos;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		mutex_lock(&binder_lock);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		mutex_unlock(&binder_lock);
	return 0;
}

/*
 * debugfs "proc/<pid>" file: full dump of the single binder_proc stashed in
 * m->private when the per-process file was created.
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc = m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		mutex_lock(&binder_lock);
	seq_puts(m, "binder proc state:\n");
	print_binder_proc(m, proc, 1);
	if (do_lock)
		mutex_unlock(&binder_lock);
	return 0;
}

/*
 * Format one transaction-log ring-buffer entry.  call_type encodes the kind
 * of call: 2 = reply, 1 = async, otherwise a synchronous call ("call " is
 * padded to align with "reply"/"async").
 */
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ?
"async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->to_node,
		   e->target_handle, e->data_size, e->offsets_size);
}

/*
 * debugfs "transaction_log" / "failed_transaction_log" files: replay the
 * ring buffer in chronological order.  If the log has wrapped (log->full),
 * the oldest entries run from log->next to the end of the array, followed
 * by entries 0..log->next-1.
 */
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	if (log->full) {
		for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
			print_binder_transaction_log_entry(m, &log->entry[i]);
	}
	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}

/* file_operations for the /dev/binder character device */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

/* registered as a misc device with a dynamically assigned minor number */
static struct miscdevice binder_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "binder",
	.fops = &binder_fops
};

/*
 * Instantiate the single-open seq_file boilerplate (binder_<name>_open and
 * binder_<name>_fops) for each debugfs show function defined above.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);

/*
 * Module init: create the debugfs hierarchy (binder/ and binder/proc/) and
 * register the /dev/binder misc device.  debugfs failures are deliberately
 * ignored — the driver is functional without its debug files — so only the
 * misc_register() result decides success.
 */
static int __init binder_init(void)
{
	int ret;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);
	ret = misc_register(&binder_miscdev);
	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
&binder_transaction_log,
				    &binder_transaction_log_fops);
		/* failed log reuses the same fops; only the backing ring
		 * buffer (passed via i_private) differs */
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}
	return ret;
}

device_initcall(binder_init);

MODULE_LICENSE("GPL v2");