binder.c revision c11a166cd4c19664355e0e3d9c04cfa7ee4aa9f4
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>

#include "binder.h"

/* Single global lock serializing access to all binder driver state. */
static DEFINE_MUTEX(binder_lock);
/* Protects binder_deferred_list (procs with pending deferred work). */
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_procs);         /* all live binder_proc instances */
static HLIST_HEAD(binder_deferred_list); /* procs queued for deferred work */
static HLIST_HEAD(binder_dead_nodes);    /* nodes whose owning proc has died */

static struct proc_dir_entry *binder_proc_dir_entry_root;
static struct proc_dir_entry *binder_proc_dir_entry_proc;
/* The node registered as the context manager (reachable via handle 0). */
static struct binder_node *binder_context_mgr_node;
/* uid that owns the context manager registration; -1 means unset. */
static uid_t binder_context_mgr_uid = -1;
/* Source of monotonically increasing debug ids for nodes/refs/etc. */
static int binder_last_id;

static int binder_read_proc_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* The binder buffer mapping must never be writable from userspace. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bit flags selecting which categories binder_debug() actually prints. */
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
/* Default: log user errors and failed/dead transactions only. */
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* NOTE(review): backing variable is int but the module parameter is
 * declared with the "bool" param ops — confirm this matches what this
 * kernel version expects for bool parameters. */
static int binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 0 = run normally, 1 = stop on next user error, 2 = stopped. */
static int binder_stop_on_user_error;

/* Module parameter setter for stop_on_user_error: store the value, then
 * wake any thread parked in binder_user_error_wait unless we are (still)
 * in the "stopped" state (>= 2). */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* printk gated on binder_debug_mask; mask is one of the BINDER_DEBUG_* bits. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			printk(KERN_INFO x); \
	} while (0)

/* Report a userspace protocol error; optionally latch the driver into the
 * "stopped" state (binder_stop_on_user_error = 2) for debugging. */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			printk(KERN_INFO x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Object categories tracked in binder_stats.obj_created/obj_deleted. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	/* Per-command counters, indexed by the ioctl nr of the BR_*/BC_*
	 * codes (arrays sized by the last code in each range). */
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

/* Driver-wide totals; per-proc copies live in struct binder_proc/thread. */
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

/* One entry in the (fixed-size, circular) transaction debug log. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;   /* index of the slot to overwrite next */
	int full;   /* set once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/* Claim and zero the next slot in @log, advancing (and wrapping) the
 * cursor.  Returns the slot for the caller to fill in. */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

/* A work item queued on a proc/thread/node todo list; the type tag tells
 * the consumer how to interpret the containing object. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* A binder object living in one process; remote processes hold
 * binder_refs to it.  Embedded binder_work lets the node itself be
 * queued (BINDER_WORK_NODE) to tell userspace about ref changes. */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;       /* in proc->nodes while alive */
		struct hlist_node dead_node;  /* in binder_dead_nodes after */
	};
	struct binder_proc *proc;   /* owning process; NULL once it died */
	struct hlist_head refs;     /* all binder_refs pointing at us */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;           /* userspace object address (rb key) */
	void __user *cookie;        /* opaque userspace cookie */
	/* Bookkeeping bits for which ref messages userspace has seen /
	 * has pending (presumably BR_ACQUIRE/BR_INCREFS traffic —
	 * confirmed by later code outside this chunk). */
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo; /* queued async transactions */
};

/* Death notification registered against a remote node. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;  /* in proc->refs_by_desc */
	struct rb_node rb_node_node;  /* in proc->refs_by_node */
	struct hlist_node node_entry; /* in node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;                /* userspace handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;
};

/* One buffer carved out of a proc's mmap'ed region. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;   /* packed with the flags into 32 bits */

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];        /* payload starts here (flexible array) */
};

/* Work that must run outside binder_lock, flagged per proc. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH     = 0x02,
	BINDER_DEFERRED_RELEASE   = 0x04,
};

/* Per-process binder state, created on open(). */
struct binder_proc {
	struct hlist_node proc_node;     /* in binder_procs */
	struct rb_root threads;          /* binder_thread, keyed by pid */
	struct rb_root nodes;            /* binder_node, keyed by ptr */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;      /* userspace mapping of buffers */
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;               /* enum binder_deferred_state bits */
	void *buffer;                    /* kernel address of buffer area */
	/* Add to a kernel buffer address to get the user address. */
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;        /* all buffers, in address order */
	struct rb_root free_buffers;     /* keyed by size */
	struct rb_root allocated_buffers;/* keyed by address */
	size_t free_async_space;         /* budget for async transactions */

	struct page **pages;             /* backing pages (may be NULL) */
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;           /* proc-wide work queue */
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
};

/* binder_thread.looper state bits. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread binder state, keyed by pid inside proc->threads. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	/* Chain of in-flight transactions this thread is part of. */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
				/* buffer. Used when sending a reply to a */
				/* dead process that we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* An in-flight transaction (or reply) between two procs/threads. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	uid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/*
 * copied from get_unused_fd_flags
 *
 * Allocate an unused fd in @proc's file table (not the current task's),
 * honouring the target task's RLIMIT_NOFILE.  Returns the fd on success
 * or a negative errno (-ESRCH if the proc's files are already gone,
 * -EMFILE on exhaustion).
 */
int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	int fd, error;
	struct fdtable *fdt;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	error = -EMFILE;
	spin_lock(&files->file_lock);

repeat:
	fdt = files_fdtable(files);
	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
				files->next_fd);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	rlim_cur = 0;
	if (lock_task_sighand(proc->tsk, &irqs)) {
		rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
		unlock_task_sighand(proc->tsk, &irqs);
	}
	if (fd >= rlim_cur)
		goto out;

	/* Do we need to expand the fd array or fd set? */
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	if (error) {
		/*
		 * If we needed to expand the fs array we
		 * might have blocked - try again.
		 */
		error = -EMFILE;
		goto repeat;
	}

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	files->next_fd = fd + 1;
#if 1
	/* Sanity check */
	if (fdt->fd[fd] != NULL) {
		printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
		fdt->fd[fd] = NULL;
	}
#endif
	error = fd;

out:
	spin_unlock(&files->file_lock);
	return error;
}

/*
 * copied from fd_install
 *
 * Install @file at slot @fd in @proc's file table.  Silently does
 * nothing if the proc's files are already gone.
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	struct files_struct *files = proc->files;
	struct fdtable *fdt;

	if (files == NULL)
		return;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	BUG_ON(fdt->fd[fd] != NULL);
	rcu_assign_pointer(fdt->fd[fd], file);
	spin_unlock(&files->file_lock);
}

/*
 * copied from __put_unused_fd in open.c
 *
 * Mark @fd free again; caller holds files->file_lock.
 */
static void __put_unused_fd(struct files_struct *files, unsigned int fd)
{
	struct fdtable *fdt = files_fdtable(files);
	__FD_CLR(fd, fdt->open_fds);
	if (fd < files->next_fd)
		files->next_fd = fd;
}

/*
 * copied from sys_close
 *
 * Close @fd in @proc's file table.  Returns the filp_close() result,
 * with restart errors mapped to -EINTR because the fd slot has already
 * been cleared; -EBADF if the fd was not open, -ESRCH if the proc's
 * files are gone.
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	struct file *filp;
	struct files_struct *files = proc->files;
	struct fdtable *fdt;
	int retval;

	if (files == NULL)
		return -ESRCH;

	spin_lock(&files->file_lock);
	fdt = files_fdtable(files);
	if (fd >= fdt->max_fds)
		goto out_unlock;
	filp = fdt->fd[fd];
	if (!filp)
		goto out_unlock;
	rcu_assign_pointer(fdt->fd[fd], NULL);
	FD_CLR(fd, fdt->close_on_exec);
	__put_unused_fd(files, fd);
	spin_unlock(&files->file_lock);
	retval = filp_close(filp, files);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;

out_unlock:
	spin_unlock(&files->file_lock);
	return -EBADF;
}

/* Set the current task's nice value, clamping to what RLIMIT_NICE
 * permits.  Only reports a user error when even the clamped value is
 * out of range (i.e. min_nice >= 20, meaning the rlimit allows no
 * valid nice value at all). */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "binder: %d: nice value %ld not allowed use "
		     "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}

/* Size of @buffer's data area: the gap between buffer->data and the
 * next buffer in address order (or the end of the mapped region for
 * the last buffer). */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

/* Insert @new_buffer into proc->free_buffers, which is ordered by the
 * buffer's current data size (duplicates go right). */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: add free buffer, size %zd, "
		     "at %p\n", proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* Insert @new_buffer into proc->allocated_buffers, ordered by address;
 * the same buffer must never be inserted twice. */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/* Translate a userspace data pointer back to its binder_buffer: shift
 * by user_buffer_offset to get the kernel data address, back up over
 * the header, then look the result up in allocated_buffers.  Returns
 * NULL if the pointer does not match an allocated buffer. */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/* Allocate (@allocate != 0) or free backing pages for the kernel range
 * [@start, @end) of @proc's buffer area, keeping the kernel mapping and
 * the userspace mapping (via user_buffer_offset) in sync.  If @vma is
 * NULL the proc's own mm is taken and mmap_sem acquired.  On allocation
 * failure, already-processed pages are unwound by falling into the free
 * loop via the interleaved error labels.  Returns 0 or -ENOMEM. */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (*page == NULL) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* Also serves as the allocation unwind path: the error labels
	 * below drop us into the body at the right teardown step. */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/* Carve a buffer of @data_size + @offsets_size (each pointer-aligned)
 * out of @proc's free list, best-fit by size.  Async allocations are
 * additionally charged against proc->free_async_space.  Returns NULL on
 * invalid size, exhausted space, or page-mapping failure. */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* Alignment overflow check: the aligned sum must not wrap. */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: binder_alloc_buf size %zd"
			     "failed, no async space left\n", proc->pid, size);
		return NULL;
	}

	/* Best-fit search: smallest free buffer that still fits. */
	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	if (n == NULL) {
		/* Inexact fit: recompute the chosen buffer's real size. */
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got buff"
		     "er %p size %zd\n", proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/* Only split off a remainder buffer if there is room for
		 * its header plus a minimal payload. */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		/* Split: the remainder becomes a new free buffer placed
		 * immediately after our payload in address order. */
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got "
		     "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_alloc_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	return buffer;
}

/* Page containing the start of @buffer's header. */
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

/* Page containing the last byte of @buffer's header. */
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

/* Remove a free buffer that is being merged away, releasing any backing
 * pages its header occupied that are not shared with the previous or
 * next buffer.  Caller guarantees the previous buffer exists and is
 * free (this is only called when merging adjacent free buffers). */
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: merge free, buffer %p "
			     "share page with %p\n", proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			/* NOTE(review): this debug message prints "prev"
			 * although this branch matched "next" — likely a
			 * copy/paste slip in the log text only. */
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "binder: %d: merge free, buffer"
				     " %p share page with %p\n", proc->pid,
				     buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: merge free, buffer %p do "
			     "not share page%s%s with with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

/* Return @buffer to the free pool: release its data pages, merge with
 * free neighbours in address order, and (re)insert the resulting free
 * buffer into the size-ordered free tree.  Refunds async space if the
 * buffer carried an async transaction. */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_free_buf %p size %zd buffer"
		     "_size %zd\n", proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_free_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/* Find the binder_node with userspace address @ptr in @proc's node
 * tree; NULL if absent. */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   void __user
 *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* Create a node for userspace object (@ptr, @cookie) in @proc.  Returns
 * NULL on allocation failure OR if a node with that ptr already exists
 * (callers are expected to have checked with binder_get_node first). */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}

/* Take a strong (@strong) or weak reference on @node.  @internal
 * distinguishes remote-ref-driven counts from local ones.  When the
 * node's userspace side has not yet been told about the ref, its work
 * item is (re)queued on @target_list.  Returns 0 or -EINVAL on an
 * inconsistent request. */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				printk(KERN_ERR "binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				printk(KERN_ERR "binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/* Drop a strong/weak reference on @node.  When all refs are gone the
 * node is either queued to its proc so userspace can release it, or —
 * if userspace holds nothing — unlinked (from the proc's tree or the
 * dead-nodes list) and freed.  Always returns 0. */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


/* Find @proc's ref with handle @desc; NULL if absent. */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/* Return @proc's ref to @node, creating one if needed.  A new ref gets
 * the lowest unused descriptor (0 is reserved for the context manager)
 * and is linked into both per-proc trees and the node's ref list.
 * Returns NULL only on allocation failure. */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* Pick the smallest free descriptor by walking the desc-ordered
	 * tree in order; stops at the first gap. */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			     new_ref->desc);
	}
	return new_ref;
}

/* Tear down @ref: unlink from both per-proc trees and the node's ref
 * list, drop the node references it held (strong if it still counted
 * one, plus the implicit weak), discard any pending death notification,
 * and free it. */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc, ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
			     ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
/*
 * Take a strong or weak count on @ref.  The first count of a given kind
 * also takes the corresponding count on the underlying node (via
 * binder_inc_node), optionally queueing node work on @target_list.
 * Returns 0 on success or the error from binder_inc_node.
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;
	if (strong) {
		if (ref->strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->strong++;
	} else {
		if (ref->weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->weak++;
	}
	return 0;
}


/*
 * Drop a strong or weak count on @ref.  A userspace-driven underflow is
 * reported and rejected with -EINVAL rather than BUG'ing.  When the last
 * count of either kind goes away the node count is dropped too, and when
 * both counts reach zero the ref itself is destroyed — the caller must
 * not touch @ref after a 0 return.
 */
static int binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->strong == 0) {
			binder_user_error("binder: %d invalid dec strong, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->strong--;
		if (ref->strong == 0) {
			int ret;
			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return ret;
		}
	} else {
		if (ref->weak == 0) {
			binder_user_error("binder: %d invalid dec weak, "
					  "ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->debug_id,
					  ref->desc, ref->strong, ref->weak);
			return -EINVAL;
		}
		ref->weak--;
	}
	/* Last reference of either kind gone: tear the ref down. */
	if (ref->strong == 0 && ref->weak == 0)
		binder_delete_ref(ref);
	return 0;
}

/*
 * Unwind one transaction frame: pop @t off @target_thread's transaction
 * stack (if a target thread is given), detach it from its buffer, and
 * free it.  @t must be the top of the stack when @target_thread is set.
 */
static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	if (target_thread) {
		BUG_ON(target_thread->transaction_stack != t);
		BUG_ON(target_thread->transaction_stack->from != target_thread);
		target_thread->transaction_stack =
			target_thread->transaction_stack->from_parent;
		t->from = NULL;
	}
	t->need_reply = 0;
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

/*
 * Deliver @error_code to whoever is waiting for a reply to @t, walking
 * up the from_parent chain past dead senders until a live thread (or the
 * root of the chain) is found.  Must not be called for one-way
 * transactions — nobody waits for those.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/*
			 * If the thread already has a pending error, shift it
			 * into return_error2 so this one can take the primary
			 * slot; a second pending error means we can only log.
			 */
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					     t->debug_id, target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				printk(KERN_ERR "binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* Sender died: discard this frame and retry with the
			 * next transaction up the call chain. */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}

/*
 * Release every reference a transaction buffer pinned: the target node's
 * strong count and one count per flattened object in the offsets array.
 * When called from the binder_transaction() error path, @failed_at marks
 * how far object processing got; only objects before it are released
 * (and only then are transferred fds closed in the target).
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* The offsets array lives after the (pointer-aligned) data area. */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* Sanity-check each offset before dereferencing; a bad one
		 * is logged and skipped rather than aborting the release. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			printk(KERN_ERR "binder: transaction release %d bad"
					"offset %zd, size %zd\n", debug_id,
					*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				printk(KERN_ERR "binder: transaction release %d"
				       " bad node %p\n", debug_id, fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%p\n",
				     node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				printk(KERN_ERR "binder: transaction release %d"
				       " bad handle %ld\n", debug_id,
				       fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %ld\n", fp->handle);
			/* fds were installed in the target; only close them
			 * when unwinding a failed transaction. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			printk(KERN_ERR "binder: transaction release %d bad "
			       "object type %lx\n", debug_id, fp->type);
			break;
		}
	}
}

static void
binder_transaction(struct binder_proc *proc, 1362 struct binder_thread *thread, 1363 struct binder_transaction_data *tr, int reply) 1364{ 1365 struct binder_transaction *t; 1366 struct binder_work *tcomplete; 1367 size_t *offp, *off_end; 1368 struct binder_proc *target_proc; 1369 struct binder_thread *target_thread = NULL; 1370 struct binder_node *target_node = NULL; 1371 struct list_head *target_list; 1372 wait_queue_head_t *target_wait; 1373 struct binder_transaction *in_reply_to = NULL; 1374 struct binder_transaction_log_entry *e; 1375 uint32_t return_error; 1376 1377 e = binder_transaction_log_add(&binder_transaction_log); 1378 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); 1379 e->from_proc = proc->pid; 1380 e->from_thread = thread->pid; 1381 e->target_handle = tr->target.handle; 1382 e->data_size = tr->data_size; 1383 e->offsets_size = tr->offsets_size; 1384 1385 if (reply) { 1386 in_reply_to = thread->transaction_stack; 1387 if (in_reply_to == NULL) { 1388 binder_user_error("binder: %d:%d got reply transaction " 1389 "with no transaction stack\n", 1390 proc->pid, thread->pid); 1391 return_error = BR_FAILED_REPLY; 1392 goto err_empty_call_stack; 1393 } 1394 binder_set_nice(in_reply_to->saved_priority); 1395 if (in_reply_to->to_thread != thread) { 1396 binder_user_error("binder: %d:%d got reply transaction " 1397 "with bad transaction stack," 1398 " transaction %d has target %d:%d\n", 1399 proc->pid, thread->pid, in_reply_to->debug_id, 1400 in_reply_to->to_proc ? 1401 in_reply_to->to_proc->pid : 0, 1402 in_reply_to->to_thread ? 
1403 in_reply_to->to_thread->pid : 0); 1404 return_error = BR_FAILED_REPLY; 1405 in_reply_to = NULL; 1406 goto err_bad_call_stack; 1407 } 1408 thread->transaction_stack = in_reply_to->to_parent; 1409 target_thread = in_reply_to->from; 1410 if (target_thread == NULL) { 1411 return_error = BR_DEAD_REPLY; 1412 goto err_dead_binder; 1413 } 1414 if (target_thread->transaction_stack != in_reply_to) { 1415 binder_user_error("binder: %d:%d got reply transaction " 1416 "with bad target transaction stack %d, " 1417 "expected %d\n", 1418 proc->pid, thread->pid, 1419 target_thread->transaction_stack ? 1420 target_thread->transaction_stack->debug_id : 0, 1421 in_reply_to->debug_id); 1422 return_error = BR_FAILED_REPLY; 1423 in_reply_to = NULL; 1424 target_thread = NULL; 1425 goto err_dead_binder; 1426 } 1427 target_proc = target_thread->proc; 1428 } else { 1429 if (tr->target.handle) { 1430 struct binder_ref *ref; 1431 ref = binder_get_ref(proc, tr->target.handle); 1432 if (ref == NULL) { 1433 binder_user_error("binder: %d:%d got " 1434 "transaction to invalid handle\n", 1435 proc->pid, thread->pid); 1436 return_error = BR_FAILED_REPLY; 1437 goto err_invalid_target_handle; 1438 } 1439 target_node = ref->node; 1440 } else { 1441 target_node = binder_context_mgr_node; 1442 if (target_node == NULL) { 1443 return_error = BR_DEAD_REPLY; 1444 goto err_no_context_mgr_node; 1445 } 1446 } 1447 e->to_node = target_node->debug_id; 1448 target_proc = target_node->proc; 1449 if (target_proc == NULL) { 1450 return_error = BR_DEAD_REPLY; 1451 goto err_dead_binder; 1452 } 1453 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { 1454 struct binder_transaction *tmp; 1455 tmp = thread->transaction_stack; 1456 if (tmp->to_thread != thread) { 1457 binder_user_error("binder: %d:%d got new " 1458 "transaction with bad transaction stack" 1459 ", transaction %d has target %d:%d\n", 1460 proc->pid, thread->pid, tmp->debug_id, 1461 tmp->to_proc ? tmp->to_proc->pid : 0, 1462 tmp->to_thread ? 
1463 tmp->to_thread->pid : 0); 1464 return_error = BR_FAILED_REPLY; 1465 goto err_bad_call_stack; 1466 } 1467 while (tmp) { 1468 if (tmp->from && tmp->from->proc == target_proc) 1469 target_thread = tmp->from; 1470 tmp = tmp->from_parent; 1471 } 1472 } 1473 } 1474 if (target_thread) { 1475 e->to_thread = target_thread->pid; 1476 target_list = &target_thread->todo; 1477 target_wait = &target_thread->wait; 1478 } else { 1479 target_list = &target_proc->todo; 1480 target_wait = &target_proc->wait; 1481 } 1482 e->to_proc = target_proc->pid; 1483 1484 /* TODO: reuse incoming transaction for reply */ 1485 t = kzalloc(sizeof(*t), GFP_KERNEL); 1486 if (t == NULL) { 1487 return_error = BR_FAILED_REPLY; 1488 goto err_alloc_t_failed; 1489 } 1490 binder_stats_created(BINDER_STAT_TRANSACTION); 1491 1492 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); 1493 if (tcomplete == NULL) { 1494 return_error = BR_FAILED_REPLY; 1495 goto err_alloc_tcomplete_failed; 1496 } 1497 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); 1498 1499 t->debug_id = ++binder_last_id; 1500 e->debug_id = t->debug_id; 1501 1502 if (reply) 1503 binder_debug(BINDER_DEBUG_TRANSACTION, 1504 "binder: %d:%d BC_REPLY %d -> %d:%d, " 1505 "data %p-%p size %zd-%zd\n", 1506 proc->pid, thread->pid, t->debug_id, 1507 target_proc->pid, target_thread->pid, 1508 tr->data.ptr.buffer, tr->data.ptr.offsets, 1509 tr->data_size, tr->offsets_size); 1510 else 1511 binder_debug(BINDER_DEBUG_TRANSACTION, 1512 "binder: %d:%d BC_TRANSACTION %d -> " 1513 "%d - node %d, data %p-%p size %zd-%zd\n", 1514 proc->pid, thread->pid, t->debug_id, 1515 target_proc->pid, target_node->debug_id, 1516 tr->data.ptr.buffer, tr->data.ptr.offsets, 1517 tr->data_size, tr->offsets_size); 1518 1519 if (!reply && !(tr->flags & TF_ONE_WAY)) 1520 t->from = thread; 1521 else 1522 t->from = NULL; 1523 t->sender_euid = proc->tsk->cred->euid; 1524 t->to_proc = target_proc; 1525 t->to_thread = target_thread; 1526 t->code = tr->code; 1527 t->flags = 
tr->flags; 1528 t->priority = task_nice(current); 1529 t->buffer = binder_alloc_buf(target_proc, tr->data_size, 1530 tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); 1531 if (t->buffer == NULL) { 1532 return_error = BR_FAILED_REPLY; 1533 goto err_binder_alloc_buf_failed; 1534 } 1535 t->buffer->allow_user_free = 0; 1536 t->buffer->debug_id = t->debug_id; 1537 t->buffer->transaction = t; 1538 t->buffer->target_node = target_node; 1539 if (target_node) 1540 binder_inc_node(target_node, 1, 0, NULL); 1541 1542 offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); 1543 1544 if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { 1545 binder_user_error("binder: %d:%d got transaction with invalid " 1546 "data ptr\n", proc->pid, thread->pid); 1547 return_error = BR_FAILED_REPLY; 1548 goto err_copy_data_failed; 1549 } 1550 if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { 1551 binder_user_error("binder: %d:%d got transaction with invalid " 1552 "offsets ptr\n", proc->pid, thread->pid); 1553 return_error = BR_FAILED_REPLY; 1554 goto err_copy_data_failed; 1555 } 1556 if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { 1557 binder_user_error("binder: %d:%d got transaction with " 1558 "invalid offsets size, %zd\n", 1559 proc->pid, thread->pid, tr->offsets_size); 1560 return_error = BR_FAILED_REPLY; 1561 goto err_bad_offset; 1562 } 1563 off_end = (void *)offp + tr->offsets_size; 1564 for (; offp < off_end; offp++) { 1565 struct flat_binder_object *fp; 1566 if (*offp > t->buffer->data_size - sizeof(*fp) || 1567 t->buffer->data_size < sizeof(*fp) || 1568 !IS_ALIGNED(*offp, sizeof(void *))) { 1569 binder_user_error("binder: %d:%d got transaction with " 1570 "invalid offset, %zd\n", 1571 proc->pid, thread->pid, *offp); 1572 return_error = BR_FAILED_REPLY; 1573 goto err_bad_offset; 1574 } 1575 fp = (struct flat_binder_object *)(t->buffer->data + *offp); 1576 switch (fp->type) { 1577 case BINDER_TYPE_BINDER: 1578 case 
BINDER_TYPE_WEAK_BINDER: { 1579 struct binder_ref *ref; 1580 struct binder_node *node = binder_get_node(proc, fp->binder); 1581 if (node == NULL) { 1582 node = binder_new_node(proc, fp->binder, fp->cookie); 1583 if (node == NULL) { 1584 return_error = BR_FAILED_REPLY; 1585 goto err_binder_new_node_failed; 1586 } 1587 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1588 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1589 } 1590 if (fp->cookie != node->cookie) { 1591 binder_user_error("binder: %d:%d sending u%p " 1592 "node %d, cookie mismatch %p != %p\n", 1593 proc->pid, thread->pid, 1594 fp->binder, node->debug_id, 1595 fp->cookie, node->cookie); 1596 goto err_binder_get_ref_for_node_failed; 1597 } 1598 ref = binder_get_ref_for_node(target_proc, node); 1599 if (ref == NULL) { 1600 return_error = BR_FAILED_REPLY; 1601 goto err_binder_get_ref_for_node_failed; 1602 } 1603 if (fp->type == BINDER_TYPE_BINDER) 1604 fp->type = BINDER_TYPE_HANDLE; 1605 else 1606 fp->type = BINDER_TYPE_WEAK_HANDLE; 1607 fp->handle = ref->desc; 1608 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1609 &thread->todo); 1610 1611 binder_debug(BINDER_DEBUG_TRANSACTION, 1612 " node %d u%p -> ref %d desc %d\n", 1613 node->debug_id, node->ptr, ref->debug_id, 1614 ref->desc); 1615 } break; 1616 case BINDER_TYPE_HANDLE: 1617 case BINDER_TYPE_WEAK_HANDLE: { 1618 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1619 if (ref == NULL) { 1620 binder_user_error("binder: %d:%d got " 1621 "transaction with invalid " 1622 "handle, %ld\n", proc->pid, 1623 thread->pid, fp->handle); 1624 return_error = BR_FAILED_REPLY; 1625 goto err_binder_get_ref_failed; 1626 } 1627 if (ref->node->proc == target_proc) { 1628 if (fp->type == BINDER_TYPE_HANDLE) 1629 fp->type = BINDER_TYPE_BINDER; 1630 else 1631 fp->type = BINDER_TYPE_WEAK_BINDER; 1632 fp->binder = ref->node->ptr; 1633 fp->cookie = ref->node->cookie; 1634 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 
0, NULL); 1635 binder_debug(BINDER_DEBUG_TRANSACTION, 1636 " ref %d desc %d -> node %d u%p\n", 1637 ref->debug_id, ref->desc, ref->node->debug_id, 1638 ref->node->ptr); 1639 } else { 1640 struct binder_ref *new_ref; 1641 new_ref = binder_get_ref_for_node(target_proc, ref->node); 1642 if (new_ref == NULL) { 1643 return_error = BR_FAILED_REPLY; 1644 goto err_binder_get_ref_for_node_failed; 1645 } 1646 fp->handle = new_ref->desc; 1647 binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); 1648 binder_debug(BINDER_DEBUG_TRANSACTION, 1649 " ref %d desc %d -> ref %d desc %d (node %d)\n", 1650 ref->debug_id, ref->desc, new_ref->debug_id, 1651 new_ref->desc, ref->node->debug_id); 1652 } 1653 } break; 1654 1655 case BINDER_TYPE_FD: { 1656 int target_fd; 1657 struct file *file; 1658 1659 if (reply) { 1660 if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { 1661 binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", 1662 proc->pid, thread->pid, fp->handle); 1663 return_error = BR_FAILED_REPLY; 1664 goto err_fd_not_allowed; 1665 } 1666 } else if (!target_node->accept_fds) { 1667 binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", 1668 proc->pid, thread->pid, fp->handle); 1669 return_error = BR_FAILED_REPLY; 1670 goto err_fd_not_allowed; 1671 } 1672 1673 file = fget(fp->handle); 1674 if (file == NULL) { 1675 binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", 1676 proc->pid, thread->pid, fp->handle); 1677 return_error = BR_FAILED_REPLY; 1678 goto err_fget_failed; 1679 } 1680 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); 1681 if (target_fd < 0) { 1682 fput(file); 1683 return_error = BR_FAILED_REPLY; 1684 goto err_get_unused_fd_failed; 1685 } 1686 task_fd_install(target_proc, target_fd, file); 1687 binder_debug(BINDER_DEBUG_TRANSACTION, 1688 " fd %ld -> %d\n", fp->handle, target_fd); 1689 /* TODO: fput? 
*/ 1690 fp->handle = target_fd; 1691 } break; 1692 1693 default: 1694 binder_user_error("binder: %d:%d got transactio" 1695 "n with invalid object type, %lx\n", 1696 proc->pid, thread->pid, fp->type); 1697 return_error = BR_FAILED_REPLY; 1698 goto err_bad_object_type; 1699 } 1700 } 1701 if (reply) { 1702 BUG_ON(t->buffer->async_transaction != 0); 1703 binder_pop_transaction(target_thread, in_reply_to); 1704 } else if (!(t->flags & TF_ONE_WAY)) { 1705 BUG_ON(t->buffer->async_transaction != 0); 1706 t->need_reply = 1; 1707 t->from_parent = thread->transaction_stack; 1708 thread->transaction_stack = t; 1709 } else { 1710 BUG_ON(target_node == NULL); 1711 BUG_ON(t->buffer->async_transaction != 1); 1712 if (target_node->has_async_transaction) { 1713 target_list = &target_node->async_todo; 1714 target_wait = NULL; 1715 } else 1716 target_node->has_async_transaction = 1; 1717 } 1718 t->work.type = BINDER_WORK_TRANSACTION; 1719 list_add_tail(&t->work.entry, target_list); 1720 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; 1721 list_add_tail(&tcomplete->entry, &thread->todo); 1722 if (target_wait) 1723 wake_up_interruptible(target_wait); 1724 return; 1725 1726err_get_unused_fd_failed: 1727err_fget_failed: 1728err_fd_not_allowed: 1729err_binder_get_ref_for_node_failed: 1730err_binder_get_ref_failed: 1731err_binder_new_node_failed: 1732err_bad_object_type: 1733err_bad_offset: 1734err_copy_data_failed: 1735 binder_transaction_buffer_release(target_proc, t->buffer, offp); 1736 t->buffer->transaction = NULL; 1737 binder_free_buf(target_proc, t->buffer); 1738err_binder_alloc_buf_failed: 1739 kfree(tcomplete); 1740 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 1741err_alloc_tcomplete_failed: 1742 kfree(t); 1743 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1744err_alloc_t_failed: 1745err_bad_call_stack: 1746err_empty_call_stack: 1747err_dead_binder: 1748err_invalid_target_handle: 1749err_no_context_mgr_node: 1750 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1751 
"binder: %d:%d transaction failed %d, size %zd-%zd\n", 1752 proc->pid, thread->pid, return_error, 1753 tr->data_size, tr->offsets_size); 1754 1755 { 1756 struct binder_transaction_log_entry *fe; 1757 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1758 *fe = *e; 1759 } 1760 1761 BUG_ON(thread->return_error != BR_OK); 1762 if (in_reply_to) { 1763 thread->return_error = BR_TRANSACTION_COMPLETE; 1764 binder_send_failed_reply(in_reply_to, return_error); 1765 } else 1766 thread->return_error = return_error; 1767} 1768 1769int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, 1770 void __user *buffer, int size, signed long *consumed) 1771{ 1772 uint32_t cmd; 1773 void __user *ptr = buffer + *consumed; 1774 void __user *end = buffer + size; 1775 1776 while (ptr < end && thread->return_error == BR_OK) { 1777 if (get_user(cmd, (uint32_t __user *)ptr)) 1778 return -EFAULT; 1779 ptr += sizeof(uint32_t); 1780 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1781 binder_stats.bc[_IOC_NR(cmd)]++; 1782 proc->stats.bc[_IOC_NR(cmd)]++; 1783 thread->stats.bc[_IOC_NR(cmd)]++; 1784 } 1785 switch (cmd) { 1786 case BC_INCREFS: 1787 case BC_ACQUIRE: 1788 case BC_RELEASE: 1789 case BC_DECREFS: { 1790 uint32_t target; 1791 struct binder_ref *ref; 1792 const char *debug_string; 1793 1794 if (get_user(target, (uint32_t __user *)ptr)) 1795 return -EFAULT; 1796 ptr += sizeof(uint32_t); 1797 if (target == 0 && binder_context_mgr_node && 1798 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1799 ref = binder_get_ref_for_node(proc, 1800 binder_context_mgr_node); 1801 if (ref->desc != target) { 1802 binder_user_error("binder: %d:" 1803 "%d tried to acquire " 1804 "reference to desc 0, " 1805 "got %d instead\n", 1806 proc->pid, thread->pid, 1807 ref->desc); 1808 } 1809 } else 1810 ref = binder_get_ref(proc, target); 1811 if (ref == NULL) { 1812 binder_user_error("binder: %d:%d refcou" 1813 "nt change on invalid ref %d\n", 1814 proc->pid, thread->pid, target); 
1815 break; 1816 } 1817 switch (cmd) { 1818 case BC_INCREFS: 1819 debug_string = "IncRefs"; 1820 binder_inc_ref(ref, 0, NULL); 1821 break; 1822 case BC_ACQUIRE: 1823 debug_string = "Acquire"; 1824 binder_inc_ref(ref, 1, NULL); 1825 break; 1826 case BC_RELEASE: 1827 debug_string = "Release"; 1828 binder_dec_ref(ref, 1); 1829 break; 1830 case BC_DECREFS: 1831 default: 1832 debug_string = "DecRefs"; 1833 binder_dec_ref(ref, 0); 1834 break; 1835 } 1836 binder_debug(BINDER_DEBUG_USER_REFS, 1837 "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", 1838 proc->pid, thread->pid, debug_string, ref->debug_id, 1839 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1840 break; 1841 } 1842 case BC_INCREFS_DONE: 1843 case BC_ACQUIRE_DONE: { 1844 void __user *node_ptr; 1845 void *cookie; 1846 struct binder_node *node; 1847 1848 if (get_user(node_ptr, (void * __user *)ptr)) 1849 return -EFAULT; 1850 ptr += sizeof(void *); 1851 if (get_user(cookie, (void * __user *)ptr)) 1852 return -EFAULT; 1853 ptr += sizeof(void *); 1854 node = binder_get_node(proc, node_ptr); 1855 if (node == NULL) { 1856 binder_user_error("binder: %d:%d " 1857 "%s u%p no match\n", 1858 proc->pid, thread->pid, 1859 cmd == BC_INCREFS_DONE ? 1860 "BC_INCREFS_DONE" : 1861 "BC_ACQUIRE_DONE", 1862 node_ptr); 1863 break; 1864 } 1865 if (cookie != node->cookie) { 1866 binder_user_error("binder: %d:%d %s u%p node %d" 1867 " cookie mismatch %p != %p\n", 1868 proc->pid, thread->pid, 1869 cmd == BC_INCREFS_DONE ? 
1870 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1871 node_ptr, node->debug_id, 1872 cookie, node->cookie); 1873 break; 1874 } 1875 if (cmd == BC_ACQUIRE_DONE) { 1876 if (node->pending_strong_ref == 0) { 1877 binder_user_error("binder: %d:%d " 1878 "BC_ACQUIRE_DONE node %d has " 1879 "no pending acquire request\n", 1880 proc->pid, thread->pid, 1881 node->debug_id); 1882 break; 1883 } 1884 node->pending_strong_ref = 0; 1885 } else { 1886 if (node->pending_weak_ref == 0) { 1887 binder_user_error("binder: %d:%d " 1888 "BC_INCREFS_DONE node %d has " 1889 "no pending increfs request\n", 1890 proc->pid, thread->pid, 1891 node->debug_id); 1892 break; 1893 } 1894 node->pending_weak_ref = 0; 1895 } 1896 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 1897 binder_debug(BINDER_DEBUG_USER_REFS, 1898 "binder: %d:%d %s node %d ls %d lw %d\n", 1899 proc->pid, thread->pid, 1900 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1901 node->debug_id, node->local_strong_refs, node->local_weak_refs); 1902 break; 1903 } 1904 case BC_ATTEMPT_ACQUIRE: 1905 printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); 1906 return -EINVAL; 1907 case BC_ACQUIRE_RESULT: 1908 printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); 1909 return -EINVAL; 1910 1911 case BC_FREE_BUFFER: { 1912 void __user *data_ptr; 1913 struct binder_buffer *buffer; 1914 1915 if (get_user(data_ptr, (void * __user *)ptr)) 1916 return -EFAULT; 1917 ptr += sizeof(void *); 1918 1919 buffer = binder_buffer_lookup(proc, data_ptr); 1920 if (buffer == NULL) { 1921 binder_user_error("binder: %d:%d " 1922 "BC_FREE_BUFFER u%p no match\n", 1923 proc->pid, thread->pid, data_ptr); 1924 break; 1925 } 1926 if (!buffer->allow_user_free) { 1927 binder_user_error("binder: %d:%d " 1928 "BC_FREE_BUFFER u%p matched " 1929 "unreturned buffer\n", 1930 proc->pid, thread->pid, data_ptr); 1931 break; 1932 } 1933 binder_debug(BINDER_DEBUG_FREE_BUFFER, 1934 "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s 
transaction\n", 1935 proc->pid, thread->pid, data_ptr, buffer->debug_id, 1936 buffer->transaction ? "active" : "finished"); 1937 1938 if (buffer->transaction) { 1939 buffer->transaction->buffer = NULL; 1940 buffer->transaction = NULL; 1941 } 1942 if (buffer->async_transaction && buffer->target_node) { 1943 BUG_ON(!buffer->target_node->has_async_transaction); 1944 if (list_empty(&buffer->target_node->async_todo)) 1945 buffer->target_node->has_async_transaction = 0; 1946 else 1947 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 1948 } 1949 binder_transaction_buffer_release(proc, buffer, NULL); 1950 binder_free_buf(proc, buffer); 1951 break; 1952 } 1953 1954 case BC_TRANSACTION: 1955 case BC_REPLY: { 1956 struct binder_transaction_data tr; 1957 1958 if (copy_from_user(&tr, ptr, sizeof(tr))) 1959 return -EFAULT; 1960 ptr += sizeof(tr); 1961 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 1962 break; 1963 } 1964 1965 case BC_REGISTER_LOOPER: 1966 binder_debug(BINDER_DEBUG_THREADS, 1967 "binder: %d:%d BC_REGISTER_LOOPER\n", 1968 proc->pid, thread->pid); 1969 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 1970 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1971 binder_user_error("binder: %d:%d ERROR:" 1972 " BC_REGISTER_LOOPER called " 1973 "after BC_ENTER_LOOPER\n", 1974 proc->pid, thread->pid); 1975 } else if (proc->requested_threads == 0) { 1976 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1977 binder_user_error("binder: %d:%d ERROR:" 1978 " BC_REGISTER_LOOPER called " 1979 "without request\n", 1980 proc->pid, thread->pid); 1981 } else { 1982 proc->requested_threads--; 1983 proc->requested_threads_started++; 1984 } 1985 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 1986 break; 1987 case BC_ENTER_LOOPER: 1988 binder_debug(BINDER_DEBUG_THREADS, 1989 "binder: %d:%d BC_ENTER_LOOPER\n", 1990 proc->pid, thread->pid); 1991 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 1992 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1993 
binder_user_error("binder: %d:%d ERROR:" 1994 " BC_ENTER_LOOPER called after " 1995 "BC_REGISTER_LOOPER\n", 1996 proc->pid, thread->pid); 1997 } 1998 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 1999 break; 2000 case BC_EXIT_LOOPER: 2001 binder_debug(BINDER_DEBUG_THREADS, 2002 "binder: %d:%d BC_EXIT_LOOPER\n", 2003 proc->pid, thread->pid); 2004 thread->looper |= BINDER_LOOPER_STATE_EXITED; 2005 break; 2006 2007 case BC_REQUEST_DEATH_NOTIFICATION: 2008 case BC_CLEAR_DEATH_NOTIFICATION: { 2009 uint32_t target; 2010 void __user *cookie; 2011 struct binder_ref *ref; 2012 struct binder_ref_death *death; 2013 2014 if (get_user(target, (uint32_t __user *)ptr)) 2015 return -EFAULT; 2016 ptr += sizeof(uint32_t); 2017 if (get_user(cookie, (void __user * __user *)ptr)) 2018 return -EFAULT; 2019 ptr += sizeof(void *); 2020 ref = binder_get_ref(proc, target); 2021 if (ref == NULL) { 2022 binder_user_error("binder: %d:%d %s " 2023 "invalid ref %d\n", 2024 proc->pid, thread->pid, 2025 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 2026 "BC_REQUEST_DEATH_NOTIFICATION" : 2027 "BC_CLEAR_DEATH_NOTIFICATION", 2028 target); 2029 break; 2030 } 2031 2032 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2033 "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", 2034 proc->pid, thread->pid, 2035 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
2036 "BC_REQUEST_DEATH_NOTIFICATION" : 2037 "BC_CLEAR_DEATH_NOTIFICATION", 2038 cookie, ref->debug_id, ref->desc, 2039 ref->strong, ref->weak, ref->node->debug_id); 2040 2041 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2042 if (ref->death) { 2043 binder_user_error("binder: %d:%" 2044 "d BC_REQUEST_DEATH_NOTI" 2045 "FICATION death notific" 2046 "ation already set\n", 2047 proc->pid, thread->pid); 2048 break; 2049 } 2050 death = kzalloc(sizeof(*death), GFP_KERNEL); 2051 if (death == NULL) { 2052 thread->return_error = BR_ERROR; 2053 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2054 "binder: %d:%d " 2055 "BC_REQUEST_DEATH_NOTIFICATION failed\n", 2056 proc->pid, thread->pid); 2057 break; 2058 } 2059 binder_stats_created(BINDER_STAT_DEATH); 2060 INIT_LIST_HEAD(&death->work.entry); 2061 death->cookie = cookie; 2062 ref->death = death; 2063 if (ref->node->proc == NULL) { 2064 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2065 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2066 list_add_tail(&ref->death->work.entry, &thread->todo); 2067 } else { 2068 list_add_tail(&ref->death->work.entry, &proc->todo); 2069 wake_up_interruptible(&proc->wait); 2070 } 2071 } 2072 } else { 2073 if (ref->death == NULL) { 2074 binder_user_error("binder: %d:%" 2075 "d BC_CLEAR_DEATH_NOTIFI" 2076 "CATION death notificat" 2077 "ion not active\n", 2078 proc->pid, thread->pid); 2079 break; 2080 } 2081 death = ref->death; 2082 if (death->cookie != cookie) { 2083 binder_user_error("binder: %d:%" 2084 "d BC_CLEAR_DEATH_NOTIFI" 2085 "CATION death notificat" 2086 "ion cookie mismatch " 2087 "%p != %p\n", 2088 proc->pid, thread->pid, 2089 death->cookie, cookie); 2090 break; 2091 } 2092 ref->death = NULL; 2093 if (list_empty(&death->work.entry)) { 2094 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2095 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2096 list_add_tail(&death->work.entry, &thread->todo); 2097 } 
				/* Tail of binder_thread_write(): continuation of the
				 * BC_CLEAR_DEATH_NOTIFICATION handling started before
				 * this chunk.  Target thread is not in a looper state,
				 * so queue on the process todo list and wake a waiter. */
				else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			} else {
				/* The BR_DEAD_BINDER is still queued and undelivered;
				 * retag it so delivery also acts as the clear. */
				BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
				death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
			}
		}
	} break;
	case BC_DEAD_BINDER_DONE: {
		/* Userspace acknowledges a previously delivered BR_DEAD_BINDER.
		 * Match it by cookie on the proc's delivered_death list. */
		struct binder_work *w;
		void __user *cookie;
		struct binder_ref_death *death = NULL;
		if (get_user(cookie, (void __user * __user *)ptr))
			return -EFAULT;

		ptr += sizeof(void *);
		list_for_each_entry(w, &proc->delivered_death, entry) {
			struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
			if (tmp_death->cookie == cookie) {
				death = tmp_death;
				break;
			}
		}
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
			     proc->pid, thread->pid, cookie, death);
		if (death == NULL) {
			binder_user_error("binder: %d:%d BC_DEAD"
				"_BINDER_DONE %p not found\n",
				proc->pid, thread->pid, cookie);
			break;
		}

		list_del_init(&death->work.entry);
		/* If a clear raced in while the notification was out for
		 * delivery, hand the deferred clear work to a looper thread
		 * (or the process queue if this thread is not a looper). */
		if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
			death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
			if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
				list_add_tail(&death->work.entry, &thread->todo);
			} else {
				list_add_tail(&death->work.entry, &proc->todo);
				wake_up_interruptible(&proc->wait);
			}
		}
	} break;

	default:
		printk(KERN_ERR "binder: %d:%d unknown command %d\n",
		       proc->pid, thread->pid, cmd);
		return -EINVAL;
	}
	/* Report back how much of the write buffer was consumed, even on
	 * partial processing, so userspace can resume correctly. */
	*consumed = ptr - buffer;
	}
	return 0;
}

/*
 * binder_stat_br() - account one BR_* return code in the global, per-process
 * and per-thread statistics tables.  Out-of-range command numbers are
 * silently ignored.  Caller holds binder_lock.
 */
void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
		    uint32_t cmd)
{
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}

/*
 * binder_has_proc_work() - wait-queue predicate: true when there is work on
 * the process todo list or the thread has been told to return to userspace.
 */
static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_has_thread_work() - wait-queue predicate: true when this thread has
 * queued work, a pending error to report, or must return to userspace.
 */
static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * Called with binder_lock held (dropped around blocking waits).
 * @buffer/@size: userspace destination; @consumed: in/out progress cursor.
 * @non_block: nonzero for O_NONBLOCK behaviour (-EAGAIN instead of sleeping).
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      void __user *buffer, int size,
			      signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* First command of a fresh read is always a BR_NOOP, which lets
	 * userspace restart a partially consumed buffer safely. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* No transaction in flight and nothing thread-local queued: this
	 * thread is free to service the shared process queue. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Deliver any pending error codes before normal work; return_error2
	 * (if set) is written first, each consuming one uint32 slot. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			/* Stall misbehaving userspace if the debug knob asks
			 * for it (binder_stop_on_user_error >= 2). */
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* Exclusive wait: only one ready thread is woken per
			 * item queued on the shared process list. */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Drain work items into the user buffer until it is (nearly) full
	 * or no work remains. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			/* Only the initial BR_NOOP was written: go back to
			 * sleep rather than returning an empty read. */
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Largest command is a uint32 opcode plus a full
		 * binder_transaction_data; stop if it cannot fit. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			/* Reconcile the node's computed strong/weak state with
			 * what userspace has been told, emitting at most one
			 * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS per pass. */
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				/* Hold the ref ourselves until userspace
				 * answers with BC_INCREFS_DONE. */
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* State already in sync: dequeue, and free the
				 * node entirely if no references remain. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid, node->debug_id,
						     node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p\n",
				      proc->pid, thread->pid,
				      cmd == BR_DEAD_BINDER ?
				      "BR_DEAD_BINDER" :
				      "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				      death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep it parked until BC_DEAD_BINDER_DONE
				 * acknowledges delivery. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* A real transaction (or reply) to deliver. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie =  target_node->cookie;
			t->saved_priority = task_nice(current);
			/* Run at least at the node's minimum priority; one-way
			 * calls never inherit the sender's priority. */
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = t->sender_euid;

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* Translate the sender's pid into the reader's pid
			 * namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							current->nsproxy->pid_ns);
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Kernel buffer address rebased into the receiver's mmap via
		 * the fixed kernel/user offset. */
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d %s %d %d:%d, cmd %d"
			     "size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: push onto this thread's stack so
			 * the reply can be routed back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			/* One-way or reply: the transaction is finished. */
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper when the pool is exhausted
	 * and the configured maximum has not been reached. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "binder: %d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrites the BR_NOOP written at the start of the buffer. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}

/*
 * binder_release_work() - discard a pending work list during teardown.
 * Queued transactions whose senders still wait get a BR_DEAD_REPLY;
 * transaction-complete records are simply freed.  Other work item types
 * are dropped without cleanup here.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
				binder_send_failed_reply(t, BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		default:
			break;
		}
	}

}

/*
 * binder_get_thread() - look up (or lazily create) the binder_thread for
 * the current task in proc's thread rbtree, keyed by pid.  Returns NULL
 * only on allocation failure.  Caller holds binder_lock.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		/* New threads must return to userspace before blocking. */
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * binder_free_thread() - detach and free a thread, unwinding its
 * transaction stack.  Returns the number of transactions that were still
 * active.  (Body continues past this chunk boundary.)
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply =
NULL; 2541 int active_transactions = 0; 2542 2543 rb_erase(&thread->rb_node, &proc->threads); 2544 t = thread->transaction_stack; 2545 if (t && t->to_thread == thread) 2546 send_reply = t; 2547 while (t) { 2548 active_transactions++; 2549 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2550 "binder: release %d:%d transaction %d " 2551 "%s, still active\n", proc->pid, thread->pid, 2552 t->debug_id, 2553 (t->to_thread == thread) ? "in" : "out"); 2554 2555 if (t->to_thread == thread) { 2556 t->to_proc = NULL; 2557 t->to_thread = NULL; 2558 if (t->buffer) { 2559 t->buffer->transaction = NULL; 2560 t->buffer = NULL; 2561 } 2562 t = t->to_parent; 2563 } else if (t->from == thread) { 2564 t->from = NULL; 2565 t = t->from_parent; 2566 } else 2567 BUG(); 2568 } 2569 if (send_reply) 2570 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2571 binder_release_work(&thread->todo); 2572 kfree(thread); 2573 binder_stats_deleted(BINDER_STAT_THREAD); 2574 return active_transactions; 2575} 2576 2577static unsigned int binder_poll(struct file *filp, 2578 struct poll_table_struct *wait) 2579{ 2580 struct binder_proc *proc = filp->private_data; 2581 struct binder_thread *thread = NULL; 2582 int wait_for_proc_work; 2583 2584 mutex_lock(&binder_lock); 2585 thread = binder_get_thread(proc); 2586 2587 wait_for_proc_work = thread->transaction_stack == NULL && 2588 list_empty(&thread->todo) && thread->return_error == BR_OK; 2589 mutex_unlock(&binder_lock); 2590 2591 if (wait_for_proc_work) { 2592 if (binder_has_proc_work(proc, thread)) 2593 return POLLIN; 2594 poll_wait(filp, &proc->wait, wait); 2595 if (binder_has_proc_work(proc, thread)) 2596 return POLLIN; 2597 } else { 2598 if (binder_has_thread_work(thread)) 2599 return POLLIN; 2600 poll_wait(filp, &thread->wait, wait); 2601 if (binder_has_thread_work(thread)) 2602 return POLLIN; 2603 } 2604 return 0; 2605} 2606 2607static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 2608{ 2609 int ret; 2610 struct binder_proc 
*proc = filp->private_data; 2611 struct binder_thread *thread; 2612 unsigned int size = _IOC_SIZE(cmd); 2613 void __user *ubuf = (void __user *)arg; 2614 2615 /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ 2616 2617 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2618 if (ret) 2619 return ret; 2620 2621 mutex_lock(&binder_lock); 2622 thread = binder_get_thread(proc); 2623 if (thread == NULL) { 2624 ret = -ENOMEM; 2625 goto err; 2626 } 2627 2628 switch (cmd) { 2629 case BINDER_WRITE_READ: { 2630 struct binder_write_read bwr; 2631 if (size != sizeof(struct binder_write_read)) { 2632 ret = -EINVAL; 2633 goto err; 2634 } 2635 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { 2636 ret = -EFAULT; 2637 goto err; 2638 } 2639 binder_debug(BINDER_DEBUG_READ_WRITE, 2640 "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n", 2641 proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, 2642 bwr.read_size, bwr.read_buffer); 2643 2644 if (bwr.write_size > 0) { 2645 ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); 2646 if (ret < 0) { 2647 bwr.read_consumed = 0; 2648 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2649 ret = -EFAULT; 2650 goto err; 2651 } 2652 } 2653 if (bwr.read_size > 0) { 2654 ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); 2655 if (!list_empty(&proc->todo)) 2656 wake_up_interruptible(&proc->wait); 2657 if (ret < 0) { 2658 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) 2659 ret = -EFAULT; 2660 goto err; 2661 } 2662 } 2663 binder_debug(BINDER_DEBUG_READ_WRITE, 2664 "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", 2665 proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, 2666 bwr.read_consumed, bwr.read_size); 2667 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { 2668 ret = -EFAULT; 2669 goto err; 2670 } 2671 break; 2672 } 2673 case 
BINDER_SET_MAX_THREADS: 2674 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { 2675 ret = -EINVAL; 2676 goto err; 2677 } 2678 break; 2679 case BINDER_SET_CONTEXT_MGR: 2680 if (binder_context_mgr_node != NULL) { 2681 printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); 2682 ret = -EBUSY; 2683 goto err; 2684 } 2685 if (binder_context_mgr_uid != -1) { 2686 if (binder_context_mgr_uid != current->cred->euid) { 2687 printk(KERN_ERR "binder: BINDER_SET_" 2688 "CONTEXT_MGR bad uid %d != %d\n", 2689 current->cred->euid, 2690 binder_context_mgr_uid); 2691 ret = -EPERM; 2692 goto err; 2693 } 2694 } else 2695 binder_context_mgr_uid = current->cred->euid; 2696 binder_context_mgr_node = binder_new_node(proc, NULL, NULL); 2697 if (binder_context_mgr_node == NULL) { 2698 ret = -ENOMEM; 2699 goto err; 2700 } 2701 binder_context_mgr_node->local_weak_refs++; 2702 binder_context_mgr_node->local_strong_refs++; 2703 binder_context_mgr_node->has_strong_ref = 1; 2704 binder_context_mgr_node->has_weak_ref = 1; 2705 break; 2706 case BINDER_THREAD_EXIT: 2707 binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n", 2708 proc->pid, thread->pid); 2709 binder_free_thread(proc, thread); 2710 thread = NULL; 2711 break; 2712 case BINDER_VERSION: 2713 if (size != sizeof(struct binder_version)) { 2714 ret = -EINVAL; 2715 goto err; 2716 } 2717 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { 2718 ret = -EINVAL; 2719 goto err; 2720 } 2721 break; 2722 default: 2723 ret = -EINVAL; 2724 goto err; 2725 } 2726 ret = 0; 2727err: 2728 if (thread) 2729 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2730 mutex_unlock(&binder_lock); 2731 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2732 if (ret && ret != -ERESTARTSYS) 2733 printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2734 return ret; 2735} 2736 2737static void 
binder_vma_open(struct vm_area_struct *vma)
{
	/* Debug-only hook: a binder mapping should never be duplicated
	 * (VM_DONTCOPY is set in binder_mmap), so log and dump a stack. */
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	dump_stack();
}

/*
 * binder_vma_close() - the process is unmapping its binder buffer; clear
 * proc->vma and defer dropping the cached files_struct to the workqueue.
 */
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};

/*
 * binder_mmap() - set up the per-process transaction buffer.  Reserves a
 * matching kernel virtual range (capped at 4MB), allocates the page
 * pointer array, and maps only the first page up front; the rest is
 * populated on demand by the buffer allocator.  The mapping is read-only
 * for userspace (VM_WRITE forbidden, VM_MAYWRITE cleared).
 *
 * NOTE(review): the proc->buffer "already mapped" check is not done under
 * any lock here, so two racing mmap() calls could both pass it — verify
 * against callers/later fixes.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* Fixed delta used to translate kernel buffer addresses into the
	 * user mapping (see binder_thread_read). */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;

#ifdef CONFIG_CPU_CACHE_VIPT
	/* On VIPT-aliasing caches the user and kernel mappings must share a
	 * cache colour; bump vm_start a page at a time until they do. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Map just the first page; it holds the initial free buffer header. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may only use half the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;

	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
err_bad_arg:
	printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

/*
 * binder_open() - create and register a binder_proc for the opening
 * process, and publish its /proc read entry for debugging.
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	mutex_lock(&binder_lock);
	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/* A stale entry may linger if a previous process with the
		 * same pid died without the entry being removed yet. */
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
		create_proc_read_entry(strbuf, S_IRUGO,
				       binder_proc_dir_entry_proc,
				       binder_read_proc_proc, proc);
	}

	return 0;
}

/* flush() (e.g. on close of a dup'd fd): wake all waiters via deferred work. */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

/*
 * binder_deferred_flush() - force every thread of the process back to
 * userspace: set NEED_RETURN on each and wake anything sleeping in
 * binder_thread_read().  Runs under binder_lock from the workqueue.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int
binder_release(struct inode *nodp, struct file *filp)
{
	/* Last reference to the fd dropped: remove the /proc entry now and
	 * defer the heavyweight teardown to the workqueue. */
	struct binder_proc *proc = filp->private_data;
	if (binder_proc_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		remove_proc_entry(strbuf, binder_proc_dir_entry_proc);
	}

	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

/*
 * binder_deferred_release() - tear down a binder_proc after its file has
 * been released: free threads, nodes (keeping referenced ones on the
 * dead-nodes list and firing death notifications), outgoing refs, pending
 * work, leftover buffers and pages, then the proc itself.  Runs under
 * binder_lock from the deferred workqueue.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct hlist_node *pos;
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	/* vma close and PUT_FILES must already have happened. */
	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder_release: %d context_mgr_node gone\n",
			     proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		list_del_init(&node->work.entry);
		if (hlist_empty(&node->refs)) {
			/* Nobody holds a ref: the node can go away now. */
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		} else {
			/* Remote refs still exist: orphan the node onto the
			 * dead-nodes list and deliver death notifications. */
			struct binder_ref *ref;
			int death = 0;

			node->proc = NULL;
			node->local_strong_refs = 0;
			node->local_weak_refs = 0;
			hlist_add_head(&node->dead_node, &binder_dead_nodes);

			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
				incoming_refs++;
				if (ref->death) {
					death++;
					if (list_empty(&ref->death->work.entry)) {
						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
						wake_up_interruptible(&ref->proc->wait);
					} else
						BUG();
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: node %d now dead, "
				     "refs %d, death %d\n", node->debug_id,
				     incoming_refs, death);
		}
	}
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}
	binder_release_work(&proc->todo);
	buffers = 0;

	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
							rb_node);
		t = buffer->transaction;
		if (t) {
			/* Transaction still owns this buffer; sever the link
			 * rather than leaving a dangling pointer. */
			t->buffer = NULL;
			buffer->transaction = NULL;
			printk(KERN_ERR "binder: release proc %d, "
			       "transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}
		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			if (proc->pages[i]) {
				binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
					     "binder_release: %d: "
					     "page %d at %p not freed\n",
					     proc->pid, i,
					     proc->buffer + i * PAGE_SIZE);
				__free_page(proc->pages[i]);
				page_count++;
			}
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_release: %d threads %d, nodes %d (ref %d), "
		     "refs %d, active transactions %d, buffers %d, "
		     "pages %d\n",
		     proc->pid, threads, nodes, incoming_refs, outgoing_refs,
		     active_transactions, buffers, page_count);

	kfree(proc);
}

/*
 * binder_deferred_func() - workqueue handler draining binder_deferred_list.
 * (Body continues past this chunk boundary.)
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct
	binder_proc *proc;
	struct files_struct *files;

	int defer;
	/* Pop one proc at a time off the deferred list (under the small
	 * deferred lock), then process its pending flags under binder_lock.
	 * Loop until the list is empty. */
	do {
		mutex_lock(&binder_lock);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		mutex_unlock(&binder_lock);
		/* put_files_struct() can sleep/recurse; do it unlocked. */
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

/*
 * binder_defer_work() - record deferred work flags for @proc and schedule
 * the shared worker if the proc is not already queued.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

/*
 * print_binder_transaction() - append a one-line description of @t to the
 * [buf, end) text buffer; returns the advanced buf.  Like the other
 * print_* helpers, buf may run past end (snprintf clamps the write) and
 * callers check buf >= end to stop early.
 */
static char *print_binder_transaction(char *buf, char *end, const char *prefix,
				      struct binder_transaction *t)
{
	buf += snprintf(buf, end - buf,
			"%s %d: %p from %d:%d to %d:%d code %x "
			"flags %x pri %ld r%d",
			prefix, t->debug_id, t,
			t->from ? t->from->proc->pid : 0,
			t->from ? t->from->pid : 0,
			t->to_proc ? t->to_proc->pid : 0,
			t->to_thread ? t->to_thread->pid : 0,
			t->code, t->flags, t->priority, t->need_reply);
	if (buf >= end)
		return buf;
	if (t->buffer == NULL) {
		buf += snprintf(buf, end - buf, " buffer free\n");
		return buf;
	}
	if (t->buffer->target_node) {
		buf += snprintf(buf, end - buf, " node %d",
				t->buffer->target_node->debug_id);
		if (buf >= end)
			return buf;
	}
	buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n",
			t->buffer->data_size, t->buffer->offsets_size,
			t->buffer->data);
	return buf;
}

/* print_binder_buffer() - one-line description of a transaction buffer. */
static char *print_binder_buffer(char *buf, char *end, const char *prefix,
				 struct binder_buffer *buffer)
{
	buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n",
			prefix, buffer->debug_id, buffer->data,
			buffer->data_size, buffer->offsets_size,
			buffer->transaction ? "active" : "delivered");
	return buf;
}

/*
 * print_binder_work() - describe one queued work item, dispatching on its
 * type; transactions are printed via print_binder_transaction().
 */
static char *print_binder_work(char *buf, char *end, const char *prefix,
			       const char *transaction_prefix,
			       struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		buf = print_binder_transaction(buf, end, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		buf += snprintf(buf, end - buf,
				"%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n",
				prefix, node->debug_id, node->ptr,
				node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		buf += snprintf(buf, end - buf,
				"%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		buf += snprintf(buf, end - buf,
				"%shas cleared death notification\n", prefix);
		break;
	default:
		buf += snprintf(buf, end - buf, "%sunknown work: type %d\n",
				prefix, w->type);
		break;
	}
	return buf;
}

/*
 * print_binder_thread() - dump one thread: looper state, its transaction
 * stack (classified as outgoing/incoming/bad) and pending todo work.
 * When @print_always is false and nothing beyond the header was printed,
 * the output is rolled back to @start_buf so idle threads are omitted.
 */
static char *print_binder_thread(char *buf, char *end,
				 struct binder_thread *thread,
				 int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	char *start_buf = buf;
	char *header_buf;

	buf += snprintf(buf, end - buf, "  thread %d: l %02x\n",
			thread->pid, thread->looper);
	header_buf = buf;
	t = thread->transaction_stack;
	while (t) {
		if (buf >= end)
			break;
		if (t->from == thread) {
			buf = print_binder_transaction(buf, end,
						"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			buf = print_binder_transaction(buf, end,
						"    incoming transaction", t);
			t = t->to_parent;
		} else {
			/* Stack entry references neither end of this thread:
			 * inconsistent state, report and stop walking. */
			buf = print_binder_transaction(buf, end,
						"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending transaction", w);
	}
	if (!print_always && buf == header_buf)
		buf = start_buf;
	return buf;
}

/*
 * print_binder_node() - dump one node: ref counts, the pids of processes
 * holding refs to it, and any pending async work.
 */
static char *print_binder_node(char *buf, char *end, struct binder_node *node)
{
	struct binder_ref *ref;
	struct hlist_node *pos;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
		count++;

	buf += snprintf(buf, end - buf,
			"  node %d: u%p c%p hs %d hw %d ls %d lw %d "
			"is %d iw %d",
			node->debug_id, node->ptr, node->cookie,
			node->has_strong_ref, node->has_weak_ref,
			node->local_strong_refs, node->local_weak_refs,
			node->internal_strong_refs, count);
	if (buf >= end)
		return buf;
	if (count) {
		buf += snprintf(buf, end - buf, " proc");
		if (buf >= end)
			return buf;
		hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
			buf += snprintf(buf, end - buf, " %d", ref->proc->pid);
			if (buf >= end)
				return buf;
		}
	}
	buf += snprintf(buf, end - buf, "\n");
	list_for_each_entry(w, &node->async_todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "    ",
					"    pending async transaction", w);
	}
	return buf;
}

/* print_binder_ref() - one-line description of an outgoing reference. */
static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref)
{
	buf += snprintf(buf, end - buf,
			"  ref %d: desc %d %snode %d s %d w %d d %p\n",
			ref->debug_id, ref->desc,
			ref->node->proc ? "" : "dead ", ref->node->debug_id,
			ref->strong, ref->weak, ref->death);
	return buf;
}

/*
 * print_binder_proc() - dump one process: its threads, nodes (all, or only
 * those with async work when !print_all), refs, allocated buffers, pending
 * todo work and delivered-death marker.  Rolls back to @start_buf when
 * !print_all produced nothing beyond the header.
 */
static char *print_binder_proc(char *buf, char *end,
			       struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	char *start_buf = buf;
	char *header_buf;

	buf += snprintf(buf, end - buf, "proc %d\n", proc->pid);
	header_buf = buf;

	for (n = rb_first(&proc->threads);
	     n != NULL && buf < end;
	     n = rb_next(n))
		buf = print_binder_thread(buf, end,
					  rb_entry(n, struct binder_thread,
						   rb_node), print_all);
	for (n = rb_first(&proc->nodes);
	     n != NULL && buf < end;
	     n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			buf = print_binder_node(buf, end, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL && buf < end;
		     n = rb_next(n))
			buf = print_binder_ref(buf, end,
					       rb_entry(n, struct binder_ref,
							rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers);
	     n != NULL && buf < end;
	     n = rb_next(n))
		buf = print_binder_buffer(buf, end, "  buffer",
					  rb_entry(n, struct binder_buffer,
						   rb_node));
	list_for_each_entry(w, &proc->todo, entry) {
		if (buf >= end)
			break;
		buf = print_binder_work(buf, end, "  ",
					"  pending transaction", w);
	}
	list_for_each_entry(w, &proc->delivered_death, entry) {
		if (buf >= end)
			break;
		/* Only note that at least one exists, then stop. */
		buf += snprintf(buf, end - buf,
				"  has delivered dead binder\n");
		break;
	}
	if (!print_all && buf == header_buf)
		buf = start_buf;
	return buf;
}

/* Names for BR_* return codes, indexed by _IOC_NR (parallels stats.br). */
static const char *binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

/* Names for BC_* commands, indexed by _IOC_NR (parallels stats.bc). */
static const char *binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE"
};

/* Names for the BINDER_STAT_* object classes. */
static const char *binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

/*
 * print_binder_stats() - dump non-zero BC/BR counters from @stats.
 * (Body continues past this chunk boundary.)
 */
static char *print_binder_stats(char *buf, char *end, const char *prefix,
				struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
			ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix,
binder_command_strings[i], 3399 stats->bc[i]); 3400 if (buf >= end) 3401 return buf; 3402 } 3403 3404 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3405 ARRAY_SIZE(binder_return_strings)); 3406 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3407 if (stats->br[i]) 3408 buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, 3409 binder_return_strings[i], stats->br[i]); 3410 if (buf >= end) 3411 return buf; 3412 } 3413 3414 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3415 ARRAY_SIZE(binder_objstat_strings)); 3416 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3417 ARRAY_SIZE(stats->obj_deleted)); 3418 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3419 if (stats->obj_created[i] || stats->obj_deleted[i]) 3420 buf += snprintf(buf, end - buf, 3421 "%s%s: active %d total %d\n", prefix, 3422 binder_objstat_strings[i], 3423 stats->obj_created[i] - 3424 stats->obj_deleted[i], 3425 stats->obj_created[i]); 3426 if (buf >= end) 3427 return buf; 3428 } 3429 return buf; 3430} 3431 3432static char *print_binder_proc_stats(char *buf, char *end, 3433 struct binder_proc *proc) 3434{ 3435 struct binder_work *w; 3436 struct rb_node *n; 3437 int count, strong, weak; 3438 3439 buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); 3440 if (buf >= end) 3441 return buf; 3442 count = 0; 3443 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3444 count++; 3445 buf += snprintf(buf, end - buf, " threads: %d\n", count); 3446 if (buf >= end) 3447 return buf; 3448 buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n" 3449 " ready threads %d\n" 3450 " free async space %zd\n", proc->requested_threads, 3451 proc->requested_threads_started, proc->max_threads, 3452 proc->ready_threads, proc->free_async_space); 3453 if (buf >= end) 3454 return buf; 3455 count = 0; 3456 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3457 count++; 3458 buf += snprintf(buf, end - buf, " nodes: %d\n", count); 3459 if (buf >= end) 3460 return buf; 3461 count = 0; 3462 strong = 0; 3463 
weak = 0; 3464 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3465 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3466 rb_node_desc); 3467 count++; 3468 strong += ref->strong; 3469 weak += ref->weak; 3470 } 3471 buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", 3472 count, strong, weak); 3473 if (buf >= end) 3474 return buf; 3475 3476 count = 0; 3477 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3478 count++; 3479 buf += snprintf(buf, end - buf, " buffers: %d\n", count); 3480 if (buf >= end) 3481 return buf; 3482 3483 count = 0; 3484 list_for_each_entry(w, &proc->todo, entry) { 3485 switch (w->type) { 3486 case BINDER_WORK_TRANSACTION: 3487 count++; 3488 break; 3489 default: 3490 break; 3491 } 3492 } 3493 buf += snprintf(buf, end - buf, " pending transactions: %d\n", count); 3494 if (buf >= end) 3495 return buf; 3496 3497 buf = print_binder_stats(buf, end, " ", &proc->stats); 3498 3499 return buf; 3500} 3501 3502 3503static int binder_read_proc_state(char *page, char **start, off_t off, 3504 int count, int *eof, void *data) 3505{ 3506 struct binder_proc *proc; 3507 struct hlist_node *pos; 3508 struct binder_node *node; 3509 int len = 0; 3510 char *buf = page; 3511 char *end = page + PAGE_SIZE; 3512 int do_lock = !binder_debug_no_lock; 3513 3514 if (off) 3515 return 0; 3516 3517 if (do_lock) 3518 mutex_lock(&binder_lock); 3519 3520 buf += snprintf(buf, end - buf, "binder state:\n"); 3521 3522 if (!hlist_empty(&binder_dead_nodes)) 3523 buf += snprintf(buf, end - buf, "dead nodes:\n"); 3524 hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) { 3525 if (buf >= end) 3526 break; 3527 buf = print_binder_node(buf, end, node); 3528 } 3529 3530 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3531 if (buf >= end) 3532 break; 3533 buf = print_binder_proc(buf, end, proc, 1); 3534 } 3535 if (do_lock) 3536 mutex_unlock(&binder_lock); 3537 if (buf > page + PAGE_SIZE) 3538 buf = page + PAGE_SIZE; 
3539 3540 *start = page + off; 3541 3542 len = buf - page; 3543 if (len > off) 3544 len -= off; 3545 else 3546 len = 0; 3547 3548 return len < count ? len : count; 3549} 3550 3551static int binder_read_proc_stats(char *page, char **start, off_t off, 3552 int count, int *eof, void *data) 3553{ 3554 struct binder_proc *proc; 3555 struct hlist_node *pos; 3556 int len = 0; 3557 char *p = page; 3558 int do_lock = !binder_debug_no_lock; 3559 3560 if (off) 3561 return 0; 3562 3563 if (do_lock) 3564 mutex_lock(&binder_lock); 3565 3566 p += snprintf(p, PAGE_SIZE, "binder stats:\n"); 3567 3568 p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats); 3569 3570 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3571 if (p >= page + PAGE_SIZE) 3572 break; 3573 p = print_binder_proc_stats(p, page + PAGE_SIZE, proc); 3574 } 3575 if (do_lock) 3576 mutex_unlock(&binder_lock); 3577 if (p > page + PAGE_SIZE) 3578 p = page + PAGE_SIZE; 3579 3580 *start = page + off; 3581 3582 len = p - page; 3583 if (len > off) 3584 len -= off; 3585 else 3586 len = 0; 3587 3588 return len < count ? len : count; 3589} 3590 3591static int binder_read_proc_transactions(char *page, char **start, off_t off, 3592 int count, int *eof, void *data) 3593{ 3594 struct binder_proc *proc; 3595 struct hlist_node *pos; 3596 int len = 0; 3597 char *buf = page; 3598 char *end = page + PAGE_SIZE; 3599 int do_lock = !binder_debug_no_lock; 3600 3601 if (off) 3602 return 0; 3603 3604 if (do_lock) 3605 mutex_lock(&binder_lock); 3606 3607 buf += snprintf(buf, end - buf, "binder transactions:\n"); 3608 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { 3609 if (buf >= end) 3610 break; 3611 buf = print_binder_proc(buf, end, proc, 0); 3612 } 3613 if (do_lock) 3614 mutex_unlock(&binder_lock); 3615 if (buf > page + PAGE_SIZE) 3616 buf = page + PAGE_SIZE; 3617 3618 *start = page + off; 3619 3620 len = buf - page; 3621 if (len > off) 3622 len -= off; 3623 else 3624 len = 0; 3625 3626 return len < count ? 
len : count; 3627} 3628 3629static int binder_read_proc_proc(char *page, char **start, off_t off, 3630 int count, int *eof, void *data) 3631{ 3632 struct binder_proc *proc = data; 3633 int len = 0; 3634 char *p = page; 3635 int do_lock = !binder_debug_no_lock; 3636 3637 if (off) 3638 return 0; 3639 3640 if (do_lock) 3641 mutex_lock(&binder_lock); 3642 p += snprintf(p, PAGE_SIZE, "binder proc state:\n"); 3643 p = print_binder_proc(p, page + PAGE_SIZE, proc, 1); 3644 if (do_lock) 3645 mutex_unlock(&binder_lock); 3646 3647 if (p > page + PAGE_SIZE) 3648 p = page + PAGE_SIZE; 3649 *start = page + off; 3650 3651 len = p - page; 3652 if (len > off) 3653 len -= off; 3654 else 3655 len = 0; 3656 3657 return len < count ? len : count; 3658} 3659 3660static char *print_binder_transaction_log_entry(char *buf, char *end, 3661 struct binder_transaction_log_entry *e) 3662{ 3663 buf += snprintf(buf, end - buf, 3664 "%d: %s from %d:%d to %d:%d node %d handle %d " 3665 "size %d:%d\n", 3666 e->debug_id, (e->call_type == 2) ? "reply" : 3667 ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, 3668 e->from_thread, e->to_proc, e->to_thread, e->to_node, 3669 e->target_handle, e->data_size, e->offsets_size); 3670 return buf; 3671} 3672 3673static int binder_read_proc_transaction_log( 3674 char *page, char **start, off_t off, int count, int *eof, void *data) 3675{ 3676 struct binder_transaction_log *log = data; 3677 int len = 0; 3678 int i; 3679 char *buf = page; 3680 char *end = page + PAGE_SIZE; 3681 3682 if (off) 3683 return 0; 3684 3685 if (log->full) { 3686 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) { 3687 if (buf >= end) 3688 break; 3689 buf = print_binder_transaction_log_entry(buf, end, 3690 &log->entry[i]); 3691 } 3692 } 3693 for (i = 0; i < log->next; i++) { 3694 if (buf >= end) 3695 break; 3696 buf = print_binder_transaction_log_entry(buf, end, 3697 &log->entry[i]); 3698 } 3699 3700 *start = page + off; 3701 3702 len = buf - page; 3703 if (len > off) 3704 len -= off; 3705 else 3706 len = 0; 3707 3708 return len < count ? len : count; 3709} 3710 3711static const struct file_operations binder_fops = { 3712 .owner = THIS_MODULE, 3713 .poll = binder_poll, 3714 .unlocked_ioctl = binder_ioctl, 3715 .mmap = binder_mmap, 3716 .open = binder_open, 3717 .flush = binder_flush, 3718 .release = binder_release, 3719}; 3720 3721static struct miscdevice binder_miscdev = { 3722 .minor = MISC_DYNAMIC_MINOR, 3723 .name = "binder", 3724 .fops = &binder_fops 3725}; 3726 3727static int __init binder_init(void) 3728{ 3729 int ret; 3730 3731 binder_proc_dir_entry_root = proc_mkdir("binder", NULL); 3732 if (binder_proc_dir_entry_root) 3733 binder_proc_dir_entry_proc = proc_mkdir("proc", 3734 binder_proc_dir_entry_root); 3735 ret = misc_register(&binder_miscdev); 3736 if (binder_proc_dir_entry_root) { 3737 create_proc_read_entry("state", 3738 S_IRUGO, 3739 binder_proc_dir_entry_root, 3740 binder_read_proc_state, 3741 NULL); 3742 create_proc_read_entry("stats", 3743 S_IRUGO, 3744 binder_proc_dir_entry_root, 3745 
binder_read_proc_stats, 3746 NULL); 3747 create_proc_read_entry("transactions", 3748 S_IRUGO, 3749 binder_proc_dir_entry_root, 3750 binder_read_proc_transactions, 3751 NULL); 3752 create_proc_read_entry("transaction_log", 3753 S_IRUGO, 3754 binder_proc_dir_entry_root, 3755 binder_read_proc_transaction_log, 3756 &binder_transaction_log); 3757 create_proc_read_entry("failed_transaction_log", 3758 S_IRUGO, 3759 binder_proc_dir_entry_root, 3760 binder_read_proc_transaction_log, 3761 &binder_transaction_log_failed); 3762 } 3763 return ret; 3764} 3765 3766device_initcall(binder_init); 3767 3768MODULE_LICENSE("GPL v2"); 3769