/* binder.c revision 17cf22c33e1f1b5e435469c84e43872579497653 */
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>

#include "binder.h"

/* Global driver locks: binder_lock serializes the whole driver;
 * binder_deferred_lock guards the deferred-work list; binder_mmap_lock
 * guards mmap/vma setup. */
static DEFINE_MUTEX(binder_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

/* All live procs, procs with pending deferred work, and nodes whose
 * owning proc has died. */
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* The single context manager (handle 0) node and the uid allowed to set it. */
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
/* Monotonic debug-id counter shared by nodes, refs and transactions. */
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

/* Boilerplate generator for a single-record debugfs file backed by
 * binder_<name>_show(). */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* Userspace must never map the binder buffer writable. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bit flags for the debug_mask module parameter. */
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* When set, the debugfs readers walk proc state without taking binder_lock. */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

/* Setter for the stop_on_user_error module parameter; waking the queue
 * releases threads that were parked while the value was >= 2. */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;
	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* Conditional pr_info gated on binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/* Log a userspace protocol violation; optionally freeze offending
 * threads (binder_stop_on_user_error == 2) for debugging. */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Object categories counted in struct binder_stats. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

/* Per-driver (and per-proc/thread) counters: BR_* return codes delivered,
 * BC_* commands received, and object create/delete counts. */
struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

/* One record in the (small, circular) transaction log exposed via debugfs. */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/* Claim (and zero) the next slot in a circular transaction log,
 * wrapping and marking the log full when the end is reached. */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

/* A queued work item; embedded in nodes, transactions and death
 * notifications and linked onto a proc/thread todo list. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* Kernel-side identity of a userspace binder object, owned by one proc
 * (rb_node in proc->nodes) until the proc dies (dead_node list). */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	void __user *ptr;
	void __user *cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

/* Pending death notification registered against a remote node. */
struct binder_ref_death {
	struct binder_work work;
	void __user *cookie;
};

struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};

/* A region of the per-proc mmap'd transaction buffer; payload follows
 * the header in data[]. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];
};

/* Work deferred to the binder workqueue, recorded per-proc as a bitmask. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/* Per-process binder state: thread/node/ref trees, the mmap'd buffer
 * allocator, and the shared todo queue. */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	/* offset to translate a kernel buffer address to its userspace
	 * mapping (user addr = kernel addr + user_buffer_offset) */
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

/* Bits of binder_thread->looper describing the thread's loop state. */
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per-thread binder state, keyed by pid within proc->threads. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* An in-flight transaction; from/to threads are stacked via *_parent to
 * support nested (recursive) calls. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/* Allocate an unused fd in the *target* proc's file table (bounded by
 * its RLIMIT_NOFILE), for passing a file descriptor in a transaction. */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

/* Apply the target transaction priority to the current thread, capping
 * at the RLIMIT_NICE floor when the requested nice is not permitted. */
static void binder_set_nice(long nice)
{
	long min_nice;
	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "binder: %d: nice value %ld not allowed use "
		     "%ld instead\n", current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice < 20)
		return;
	binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
}

/* Size of the data area of @buffer: buffers are laid out back to back in
 * proc->buffers (address order), so the size is the gap to the next
 * buffer header, or to the end of the mapping for the last buffer. */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	else
		return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

/* Insert a free buffer into proc->free_buffers, an rb-tree keyed by
 * buffer size (best-fit allocation support). */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: add free buffer, size %zd, "
		     "at %p\n", proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* Insert an allocated buffer into proc->allocated_buffers, an rb-tree
 * keyed by buffer address (for lookup on BC_FREE_BUFFER). */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/* Translate a userspace data pointer back to its kernel binder_buffer
 * (subtract the user/kernel mapping offset and the header) and look it
 * up in the allocated tree; NULL if it is not a live allocation. */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  void __user *user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/* Allocate (@allocate != 0) or free pages backing [start, end) of the
 * binder buffer, mapping each page into both the kernel address space
 * and the target process's vma. On allocation failure the labels inside
 * the free loop unwind exactly the work already done for the failing
 * page, then the loop frees all earlier pages. Takes mmap_sem when
 * called without a vma (i.e. outside ->mmap). Returns 0 or -ENOMEM. */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("binder: %d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("binder: %d: binder_alloc_buf failed to "
		       "map pages in userspace, no vma\n", proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		struct page **page_array_ptr;
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("binder: %d: binder_alloc_buf failed "
			       "for page at %p\n", proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		page_array_ptr = page;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
		if (ret) {
			pr_err("binder: %d: binder_alloc_buf failed "
			       "to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("binder: %d: binder_alloc_buf failed "
			       "to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/* Best-fit allocate a buffer of data_size + offsets_size (each aligned
 * to pointer size) from proc's free tree, backing it with pages as
 * needed and splitting off any usable remainder as a new free buffer.
 * Async allocations are additionally limited by free_async_space.
 * Returns NULL on any failure (no vma, overflow, no space, no memory). */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("binder: %d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* ALIGN may wrap; a result smaller than either input means the
	 * caller passed absurd sizes. */
	if (size < data_size || size < offsets_size) {
		binder_user_error("binder: %d: got transaction with invalid "
			"size %zd-%zd\n", proc->pid, data_size, offsets_size);
		return NULL;
	}

	if (is_async &&
	    proc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: binder_alloc_buf size %zd"
			     "failed, no async space left\n", proc->pid, size);
		return NULL;
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_buffer_size(proc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		pr_err("binder: %d: binder_alloc_buf size %zd failed, "
		       "no address space\n", proc->pid, size);
		return NULL;
	}
	/* n == NULL means no exact match; use the best (smallest
	 * sufficient) candidate found. */
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_buffer_size(proc, buffer);
	}

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got buff"
		     "er %p size %zd\n", proc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		/* only split if the remainder can hold a header plus a
		 * minimal payload */
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	if (binder_update_page_range(proc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
		return NULL;

	rb_erase(best_fit, &proc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer(proc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(proc, new_buffer);
	}
	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_alloc_buf size %zd got "
		     "%p\n", proc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	if (is_async) {
		proc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_alloc_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	return buffer;
}

/* Page containing the buffer header. */
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

/* Page containing the last byte of the buffer header. */
static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

/* Remove a free buffer that is being merged with an adjacent free
 * neighbor, releasing only the backing pages it does not share with
 * the buffers before/after it. */
static void binder_delete_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(proc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: merge free, buffer %p "
			     "share page with %p\n", proc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			/* NOTE(review): this message prints "prev" although
			 * this is the "next" branch; it likely should print
			 * next (fixed later upstream) — confirm before
			 * relying on this log output. */
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "binder: %d: merge free, buffer"
				     " %p share page with %p\n", proc->pid,
				     buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "binder: %d: merge free, buffer %p do "
			     "not share page%s%s with with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

/* Return an allocated buffer to the free pool: release its data pages,
 * coalesce with free neighbors (next first, then prev), and reinsert
 * the resulting buffer into the free-by-size tree. Restores async
 * space accounting for async buffers. */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "binder: %d: binder_free_buf %p size %zd buffer"
		     "_size %zd\n", proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "binder: %d: binder_free_buf size %zd "
			     "async free %zd\n", proc->pid, size,
			     proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/* Find the node for a userspace binder pointer in proc->nodes (rb-tree
 * keyed by the userspace ptr); NULL if not registered. */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   void __user *ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/* Create and register a node for (ptr, cookie) in proc->nodes.
 * Returns NULL if a node for ptr already exists or on allocation
 * failure. */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   void __user *ptr,
					   void __user *cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d:%d node %d u%p c%p created\n",
		     proc->pid, current->pid, node->debug_id,
		     node->ptr, node->cookie);
	return node;
}

/* Take a strong or weak reference on @node. internal distinguishes
 * ref-held counts from local (proc-held) counts. When the first strong
 * or weak ref appears and userspace has not been told yet, the node's
 * work item is queued on @target_list so BR_INCREFS/BR_ACQUIRE can be
 * delivered; refusing the inc (-EINVAL) when that queue is required
 * but absent. */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("binder: invalid inc strong "
					"node for %d\n", node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("binder: invalid inc weak node "
					"for %d\n", node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/* Drop a strong or weak reference on @node. When the last reference of
 * the relevant kind goes away: if userspace still holds a ref, queue
 * the node work so BR_RELEASE/BR_DECREFS can be sent; otherwise, once
 * no refs of any kind remain, unlink the node (from its proc's tree or
 * the dead-nodes list) and free it. */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "binder: dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


/* Look up proc's ref for a userspace handle (desc) in refs_by_desc;
 * NULL if the handle is unknown. */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/* Return proc's existing ref to @node, or create one: allocate the
 * lowest unused descriptor (0 is reserved for the context manager),
 * insert into both refs_by_node and refs_by_desc trees, and link onto
 * the node's ref list. NULL on allocation failure. */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* scan refs in desc order to find the first gap */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "node %d\n", proc->pid, new_ref->debug_id,
			     new_ref->desc, node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "binder: %d new ref %d desc %d for "
			     "dead node\n", proc->pid, new_ref->debug_id,
			     new_ref->desc);
	}
	return new_ref;
}

/* Tear down a ref: unlink it from both trees and the node's list,
 * dropping the node references it held (strong if any, plus the weak
 * ref every binder_ref implies), and discard any pending death
 * notification registered through it. */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "binder: %d delete ref %d desc %d for "
		     "node %d\n", ref->proc->pid, ref->debug_id,
		     ref->desc, ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder: %d delete ref %d desc %d "
			     "has death notification\n", ref->proc->pid,
			     ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
1095static int binder_inc_ref(struct binder_ref *ref, int strong, 1096 struct list_head *target_list) 1097{ 1098 int ret; 1099 if (strong) { 1100 if (ref->strong == 0) { 1101 ret = binder_inc_node(ref->node, 1, 1, target_list); 1102 if (ret) 1103 return ret; 1104 } 1105 ref->strong++; 1106 } else { 1107 if (ref->weak == 0) { 1108 ret = binder_inc_node(ref->node, 0, 1, target_list); 1109 if (ret) 1110 return ret; 1111 } 1112 ref->weak++; 1113 } 1114 return 0; 1115} 1116 1117 1118static int binder_dec_ref(struct binder_ref *ref, int strong) 1119{ 1120 if (strong) { 1121 if (ref->strong == 0) { 1122 binder_user_error("binder: %d invalid dec strong, " 1123 "ref %d desc %d s %d w %d\n", 1124 ref->proc->pid, ref->debug_id, 1125 ref->desc, ref->strong, ref->weak); 1126 return -EINVAL; 1127 } 1128 ref->strong--; 1129 if (ref->strong == 0) { 1130 int ret; 1131 ret = binder_dec_node(ref->node, strong, 1); 1132 if (ret) 1133 return ret; 1134 } 1135 } else { 1136 if (ref->weak == 0) { 1137 binder_user_error("binder: %d invalid dec weak, " 1138 "ref %d desc %d s %d w %d\n", 1139 ref->proc->pid, ref->debug_id, 1140 ref->desc, ref->strong, ref->weak); 1141 return -EINVAL; 1142 } 1143 ref->weak--; 1144 } 1145 if (ref->strong == 0 && ref->weak == 0) 1146 binder_delete_ref(ref); 1147 return 0; 1148} 1149 1150static void binder_pop_transaction(struct binder_thread *target_thread, 1151 struct binder_transaction *t) 1152{ 1153 if (target_thread) { 1154 BUG_ON(target_thread->transaction_stack != t); 1155 BUG_ON(target_thread->transaction_stack->from != target_thread); 1156 target_thread->transaction_stack = 1157 target_thread->transaction_stack->from_parent; 1158 t->from = NULL; 1159 } 1160 t->need_reply = 0; 1161 if (t->buffer) 1162 t->buffer->transaction = NULL; 1163 kfree(t); 1164 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1165} 1166 1167static void binder_send_failed_reply(struct binder_transaction *t, 1168 uint32_t error_code) 1169{ 1170 struct binder_thread *target_thread; 
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/* Preserve an already-pending error by shifting it
			 * into return_error2 before storing the new one. */
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "binder: send failed reply for "
					     "transaction %d to %d:%d\n",
					     t->debug_id, target_thread->proc->pid,
					     target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* Both error slots already occupied. */
				pr_err("binder: reply failed, target "
					"thread, %d:%d, has error code %d "
					"already\n", target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "binder: send failed reply "
				     "for transaction %d, target dead\n",
				     t->debug_id);

			/* target_thread is NULL here, so this only frees t. */
			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "binder: reply failed,"
					     " no target thread at root\n");
				return;
			}
			/* Retry one level up the call chain. */
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: reply failed, no target "
				     "thread -- retry %d\n", t->debug_id);
		}
	}
}

/*
 * binder_transaction_buffer_release() - undo the reference/fd side effects
 * of the objects embedded in @buffer.
 * @failed_at: NULL to release every object (normal free path), or a pointer
 *             into the offsets array marking how far binder_transaction()
 *             got before failing — only objects before it are released, and
 *             only then are installed fds closed.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      size_t *failed_at)
{
	size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		/* Drop the strong node ref taken when the buffer was filled. */
		binder_dec_node(buffer->target_node, 1, 0);

	/* The offsets array lives after the data, pointer-aligned. */
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* Skip (but log) malformed offsets rather than aborting the
		 * whole release. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			pr_err("binder: transaction release %d bad"
					"offset %zd, size %zd\n", debug_id,
					*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("binder: transaction release %d"
				       " bad node %p\n", debug_id, fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%p\n",
				     node->debug_id, node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				pr_err("binder: transaction release %d"
				       " bad handle %ld\n", debug_id,
				       fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %ld\n", fp->handle);
			/* Only close fds when unwinding a failed transaction;
			 * on the normal path the receiver owns them. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("binder: transaction release %d bad "
			       "object type %lx\n", debug_id, fp->type);
			break;
		}
	}
}

/*
 * binder_transaction() - core of BC_TRANSACTION/BC_REPLY: resolve the
 * target, copy the payload into a buffer in the target process, translate
 * embedded binder objects/handles/fds, and queue the work.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	size_t *offp, *off_end;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	/* Record the transaction in the global debug log. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); /* 2=reply 1=async 0=call */
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		/* A reply targets the transaction on top of our stack. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("binder: %d:%d got reply transaction "
					  "with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		/* Restore the priority saved when the call was received. */
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad transaction stack,"
				" transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("binder: %d:%d got reply transaction "
				"with bad target transaction stack %d, "
				"expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		/* New transaction: resolve the handle to a node, or use the
		 * context manager for handle 0. */
		if (tr->target.handle) {
			struct binder_ref *ref;
			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d got "
					"transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		/* For synchronous calls, look for a thread in the target
		 * process already blocked on us in the call chain so the
		 * nested call can be delivered to it directly. */
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;
			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("binder: %d:%d got new "
					"transaction with bad transaction stack"
					", transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	/* Queue on the specific thread when one was found, otherwise on the
	 * process-wide todo list. */
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_REPLY %d -> %d:%d, "
			     "data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d BC_TRANSACTION %d -> "
			     "%d - node %d, data %p-%p size %zd-%zd\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     tr->data.ptr.buffer, tr->data.ptr.offsets,
			     tr->data_size, tr->offsets_size);

	/* from is only needed when a reply will come back to us. */
	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = proc->tsk->cred->euid;
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags =
	    tr->flags;
	t->priority = task_nice(current);
	/* Allocate the payload buffer in the TARGET process's mapping. */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	/* Offsets array follows the data, pointer-aligned. */
	offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"data ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("binder: %d:%d got transaction with invalid "
			"offsets ptr\n", proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
		binder_user_error("binder: %d:%d got transaction with "
			"invalid offsets size, %zd\n",
			proc->pid, thread->pid, tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	/* Translate every flat_binder_object referenced by the offsets
	 * array into the target process's namespace. */
	off_end = (void *)offp + tr->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
			binder_user_error("binder: %d:%d got transaction with "
				"invalid offset, %zd\n",
				proc->pid, thread->pid, *offp);
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case
BINDER_TYPE_WEAK_BINDER: { 1515 struct binder_ref *ref; 1516 struct binder_node *node = binder_get_node(proc, fp->binder); 1517 if (node == NULL) { 1518 node = binder_new_node(proc, fp->binder, fp->cookie); 1519 if (node == NULL) { 1520 return_error = BR_FAILED_REPLY; 1521 goto err_binder_new_node_failed; 1522 } 1523 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; 1524 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); 1525 } 1526 if (fp->cookie != node->cookie) { 1527 binder_user_error("binder: %d:%d sending u%p " 1528 "node %d, cookie mismatch %p != %p\n", 1529 proc->pid, thread->pid, 1530 fp->binder, node->debug_id, 1531 fp->cookie, node->cookie); 1532 goto err_binder_get_ref_for_node_failed; 1533 } 1534 ref = binder_get_ref_for_node(target_proc, node); 1535 if (ref == NULL) { 1536 return_error = BR_FAILED_REPLY; 1537 goto err_binder_get_ref_for_node_failed; 1538 } 1539 if (fp->type == BINDER_TYPE_BINDER) 1540 fp->type = BINDER_TYPE_HANDLE; 1541 else 1542 fp->type = BINDER_TYPE_WEAK_HANDLE; 1543 fp->handle = ref->desc; 1544 binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, 1545 &thread->todo); 1546 1547 binder_debug(BINDER_DEBUG_TRANSACTION, 1548 " node %d u%p -> ref %d desc %d\n", 1549 node->debug_id, node->ptr, ref->debug_id, 1550 ref->desc); 1551 } break; 1552 case BINDER_TYPE_HANDLE: 1553 case BINDER_TYPE_WEAK_HANDLE: { 1554 struct binder_ref *ref = binder_get_ref(proc, fp->handle); 1555 if (ref == NULL) { 1556 binder_user_error("binder: %d:%d got " 1557 "transaction with invalid " 1558 "handle, %ld\n", proc->pid, 1559 thread->pid, fp->handle); 1560 return_error = BR_FAILED_REPLY; 1561 goto err_binder_get_ref_failed; 1562 } 1563 if (ref->node->proc == target_proc) { 1564 if (fp->type == BINDER_TYPE_HANDLE) 1565 fp->type = BINDER_TYPE_BINDER; 1566 else 1567 fp->type = BINDER_TYPE_WEAK_BINDER; 1568 fp->binder = ref->node->ptr; 1569 fp->cookie = ref->node->cookie; 1570 binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 
						0, NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%p\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     ref->node->ptr);
			} else {
				/* Third-party object: mint a handle for it
				 * in the target process. */
				struct binder_ref *new_ref;
				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			/* Duplicate the sender's fd into the target process,
			 * if the target opted in to receiving fds. */
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %ld -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("binder: %d:%d got transactio"
				"n with invalid object type, %lx\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		/* The original call is complete: pop it from the caller. */
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous call: push onto our transaction stack so the
		 * reply can find its way back. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/* Async: serialize per node — only one async transaction is
		 * in flight at a time, the rest wait on async_todo. */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

/* Unwind in reverse order of construction; labels fall through. */
err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	/* offp marks how far translation got; release only that much. */
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"binder: %d:%d transaction failed %d, size %zd-%zd\n", 1688 proc->pid, thread->pid, return_error, 1689 tr->data_size, tr->offsets_size); 1690 1691 { 1692 struct binder_transaction_log_entry *fe; 1693 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1694 *fe = *e; 1695 } 1696 1697 BUG_ON(thread->return_error != BR_OK); 1698 if (in_reply_to) { 1699 thread->return_error = BR_TRANSACTION_COMPLETE; 1700 binder_send_failed_reply(in_reply_to, return_error); 1701 } else 1702 thread->return_error = return_error; 1703} 1704 1705int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, 1706 void __user *buffer, int size, signed long *consumed) 1707{ 1708 uint32_t cmd; 1709 void __user *ptr = buffer + *consumed; 1710 void __user *end = buffer + size; 1711 1712 while (ptr < end && thread->return_error == BR_OK) { 1713 if (get_user(cmd, (uint32_t __user *)ptr)) 1714 return -EFAULT; 1715 ptr += sizeof(uint32_t); 1716 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1717 binder_stats.bc[_IOC_NR(cmd)]++; 1718 proc->stats.bc[_IOC_NR(cmd)]++; 1719 thread->stats.bc[_IOC_NR(cmd)]++; 1720 } 1721 switch (cmd) { 1722 case BC_INCREFS: 1723 case BC_ACQUIRE: 1724 case BC_RELEASE: 1725 case BC_DECREFS: { 1726 uint32_t target; 1727 struct binder_ref *ref; 1728 const char *debug_string; 1729 1730 if (get_user(target, (uint32_t __user *)ptr)) 1731 return -EFAULT; 1732 ptr += sizeof(uint32_t); 1733 if (target == 0 && binder_context_mgr_node && 1734 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1735 ref = binder_get_ref_for_node(proc, 1736 binder_context_mgr_node); 1737 if (ref->desc != target) { 1738 binder_user_error("binder: %d:" 1739 "%d tried to acquire " 1740 "reference to desc 0, " 1741 "got %d instead\n", 1742 proc->pid, thread->pid, 1743 ref->desc); 1744 } 1745 } else 1746 ref = binder_get_ref(proc, target); 1747 if (ref == NULL) { 1748 binder_user_error("binder: %d:%d refcou" 1749 "nt change on invalid ref %d\n", 1750 proc->pid, thread->pid, target); 
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(ref, 0);
				break;
			}
			/* NOTE(review): ref may have been freed by
			 * binder_dec_ref() when both counts hit zero;
			 * it is still dereferenced for the debug print
			 * below — confirm binder_debug is compiled out
			 * or this is revisited. */
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid, debug_string, ref->debug_id,
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			/* Userspace acknowledges a BR_INCREFS/BR_ACQUIRE. */
			void __user *node_ptr;
			void *cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			if (get_user(cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("binder: %d:%d "
					"%s u%p no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("binder: %d:%d %s u%p node %d"
					" cookie mismatch %p != %p\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					node_ptr, node->debug_id,
					cookie, node->cookie);
				break;
			}
			/* The ack must match a pending request of the same
			 * strength. */
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_ACQUIRE_DONE node %d has "
						"no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("binder: %d:%d "
						"BC_INCREFS_DONE node %d has "
						"no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			/* Drop the local ref taken when the BR_* was sent. */
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "binder: %d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("binder: BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("binder: BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			/* Userspace is done with a received buffer. */
			void __user *data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p no match\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("binder: %d:%d "
					"BC_FREE_BUFFER u%p matched "
					"unreturned buffer\n",
					proc->pid, thread->pid, data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			/* Freeing an async buffer unblocks the next queued
			 * async transaction for the same node, if any. */
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
			break;
		}

		case BC_REGISTER_LOOPER:
			/* Thread spawned in response to a driver request. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_REGISTER_LOOPER called "
					"without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			/* Thread entered its loop of its own accord. */
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("binder: %d:%d ERROR:"
					" BC_ENTER_LOOPER called after "
					"BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "binder: %d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			void __user *cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			ref = binder_get_ref(proc, target);
			if (ref == NULL) {
				binder_user_error("binder: %d:%d %s "
					"invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/* Only one death notification per ref. */
				if (ref->death) {
					binder_user_error("binder: %d:%"
						"d BC_REQUEST_DEATH_NOTI"
						"FICATION death notific"
						"ation already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "binder: %d:%d "
						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				/* Node owner already dead: deliver the
				 * notification immediately. */
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("binder: %d:%"
						"d BC_CLEAR_DEATH_NOTIFI"
						"CATION death notificat"
						"ion cookie mismatch "
						"%p != %p\n",
						proc->pid, thread->pid,
						death->cookie, cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					/* Not yet delivered: send a clear
					 * confirmation to userspace. */
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					}
					else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/* Already queued as a dead-binder
					 * notification: mark it so delivery
					 * also reports the clear. */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			/* Userspace acknowledges a delivered BR_DEAD_BINDER;
			 * match it by cookie on the delivered_death list. */
			struct binder_work *w;
			void __user *cookie;
			struct binder_ref_death *death = NULL;
			if (get_user(cookie, (void __user * __user *)ptr))
				return -EFAULT;

			ptr += sizeof(void *);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
				     proc->pid, thread->pid, cookie, death);
			if (death == NULL) {
				binder_user_error("binder: %d:%d BC_DEAD"
					"_BINDER_DONE %p not found\n",
					proc->pid, thread->pid, cookie);
				break;
			}

			list_del_init(&death->work.entry);
			/* A clear raced with delivery: now finish the clear. */
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("binder: %d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		/* Command fully handled; report progress to userspace. */
		*consumed = ptr - buffer;
	}
	return 0;
}

/*
 * binder_stat_br() - account a BR_* return command in the global,
 * per-process and per-thread statistics.
 */
void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
		    uint32_t cmd)
{
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}
/*
 * binder_has_proc_work() - work is available for a thread servicing the
 * process-wide queue, or the thread has been asked to return to userspace.
 */
static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_has_thread_work() - work is queued on this thread, an error code
 * is pending delivery, or the thread must return to userspace.
 */
static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_thread_read() - fill the userspace read buffer with BR_* commands.
 *
 * Called with binder_lock held (dropped around the blocking wait, then
 * re-taken). Delivers pending errors first, then drains the thread todo
 * list (or the proc todo list when this thread has no private work),
 * translating each binder_work item into the corresponding BR_* command
 * and, for transactions, a binder_transaction_data payload.
 *
 * @buffer/@size:  userspace destination buffer.
 * @consumed:      in: offset already filled; out: bytes written.
 * @non_block:     when set, return -EAGAIN instead of sleeping.
 *
 * Returns 0 on success, -EFAULT on copyout failure, or the
 * wait_event_interruptible() error (e.g. -ERESTARTSYS).
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      void __user *buffer, int size,
			      signed long *consumed, int non_block)
{
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Always lead with a BR_NOOP so userspace can restart parsing. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/* Only threads with no private work and no transaction in flight
	 * may service the process-wide todo queue. */
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	/* Pending error codes take priority over any queued work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;
	/* Drop the global lock while (possibly) blocking. */
	mutex_unlock(&binder_lock);
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("binder: %d:%d ERROR: Thread waiting "
				"for process work before calling BC_REGISTER_"
				"LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* exclusive: wake only one waiter per work item */
			ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
	}
	mutex_lock(&binder_lock);
	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo))
			w = list_first_entry(&thread->todo, struct binder_work, entry);
		else if (!list_empty(&proc->todo) && wait_for_proc_work)
			w = list_first_entry(&proc->todo, struct binder_work, entry);
		else {
			if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
				goto retry;
			break;
		}

		/* Need room for a command word plus a transaction payload. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			/* Derive desired ref state and tell userspace about
			 * at most one transition per pass; the work item is
			 * only removed once the state is fully settled. */
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);
				if (put_user(node->cookie, (void * __user *)ptr))
					return -EFAULT;
				ptr += sizeof(void *);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "binder: %d:%d %s %d u%p c%p\n",
					     proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
			} else {
				/* No transition needed: either drop a node
				 * with no refs left, or keep it unchanged. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p deleted\n",
						     proc->pid, thread->pid, node->debug_id,
						     node->ptr, node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "binder: %d:%d node %d u%p c%p state unchanged\n",
						     proc->pid, thread->pid, node->debug_id, node->ptr,
						     node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie, (void * __user *)ptr))
				return -EFAULT;
			ptr += sizeof(void *);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "binder: %d:%d %s %p\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep it until BC_DEAD_BINDER_DONE acks it. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Translate the transaction into userspace form. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = NULL;
			tr.cookie = NULL;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;
			/* pid as seen from the reader's pid namespace */
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* The buffer is shared; hand userspace its mapping of it by
		 * offsetting the kernel address by user_buffer_offset. */
		tr.data.ptr.buffer = (void *)t->buffer->data +
					proc->user_buffer_offset;
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "binder: %d:%d %s %d %d:%d, cmd %d"
			     "size %zd-%zd ptr %p-%p\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     tr.data.ptr.buffer, tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous call: park it on this thread's stack
			 * until the reply comes back. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper thread if the pool is
	 * exhausted and the configured maximum allows it. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "binder: %d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
	}
	return 0;
}

/*
 * binder_release_work() - drain and free a todo list that will never be
 * delivered (thread/process teardown). Synchronous transactions get a
 * BR_DEAD_REPLY sent back to the caller; everything else is freed.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;
	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					     "binder: undelivered transaction %d\n",
					     t->debug_id);
				t->buffer->transaction = NULL;
				kfree(t);
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "binder: undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "binder: undelivered death notification, %p\n",
				     death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("binder: unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}

/*
 * binder_get_thread() - find the binder_thread for the current task in
 * proc's thread rbtree (keyed by pid), creating and inserting one on
 * first use. Returns NULL only on allocation failure.
 * Caller holds binder_lock.
 */
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p =
&(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		/* Not found: allocate, initialize and insert a new thread. */
		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		/* New threads must return to userspace before blocking. */
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}

/*
 * binder_free_thread() - tear down a binder_thread on exit/release.
 * Unlinks the thread from every transaction on its stack (sending a
 * BR_DEAD_REPLY for an incoming transaction awaiting a reply), drains
 * its todo list, and frees it.
 * Returns the number of transactions that were still active.
 * Caller holds binder_lock.
 */
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	/* Only the top-of-stack incoming transaction gets a dead reply;
	 * binder_send_failed_reply() walks the rest of that chain. */
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "binder: release %d:%d transaction %d "
			     "%s, still active\n", proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();	/* transaction stack is corrupt */
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	kfree(thread);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}

/*
 * binder_poll() - poll/select support. Reports POLLIN when the queue
 * this thread would read from (proc-wide or thread-private) has work.
 * NOTE(review): binder_get_thread() can return NULL on allocation
 * failure and is dereferenced unconditionally here — upstream later
 * hardened this path; confirm against newer revisions.
 */
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	mutex_lock(&binder_lock);
	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;
	mutex_unlock(&binder_lock);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

/*
 * binder_ioctl() - main driver entry point: BINDER_WRITE_READ plus the
 * configuration ioctls. Serialized by binder_lock; may pause first if a
 * debug "stop on user error" condition is active.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		return ret;

	mutex_lock(&binder_lock);
	thread =
binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ: {
		/* Combined write-then-read: consume BC_* commands from the
		 * write buffer, then fill the read buffer with BR_* work. */
		struct binder_write_read bwr;
		if (size != sizeof(struct binder_write_read)) {
			ret = -EINVAL;
			goto err;
		}
		if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
			     proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
			     bwr.read_size, bwr.read_buffer);

		if (bwr.write_size > 0) {
			ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
			if (ret < 0) {
				/* Report consumed counts back even on error
				 * so userspace knows how far it got. */
				bwr.read_consumed = 0;
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		if (bwr.read_size > 0) {
			ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
			/* Work may remain on the proc queue; wake another
			 * waiting looper thread to pick it up. */
			if (!list_empty(&proc->todo))
				wake_up_interruptible(&proc->wait);
			if (ret < 0) {
				if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
					ret = -EFAULT;
				goto err;
			}
		}
		binder_debug(BINDER_DEBUG_READ_WRITE,
			     "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
			     proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
			     bwr.read_consumed, bwr.read_size);
		if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_SET_MAX_THREADS:
		/* NOTE(review): returns -EINVAL (not -EFAULT) on a failed
		 * copy_from_user; long-standing quirk kept for ABI. */
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		/* Register this process as the context manager (handle 0).
		 * Only one may exist; a previously-set uid must match. */
		if (binder_context_mgr_node != NULL) {
			pr_err("binder: BINDER_SET_CONTEXT_MGR already set\n");
			ret = -EBUSY;
			goto err;
		}
		if (uid_valid(binder_context_mgr_uid)) {
			if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
				pr_err("binder: BINDER_SET_"
				       "CONTEXT_MGR bad uid %d != %d\n",
				       from_kuid(&init_user_ns, current->cred->euid),
				       from_kuid(&init_user_ns, binder_context_mgr_uid));
				ret = -EPERM;
				goto err;
			}
		} else
			binder_context_mgr_uid = current->cred->euid;
		binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
		if (binder_context_mgr_node == NULL) {
			ret = -ENOMEM;
			goto err;
		}
		/* Pin the manager node so it is never torn down by normal
		 * refcounting. */
		binder_context_mgr_node->local_weak_refs++;
		binder_context_mgr_node->local_strong_refs++;
		binder_context_mgr_node->has_strong_ref = 1;
		binder_context_mgr_node->has_weak_ref = 1;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;	/* freed; don't touch it in the err path */
		break;
	case BINDER_VERSION:
		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	mutex_unlock(&binder_lock);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
	return ret;
}

/* VMA open hook: debug-log the mapping; state tracking happens in mmap. */
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void
binder_vma_close(struct vm_area_struct *vma)
{
	/* Userspace unmapped the shared buffer: forget the vma and defer
	 * dropping our files_struct reference to the workqueue. */
	struct binder_proc *proc = vma->vm_private_data;
	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma = NULL;
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
};

/*
 * binder_mmap() - set up the shared transaction buffer (at most 4MB).
 * Reserves a matching kernel virtual range with get_vm_area() and
 * records the constant offset between the user and kernel mappings;
 * only the first page is backed immediately, the rest on demand.
 * Only one mapping per binder_proc is allowed.
 */
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	/* Clamp the mapping to 4MB. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	/* The mapping is read-only for userspace and never inherited. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	/* user address = kernel address + user_buffer_offset */
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	/* Avoid cache-colour aliasing between the two mappings. */
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Back just the first page; the allocator faults in the rest. */
	if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(proc, buffer);
	/* Async transactions may use at most half the space. */
	proc->free_async_space = proc->buffer_size / 2;
	barrier();
	proc->files = get_files_struct(current);
	proc->vma = vma;
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

/*
 * binder_open() - allocate and register a binder_proc for this process
 * and expose its state under debugfs (named by pid).
 */
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: 
%d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	/* Hold a reference on the opening task for proc's lifetime. */
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	mutex_lock(&binder_lock);
	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;
	mutex_unlock(&binder_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];
		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
	}

	return 0;
}

/* flush(): just schedule the deferred flush; real work runs off the
 * workqueue with the proper locks held. */
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

/*
 * binder_deferred_flush() - kick every thread of the process out of the
 * kernel: mark them NEED_RETURN and wake any that are blocked in read.
 */
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

/* release(): tear down via the deferred workqueue; only the debugfs
 * entry is removed synchronously. */
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;
	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

/*
 * binder_deferred_release() - final teardown of a binder_proc: free all
 * threads, nodes, refs and buffers, fire death notifications for nodes
 * that still have remote references (moving them to binder_dead_nodes),
 * release the backing pages, and free proc itself.
 * Runs from the deferred workqueue with binder_lock held.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct hlist_node *pos;
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;

	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);
	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "binder_release: %d context_mgr_node gone\n",
			     proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}
	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node = rb_entry(n, struct binder_node, rb_node);

		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		list_del_init(&node->work.entry);
		binder_release_work(&node->async_todo);
		if (hlist_empty(&node->refs)) {
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		} else {
			/* Other processes still reference this node: keep it
			 * alive on the dead-nodes list and deliver any
			 * requested death notifications. */
			struct binder_ref *ref;
			int death = 0;

			node->proc = NULL;
			node->local_strong_refs = 0;
			node->local_weak_refs = 0;
			hlist_add_head(&node->dead_node, &binder_dead_nodes);

			hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
				incoming_refs++;
				if (ref->death) {
					death++;
					if (list_empty(&ref->death->work.entry)) {
						ref->death->work.type = BINDER_WORK_DEAD_BINDER;
						list_add_tail(&ref->death->work.entry, &ref->proc->todo);
						wake_up_interruptible(&ref->proc->wait);
					} else
						BUG();
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "binder: node %d now dead, "
				     "refs %d, death %d\n", node->debug_id,
				     incoming_refs,
death);
		}
	}
	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}
	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);
	buffers = 0;

	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
							rb_node);
		t = buffer->transaction;
		if (t) {
			/* Detach the transaction from the buffer so the
			 * buffer can be freed; the transaction leaks by
			 * design here (see disabled BUG below). */
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("binder: release proc %d, "
			       "transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}
		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			if (proc->pages[i]) {
				void *page_addr = proc->buffer + i * PAGE_SIZE;
				binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
					     "binder_release: %d: "
					     "page %d at %p not freed\n",
					     proc->pid, i,
					     page_addr);
				unmap_kernel_range((unsigned long)page_addr,
					PAGE_SIZE);
				__free_page(proc->pages[i]);
				page_count++;
			}
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_release: %d threads %d, nodes %d (ref %d), "
		     "refs %d, active transactions %d, buffers %d, "
		     "pages %d\n",
		     proc->pid, threads, nodes, incoming_refs, outgoing_refs,
		     active_transactions, buffers, page_count);

	kfree(proc);
}

/*
 * binder_deferred_func() - workqueue handler that drains the deferred
 * work list one binder_proc at a time, performing PUT_FILES, FLUSH and
 * RELEASE actions under binder_lock. put_files_struct() is called after
 * dropping the lock since it may block.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;
	do {
		mutex_lock(&binder_lock);
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		mutex_unlock(&binder_lock);
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

/*
 * binder_defer_work() - OR @defer into proc's pending deferred-work mask
 * and queue the worker if the proc is not already on the deferred list.
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

/* debugfs dump helper: one line (plus buffer info) per transaction. */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ?
t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

/* debugfs dump helper: one line per allocated buffer. */
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/* debugfs dump helper: describe one queued binder_work item. */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%p c%p\n",
			   prefix, node->debug_id, node->ptr, node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

/*
 * debugfs dump helper: print a thread's looper state, its transaction
 * stack and pending work. Unless print_always is set, rewind the seq
 * buffer if nothing beyond the header was produced (m->count trick).
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 " outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 " incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, " bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, " ", " pending transaction", w);
	}
	/* Nothing interesting printed: drop the header line too. */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

/* debugfs dump helper: node state, referencing pids, and async queue. */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct hlist_node *pos;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, pos, &node->refs, node_entry)
		count++;

	seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, node->ptr, node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, pos, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, " ",
				  " pending async transaction", w);
}

/* debugfs dump helper: one line per outgoing reference. */
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ?
"" : "dead ", 3205 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3206} 3207 3208static void print_binder_proc(struct seq_file *m, 3209 struct binder_proc *proc, int print_all) 3210{ 3211 struct binder_work *w; 3212 struct rb_node *n; 3213 size_t start_pos = m->count; 3214 size_t header_pos; 3215 3216 seq_printf(m, "proc %d\n", proc->pid); 3217 header_pos = m->count; 3218 3219 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3220 print_binder_thread(m, rb_entry(n, struct binder_thread, 3221 rb_node), print_all); 3222 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3223 struct binder_node *node = rb_entry(n, struct binder_node, 3224 rb_node); 3225 if (print_all || node->has_async_transaction) 3226 print_binder_node(m, node); 3227 } 3228 if (print_all) { 3229 for (n = rb_first(&proc->refs_by_desc); 3230 n != NULL; 3231 n = rb_next(n)) 3232 print_binder_ref(m, rb_entry(n, struct binder_ref, 3233 rb_node_desc)); 3234 } 3235 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3236 print_binder_buffer(m, " buffer", 3237 rb_entry(n, struct binder_buffer, rb_node)); 3238 list_for_each_entry(w, &proc->todo, entry) 3239 print_binder_work(m, " ", " pending transaction", w); 3240 list_for_each_entry(w, &proc->delivered_death, entry) { 3241 seq_puts(m, " has delivered dead binder\n"); 3242 break; 3243 } 3244 if (!print_all && m->count == header_pos) 3245 m->count = start_pos; 3246} 3247 3248static const char *binder_return_strings[] = { 3249 "BR_ERROR", 3250 "BR_OK", 3251 "BR_TRANSACTION", 3252 "BR_REPLY", 3253 "BR_ACQUIRE_RESULT", 3254 "BR_DEAD_REPLY", 3255 "BR_TRANSACTION_COMPLETE", 3256 "BR_INCREFS", 3257 "BR_ACQUIRE", 3258 "BR_RELEASE", 3259 "BR_DECREFS", 3260 "BR_ATTEMPT_ACQUIRE", 3261 "BR_NOOP", 3262 "BR_SPAWN_LOOPER", 3263 "BR_FINISHED", 3264 "BR_DEAD_BINDER", 3265 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3266 "BR_FAILED_REPLY" 3267}; 3268 3269static const char *binder_command_strings[] = { 3270 "BC_TRANSACTION", 3271 
"BC_REPLY", 3272 "BC_ACQUIRE_RESULT", 3273 "BC_FREE_BUFFER", 3274 "BC_INCREFS", 3275 "BC_ACQUIRE", 3276 "BC_RELEASE", 3277 "BC_DECREFS", 3278 "BC_INCREFS_DONE", 3279 "BC_ACQUIRE_DONE", 3280 "BC_ATTEMPT_ACQUIRE", 3281 "BC_REGISTER_LOOPER", 3282 "BC_ENTER_LOOPER", 3283 "BC_EXIT_LOOPER", 3284 "BC_REQUEST_DEATH_NOTIFICATION", 3285 "BC_CLEAR_DEATH_NOTIFICATION", 3286 "BC_DEAD_BINDER_DONE" 3287}; 3288 3289static const char *binder_objstat_strings[] = { 3290 "proc", 3291 "thread", 3292 "node", 3293 "ref", 3294 "death", 3295 "transaction", 3296 "transaction_complete" 3297}; 3298 3299static void print_binder_stats(struct seq_file *m, const char *prefix, 3300 struct binder_stats *stats) 3301{ 3302 int i; 3303 3304 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3305 ARRAY_SIZE(binder_command_strings)); 3306 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3307 if (stats->bc[i]) 3308 seq_printf(m, "%s%s: %d\n", prefix, 3309 binder_command_strings[i], stats->bc[i]); 3310 } 3311 3312 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3313 ARRAY_SIZE(binder_return_strings)); 3314 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3315 if (stats->br[i]) 3316 seq_printf(m, "%s%s: %d\n", prefix, 3317 binder_return_strings[i], stats->br[i]); 3318 } 3319 3320 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3321 ARRAY_SIZE(binder_objstat_strings)); 3322 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3323 ARRAY_SIZE(stats->obj_deleted)); 3324 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3325 if (stats->obj_created[i] || stats->obj_deleted[i]) 3326 seq_printf(m, "%s%s: active %d total %d\n", prefix, 3327 binder_objstat_strings[i], 3328 stats->obj_created[i] - stats->obj_deleted[i], 3329 stats->obj_created[i]); 3330 } 3331} 3332 3333static void print_binder_proc_stats(struct seq_file *m, 3334 struct binder_proc *proc) 3335{ 3336 struct binder_work *w; 3337 struct rb_node *n; 3338 int count, strong, weak; 3339 3340 seq_printf(m, "proc %d\n", proc->pid); 3341 count = 0; 3342 for (n = rb_first(&proc->threads); n 
!= NULL; n = rb_next(n)) 3343 count++; 3344 seq_printf(m, " threads: %d\n", count); 3345 seq_printf(m, " requested threads: %d+%d/%d\n" 3346 " ready threads %d\n" 3347 " free async space %zd\n", proc->requested_threads, 3348 proc->requested_threads_started, proc->max_threads, 3349 proc->ready_threads, proc->free_async_space); 3350 count = 0; 3351 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3352 count++; 3353 seq_printf(m, " nodes: %d\n", count); 3354 count = 0; 3355 strong = 0; 3356 weak = 0; 3357 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3358 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3359 rb_node_desc); 3360 count++; 3361 strong += ref->strong; 3362 weak += ref->weak; 3363 } 3364 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3365 3366 count = 0; 3367 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3368 count++; 3369 seq_printf(m, " buffers: %d\n", count); 3370 3371 count = 0; 3372 list_for_each_entry(w, &proc->todo, entry) { 3373 switch (w->type) { 3374 case BINDER_WORK_TRANSACTION: 3375 count++; 3376 break; 3377 default: 3378 break; 3379 } 3380 } 3381 seq_printf(m, " pending transactions: %d\n", count); 3382 3383 print_binder_stats(m, " ", &proc->stats); 3384} 3385 3386 3387static int binder_state_show(struct seq_file *m, void *unused) 3388{ 3389 struct binder_proc *proc; 3390 struct hlist_node *pos; 3391 struct binder_node *node; 3392 int do_lock = !binder_debug_no_lock; 3393 3394 if (do_lock) 3395 mutex_lock(&binder_lock); 3396 3397 seq_puts(m, "binder state:\n"); 3398 3399 if (!hlist_empty(&binder_dead_nodes)) 3400 seq_puts(m, "dead nodes:\n"); 3401 hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) 3402 print_binder_node(m, node); 3403 3404 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3405 print_binder_proc(m, proc, 1); 3406 if (do_lock) 3407 mutex_unlock(&binder_lock); 3408 return 0; 3409} 3410 3411static int binder_stats_show(struct 
seq_file *m, void *unused) 3412{ 3413 struct binder_proc *proc; 3414 struct hlist_node *pos; 3415 int do_lock = !binder_debug_no_lock; 3416 3417 if (do_lock) 3418 mutex_lock(&binder_lock); 3419 3420 seq_puts(m, "binder stats:\n"); 3421 3422 print_binder_stats(m, "", &binder_stats); 3423 3424 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3425 print_binder_proc_stats(m, proc); 3426 if (do_lock) 3427 mutex_unlock(&binder_lock); 3428 return 0; 3429} 3430 3431static int binder_transactions_show(struct seq_file *m, void *unused) 3432{ 3433 struct binder_proc *proc; 3434 struct hlist_node *pos; 3435 int do_lock = !binder_debug_no_lock; 3436 3437 if (do_lock) 3438 mutex_lock(&binder_lock); 3439 3440 seq_puts(m, "binder transactions:\n"); 3441 hlist_for_each_entry(proc, pos, &binder_procs, proc_node) 3442 print_binder_proc(m, proc, 0); 3443 if (do_lock) 3444 mutex_unlock(&binder_lock); 3445 return 0; 3446} 3447 3448static int binder_proc_show(struct seq_file *m, void *unused) 3449{ 3450 struct binder_proc *proc = m->private; 3451 int do_lock = !binder_debug_no_lock; 3452 3453 if (do_lock) 3454 mutex_lock(&binder_lock); 3455 seq_puts(m, "binder proc state:\n"); 3456 print_binder_proc(m, proc, 1); 3457 if (do_lock) 3458 mutex_unlock(&binder_lock); 3459 return 0; 3460} 3461 3462static void print_binder_transaction_log_entry(struct seq_file *m, 3463 struct binder_transaction_log_entry *e) 3464{ 3465 seq_printf(m, 3466 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 3467 e->debug_id, (e->call_type == 2) ? "reply" : 3468 ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, 3469 e->from_thread, e->to_proc, e->to_thread, e->to_node, 3470 e->target_handle, e->data_size, e->offsets_size); 3471} 3472 3473static int binder_transaction_log_show(struct seq_file *m, void *unused) 3474{ 3475 struct binder_transaction_log *log = m->private; 3476 int i; 3477 3478 if (log->full) { 3479 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 3480 print_binder_transaction_log_entry(m, &log->entry[i]); 3481 } 3482 for (i = 0; i < log->next; i++) 3483 print_binder_transaction_log_entry(m, &log->entry[i]); 3484 return 0; 3485} 3486 3487static const struct file_operations binder_fops = { 3488 .owner = THIS_MODULE, 3489 .poll = binder_poll, 3490 .unlocked_ioctl = binder_ioctl, 3491 .mmap = binder_mmap, 3492 .open = binder_open, 3493 .flush = binder_flush, 3494 .release = binder_release, 3495}; 3496 3497static struct miscdevice binder_miscdev = { 3498 .minor = MISC_DYNAMIC_MINOR, 3499 .name = "binder", 3500 .fops = &binder_fops 3501}; 3502 3503BINDER_DEBUG_ENTRY(state); 3504BINDER_DEBUG_ENTRY(stats); 3505BINDER_DEBUG_ENTRY(transactions); 3506BINDER_DEBUG_ENTRY(transaction_log); 3507 3508static int __init binder_init(void) 3509{ 3510 int ret; 3511 3512 binder_deferred_workqueue = create_singlethread_workqueue("binder"); 3513 if (!binder_deferred_workqueue) 3514 return -ENOMEM; 3515 3516 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3517 if (binder_debugfs_dir_entry_root) 3518 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3519 binder_debugfs_dir_entry_root); 3520 ret = misc_register(&binder_miscdev); 3521 if (binder_debugfs_dir_entry_root) { 3522 debugfs_create_file("state", 3523 S_IRUGO, 3524 binder_debugfs_dir_entry_root, 3525 NULL, 3526 &binder_state_fops); 3527 debugfs_create_file("stats", 3528 S_IRUGO, 3529 binder_debugfs_dir_entry_root, 3530 NULL, 3531 &binder_stats_fops); 3532 debugfs_create_file("transactions", 3533 S_IRUGO, 3534 binder_debugfs_dir_entry_root, 3535 NULL, 3536 
&binder_transactions_fops); 3537 debugfs_create_file("transaction_log", 3538 S_IRUGO, 3539 binder_debugfs_dir_entry_root, 3540 &binder_transaction_log, 3541 &binder_transaction_log_fops); 3542 debugfs_create_file("failed_transaction_log", 3543 S_IRUGO, 3544 binder_debugfs_dir_entry_root, 3545 &binder_transaction_log_failed, 3546 &binder_transaction_log_fops); 3547 } 3548 return ret; 3549} 3550 3551device_initcall(binder_init); 3552 3553MODULE_LICENSE("GPL v2"); 3554