binder.c revision 9b98710b14fbd720039c756637dcd5cf423b690b
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>

#include "binder.h"
#include "binder_trace.h"

/* The single "big binder lock" serializing nearly all driver state. */
static DEFINE_MUTEX(binder_main_lock);
/* Protects binder_deferred_list and each proc's deferred_work field. */
static DEFINE_MUTEX(binder_deferred_lock);
/* Serializes binder_mmap against concurrent mappers of the same proc. */
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_procs);		/* every live binder_proc */
static HLIST_HEAD(binder_deferred_list);	/* procs with pending deferred work */
static HLIST_HEAD(binder_dead_nodes);		/* nodes that outlived their proc */

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
/* Node of the context manager (handle 0); set once via an ioctl. */
static struct binder_node *binder_context_mgr_node;
static kuid_t binder_context_mgr_uid = INVALID_UID;
static int binder_last_id;	/* source of unique debug ids for nodes/refs/transactions */
static struct workqueue_struct *binder_deferred_workqueue;

/*
 * Generates the open() + file_operations boilerplate for a debugfs
 * file backed by a binder_<name>_show() seq_file callback.
 */
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

/* Userspace may never have a writable view of the shared buffer area. */
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

/* Bit values accepted in the debug_mask module parameter. */
enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

/* When set, the debugfs proc dump is taken without binder_main_lock. */
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
/* 0 = off, 1 = armed, 2 = tripped (threads park on the waitqueue above). */
static int binder_stop_on_user_error;

/*
 * module_param setter for stop_on_user_error.  After storing the new
 * value, wake any threads parked in binder_user_error_wait unless the
 * driver is still in the "tripped" (== 2) state.
 */
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
	param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

/* Emit a pr_info only when the matching bit is set in binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a misbehaving userspace caller; if stop_on_user_error is
 * armed, latch it to 2 so readers block until a developer intervenes.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

/* Object classes tracked in the created/deleted statistics arrays. */
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];	/* per BR_* return-command counts */
	int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];	/* per BC_* command counts */
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}

/* One slot of the in-memory transaction trace ring (debugfs-visible). */
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
};
struct binder_transaction_log {
	int next;	/* index of the slot to be overwritten next */
	int full;	/* set once the ring has wrapped at least once */
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

/*
 * Claim and zero the next slot of @log, wrapping when the ring is
 * exhausted.  The caller fills in the returned entry.
 */
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}

/* Generic work item queued on a proc/thread todo list. */
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

/* Kernel-side identity of a userspace binder object. */
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;		/* in proc->nodes while proc lives */
		struct hlist_node dead_node;	/* on binder_dead_nodes afterwards */
	};
	struct binder_proc *proc;
	struct hlist_head refs;		/* all binder_refs targeting this node */
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;		/* userspace address of the object */
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

/* A proc-local handle on a (possibly remote) binder_node. */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;	/* in proc->refs_by_desc */
	struct rb_node rb_node_node;	/* in proc->refs_by_node */
	struct hlist_node node_entry;	/* on node->refs */
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;			/* userspace-visible handle value */
	int strong;
	int weak;
	struct binder_ref_death *death;	/* pending death notification, if any */
};

/* Header preceding each payload region in a proc's mmap'ed buffer area. */
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	uint8_t data[0];	/* payload starts right after the header */
};

/* Flags for work postponed to the deferred workqueue. */
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/* Per-process binder state, one per open of the device node. */
struct binder_proc {
	struct hlist_node proc_node;	/* on binder_procs */
	struct rb_root threads;		/* binder_thread, keyed by pid */
	struct rb_root nodes;		/* binder_node, keyed by ptr */
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;	/* the buffer mapping, NULL after unmap */
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;			/* kernel address of the buffer area */
	/* Constant delta between kernel and user addresses of the mapping. */
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;	/* all buffers, ordered by address */
	struct rb_root free_buffers;	/* keyed by size (best-fit alloc) */
	struct rb_root allocated_buffers;	/* keyed by address */
	size_t free_async_space;	/* budget for async transactions */

	struct page **pages;		/* physical pages backing the area */
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};

/* Per ioctl-calling thread state, one per thread in proc->threads. */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};

/* An in-flight transaction; also forms the reply-chain via from_parent. */
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
};

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);

/*
 * Reserve an fd slot in the *target* proc's file table (for fds passed
 * in a transaction).  Returns the fd or a negative errno.
 */
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	/* Respect the target task's RLIMIT_NOFILE, not the caller's. */
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char
 *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

/*
 * Apply the requested nice value to the current thread, clamping it to
 * what RLIMIT_NICE permits when the raw value is not allowed.
 */
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	/* rlimit allows nothing below the default: flag it as a user error */
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

/*
 * Usable size of @buffer: distance from its data[] to the next buffer
 * header (or to the end of the mapped area for the last buffer).
 */
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
		struct binder_buffer, entry) - (size_t)buffer->data;
}

/* Insert @new_buffer into the size-ordered free tree (duplicates go right). */
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %p\n",
		     proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}

/* Insert @new_buffer into the address-ordered allocated tree. */
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();	/* same address allocated twice */
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}

/*
 * Translate a user-space data pointer back to its binder_buffer header
 * and verify it is currently allocated.  Returns NULL when @user_ptr
 * does not name an allocated buffer of this proc.
 */
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
						  uintptr_t user_ptr)
{
	struct rb_node *n = proc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else
			return buffer;
	}
	return NULL;
}

/*
 * Allocate (@allocate != 0) or free pages backing [start, end) of the
 * proc's buffer area, keeping the kernel mapping and the user mapping
 * in sync.  On allocation failure the loop body's labels unwind the
 * partially-set-up page before the free path tears down earlier pages.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int binder_update_page_range(struct binder_proc *proc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct vm_struct tmp_area;
	struct page **page;
	struct mm_struct *mm;

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %p-%p\n", proc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(proc, allocate, start, end);

	/* When no vma was handed in we are not in mmap context and must
	 * take a reference on the task's mm to touch its mappings. */
	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(proc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = proc->vma;
		if (vma && mm != proc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				proc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			proc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %p\n",
				proc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		tmp_area.addr = page_addr;
		tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
		ret = map_vm_area(&tmp_area, PAGE_KERNEL, page);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
			       proc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + proc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       proc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/* Walk backwards so the error labels below unwind exactly the
	 * state the allocation loop had built for the failing page. */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				proc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return -ENOMEM;
}

/*
 * Best-fit allocation of a transaction buffer from the proc's mmap'ed
 * area.  Returns NULL on any failure (no vma, size overflow, no space).
 */
static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
					      size_t data_size,
					      size_t offsets_size, int is_async)
{
	struct rb_node *n = proc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size;

	if (proc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       proc->pid);
		return NULL;
	}

	size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	/* ALIGN may wrap; a sum smaller than either addend means overflow. */
	if (size < data_size || size < offsets_size) {
		binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
				proc->pid, data_size, offsets_size);
		return NULL;
	}
673 if (is_async && 674 proc->free_async_space < size + sizeof(struct binder_buffer)) { 675 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 676 "%d: binder_alloc_buf size %zd failed, no async space left\n", 677 proc->pid, size); 678 return NULL; 679 } 680 681 while (n) { 682 buffer = rb_entry(n, struct binder_buffer, rb_node); 683 BUG_ON(!buffer->free); 684 buffer_size = binder_buffer_size(proc, buffer); 685 686 if (size < buffer_size) { 687 best_fit = n; 688 n = n->rb_left; 689 } else if (size > buffer_size) 690 n = n->rb_right; 691 else { 692 best_fit = n; 693 break; 694 } 695 } 696 if (best_fit == NULL) { 697 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", 698 proc->pid, size); 699 return NULL; 700 } 701 if (n == NULL) { 702 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); 703 buffer_size = binder_buffer_size(proc, buffer); 704 } 705 706 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 707 "%d: binder_alloc_buf size %zd got buffer %p size %zd\n", 708 proc->pid, size, buffer, buffer_size); 709 710 has_page_addr = 711 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); 712 if (n == NULL) { 713 if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) 714 buffer_size = size; /* no room for other buffers */ 715 else 716 buffer_size = size + sizeof(struct binder_buffer); 717 } 718 end_page_addr = 719 (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); 720 if (end_page_addr > has_page_addr) 721 end_page_addr = has_page_addr; 722 if (binder_update_page_range(proc, 1, 723 (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) 724 return NULL; 725 726 rb_erase(best_fit, &proc->free_buffers); 727 buffer->free = 0; 728 binder_insert_allocated_buffer(proc, buffer); 729 if (buffer_size != size) { 730 struct binder_buffer *new_buffer = (void *)buffer->data + size; 731 732 list_add(&new_buffer->entry, &buffer->entry); 733 new_buffer->free = 1; 734 binder_insert_free_buffer(proc, new_buffer); 735 } 736 
binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 737 "%d: binder_alloc_buf size %zd got %p\n", 738 proc->pid, size, buffer); 739 buffer->data_size = data_size; 740 buffer->offsets_size = offsets_size; 741 buffer->async_transaction = is_async; 742 if (is_async) { 743 proc->free_async_space -= size + sizeof(struct binder_buffer); 744 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, 745 "%d: binder_alloc_buf size %zd async free %zd\n", 746 proc->pid, size, proc->free_async_space); 747 } 748 749 return buffer; 750} 751 752static void *buffer_start_page(struct binder_buffer *buffer) 753{ 754 return (void *)((uintptr_t)buffer & PAGE_MASK); 755} 756 757static void *buffer_end_page(struct binder_buffer *buffer) 758{ 759 return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); 760} 761 762static void binder_delete_free_buffer(struct binder_proc *proc, 763 struct binder_buffer *buffer) 764{ 765 struct binder_buffer *prev, *next = NULL; 766 int free_page_end = 1; 767 int free_page_start = 1; 768 769 BUG_ON(proc->buffers.next == &buffer->entry); 770 prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); 771 BUG_ON(!prev->free); 772 if (buffer_end_page(prev) == buffer_start_page(buffer)) { 773 free_page_start = 0; 774 if (buffer_end_page(prev) == buffer_end_page(buffer)) 775 free_page_end = 0; 776 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 777 "%d: merge free, buffer %p share page with %p\n", 778 proc->pid, buffer, prev); 779 } 780 781 if (!list_is_last(&buffer->entry, &proc->buffers)) { 782 next = list_entry(buffer->entry.next, 783 struct binder_buffer, entry); 784 if (buffer_start_page(next) == buffer_end_page(buffer)) { 785 free_page_end = 0; 786 if (buffer_start_page(next) == 787 buffer_start_page(buffer)) 788 free_page_start = 0; 789 binder_debug(BINDER_DEBUG_BUFFER_ALLOC, 790 "%d: merge free, buffer %p share page with %p\n", 791 proc->pid, buffer, prev); 792 } 793 } 794 list_del(&buffer->entry); 795 if (free_page_start || free_page_end) { 796 
		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
			     proc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		/* Release only the page halves not shared with a neighbour. */
		binder_update_page_range(proc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

/*
 * Return @buffer to the free pool: give back async budget, release its
 * backing pages, and coalesce with free neighbours on either side.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		     proc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			     proc->pid, size, proc->free_async_space);
	}

	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* Merge with a free successor, then with a free predecessor. */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}

/* Find this proc's node for userspace object @ptr, or NULL. */
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else
			return node;
	}
	return NULL;
}

/*
 * Create a node for userspace object @ptr in @proc.  Returns NULL on
 * allocation failure or when a node for @ptr already exists.
 */
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = ++binder_last_id;
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

/*
 * Take a strong or weak reference on @node.  @internal distinguishes
 * counts held on behalf of remote refs from local ones.  When the node
 * must notify userspace of its first strong/weak ref, its work item is
 * queued on @target_list; -EINVAL if that is needed but no list given.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* re-queue so userspace sees the strong-ref request */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

/*
 * Drop a strong/weak count on @node.  When all counts reach zero the
 * owning proc is asked to release its userspace refs, and a node that
 * userspace no longer knows about is freed outright.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/* userspace still holds refs: ask it to drop them */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}


/* Look up @proc's ref for handle @desc, or NULL. */
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 uint32_t desc)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->desc)
			n = n->rb_left;
		else if (desc > ref->desc)
			n = n->rb_right;
		else
			return ref;
	}
	return NULL;
}

/*
 * Find @proc's ref for @node, creating one (with the lowest unused
 * descriptor) if none exists.  Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/* Handle 0 is reserved for the context manager; scan the
	 * desc-ordered tree for the first gap above that. */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();	/* chosen desc must be unused */
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			     proc->pid, new_ref->debug_id, new_ref->desc,
			     node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			     proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}

/*
 * Tear down @ref: unlink it from both proc trees and the node's list,
 * drop the node counts it held, cancel any pending death notification,
 * and free it.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->debug_id, ref->desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);	/* the ref's implicit weak count */
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}

static int binder_inc_ref(struct binder_ref *ref,
int strong, 1112 struct list_head *target_list) 1113{ 1114 int ret; 1115 1116 if (strong) { 1117 if (ref->strong == 0) { 1118 ret = binder_inc_node(ref->node, 1, 1, target_list); 1119 if (ret) 1120 return ret; 1121 } 1122 ref->strong++; 1123 } else { 1124 if (ref->weak == 0) { 1125 ret = binder_inc_node(ref->node, 0, 1, target_list); 1126 if (ret) 1127 return ret; 1128 } 1129 ref->weak++; 1130 } 1131 return 0; 1132} 1133 1134 1135static int binder_dec_ref(struct binder_ref *ref, int strong) 1136{ 1137 if (strong) { 1138 if (ref->strong == 0) { 1139 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", 1140 ref->proc->pid, ref->debug_id, 1141 ref->desc, ref->strong, ref->weak); 1142 return -EINVAL; 1143 } 1144 ref->strong--; 1145 if (ref->strong == 0) { 1146 int ret; 1147 1148 ret = binder_dec_node(ref->node, strong, 1); 1149 if (ret) 1150 return ret; 1151 } 1152 } else { 1153 if (ref->weak == 0) { 1154 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", 1155 ref->proc->pid, ref->debug_id, 1156 ref->desc, ref->strong, ref->weak); 1157 return -EINVAL; 1158 } 1159 ref->weak--; 1160 } 1161 if (ref->strong == 0 && ref->weak == 0) 1162 binder_delete_ref(ref); 1163 return 0; 1164} 1165 1166static void binder_pop_transaction(struct binder_thread *target_thread, 1167 struct binder_transaction *t) 1168{ 1169 if (target_thread) { 1170 BUG_ON(target_thread->transaction_stack != t); 1171 BUG_ON(target_thread->transaction_stack->from != target_thread); 1172 target_thread->transaction_stack = 1173 target_thread->transaction_stack->from_parent; 1174 t->from = NULL; 1175 } 1176 t->need_reply = 0; 1177 if (t->buffer) 1178 t->buffer->transaction = NULL; 1179 kfree(t); 1180 binder_stats_deleted(BINDER_STAT_TRANSACTION); 1181} 1182 1183static void binder_send_failed_reply(struct binder_transaction *t, 1184 uint32_t error_code) 1185{ 1186 struct binder_thread *target_thread; 1187 struct binder_transaction *next; 1188 1189 BUG_ON(t->flags & TF_ONE_WAY); 
	/*
	 * Walk from t back toward the originator; the first thread whose
	 * return_error slot is free receives error_code.  An already-set
	 * return_error is shifted into return_error2 first.
	 */
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			if (target_thread->return_error != BR_OK &&
			    target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id,
					      target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_pop_transaction(target_thread, t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/*
 * Release every binder object embedded in @buffer: drop node/ref counts
 * for flattened binders and handles, and close transferred fds.  When
 * @failed_at is non-NULL only the objects processed before the failure
 * point are released (used by binder_transaction()'s error path), and fds
 * are closed in the sending process.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* The offsets array follows the data, pointer-aligned. */
	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		/* Bad offsets are logged and skipped, not fatal here. */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				       debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			/* Only close fds on the aborted-send path. */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, fp->type);
			break;
		}
	}
}

/*
 * Core of the driver: copy a transaction or reply from @proc/@thread into
 * the target process, translating embedded binder objects on the way.
 */
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply)
{
	struct binder_transaction *t;
	struct binder_work
*tcomplete;
	binder_size_t *offp, *off_end;
	binder_size_t off_min;
	struct binder_proc *target_proc;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error;

	/* Record the attempt in the debugfs transaction log. */
	e = binder_transaction_log_add(&binder_transaction_log);
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;

	if (reply) {
		/* Replies target the transaction on top of our stack. */
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = in_reply_to->from;
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			ref = binder_get_ref(proc, tr->target.handle);
			if (ref == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				goto err_invalid_target_handle;
			}
			target_node = ref->node;
		} else {
			/* Handle 0 is the context manager. */
			target_node = binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				goto err_no_context_mgr_node;
			}
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			goto err_dead_binder;
		}
		if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			goto err_invalid_target_handle;
		}
		/*
		 * For synchronous calls, reuse a thread already blocked on
		 * us in the target process (avoids thread explosion on
		 * recursive calls).
		 */
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				return_error = BR_FAILED_REPLY;
				goto err_bad_call_stack;
			}
			while (tmp) {
				if (tmp->from && tmp->from->proc == target_proc)
					target_thread = tmp->from;
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = ++binder_last_id;
	e->debug_id = t->debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	/* Buffer lives in the *target* process's mmap'd area. */
	t->buffer = binder_alloc_buf(target_proc, tr->data_size,
		tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
	if (t->buffer == NULL) {
		return_error = BR_FAILED_REPLY;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	if (target_node)
		binder_inc_node(target_node, 1, 0, NULL);

	offp = (binder_size_t *)(t->buffer->data +
				 ALIGN(tr->data_size, sizeof(void *)));

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		goto err_bad_offset;
	}
	/*
	 * Translate each flattened object.  off_min forces offsets to be
	 * strictly increasing and non-overlapping.
	 */
	off_end = (void *)offp + tr->offsets_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;

		if (*offp > t->buffer->data_size - sizeof(*fp) ||
		    *offp < off_min ||
		    t->buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)(t->buffer->data_size -
					  sizeof(*fp)));
			return_error = BR_FAILED_REPLY;
			goto err_bad_offset;
		}
		fp = (struct flat_binder_object *)(t->buffer->data + *offp);
		off_min = *offp + sizeof(struct flat_binder_object);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			/* Local object going out: becomes a handle in the target. */
			struct binder_ref *ref;
			struct binder_node *node = binder_get_node(proc, fp->binder);

			if (node == NULL) {
				node = binder_new_node(proc, fp->binder, fp->cookie);
				if (node == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_new_node_failed;
				}
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
			}
			if (fp->cookie != node->cookie) {
				binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					(u64)fp->binder, node->debug_id,
					(u64)fp->cookie, (u64)node->cookie);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			ref = binder_get_ref_for_node(target_proc, node);
			if (ref == NULL) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_for_node_failed;
			}
			if (fp->type == BINDER_TYPE_BINDER)
				fp->type = BINDER_TYPE_HANDLE;
			else
				fp->type = BINDER_TYPE_WEAK_HANDLE;
			fp->handle = ref->desc;
			binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
				       &thread->todo);

			trace_binder_transaction_node_to_ref(t, node, ref);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx -> ref %d desc %d\n",
				     node->debug_id, (u64)node->ptr,
				     ref->debug_id, ref->desc);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);

			if (ref == NULL) {
				binder_user_error("%d:%d got transaction with invalid handle, %d\n",
						proc->pid,
						thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
				return_error = BR_FAILED_REPLY;
				goto err_binder_get_ref_failed;
			}
			if (ref->node->proc == target_proc) {
				/* Handle returning home: collapse back to a binder. */
				if (fp->type == BINDER_TYPE_HANDLE)
					fp->type = BINDER_TYPE_BINDER;
				else
					fp->type = BINDER_TYPE_WEAK_BINDER;
				fp->binder = ref->node->ptr;
				fp->cookie = ref->node->cookie;
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
				trace_binder_transaction_ref_to_node(t, ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> node %d u%016llx\n",
					     ref->debug_id, ref->desc, ref->node->debug_id,
					     (u64)ref->node->ptr);
			} else {
				/* Third-party handle: re-ref in the target. */
				struct binder_ref *new_ref;

				new_ref = binder_get_ref_for_node(target_proc, ref->node);
				if (new_ref == NULL) {
					return_error = BR_FAILED_REPLY;
					goto err_binder_get_ref_for_node_failed;
				}
				fp->handle = new_ref->desc;
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
				trace_binder_transaction_ref_to_ref(t, ref,
								    new_ref);
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
					     ref->debug_id, ref->desc, new_ref->debug_id,
					     new_ref->desc, ref->node->debug_id);
			}
		} break;

		case BINDER_TYPE_FD: {
			int target_fd;
			struct file *file;

			if (reply) {
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
					binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
						proc->pid, thread->pid, fp->handle);
					return_error = BR_FAILED_REPLY;
					goto err_fd_not_allowed;
				}
			} else if (!target_node->accept_fds) {
				binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fd_not_allowed;
			}

			file = fget(fp->handle);
			if (file == NULL) {
				binder_user_error("%d:%d got transaction with invalid fd, %d\n",
					proc->pid, thread->pid, fp->handle);
				return_error = BR_FAILED_REPLY;
				goto err_fget_failed;
			}
			if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
			if (target_fd < 0) {
				fput(file);
				return_error = BR_FAILED_REPLY;
				goto err_get_unused_fd_failed;
			}
			task_fd_install(target_proc, target_fd, file);
			trace_binder_transaction_fd(t, fp->handle, target_fd);
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d -> %d\n", fp->handle, target_fd);
			/* TODO: fput? */
			fp->handle = target_fd;
		} break;

		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, fp->type);
			return_error = BR_FAILED_REPLY;
			goto err_bad_object_type;
		}
	}
	if (reply) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* Synchronous call: push onto our stack until replied. */
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
	} else {
		/* Async: serialize behind any in-flight async on the node. */
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	list_add_tail(&t->work.entry, target_list);
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	list_add_tail(&tcomplete->entry, &thread->todo);
	if (target_wait)
		wake_up_interruptible(target_wait);
	return;

err_get_unused_fd_failed:
err_fget_failed:
err_fd_not_allowed:
err_binder_get_ref_for_node_failed:
err_binder_get_ref_failed:
err_binder_new_node_failed:
err_bad_object_type:
err_bad_offset:
err_copy_data_failed:
	/* Undo only the objects processed so far (failed_at = offp). */
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	t->buffer->transaction = NULL;
	binder_free_buf(target_proc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 1734 "%d:%d transaction failed %d, size %lld-%lld\n", 1735 proc->pid, thread->pid, return_error, 1736 (u64)tr->data_size, (u64)tr->offsets_size); 1737 1738 { 1739 struct binder_transaction_log_entry *fe; 1740 1741 fe = binder_transaction_log_add(&binder_transaction_log_failed); 1742 *fe = *e; 1743 } 1744 1745 BUG_ON(thread->return_error != BR_OK); 1746 if (in_reply_to) { 1747 thread->return_error = BR_TRANSACTION_COMPLETE; 1748 binder_send_failed_reply(in_reply_to, return_error); 1749 } else 1750 thread->return_error = return_error; 1751} 1752 1753static int binder_thread_write(struct binder_proc *proc, 1754 struct binder_thread *thread, 1755 binder_uintptr_t binder_buffer, size_t size, 1756 binder_size_t *consumed) 1757{ 1758 uint32_t cmd; 1759 void __user *buffer = (void __user *)(uintptr_t)binder_buffer; 1760 void __user *ptr = buffer + *consumed; 1761 void __user *end = buffer + size; 1762 1763 while (ptr < end && thread->return_error == BR_OK) { 1764 if (get_user(cmd, (uint32_t __user *)ptr)) 1765 return -EFAULT; 1766 ptr += sizeof(uint32_t); 1767 trace_binder_command(cmd); 1768 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { 1769 binder_stats.bc[_IOC_NR(cmd)]++; 1770 proc->stats.bc[_IOC_NR(cmd)]++; 1771 thread->stats.bc[_IOC_NR(cmd)]++; 1772 } 1773 switch (cmd) { 1774 case BC_INCREFS: 1775 case BC_ACQUIRE: 1776 case BC_RELEASE: 1777 case BC_DECREFS: { 1778 uint32_t target; 1779 struct binder_ref *ref; 1780 const char *debug_string; 1781 1782 if (get_user(target, (uint32_t __user *)ptr)) 1783 return -EFAULT; 1784 ptr += sizeof(uint32_t); 1785 if (target == 0 && binder_context_mgr_node && 1786 (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { 1787 ref = binder_get_ref_for_node(proc, 1788 binder_context_mgr_node); 1789 if (ref->desc != target) { 1790 binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n", 1791 proc->pid, thread->pid, 1792 ref->desc); 1793 } 1794 } else 1795 ref = 
binder_get_ref(proc, target); 1796 if (ref == NULL) { 1797 binder_user_error("%d:%d refcount change on invalid ref %d\n", 1798 proc->pid, thread->pid, target); 1799 break; 1800 } 1801 switch (cmd) { 1802 case BC_INCREFS: 1803 debug_string = "IncRefs"; 1804 binder_inc_ref(ref, 0, NULL); 1805 break; 1806 case BC_ACQUIRE: 1807 debug_string = "Acquire"; 1808 binder_inc_ref(ref, 1, NULL); 1809 break; 1810 case BC_RELEASE: 1811 debug_string = "Release"; 1812 binder_dec_ref(ref, 1); 1813 break; 1814 case BC_DECREFS: 1815 default: 1816 debug_string = "DecRefs"; 1817 binder_dec_ref(ref, 0); 1818 break; 1819 } 1820 binder_debug(BINDER_DEBUG_USER_REFS, 1821 "%d:%d %s ref %d desc %d s %d w %d for node %d\n", 1822 proc->pid, thread->pid, debug_string, ref->debug_id, 1823 ref->desc, ref->strong, ref->weak, ref->node->debug_id); 1824 break; 1825 } 1826 case BC_INCREFS_DONE: 1827 case BC_ACQUIRE_DONE: { 1828 binder_uintptr_t node_ptr; 1829 binder_uintptr_t cookie; 1830 struct binder_node *node; 1831 1832 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) 1833 return -EFAULT; 1834 ptr += sizeof(binder_uintptr_t); 1835 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1836 return -EFAULT; 1837 ptr += sizeof(binder_uintptr_t); 1838 node = binder_get_node(proc, node_ptr); 1839 if (node == NULL) { 1840 binder_user_error("%d:%d %s u%016llx no match\n", 1841 proc->pid, thread->pid, 1842 cmd == BC_INCREFS_DONE ? 1843 "BC_INCREFS_DONE" : 1844 "BC_ACQUIRE_DONE", 1845 (u64)node_ptr); 1846 break; 1847 } 1848 if (cookie != node->cookie) { 1849 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", 1850 proc->pid, thread->pid, 1851 cmd == BC_INCREFS_DONE ? 
1852 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1853 (u64)node_ptr, node->debug_id, 1854 (u64)cookie, (u64)node->cookie); 1855 break; 1856 } 1857 if (cmd == BC_ACQUIRE_DONE) { 1858 if (node->pending_strong_ref == 0) { 1859 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", 1860 proc->pid, thread->pid, 1861 node->debug_id); 1862 break; 1863 } 1864 node->pending_strong_ref = 0; 1865 } else { 1866 if (node->pending_weak_ref == 0) { 1867 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", 1868 proc->pid, thread->pid, 1869 node->debug_id); 1870 break; 1871 } 1872 node->pending_weak_ref = 0; 1873 } 1874 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); 1875 binder_debug(BINDER_DEBUG_USER_REFS, 1876 "%d:%d %s node %d ls %d lw %d\n", 1877 proc->pid, thread->pid, 1878 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", 1879 node->debug_id, node->local_strong_refs, node->local_weak_refs); 1880 break; 1881 } 1882 case BC_ATTEMPT_ACQUIRE: 1883 pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); 1884 return -EINVAL; 1885 case BC_ACQUIRE_RESULT: 1886 pr_err("BC_ACQUIRE_RESULT not supported\n"); 1887 return -EINVAL; 1888 1889 case BC_FREE_BUFFER: { 1890 binder_uintptr_t data_ptr; 1891 struct binder_buffer *buffer; 1892 1893 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) 1894 return -EFAULT; 1895 ptr += sizeof(binder_uintptr_t); 1896 1897 buffer = binder_buffer_lookup(proc, data_ptr); 1898 if (buffer == NULL) { 1899 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n", 1900 proc->pid, thread->pid, (u64)data_ptr); 1901 break; 1902 } 1903 if (!buffer->allow_user_free) { 1904 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n", 1905 proc->pid, thread->pid, (u64)data_ptr); 1906 break; 1907 } 1908 binder_debug(BINDER_DEBUG_FREE_BUFFER, 1909 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", 1910 proc->pid, thread->pid, (u64)data_ptr, 1911 buffer->debug_id, 
1912 buffer->transaction ? "active" : "finished"); 1913 1914 if (buffer->transaction) { 1915 buffer->transaction->buffer = NULL; 1916 buffer->transaction = NULL; 1917 } 1918 if (buffer->async_transaction && buffer->target_node) { 1919 BUG_ON(!buffer->target_node->has_async_transaction); 1920 if (list_empty(&buffer->target_node->async_todo)) 1921 buffer->target_node->has_async_transaction = 0; 1922 else 1923 list_move_tail(buffer->target_node->async_todo.next, &thread->todo); 1924 } 1925 trace_binder_transaction_buffer_release(buffer); 1926 binder_transaction_buffer_release(proc, buffer, NULL); 1927 binder_free_buf(proc, buffer); 1928 break; 1929 } 1930 1931 case BC_TRANSACTION: 1932 case BC_REPLY: { 1933 struct binder_transaction_data tr; 1934 1935 if (copy_from_user(&tr, ptr, sizeof(tr))) 1936 return -EFAULT; 1937 ptr += sizeof(tr); 1938 binder_transaction(proc, thread, &tr, cmd == BC_REPLY); 1939 break; 1940 } 1941 1942 case BC_REGISTER_LOOPER: 1943 binder_debug(BINDER_DEBUG_THREADS, 1944 "%d:%d BC_REGISTER_LOOPER\n", 1945 proc->pid, thread->pid); 1946 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { 1947 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1948 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", 1949 proc->pid, thread->pid); 1950 } else if (proc->requested_threads == 0) { 1951 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1952 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", 1953 proc->pid, thread->pid); 1954 } else { 1955 proc->requested_threads--; 1956 proc->requested_threads_started++; 1957 } 1958 thread->looper |= BINDER_LOOPER_STATE_REGISTERED; 1959 break; 1960 case BC_ENTER_LOOPER: 1961 binder_debug(BINDER_DEBUG_THREADS, 1962 "%d:%d BC_ENTER_LOOPER\n", 1963 proc->pid, thread->pid); 1964 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { 1965 thread->looper |= BINDER_LOOPER_STATE_INVALID; 1966 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", 
1967 proc->pid, thread->pid); 1968 } 1969 thread->looper |= BINDER_LOOPER_STATE_ENTERED; 1970 break; 1971 case BC_EXIT_LOOPER: 1972 binder_debug(BINDER_DEBUG_THREADS, 1973 "%d:%d BC_EXIT_LOOPER\n", 1974 proc->pid, thread->pid); 1975 thread->looper |= BINDER_LOOPER_STATE_EXITED; 1976 break; 1977 1978 case BC_REQUEST_DEATH_NOTIFICATION: 1979 case BC_CLEAR_DEATH_NOTIFICATION: { 1980 uint32_t target; 1981 binder_uintptr_t cookie; 1982 struct binder_ref *ref; 1983 struct binder_ref_death *death; 1984 1985 if (get_user(target, (uint32_t __user *)ptr)) 1986 return -EFAULT; 1987 ptr += sizeof(uint32_t); 1988 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 1989 return -EFAULT; 1990 ptr += sizeof(binder_uintptr_t); 1991 ref = binder_get_ref(proc, target); 1992 if (ref == NULL) { 1993 binder_user_error("%d:%d %s invalid ref %d\n", 1994 proc->pid, thread->pid, 1995 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 1996 "BC_REQUEST_DEATH_NOTIFICATION" : 1997 "BC_CLEAR_DEATH_NOTIFICATION", 1998 target); 1999 break; 2000 } 2001 2002 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, 2003 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", 2004 proc->pid, thread->pid, 2005 cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
2006 "BC_REQUEST_DEATH_NOTIFICATION" : 2007 "BC_CLEAR_DEATH_NOTIFICATION", 2008 (u64)cookie, ref->debug_id, ref->desc, 2009 ref->strong, ref->weak, ref->node->debug_id); 2010 2011 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { 2012 if (ref->death) { 2013 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", 2014 proc->pid, thread->pid); 2015 break; 2016 } 2017 death = kzalloc(sizeof(*death), GFP_KERNEL); 2018 if (death == NULL) { 2019 thread->return_error = BR_ERROR; 2020 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, 2021 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", 2022 proc->pid, thread->pid); 2023 break; 2024 } 2025 binder_stats_created(BINDER_STAT_DEATH); 2026 INIT_LIST_HEAD(&death->work.entry); 2027 death->cookie = cookie; 2028 ref->death = death; 2029 if (ref->node->proc == NULL) { 2030 ref->death->work.type = BINDER_WORK_DEAD_BINDER; 2031 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2032 list_add_tail(&ref->death->work.entry, &thread->todo); 2033 } else { 2034 list_add_tail(&ref->death->work.entry, &proc->todo); 2035 wake_up_interruptible(&proc->wait); 2036 } 2037 } 2038 } else { 2039 if (ref->death == NULL) { 2040 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", 2041 proc->pid, thread->pid); 2042 break; 2043 } 2044 death = ref->death; 2045 if (death->cookie != cookie) { 2046 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", 2047 proc->pid, thread->pid, 2048 (u64)death->cookie, 2049 (u64)cookie); 2050 break; 2051 } 2052 ref->death = NULL; 2053 if (list_empty(&death->work.entry)) { 2054 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2055 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2056 list_add_tail(&death->work.entry, &thread->todo); 2057 } else { 2058 list_add_tail(&death->work.entry, &proc->todo); 2059 
wake_up_interruptible(&proc->wait); 2060 } 2061 } else { 2062 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); 2063 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; 2064 } 2065 } 2066 } break; 2067 case BC_DEAD_BINDER_DONE: { 2068 struct binder_work *w; 2069 binder_uintptr_t cookie; 2070 struct binder_ref_death *death = NULL; 2071 2072 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2073 return -EFAULT; 2074 2075 ptr += sizeof(void *); 2076 list_for_each_entry(w, &proc->delivered_death, entry) { 2077 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2078 2079 if (tmp_death->cookie == cookie) { 2080 death = tmp_death; 2081 break; 2082 } 2083 } 2084 binder_debug(BINDER_DEBUG_DEAD_BINDER, 2085 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n", 2086 proc->pid, thread->pid, (u64)cookie, 2087 death); 2088 if (death == NULL) { 2089 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", 2090 proc->pid, thread->pid, (u64)cookie); 2091 break; 2092 } 2093 2094 list_del_init(&death->work.entry); 2095 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { 2096 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; 2097 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { 2098 list_add_tail(&death->work.entry, &thread->todo); 2099 } else { 2100 list_add_tail(&death->work.entry, &proc->todo); 2101 wake_up_interruptible(&proc->wait); 2102 } 2103 } 2104 } break; 2105 2106 default: 2107 pr_err("%d:%d unknown command %d\n", 2108 proc->pid, thread->pid, cmd); 2109 return -EINVAL; 2110 } 2111 *consumed = ptr - buffer; 2112 } 2113 return 0; 2114} 2115 2116static void binder_stat_br(struct binder_proc *proc, 2117 struct binder_thread *thread, uint32_t cmd) 2118{ 2119 trace_binder_return(cmd); 2120 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { 2121 binder_stats.br[_IOC_NR(cmd)]++; 2122 proc->stats.br[_IOC_NR(cmd)]++; 2123 thread->stats.br[_IOC_NR(cmd)]++; 2124 } 2125} 2126 2127static 
/* Is there process-wide work, or must this thread return to userspace? */
int binder_has_proc_work(struct binder_proc *proc,
			 struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/* Is there thread-local work (todo entry, pending error, or forced return)? */
static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 *
 * Drains pending return errors, then blocks (unless @non_block) for
 * thread-local or process-wide work and translates each queued
 * binder_work item into BR_* commands written to @binder_buffer.
 * *@consumed is advanced past everything written.  Returns 0 or a
 * negative errno (-EFAULT on user-copy failure, wait errors otherwise).
 * Called with the global binder lock held; drops it while sleeping.
 */
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	/* Fresh read: start the stream with a BR_NOOP for userspace. */
	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	/*
	 * A thread with no transaction in flight and an empty local queue
	 * may service the shared process queue.
	 */
	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo);

	/* Deliver any pending return errors before normal work. */
	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}


	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	/* Sleep without the global lock. */
	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		/* Only registered/entered looper threads may wait for proc work. */
		if (!(thread->looper &
		      (BINDER_LOOPER_STATE_REGISTERED |
		       BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			/* exclusive: wake only one waiter per work item */
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	/* Translate queued work items into BR_* commands. */
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			/* Only the initial BR_NOOP written: go back to sleep. */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		/* Stop when the largest command would no longer fit. */
		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			/*
			 * Tell userspace about ref-count transitions on this
			 * node; take a pending local ref until it acks.
			 */
			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				/* No transition to report; maybe reap the node. */
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					kfree(node);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id,
						     (u64)node->ptr,
						     (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				/* Keep it until BC_DEAD_BINDER_DONE arrives. */
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		/* Build the binder_transaction_data handed to userspace. */
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			/* Inherit the caller's priority, bounded by the node's minimum. */
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			/* Report the sender pid in the reader's pid namespace. */
			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		/* Translate kernel buffer addresses into the receiver's mapping. */
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					    sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			/* Synchronous: keep it on the stack until the reply. */
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			kfree(t);
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	/* Ask userspace to spawn another looper if the pool ran dry. */
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		/* Overwrites the BR_NOOP placed at the buffer start. */
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}

/*
 * binder_release_work() - discard a work list that can no longer be
 * delivered, failing synchronous transactions and freeing the rest.
 */
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			/* Synchronous inbound transaction: sender needs a reply. */
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			}
else { 2482 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2483 "undelivered transaction %d\n", 2484 t->debug_id); 2485 t->buffer->transaction = NULL; 2486 kfree(t); 2487 binder_stats_deleted(BINDER_STAT_TRANSACTION); 2488 } 2489 } break; 2490 case BINDER_WORK_TRANSACTION_COMPLETE: { 2491 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2492 "undelivered TRANSACTION_COMPLETE\n"); 2493 kfree(w); 2494 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 2495 } break; 2496 case BINDER_WORK_DEAD_BINDER_AND_CLEAR: 2497 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { 2498 struct binder_ref_death *death; 2499 2500 death = container_of(w, struct binder_ref_death, work); 2501 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2502 "undelivered death notification, %016llx\n", 2503 (u64)death->cookie); 2504 kfree(death); 2505 binder_stats_deleted(BINDER_STAT_DEATH); 2506 } break; 2507 default: 2508 pr_err("unexpected work type, %d, not freed\n", 2509 w->type); 2510 break; 2511 } 2512 } 2513 2514} 2515 2516static struct binder_thread *binder_get_thread(struct binder_proc *proc) 2517{ 2518 struct binder_thread *thread = NULL; 2519 struct rb_node *parent = NULL; 2520 struct rb_node **p = &proc->threads.rb_node; 2521 2522 while (*p) { 2523 parent = *p; 2524 thread = rb_entry(parent, struct binder_thread, rb_node); 2525 2526 if (current->pid < thread->pid) 2527 p = &(*p)->rb_left; 2528 else if (current->pid > thread->pid) 2529 p = &(*p)->rb_right; 2530 else 2531 break; 2532 } 2533 if (*p == NULL) { 2534 thread = kzalloc(sizeof(*thread), GFP_KERNEL); 2535 if (thread == NULL) 2536 return NULL; 2537 binder_stats_created(BINDER_STAT_THREAD); 2538 thread->proc = proc; 2539 thread->pid = current->pid; 2540 init_waitqueue_head(&thread->wait); 2541 INIT_LIST_HEAD(&thread->todo); 2542 rb_link_node(&thread->rb_node, parent, p); 2543 rb_insert_color(&thread->rb_node, &proc->threads); 2544 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2545 thread->return_error = BR_OK; 2546 thread->return_error2 = 
BR_OK; 2547 } 2548 return thread; 2549} 2550 2551static int binder_free_thread(struct binder_proc *proc, 2552 struct binder_thread *thread) 2553{ 2554 struct binder_transaction *t; 2555 struct binder_transaction *send_reply = NULL; 2556 int active_transactions = 0; 2557 2558 rb_erase(&thread->rb_node, &proc->threads); 2559 t = thread->transaction_stack; 2560 if (t && t->to_thread == thread) 2561 send_reply = t; 2562 while (t) { 2563 active_transactions++; 2564 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, 2565 "release %d:%d transaction %d %s, still active\n", 2566 proc->pid, thread->pid, 2567 t->debug_id, 2568 (t->to_thread == thread) ? "in" : "out"); 2569 2570 if (t->to_thread == thread) { 2571 t->to_proc = NULL; 2572 t->to_thread = NULL; 2573 if (t->buffer) { 2574 t->buffer->transaction = NULL; 2575 t->buffer = NULL; 2576 } 2577 t = t->to_parent; 2578 } else if (t->from == thread) { 2579 t->from = NULL; 2580 t = t->from_parent; 2581 } else 2582 BUG(); 2583 } 2584 if (send_reply) 2585 binder_send_failed_reply(send_reply, BR_DEAD_REPLY); 2586 binder_release_work(&thread->todo); 2587 kfree(thread); 2588 binder_stats_deleted(BINDER_STAT_THREAD); 2589 return active_transactions; 2590} 2591 2592static unsigned int binder_poll(struct file *filp, 2593 struct poll_table_struct *wait) 2594{ 2595 struct binder_proc *proc = filp->private_data; 2596 struct binder_thread *thread = NULL; 2597 int wait_for_proc_work; 2598 2599 binder_lock(__func__); 2600 2601 thread = binder_get_thread(proc); 2602 2603 wait_for_proc_work = thread->transaction_stack == NULL && 2604 list_empty(&thread->todo) && thread->return_error == BR_OK; 2605 2606 binder_unlock(__func__); 2607 2608 if (wait_for_proc_work) { 2609 if (binder_has_proc_work(proc, thread)) 2610 return POLLIN; 2611 poll_wait(filp, &proc->wait, wait); 2612 if (binder_has_proc_work(proc, thread)) 2613 return POLLIN; 2614 } else { 2615 if (binder_has_thread_work(thread)) 2616 return POLLIN; 2617 poll_wait(filp, &thread->wait, wait); 
if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}

/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ: copy in the
 * binder_write_read descriptor, drain the write buffer through
 * binder_thread_write(), then fill the read buffer via
 * binder_thread_read().  The (possibly partially) updated descriptor
 * is copied back to userspace even on error so consumed offsets are
 * preserved.  Returns 0 or a negative errno.
 */
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			/* Report nothing read; preserve write_consumed. */
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		/* We may have consumed proc work; wake other waiters. */
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

/*
 * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR: make the
 * calling process the context manager (handle 0).  Fails with -EBUSY
 * if a manager node already exists, and -EPERM if a different euid
 * previously claimed the role.  Caller holds the global binder lock.
 */
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	kuid_t curr_euid = current_euid();

	if (binder_context_mgr_node != NULL) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	if (uid_valid(binder_context_mgr_uid)) {
		/* Only the uid that first claimed the role may reclaim it. */
		if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		binder_context_mgr_uid = curr_euid;
	}
	binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (binder_context_mgr_node == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	/* Pin the manager node so it never goes away underneath us. */
	binder_context_mgr_node->local_weak_refs++;
	binder_context_mgr_node->local_strong_refs++;
	binder_context_mgr_node->has_strong_ref = 1;
	binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}

/*
 * binder_ioctl() - top-level ioctl dispatcher for /dev/binder.
 * Takes the global binder lock around all command handling and clears
 * the thread's NEED_RETURN flag on the way out.
 */
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	/* Block while a debug-requested stop on user error is in force. */
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
2758 } 2759 break; 2760 case BINDER_SET_CONTEXT_MGR: 2761 ret = binder_ioctl_set_ctx_mgr(filp); 2762 if (ret) 2763 goto err; 2764 ret = security_binder_set_context_mgr(proc->tsk); 2765 if (ret < 0) 2766 goto err; 2767 break; 2768 case BINDER_THREAD_EXIT: 2769 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", 2770 proc->pid, thread->pid); 2771 binder_free_thread(proc, thread); 2772 thread = NULL; 2773 break; 2774 case BINDER_VERSION: { 2775 struct binder_version __user *ver = ubuf; 2776 2777 if (size != sizeof(struct binder_version)) { 2778 ret = -EINVAL; 2779 goto err; 2780 } 2781 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, 2782 &ver->protocol_version)) { 2783 ret = -EINVAL; 2784 goto err; 2785 } 2786 break; 2787 } 2788 default: 2789 ret = -EINVAL; 2790 goto err; 2791 } 2792 ret = 0; 2793err: 2794 if (thread) 2795 thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; 2796 binder_unlock(__func__); 2797 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); 2798 if (ret && ret != -ERESTARTSYS) 2799 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); 2800err_unlocked: 2801 trace_binder_ioctl_done(ret); 2802 return ret; 2803} 2804 2805static void binder_vma_open(struct vm_area_struct *vma) 2806{ 2807 struct binder_proc *proc = vma->vm_private_data; 2808 2809 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2810 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2811 proc->pid, vma->vm_start, vma->vm_end, 2812 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2813 (unsigned long)pgprot_val(vma->vm_page_prot)); 2814} 2815 2816static void binder_vma_close(struct vm_area_struct *vma) 2817{ 2818 struct binder_proc *proc = vma->vm_private_data; 2819 2820 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2821 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", 2822 proc->pid, vma->vm_start, vma->vm_end, 2823 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2824 (unsigned long)pgprot_val(vma->vm_page_prot)); 2825 proc->vma 
= NULL; 2826 proc->vma_vm_mm = NULL; 2827 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); 2828} 2829 2830static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 2831{ 2832 return VM_FAULT_SIGBUS; 2833} 2834 2835static struct vm_operations_struct binder_vm_ops = { 2836 .open = binder_vma_open, 2837 .close = binder_vma_close, 2838 .fault = binder_vm_fault, 2839}; 2840 2841static int binder_mmap(struct file *filp, struct vm_area_struct *vma) 2842{ 2843 int ret; 2844 struct vm_struct *area; 2845 struct binder_proc *proc = filp->private_data; 2846 const char *failure_string; 2847 struct binder_buffer *buffer; 2848 2849 if (proc->tsk != current) 2850 return -EINVAL; 2851 2852 if ((vma->vm_end - vma->vm_start) > SZ_4M) 2853 vma->vm_end = vma->vm_start + SZ_4M; 2854 2855 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 2856 "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", 2857 proc->pid, vma->vm_start, vma->vm_end, 2858 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, 2859 (unsigned long)pgprot_val(vma->vm_page_prot)); 2860 2861 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { 2862 ret = -EPERM; 2863 failure_string = "bad vm_flags"; 2864 goto err_bad_arg; 2865 } 2866 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; 2867 2868 mutex_lock(&binder_mmap_lock); 2869 if (proc->buffer) { 2870 ret = -EBUSY; 2871 failure_string = "already mapped"; 2872 goto err_already_mapped; 2873 } 2874 2875 area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); 2876 if (area == NULL) { 2877 ret = -ENOMEM; 2878 failure_string = "get_vm_area"; 2879 goto err_get_vm_area_failed; 2880 } 2881 proc->buffer = area->addr; 2882 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; 2883 mutex_unlock(&binder_mmap_lock); 2884 2885#ifdef CONFIG_CPU_CACHE_VIPT 2886 if (cache_is_vipt_aliasing()) { 2887 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { 2888 pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, 
vma->vm_end, proc->buffer); 2889 vma->vm_start += PAGE_SIZE; 2890 } 2891 } 2892#endif 2893 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); 2894 if (proc->pages == NULL) { 2895 ret = -ENOMEM; 2896 failure_string = "alloc page array"; 2897 goto err_alloc_pages_failed; 2898 } 2899 proc->buffer_size = vma->vm_end - vma->vm_start; 2900 2901 vma->vm_ops = &binder_vm_ops; 2902 vma->vm_private_data = proc; 2903 2904 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { 2905 ret = -ENOMEM; 2906 failure_string = "alloc small buf"; 2907 goto err_alloc_small_buf_failed; 2908 } 2909 buffer = proc->buffer; 2910 INIT_LIST_HEAD(&proc->buffers); 2911 list_add(&buffer->entry, &proc->buffers); 2912 buffer->free = 1; 2913 binder_insert_free_buffer(proc, buffer); 2914 proc->free_async_space = proc->buffer_size / 2; 2915 barrier(); 2916 proc->files = get_files_struct(current); 2917 proc->vma = vma; 2918 proc->vma_vm_mm = vma->vm_mm; 2919 2920 /*pr_info("binder_mmap: %d %lx-%lx maps %p\n", 2921 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ 2922 return 0; 2923 2924err_alloc_small_buf_failed: 2925 kfree(proc->pages); 2926 proc->pages = NULL; 2927err_alloc_pages_failed: 2928 mutex_lock(&binder_mmap_lock); 2929 vfree(proc->buffer); 2930 proc->buffer = NULL; 2931err_get_vm_area_failed: 2932err_already_mapped: 2933 mutex_unlock(&binder_mmap_lock); 2934err_bad_arg: 2935 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n", 2936 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); 2937 return ret; 2938} 2939 2940static int binder_open(struct inode *nodp, struct file *filp) 2941{ 2942 struct binder_proc *proc; 2943 2944 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", 2945 current->group_leader->pid, current->pid); 2946 2947 proc = kzalloc(sizeof(*proc), GFP_KERNEL); 2948 if (proc == NULL) 2949 return -ENOMEM; 2950 get_task_struct(current); 2951 proc->tsk = current; 2952 
INIT_LIST_HEAD(&proc->todo); 2953 init_waitqueue_head(&proc->wait); 2954 proc->default_priority = task_nice(current); 2955 2956 binder_lock(__func__); 2957 2958 binder_stats_created(BINDER_STAT_PROC); 2959 hlist_add_head(&proc->proc_node, &binder_procs); 2960 proc->pid = current->group_leader->pid; 2961 INIT_LIST_HEAD(&proc->delivered_death); 2962 filp->private_data = proc; 2963 2964 binder_unlock(__func__); 2965 2966 if (binder_debugfs_dir_entry_proc) { 2967 char strbuf[11]; 2968 2969 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); 2970 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, 2971 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); 2972 } 2973 2974 return 0; 2975} 2976 2977static int binder_flush(struct file *filp, fl_owner_t id) 2978{ 2979 struct binder_proc *proc = filp->private_data; 2980 2981 binder_defer_work(proc, BINDER_DEFERRED_FLUSH); 2982 2983 return 0; 2984} 2985 2986static void binder_deferred_flush(struct binder_proc *proc) 2987{ 2988 struct rb_node *n; 2989 int wake_count = 0; 2990 2991 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { 2992 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); 2993 2994 thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; 2995 if (thread->looper & BINDER_LOOPER_STATE_WAITING) { 2996 wake_up_interruptible(&thread->wait); 2997 wake_count++; 2998 } 2999 } 3000 wake_up_interruptible_all(&proc->wait); 3001 3002 binder_debug(BINDER_DEBUG_OPEN_CLOSE, 3003 "binder_flush: %d woke %d threads\n", proc->pid, 3004 wake_count); 3005} 3006 3007static int binder_release(struct inode *nodp, struct file *filp) 3008{ 3009 struct binder_proc *proc = filp->private_data; 3010 3011 debugfs_remove(proc->debugfs_entry); 3012 binder_defer_work(proc, BINDER_DEFERRED_RELEASE); 3013 3014 return 0; 3015} 3016 3017static int binder_node_release(struct binder_node *node, int refs) 3018{ 3019 struct binder_ref *ref; 3020 int death = 0; 3021 3022 list_del_init(&node->work.entry); 3023 
	binder_release_work(&node->async_todo);

	/* No remote refs: nothing to notify, free the node outright. */
	if (hlist_empty(&node->refs)) {
		kfree(node);
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	/* Detach from the dying proc and park on the dead-nodes list. */
	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		/* Queue BR_DEAD_BINDER for each registered death watcher. */
		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}

/*
 * binder_deferred_release() - final teardown of a binder_proc after
 * its file has been released: frees threads, nodes, refs, queued
 * work, transaction buffers and mapped pages, then the proc itself.
 * Runs from the deferred workqueue with the global binder lock held.
 */
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	/* The vma and files must already be gone (mmap close / flush). */
	BUG_ON(proc->vma);
	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			/* Orphan the transaction; its sender sees failure. */
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
			/*BUG();*/
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		/* Unmap and free every page still backing the buffer. */
		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %p not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
		vfree(proc->buffer);
	}

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}

/*
 * binder_deferred_func() - workqueue handler draining the deferred
 * work list (put-files, flush, release) one proc at a time.
 */
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		binder_lock(__func__);
		mutex_lock(&binder_deferred_lock);
		/* Pop the next proc with pending deferred work, if any. */
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		binder_unlock(__func__);
		/* Drop the files reference outside the binder lock. */
		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

/*
 * binder_defer_work() - record deferred work bits for @proc and queue
 * it (once) on the deferred list for binder_deferred_func().
 */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

/* debugfs: dump one transaction (endpoints, flags, buffer info). */
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

/* debugfs: dump one transaction buffer's identity and state. */
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

/* debugfs: dump one queued binder_work item, by type. */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

/*
 * debugfs: dump one thread — its transaction stack and todo list.
 * Unless @print_always, a thread with nothing to report is rolled
 * back out of the seq_file output entirely (via m->count).
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 " outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 " incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, " bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, " ", " pending transaction", w);
	}
	/* Nothing emitted beyond the header: drop the header too. */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

/* debugfs: dump one node — ref counts, referencing procs, async work. */
static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, " ",
				  " pending async transaction", w);
}

/* debugfs: dump one remote reference (continues past this chunk). */
static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
		   ref->debug_id, ref->desc, ref->node->proc ?
"" : "dead ", 3352 ref->node->debug_id, ref->strong, ref->weak, ref->death); 3353} 3354 3355static void print_binder_proc(struct seq_file *m, 3356 struct binder_proc *proc, int print_all) 3357{ 3358 struct binder_work *w; 3359 struct rb_node *n; 3360 size_t start_pos = m->count; 3361 size_t header_pos; 3362 3363 seq_printf(m, "proc %d\n", proc->pid); 3364 header_pos = m->count; 3365 3366 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3367 print_binder_thread(m, rb_entry(n, struct binder_thread, 3368 rb_node), print_all); 3369 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { 3370 struct binder_node *node = rb_entry(n, struct binder_node, 3371 rb_node); 3372 if (print_all || node->has_async_transaction) 3373 print_binder_node(m, node); 3374 } 3375 if (print_all) { 3376 for (n = rb_first(&proc->refs_by_desc); 3377 n != NULL; 3378 n = rb_next(n)) 3379 print_binder_ref(m, rb_entry(n, struct binder_ref, 3380 rb_node_desc)); 3381 } 3382 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3383 print_binder_buffer(m, " buffer", 3384 rb_entry(n, struct binder_buffer, rb_node)); 3385 list_for_each_entry(w, &proc->todo, entry) 3386 print_binder_work(m, " ", " pending transaction", w); 3387 list_for_each_entry(w, &proc->delivered_death, entry) { 3388 seq_puts(m, " has delivered dead binder\n"); 3389 break; 3390 } 3391 if (!print_all && m->count == header_pos) 3392 m->count = start_pos; 3393} 3394 3395static const char * const binder_return_strings[] = { 3396 "BR_ERROR", 3397 "BR_OK", 3398 "BR_TRANSACTION", 3399 "BR_REPLY", 3400 "BR_ACQUIRE_RESULT", 3401 "BR_DEAD_REPLY", 3402 "BR_TRANSACTION_COMPLETE", 3403 "BR_INCREFS", 3404 "BR_ACQUIRE", 3405 "BR_RELEASE", 3406 "BR_DECREFS", 3407 "BR_ATTEMPT_ACQUIRE", 3408 "BR_NOOP", 3409 "BR_SPAWN_LOOPER", 3410 "BR_FINISHED", 3411 "BR_DEAD_BINDER", 3412 "BR_CLEAR_DEATH_NOTIFICATION_DONE", 3413 "BR_FAILED_REPLY" 3414}; 3415 3416static const char * const binder_command_strings[] = { 3417 
"BC_TRANSACTION", 3418 "BC_REPLY", 3419 "BC_ACQUIRE_RESULT", 3420 "BC_FREE_BUFFER", 3421 "BC_INCREFS", 3422 "BC_ACQUIRE", 3423 "BC_RELEASE", 3424 "BC_DECREFS", 3425 "BC_INCREFS_DONE", 3426 "BC_ACQUIRE_DONE", 3427 "BC_ATTEMPT_ACQUIRE", 3428 "BC_REGISTER_LOOPER", 3429 "BC_ENTER_LOOPER", 3430 "BC_EXIT_LOOPER", 3431 "BC_REQUEST_DEATH_NOTIFICATION", 3432 "BC_CLEAR_DEATH_NOTIFICATION", 3433 "BC_DEAD_BINDER_DONE" 3434}; 3435 3436static const char * const binder_objstat_strings[] = { 3437 "proc", 3438 "thread", 3439 "node", 3440 "ref", 3441 "death", 3442 "transaction", 3443 "transaction_complete" 3444}; 3445 3446static void print_binder_stats(struct seq_file *m, const char *prefix, 3447 struct binder_stats *stats) 3448{ 3449 int i; 3450 3451 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != 3452 ARRAY_SIZE(binder_command_strings)); 3453 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { 3454 if (stats->bc[i]) 3455 seq_printf(m, "%s%s: %d\n", prefix, 3456 binder_command_strings[i], stats->bc[i]); 3457 } 3458 3459 BUILD_BUG_ON(ARRAY_SIZE(stats->br) != 3460 ARRAY_SIZE(binder_return_strings)); 3461 for (i = 0; i < ARRAY_SIZE(stats->br); i++) { 3462 if (stats->br[i]) 3463 seq_printf(m, "%s%s: %d\n", prefix, 3464 binder_return_strings[i], stats->br[i]); 3465 } 3466 3467 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3468 ARRAY_SIZE(binder_objstat_strings)); 3469 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != 3470 ARRAY_SIZE(stats->obj_deleted)); 3471 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { 3472 if (stats->obj_created[i] || stats->obj_deleted[i]) 3473 seq_printf(m, "%s%s: active %d total %d\n", prefix, 3474 binder_objstat_strings[i], 3475 stats->obj_created[i] - stats->obj_deleted[i], 3476 stats->obj_created[i]); 3477 } 3478} 3479 3480static void print_binder_proc_stats(struct seq_file *m, 3481 struct binder_proc *proc) 3482{ 3483 struct binder_work *w; 3484 struct rb_node *n; 3485 int count, strong, weak; 3486 3487 seq_printf(m, "proc %d\n", proc->pid); 3488 count = 0; 3489 for (n 
= rb_first(&proc->threads); n != NULL; n = rb_next(n)) 3490 count++; 3491 seq_printf(m, " threads: %d\n", count); 3492 seq_printf(m, " requested threads: %d+%d/%d\n" 3493 " ready threads %d\n" 3494 " free async space %zd\n", proc->requested_threads, 3495 proc->requested_threads_started, proc->max_threads, 3496 proc->ready_threads, proc->free_async_space); 3497 count = 0; 3498 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) 3499 count++; 3500 seq_printf(m, " nodes: %d\n", count); 3501 count = 0; 3502 strong = 0; 3503 weak = 0; 3504 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { 3505 struct binder_ref *ref = rb_entry(n, struct binder_ref, 3506 rb_node_desc); 3507 count++; 3508 strong += ref->strong; 3509 weak += ref->weak; 3510 } 3511 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); 3512 3513 count = 0; 3514 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) 3515 count++; 3516 seq_printf(m, " buffers: %d\n", count); 3517 3518 count = 0; 3519 list_for_each_entry(w, &proc->todo, entry) { 3520 switch (w->type) { 3521 case BINDER_WORK_TRANSACTION: 3522 count++; 3523 break; 3524 default: 3525 break; 3526 } 3527 } 3528 seq_printf(m, " pending transactions: %d\n", count); 3529 3530 print_binder_stats(m, " ", &proc->stats); 3531} 3532 3533 3534static int binder_state_show(struct seq_file *m, void *unused) 3535{ 3536 struct binder_proc *proc; 3537 struct binder_node *node; 3538 int do_lock = !binder_debug_no_lock; 3539 3540 if (do_lock) 3541 binder_lock(__func__); 3542 3543 seq_puts(m, "binder state:\n"); 3544 3545 if (!hlist_empty(&binder_dead_nodes)) 3546 seq_puts(m, "dead nodes:\n"); 3547 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) 3548 print_binder_node(m, node); 3549 3550 hlist_for_each_entry(proc, &binder_procs, proc_node) 3551 print_binder_proc(m, proc, 1); 3552 if (do_lock) 3553 binder_unlock(__func__); 3554 return 0; 3555} 3556 3557static int binder_stats_show(struct seq_file *m, void 
*unused) 3558{ 3559 struct binder_proc *proc; 3560 int do_lock = !binder_debug_no_lock; 3561 3562 if (do_lock) 3563 binder_lock(__func__); 3564 3565 seq_puts(m, "binder stats:\n"); 3566 3567 print_binder_stats(m, "", &binder_stats); 3568 3569 hlist_for_each_entry(proc, &binder_procs, proc_node) 3570 print_binder_proc_stats(m, proc); 3571 if (do_lock) 3572 binder_unlock(__func__); 3573 return 0; 3574} 3575 3576static int binder_transactions_show(struct seq_file *m, void *unused) 3577{ 3578 struct binder_proc *proc; 3579 int do_lock = !binder_debug_no_lock; 3580 3581 if (do_lock) 3582 binder_lock(__func__); 3583 3584 seq_puts(m, "binder transactions:\n"); 3585 hlist_for_each_entry(proc, &binder_procs, proc_node) 3586 print_binder_proc(m, proc, 0); 3587 if (do_lock) 3588 binder_unlock(__func__); 3589 return 0; 3590} 3591 3592static int binder_proc_show(struct seq_file *m, void *unused) 3593{ 3594 struct binder_proc *proc = m->private; 3595 int do_lock = !binder_debug_no_lock; 3596 3597 if (do_lock) 3598 binder_lock(__func__); 3599 seq_puts(m, "binder proc state:\n"); 3600 print_binder_proc(m, proc, 1); 3601 if (do_lock) 3602 binder_unlock(__func__); 3603 return 0; 3604} 3605 3606static void print_binder_transaction_log_entry(struct seq_file *m, 3607 struct binder_transaction_log_entry *e) 3608{ 3609 seq_printf(m, 3610 "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", 3611 e->debug_id, (e->call_type == 2) ? "reply" : 3612 ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, 3613 e->from_thread, e->to_proc, e->to_thread, e->to_node, 3614 e->target_handle, e->data_size, e->offsets_size); 3615} 3616 3617static int binder_transaction_log_show(struct seq_file *m, void *unused) 3618{ 3619 struct binder_transaction_log *log = m->private; 3620 int i; 3621 3622 if (log->full) { 3623 for (i = log->next; i < ARRAY_SIZE(log->entry); i++) 3624 print_binder_transaction_log_entry(m, &log->entry[i]); 3625 } 3626 for (i = 0; i < log->next; i++) 3627 print_binder_transaction_log_entry(m, &log->entry[i]); 3628 return 0; 3629} 3630 3631static const struct file_operations binder_fops = { 3632 .owner = THIS_MODULE, 3633 .poll = binder_poll, 3634 .unlocked_ioctl = binder_ioctl, 3635 .compat_ioctl = binder_ioctl, 3636 .mmap = binder_mmap, 3637 .open = binder_open, 3638 .flush = binder_flush, 3639 .release = binder_release, 3640}; 3641 3642static struct miscdevice binder_miscdev = { 3643 .minor = MISC_DYNAMIC_MINOR, 3644 .name = "binder", 3645 .fops = &binder_fops 3646}; 3647 3648BINDER_DEBUG_ENTRY(state); 3649BINDER_DEBUG_ENTRY(stats); 3650BINDER_DEBUG_ENTRY(transactions); 3651BINDER_DEBUG_ENTRY(transaction_log); 3652 3653static int __init binder_init(void) 3654{ 3655 int ret; 3656 3657 binder_deferred_workqueue = create_singlethread_workqueue("binder"); 3658 if (!binder_deferred_workqueue) 3659 return -ENOMEM; 3660 3661 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); 3662 if (binder_debugfs_dir_entry_root) 3663 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", 3664 binder_debugfs_dir_entry_root); 3665 ret = misc_register(&binder_miscdev); 3666 if (binder_debugfs_dir_entry_root) { 3667 debugfs_create_file("state", 3668 S_IRUGO, 3669 binder_debugfs_dir_entry_root, 3670 NULL, 3671 &binder_state_fops); 3672 debugfs_create_file("stats", 3673 S_IRUGO, 3674 binder_debugfs_dir_entry_root, 3675 NULL, 3676 &binder_stats_fops); 3677 debugfs_create_file("transactions", 3678 S_IRUGO, 3679 
binder_debugfs_dir_entry_root, 3680 NULL, 3681 &binder_transactions_fops); 3682 debugfs_create_file("transaction_log", 3683 S_IRUGO, 3684 binder_debugfs_dir_entry_root, 3685 &binder_transaction_log, 3686 &binder_transaction_log_fops); 3687 debugfs_create_file("failed_transaction_log", 3688 S_IRUGO, 3689 binder_debugfs_dir_entry_root, 3690 &binder_transaction_log_failed, 3691 &binder_transaction_log_fops); 3692 } 3693 return ret; 3694} 3695 3696device_initcall(binder_init); 3697 3698#define CREATE_TRACE_POINTS 3699#include "binder_trace.h" 3700 3701MODULE_LICENSE("GPL v2"); 3702