Lines matching defs:proc in drivers/staging/android/binder.c (the Android Binder driver); each entry below is the source line number followed by the matched line.

70 BINDER_DEBUG_ENTRY(proc);
222 struct binder_proc *proc;
246 /* node + proc => ref (transaction) */
247 /* desc + proc => ref (transaction, inc/dec ref) */
248 /* node => refs + procs (proc exit) */
253 struct binder_proc *proc;
330 struct binder_proc *proc;
364 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
369 int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
371 struct files_struct *files = proc->files;
392 if (lock_task_sighand(proc->tsk, &irqs)) {
393 rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
394 unlock_task_sighand(proc->tsk, &irqs);
437 struct binder_proc *proc, unsigned int fd, struct file *file)
439 struct files_struct *files = proc->files;
466 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
469 struct files_struct *files = proc->files;
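
The three fd helpers above (task_get_unused_fd_flags at 369, the install helper at 437, task_close_fd at 466) all operate on proc->files, the files_struct the driver keeps for the binder process, so it can install and close descriptors on that process's behalf from another task's context. The RLIMIT_NOFILE bound read under lock_task_sighand at 392-394 is the same limit user space sees through getrlimit; a minimal sketch of that check (plain POSIX, nothing binder-specific):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;

            /* The same limit the driver reads from
             * tsk->signal->rlim[RLIMIT_NOFILE] at line 393. */
            if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
                    printf("fd limit: soft %llu hard %llu\n",
                           (unsigned long long)rl.rlim_cur,
                           (unsigned long long)rl.rlim_max);
            return 0;
    }
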
533 static size_t binder_buffer_size(struct binder_proc *proc,
536 if (list_is_last(&buffer->entry, &proc->buffers))
537 return proc->buffer + proc->buffer_size - (void *)buffer->data;
543 static void binder_insert_free_buffer(struct binder_proc *proc,
546 struct rb_node **p = &proc->free_buffers.rb_node;
554 new_buffer_size = binder_buffer_size(proc, new_buffer);
558 "at %p\n", proc->pid, new_buffer_size, new_buffer);
565 buffer_size = binder_buffer_size(proc, buffer);
573 rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
576 static void binder_insert_allocated_buffer(struct binder_proc *proc,
579 struct rb_node **p = &proc->allocated_buffers.rb_node;
598 rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
601 static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
604 struct rb_node *n = proc->allocated_buffers.rb_node;
608 kern_ptr = user_ptr - proc->user_buffer_offset
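
binder_buffer_size (533) derives a buffer's length from its neighbor in the address-ordered proc->buffers list (536-537), and binder_buffer_lookup (601) maps a userspace pointer back to its struct binder_buffer by subtracting proc->user_buffer_offset and the header offset (608). That works because the whole pool sits at one fixed offset between the kernel and user views. A toy demonstration of the arithmetic (model_buffer and the offset value are hypothetical stand-ins):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct model_buffer {
            int debug_id;
            uint8_t data[64];
    };

    int main(void)
    {
            struct model_buffer buf = { .debug_id = 1 };
            uintptr_t user_buffer_offset = 0x10000;  /* hypothetical */
            uintptr_t user_ptr = (uintptr_t)buf.data + user_buffer_offset;

            /* user_ptr - offset - offsetof(..., data) recovers the
             * buffer header, as at line 608. */
            struct model_buffer *back = (struct model_buffer *)
                    (user_ptr - user_buffer_offset -
                     offsetof(struct model_buffer, data));
            printf("recovered debug_id %d\n", back->debug_id);
            return 0;
    }
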
625 static int binder_update_page_range(struct binder_proc *proc, int allocate,
636 "binder: %d: %s pages %p-%p\n", proc->pid,
642 trace_binder_update_page_range(proc, allocate, start, end);
647 mm = get_task_mm(proc->tsk);
651 vma = proc->vma;
652 if (vma && mm != proc->vma_vm_mm) {
654 proc->pid);
664 "map pages in userspace, no vma\n", proc->pid);
671 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
677 "for page at %p\n", proc->pid, page_addr);
687 proc->pid, page_addr);
691 (uintptr_t)page_addr + proc->user_buffer_offset;
696 proc->pid, user_page_addr);
710 page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
713 proc->user_buffer_offset, PAGE_SIZE, NULL);
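
binder_update_page_range (625) allocates pages one at a time and gives each page two virtual addresses: one in the kernel's range and one in the caller's vma at page_addr + proc->user_buffer_offset (691), so both sides read the same physical memory with no copying. A user-space model of "one page, two addresses" (assumption: Linux with memfd_create; the driver itself uses alloc_page plus map_vm_area and vm_insert_page, not memfd):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = memfd_create("page", 0);
            if (fd < 0 || ftruncate(fd, 4096) < 0)
                    return 1;

            /* Two independent virtual mappings of one physical page. */
            char *a = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_SHARED, fd, 0);
            char *b = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
            if (a == MAP_FAILED || b == MAP_FAILED)
                    return 1;

            strcpy(a, "written through the first mapping");
            printf("second mapping sees: %s\n", b);
            return 0;
    }
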
730 static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
734 struct rb_node *n = proc->free_buffers.rb_node;
742 if (proc->vma == NULL) {
744 proc->pid);
753 "size %zd-%zd\n", proc->pid, data_size, offsets_size);
758 proc->free_async_space < size + sizeof(struct binder_buffer)) {
761 "failed, no async space left\n", proc->pid, size);
768 buffer_size = binder_buffer_size(proc, buffer);
782 "no address space\n", proc->pid, size);
787 buffer_size = binder_buffer_size(proc, buffer);
792 "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
806 if (binder_update_page_range(proc, 1,
810 rb_erase(best_fit, &proc->free_buffers);
812 binder_insert_allocated_buffer(proc, buffer);
817 binder_insert_free_buffer(proc, new_buffer);
821 "%p\n", proc->pid, size, buffer);
826 proc->free_async_space -= size + sizeof(struct binder_buffer);
829 "async free %zd\n", proc->pid, size,
830 proc->free_async_space);
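
binder_alloc_buf (730) walks proc->free_buffers, an rb-tree ordered by size, keeping the smallest free buffer that still fits (768-792), maps in any missing pages (806), moves the buffer to the allocated tree (810-812), splits the unused tail back into the free tree (817), and charges async transactions against free_async_space (826-830). The selection step is classic best fit; a minimal model (assumption: a flat array stands in for the rb-tree):

    #include <stddef.h>
    #include <stdio.h>

    /* Smallest free block with size >= want, cf. lines 734-787. */
    static int best_fit(const size_t *free_sizes, int n, size_t want)
    {
            int best = -1;
            for (int i = 0; i < n; i++) {
                    if (free_sizes[i] < want)
                            continue;
                    if (best < 0 || free_sizes[i] < free_sizes[best])
                            best = i;
            }
            return best;
    }

    int main(void)
    {
            size_t free_sizes[] = { 4096, 256, 1024 };
            int i = best_fit(free_sizes, 3, 512);
            printf("chose block %d (size %zu)\n", i, free_sizes[i]);
            return 0;   /* picks index 2: 1024 is the tightest fit */
    }

With a size-ordered rb-tree the same search is O(log n): descend left when the current node fits, right when it does not.
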
846 static void binder_delete_free_buffer(struct binder_proc *proc,
853 BUG_ON(proc->buffers.next == &buffer->entry);
862 "share page with %p\n", proc->pid, buffer, prev);
865 if (!list_is_last(&buffer->entry, &proc->buffers)) {
875 " %p share page with %p\n", proc->pid,
884 proc->pid, buffer, free_page_start ? "" : " end",
886 binder_update_page_range(proc, 0, free_page_start ?
893 static void binder_free_buf(struct binder_proc *proc,
898 buffer_size = binder_buffer_size(proc, buffer);
905 "_size %zd\n", proc->pid, buffer, size, buffer_size);
910 BUG_ON((void *)buffer < proc->buffer);
911 BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
914 proc->free_async_space += size + sizeof(struct binder_buffer);
918 "async free %zd\n", proc->pid, size,
919 proc->free_async_space);
922 binder_update_page_range(proc, 0,
926 rb_erase(&buffer->rb_node, &proc->allocated_buffers);
928 if (!list_is_last(&buffer->entry, &proc->buffers)) {
932 rb_erase(&next->rb_node, &proc->free_buffers);
933 binder_delete_free_buffer(proc, next);
936 if (proc->buffers.next != &buffer->entry) {
940 binder_delete_free_buffer(proc, buffer);
941 rb_erase(&prev->rb_node, &proc->free_buffers);
945 binder_insert_free_buffer(proc, buffer);
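
binder_free_buf (893) undoes the split: after sanity checks against the pool bounds (910-911) and returning any async space (914), it releases whole pages (922), merges the buffer with a free successor (928-933), lets a free predecessor absorb it (936-941), and reinserts the result (945), so adjacent free space never stays fragmented. The merge step over an address-ordered doubly linked list, as a compact model (simplified fields, no page handling):

    #include <stddef.h>
    #include <stdio.h>

    struct blk {
            size_t size;
            int free;
            struct blk *prev, *next;  /* address order, like proc->buffers */
    };

    /* Absorb a free successor, then let a free predecessor absorb us. */
    static struct blk *coalesce(struct blk *b)
    {
            if (b->next && b->next->free) {
                    b->size += b->next->size;
                    b->next = b->next->next;
                    if (b->next)
                            b->next->prev = b;
            }
            if (b->prev && b->prev->free) {
                    b->prev->size += b->size;
                    b->prev->next = b->next;
                    if (b->next)
                            b->next->prev = b->prev;
                    b = b->prev;
            }
            b->free = 1;
            return b;
    }

    int main(void)
    {
            struct blk a = { 64, 1, NULL, NULL }, b = { 32, 0, &a, NULL };
            a.next = &b;
            printf("merged size %zu\n", coalesce(&b)->size);  /* 96 */
            return 0;
    }
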
948 static struct binder_node *binder_get_node(struct binder_proc *proc,
951 struct rb_node *n = proc->nodes.rb_node;
967 static struct binder_node *binder_new_node(struct binder_proc *proc,
971 struct rb_node **p = &proc->nodes.rb_node;
992 rb_insert_color(&node->rb_node, &proc->nodes);
994 node->proc = proc;
1002 proc->pid, current->pid, node->debug_id,
1057 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
1059 list_add_tail(&node->work.entry, &node->proc->todo);
1060 wake_up_interruptible(&node->proc->wait);
1066 if (node->proc) {
1067 rb_erase(&node->rb_node, &node->proc->nodes);
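
Each process's exported objects live in proc->nodes, an rb-tree keyed by the userspace pointer the process registered (948-998), so an incoming flat_binder_object can be resolved to its owning node in O(log n); on teardown the node is erased and its proc cleared (1066-1067). The lookup shape at 951-967, modeled with a plain binary search tree (assumption: no rebalancing, unlike the kernel's rb-tree):

    #include <stdint.h>
    #include <stdio.h>

    struct node {
            uintptr_t ptr;             /* key: userspace object address */
            struct node *left, *right;
    };

    static struct node *get_node(struct node *root, uintptr_t ptr)
    {
            while (root) {
                    if (ptr < root->ptr)
                            root = root->left;
                    else if (ptr > root->ptr)
                            root = root->right;
                    else
                            return root;
            }
            return NULL;
    }

    int main(void)
    {
            struct node leaf = { 0x2000, NULL, NULL };
            struct node root = { 0x1000, NULL, &leaf };
            printf("%s\n", get_node(&root, 0x2000) ? "found" : "missing");
            return 0;
    }
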
1086 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
1089 struct rb_node *n = proc->refs_by_desc.rb_node;
1105 static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
1109 struct rb_node **p = &proc->refs_by_node.rb_node;
1129 new_ref->proc = proc;
1132 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1135 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1142 p = &proc->refs_by_desc.rb_node;
1155 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1161 "node %d\n", proc->pid, new_ref->debug_id,
1166 "dead node\n", proc->pid, new_ref->debug_id,
1176 "node %d\n", ref->proc->pid, ref->debug_id,
1179 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1180 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1188 "has death notification\n", ref->proc->pid,
1227 ref->proc->pid, ref->debug_id,
1242 ref->proc->pid, ref->debug_id,
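
References are indexed twice: refs_by_node answers "does this proc already hold a ref to that node" and refs_by_desc serves handle lookups (1086-1132). binder_get_ref_for_node assigns a new handle by scanning refs_by_desc in ascending order for the first gap, with handle 0 reserved for the context manager (1135-1155). The gap scan, modeled over a sorted array (cf. the rb_first/rb_next walk at 1135):

    #include <stdio.h>

    /* Lowest unused descriptor given handles sorted ascending. */
    static unsigned lowest_free_desc(const unsigned *descs, int n,
                                     int is_context_mgr)
    {
            unsigned desc = is_context_mgr ? 0 : 1;
            for (int i = 0; i < n; i++) {
                    if (descs[i] > desc)
                            break;             /* found a gap */
                    desc = descs[i] + 1;
            }
            return desc;
    }

    int main(void)
    {
            unsigned descs[] = { 1, 2, 4 };
            printf("next handle: %u\n",
                   lowest_free_desc(descs, 3, 0));   /* prints 3 */
            return 0;
    }
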
1288 t->debug_id, target_thread->proc->pid,
1297 "already\n", target_thread->proc->pid,
1325 static void binder_transaction_buffer_release(struct binder_proc *proc,
1334 proc->pid, buffer->debug_id,
1359 struct binder_node *node = binder_get_node(proc, fp->binder);
1372 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1389 task_close_fd(proc, fp->handle);
1400 static void binder_transaction(struct binder_proc *proc,
1418 e->from_proc = proc->pid;
1429 proc->pid, thread->pid);
1438 proc->pid, thread->pid, in_reply_to->debug_id,
1457 proc->pid, thread->pid,
1466 target_proc = target_thread->proc;
1470 ref = binder_get_ref(proc, tr->target.handle);
1474 proc->pid, thread->pid);
1487 target_proc = target_node->proc;
1499 proc->pid, thread->pid, tmp->debug_id,
1507 if (tmp->from && tmp->from->proc == target_proc)
1545 proc->pid, thread->pid, t->debug_id,
1553 proc->pid, thread->pid, t->debug_id,
1562 t->sender_euid = proc->tsk->cred->euid;
1589 "data ptr\n", proc->pid, thread->pid);
1595 "offsets ptr\n", proc->pid, thread->pid);
1602 proc->pid, thread->pid, tr->offsets_size);
1614 proc->pid, thread->pid, *offp);
1623 struct binder_node *node = binder_get_node(proc, fp->binder);
1625 node = binder_new_node(proc, fp->binder, fp->cookie);
1636 proc->pid, thread->pid,
1662 struct binder_ref *ref = binder_get_ref(proc, fp->handle);
1666 "handle, %ld\n", proc->pid,
1671 if (ref->node->proc == target_proc) {
1709 proc->pid, thread->pid, fp->handle);
1715 proc->pid, thread->pid, fp->handle);
1723 proc->pid, thread->pid, fp->handle);
1744 proc->pid, thread->pid, fp->type);
1801 proc->pid, thread->pid, return_error,
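
binder_transaction (1400) validates the reply stack (1429-1457), resolves the target from a handle or the context manager node (1466-1487), copies the sender's data and offsets into a buffer allocated from the target's pool, then walks each offset (1614) translating the flat_binder_object found there: a BINDER_TYPE_BINDER owned by the sender becomes a node plus a ref in the target (1623-1671), and fds are duplicated into the target's table (1709 onward). On the sending side the payload is just two arrays, data plus offsets; a hedged packing sketch (the struct mirrors the staging-era layout, exact field types vary by kernel version):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Simplified flat_binder_object, after the staging-era UAPI. */
    struct flat_binder_object {
            uint32_t type;
            uint32_t flags;
            union { void *binder; uint32_t handle; };
            void *cookie;
    };

    /* Append one object to the data buffer and record where it sits;
     * the driver walks exactly these offsets (lines 1589-1614). */
    static size_t pack_object(uint8_t *data, size_t pos, size_t *offs,
                              size_t *offs_count,
                              const struct flat_binder_object *obj)
    {
            memcpy(data + pos, obj, sizeof(*obj));
            offs[(*offs_count)++] = pos;
            return pos + sizeof(*obj);
    }

    int main(void)
    {
            uint8_t data[128];
            size_t offs[4], n_offs = 0;
            struct flat_binder_object obj = { 0 };
            size_t end = pack_object(data, 0, offs, &n_offs, &obj);
            return (end == sizeof(obj) && n_offs == 1) ? 0 : 1;
    }
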
1818 int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
1832 proc->stats.bc[_IOC_NR(cmd)]++;
1849 ref = binder_get_ref_for_node(proc,
1856 proc->pid, thread->pid,
1860 ref = binder_get_ref(proc, target);
1864 proc->pid, thread->pid, target);
1888 proc->pid, thread->pid, debug_string, ref->debug_id,
1904 node = binder_get_node(proc, node_ptr);
1908 proc->pid, thread->pid,
1918 proc->pid, thread->pid,
1930 proc->pid, thread->pid,
1940 proc->pid, thread->pid,
1949 proc->pid, thread->pid,
1969 buffer = binder_buffer_lookup(proc, data_ptr);
1973 proc->pid, thread->pid, data_ptr);
1980 proc->pid, thread->pid, data_ptr);
1985 proc->pid, thread->pid, data_ptr, buffer->debug_id,
2000 binder_transaction_buffer_release(proc, buffer, NULL);
2001 binder_free_buf(proc, buffer);
2012 binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
2019 proc->pid, thread->pid);
2025 proc->pid, thread->pid);
2026 } else if (proc->requested_threads == 0) {
2031 proc->pid, thread->pid);
2033 proc->requested_threads--;
2034 proc->requested_threads_started++;
2041 proc->pid, thread->pid);
2047 proc->pid, thread->pid);
2054 proc->pid, thread->pid);
2071 ref = binder_get_ref(proc, target);
2075 proc->pid, thread->pid,
2085 proc->pid, thread->pid,
2098 proc->pid, thread->pid);
2107 proc->pid, thread->pid);
2114 if (ref->node->proc == NULL) {
2119 list_add_tail(&ref->death->work.entry, &proc->todo);
2120 wake_up_interruptible(&proc->wait);
2129 proc->pid, thread->pid);
2139 proc->pid, thread->pid,
2149 list_add_tail(&death->work.entry, &proc->todo);
2150 wake_up_interruptible(&proc->wait);
2166 list_for_each_entry(w, &proc->delivered_death, entry) {
2175 proc->pid, thread->pid, cookie, death);
2179 proc->pid, thread->pid, cookie);
2189 list_add_tail(&death->work.entry, &proc->todo);
2190 wake_up_interruptible(&proc->wait);
2197 proc->pid, thread->pid, cmd);
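
binder_thread_write (1818) consumes the write buffer as a stream: a 32-bit BC_* code, then that command's fixed payload, repeated until write_size is used up, with per-command statistics kept at 1832; unknown codes reach the error print at 2197. Building such a stream from user space is plain serialization; a hedged sketch (the _DEMO values are placeholders, real codes come from the binder UAPI header):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Placeholders for illustration only; do not use these values. */
    #define BC_ENTER_LOOPER_DEMO  0x630cu
    #define BC_FREE_BUFFER_DEMO   0x630du

    static size_t put_u32(uint8_t *buf, size_t pos, uint32_t v)
    {
            memcpy(buf + pos, &v, sizeof(v));
            return pos + sizeof(v);
    }

    int main(void)
    {
            uint8_t wbuf[64];
            size_t pos = 0;

            pos = put_u32(wbuf, pos, BC_ENTER_LOOPER_DEMO); /* no payload */
            pos = put_u32(wbuf, pos, BC_FREE_BUFFER_DEMO);
            pos = put_u32(wbuf, pos, 0xdead0000u);
            /* ^ buffer address payload; pointer-sized in the real ABI.
             * wbuf[0..pos) is what bwr.write_buffer would point at. */
            return 0;
    }
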
2205 void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
2211 proc->stats.br[_IOC_NR(cmd)]++;
2216 static int binder_has_proc_work(struct binder_proc *proc,
2219 return !list_empty(&proc->todo) ||
2229 static int binder_thread_read(struct binder_proc *proc,
2255 binder_stat_br(proc, thread, thread->return_error2);
2263 binder_stat_br(proc, thread, thread->return_error);
2271 proc->ready_threads++;
2284 proc->pid, thread->pid, thread->looper);
2288 binder_set_nice(proc->default_priority);
2290 if (!binder_has_proc_work(proc, thread))
2293 ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
2305 proc->ready_threads--;
2319 else if (!list_empty(&proc->todo) && wait_for_proc_work)
2320 w = list_first_entry(&proc->todo, struct binder_work, entry);
2340 binder_stat_br(proc, thread, cmd);
2343 proc->pid, thread->pid);
2387 binder_stat_br(proc, thread, cmd);
2390 proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
2396 proc->pid, thread->pid, node->debug_id,
2398 rb_erase(&node->rb_node, &proc->nodes);
2404 proc->pid, thread->pid, node->debug_id, node->ptr,
2426 binder_stat_br(proc, thread, cmd);
2429 proc->pid, thread->pid,
2440 list_move(&w->entry, &proc->delivered_death);
2472 struct task_struct *sender = t->from->proc->tsk;
2482 proc->user_buffer_offset;
2495 binder_stat_br(proc, thread, cmd);
2499 proc->pid, thread->pid,
2502 t->debug_id, t->from ? t->from->proc->pid : 0,
2524 if (proc->requested_threads + proc->ready_threads == 0 &&
2525 proc->requested_threads_started < proc->max_threads &&
2529 proc->requested_threads++;
2532 proc->pid, thread->pid);
2535 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
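
The read side (binder_thread_read, 2229) first delivers any pending return errors (2255-2263), then either drains the thread's own todo list or, when waiting for process-wide work, bumps proc->ready_threads (2271) and sleeps exclusively on proc->wait (2293); after filling the buffer it may append a single BR_SPAWN_LOOPER when no ready or requested threads remain and max_threads allows one more (2524-2535). User space consumes the mirror-image BR_* stream; a hedged parse-loop sketch (placeholder code values again):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { BR_SPAWN_LOOPER_DEMO = 2 };   /* placeholder value */

    static void parse_read_buffer(const uint8_t *buf, size_t consumed)
    {
            size_t pos = 0;
            while (pos + sizeof(uint32_t) <= consumed) {
                    uint32_t cmd;
                    memcpy(&cmd, buf + pos, sizeof(cmd));
                    pos += sizeof(cmd);
                    switch (cmd) {
                    case BR_SPAWN_LOOPER_DEMO:
                            /* a real client starts another looper thread */
                            puts("driver asked for another looper");
                            break;
                    default:
                            break;   /* skip that command's payload */
                    }
            }
    }

    int main(void)
    {
            uint32_t one = BR_SPAWN_LOOPER_DEMO;
            parse_read_buffer((const uint8_t *)&one, sizeof(one));
            return 0;
    }
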
2565 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
2569 struct rb_node **p = &proc->threads.rb_node;
2587 thread->proc = proc;
2592 rb_insert_color(&thread->rb_node, &proc->threads);
2600 static int binder_free_thread(struct binder_proc *proc,
2607 rb_erase(&thread->rb_node, &proc->threads);
2615 "%s, still active\n", proc->pid, thread->pid,
2644 struct binder_proc *proc = filp->private_data;
2650 thread = binder_get_thread(proc);
2658 if (binder_has_proc_work(proc, thread))
2660 poll_wait(filp, &proc->wait, wait);
2661 if (binder_has_proc_work(proc, thread))
2676 struct binder_proc *proc = filp->private_data;
2681 /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
2690 thread = binder_get_thread(proc);
2709 proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
2713 ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
2723 ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
2725 if (!list_empty(&proc->todo))
2726 wake_up_interruptible(&proc->wait);
2735 proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
2744 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
2766 binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
2778 proc->pid, thread->pid);
2779 binder_free_thread(proc, thread);
2803 printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
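
binder_ioctl (2676) funnels everything through a few commands: BINDER_SET_MAX_THREADS copies straight into proc->max_threads (2744), BINDER_SET_CONTEXT_MGR creates the manager node (2766), and BINDER_WRITE_READ hands a binder_write_read block to binder_thread_write (2713) and binder_thread_read (2723), reporting progress back through write_consumed and read_consumed (2735). A hedged user-space round trip (assumption: <linux/android/binder.h> exists on the build host; on staging-era kernels this header was not yet exported as UAPI):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/android/binder.h>

    int main(void)
    {
            int fd = open("/dev/binder", O_RDWR);
            if (fd < 0)
                    return 1;

            char rbuf[256];
            struct binder_write_read bwr = {
                    .read_buffer = (uintptr_t)rbuf,
                    .read_size = sizeof(rbuf),
            };

            /* Blocks until work arrives unless opened O_NONBLOCK;
             * see the wait at line 2293. */
            if (ioctl(fd, BINDER_WRITE_READ, &bwr) == 0)
                    printf("read_consumed=%llu\n",
                           (unsigned long long)bwr.read_consumed);
            close(fd);
            return 0;
    }
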
2811 struct binder_proc *proc = vma->vm_private_data;
2814 proc->pid, vma->vm_start, vma->vm_end,
2821 struct binder_proc *proc = vma->vm_private_data;
2824 proc->pid, vma->vm_start, vma->vm_end,
2827 proc->vma = NULL;
2828 proc->vma_vm_mm = NULL;
2829 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
2841 struct binder_proc *proc = filp->private_data;
2850 proc->pid, vma->vm_start, vma->vm_end,
2862 if (proc->buffer) {
2874 proc->buffer = area->addr;
2875 proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
2880 while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
2881 printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
2886 proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
2887 if (proc->pages == NULL) {
2892 proc->buffer_size = vma->vm_end - vma->vm_start;
2895 vma->vm_private_data = proc;
2897 if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
2902 buffer = proc->buffer;
2903 INIT_LIST_HEAD(&proc->buffers);
2904 list_add(&buffer->entry, &proc->buffers);
2906 binder_insert_free_buffer(proc, buffer);
2907 proc->free_async_space = proc->buffer_size / 2;
2909 proc->files = get_files_struct(proc->tsk);
2910 proc->vma = vma;
2911 proc->vma_vm_mm = vma->vm_mm;
2914 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
2918 kfree(proc->pages);
2919 proc->pages = NULL;
2922 vfree(proc->buffer);
2923 proc->buffer = NULL;
2929 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
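
binder_mmap (2841 onward) turns the vma into the buffer pool: it grabs a matching kernel address range (2874), records the fixed user/kernel delta (2875), sizes the pages array to the mapping (2886), populates only the first page eagerly (2897), seeds the free list with one buffer spanning the pool (2902-2906), and reserves half the space for async work (2907). The corresponding user-space setup is just open plus mmap; a hedged sketch (the 128 KiB size mirrors what Android's servicemanager traditionally maps, not a driver requirement):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/dev/binder", O_RDWR);
            if (fd < 0)
                    return 1;

            /* Read-only and private: the driver writes transaction
             * data into this range; userspace only reads it. */
            void *map = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE,
                             fd, 0);
            if (map == MAP_FAILED)
                    perror("mmap");
            else
                    printf("binder buffer pool at %p\n", map);
            close(fd);
            return 0;
    }
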
2935 struct binder_proc *proc;
2940 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
2941 if (proc == NULL)
2944 proc->tsk = current;
2945 INIT_LIST_HEAD(&proc->todo);
2946 init_waitqueue_head(&proc->wait);
2947 proc->default_priority = task_nice(current);
2952 hlist_add_head(&proc->proc_node, &binder_procs);
2953 proc->pid = current->group_leader->pid;
2954 INIT_LIST_HEAD(&proc->delivered_death);
2955 filp->private_data = proc;
2961 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
2962 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
2963 binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
2971 struct binder_proc *proc = filp->private_data;
2973 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
2978 static void binder_deferred_flush(struct binder_proc *proc)
2982 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
2990 wake_up_interruptible_all(&proc->wait);
2993 "binder_flush: %d woke %d threads\n", proc->pid,
2999 struct binder_proc *proc = filp->private_data;
3000 debugfs_remove(proc->debugfs_entry);
3001 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3006 static void binder_deferred_release(struct binder_proc *proc)
3013 BUG_ON(proc->vma);
3014 BUG_ON(proc->files);
3016 hlist_del(&proc->proc_node);
3017 if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
3020 proc->pid);
3026 while ((n = rb_first(&proc->threads))) {
3029 active_transactions += binder_free_thread(proc, thread);
3033 while ((n = rb_first(&proc->nodes))) {
3037 rb_erase(&node->rb_node, &proc->nodes);
3046 node->proc = NULL;
3057 list_add_tail(&ref->death->work.entry, &ref->proc->todo);
3058 wake_up_interruptible(&ref->proc->wait);
3070 while ((n = rb_first(&proc->refs_by_desc))) {
3076 binder_release_work(&proc->todo);
3079 while ((n = rb_first(&proc->allocated_buffers))) {
3086 printk(KERN_ERR "binder: release proc %d, "
3088 proc->pid, t->debug_id);
3091 binder_free_buf(proc, buffer);
3098 if (proc->pages) {
3100 for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
3101 if (proc->pages[i]) {
3102 void *page_addr = proc->buffer + i * PAGE_SIZE;
3106 proc->pid, i,
3110 __free_page(proc->pages[i]);
3114 kfree(proc->pages);
3115 vfree(proc->buffer);
3118 put_task_struct(proc->tsk);
3124 proc->pid, threads, nodes, incoming_refs, outgoing_refs,
3127 kfree(proc);
3132 struct binder_proc *proc;
3140 proc = hlist_entry(binder_deferred_list.first,
3142 hlist_del_init(&proc->deferred_work_node);
3143 defer = proc->deferred_work;
3144 proc->deferred_work = 0;
3146 proc = NULL;
3153 files = proc->files;
3155 proc->files = NULL;
3159 binder_deferred_flush(proc);
3162 binder_deferred_release(proc); /* frees proc */
3167 } while (proc);
3172 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3175 proc->deferred_work |= defer;
3176 if (hlist_unhashed(&proc->deferred_work_node)) {
3177 hlist_add_head(&proc->deferred_work_node,
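
Teardown funnels through one pattern (3132-3177): binder_defer_work ORs a BINDER_DEFERRED_* bit into proc->deferred_work and queues the proc at most once (3175-3177); binder_deferred_func later pops one proc at a time, snapshots and clears its bits (3142-3144), and runs flush, put-files, or release accordingly, with release freeing the proc itself (3162). The coalescing trick as a minimal single-threaded model (the kernel does all of this under a spinlock):

    #include <stdio.h>

    enum { DEFER_FLUSH = 1, DEFER_RELEASE = 2 };

    struct mproc {
            unsigned deferred_work;
            int queued;
    };

    /* OR in new work; only enqueue the proc once (cf. 3175-3177). */
    static void defer_work(struct mproc *p, unsigned defer)
    {
            p->deferred_work |= defer;
            if (!p->queued)
                    p->queued = 1;        /* kernel: hlist_add_head */
    }

    static void drain(struct mproc *p)
    {
            unsigned todo = p->deferred_work;  /* snapshot, then clear */
            p->deferred_work = 0;
            p->queued = 0;
            if (todo & DEFER_FLUSH)
                    puts("flush");
            if (todo & DEFER_RELEASE)
                    puts("release");
    }

    int main(void)
    {
            struct mproc p = { 0, 0 };
            defer_work(&p, DEFER_FLUSH);
            defer_work(&p, DEFER_RELEASE);  /* coalesces; no re-enqueue */
            drain(&p);
            return 0;
    }
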
3190 t->from ? t->from->proc->pid : 0,
3301 seq_puts(m, " proc");
3303 seq_printf(m, " %d", ref->proc->pid);
3314 ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
3319 struct binder_proc *proc, int print_all)
3326 seq_printf(m, "proc %d\n", proc->pid);
3329 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3332 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
3339 for (n = rb_first(&proc->refs_by_desc);
3345 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3348 list_for_each_entry(w, &proc->todo, entry)
3350 list_for_each_entry(w, &proc->delivered_death, entry) {
3400 "proc",
3444 struct binder_proc *proc)
3450 seq_printf(m, "proc %d\n", proc->pid);
3452 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
3457 " free async space %zd\n", proc->requested_threads,
3458 proc->requested_threads_started, proc->max_threads,
3459 proc->ready_threads, proc->free_async_space);
3461 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
3467 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
3477 for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
3482 list_for_each_entry(w, &proc->todo, entry) {
3493 print_binder_stats(m, " ", &proc->stats);
3499 struct binder_proc *proc;
3514 hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3515 print_binder_proc(m, proc, 1);
3523 struct binder_proc *proc;
3534 hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3535 print_binder_proc_stats(m, proc);
3543 struct binder_proc *proc;
3551 hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
3552 print_binder_proc(m, proc, 0);
3560 struct binder_proc *proc = m->private;
3565 seq_puts(m, "binder proc state:\n");
3566 print_binder_proc(m, proc, 1);
3628 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",