/kernel/gcov/
fs.c
     32:  * @list: list head for child node list
     35:  * @parent: parent node
    141:  * Return a profiling data set associated with the given node. This is ...
    145: static struct gcov_info *get_node_info(struct gcov_node *node)  [argument]
    147:     if (node->num_loaded > 0)
    148:         return node->loaded_info[0];
    150:     return node->unloaded_info;
    155:  * all profiling data associated with the given node.
    157: static struct gcov_info *get_accumulated_info(struct gcov_node *node)  [argument]
    162:     if (node ...
    180:     struct gcov_node *node = inode->i_private;  [local]
    240:     struct gcov_node *node;  [local]
    255: reset_node(struct gcov_node *node)  [argument]
    277:     struct gcov_node *node;  [local]
    366: add_links(struct gcov_node *node, struct dentry *parent)  [argument]
    412: init_node(struct gcov_node *node, struct gcov_info *info, const char *name, struct gcov_node *parent)  [argument]
    434:     struct gcov_node *node;  [local]
    471: remove_links(struct gcov_node *node)  [argument]
    487: release_node(struct gcov_node *node)  [argument]
    500: remove_node(struct gcov_node *node)  [argument]
    518:     struct gcov_node *node;  [local]
    535:     struct gcov_node *node;  [local]
    577:     struct gcov_node *node;  [local]
    621: add_info(struct gcov_node *node, struct gcov_info *info)  [argument]
    674: get_info_index(struct gcov_node *node, struct gcov_info *info)  [argument]
    688: save_info(struct gcov_node *node, struct gcov_info *info)  [argument]
    706: remove_info(struct gcov_node *node, struct gcov_info *info)  [argument]
    737:     struct gcov_node *node;  [local]
    ... (more matches not shown)
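The accessor at fs.c lines 145-150 picks one profiling data set per node: the first loaded per-module set if any module is still loaded, otherwise the data accumulated from unloaded modules. A minimal user-space sketch of that fallback pattern follows; the "_sketch" type and its field layout are my assumptions, not the kernel's definitions.

struct gcov_info; /* opaque here, as it effectively is to this accessor */

struct gcov_node_sketch {
    int num_loaded;
    struct gcov_info **loaded_info;   /* data sets of still-loaded modules */
    struct gcov_info *unloaded_info;  /* data accumulated after unload */
};

/* Prefer live data; fall back to what unloaded modules left behind. */
static struct gcov_info *get_node_info(struct gcov_node_sketch *node)
{
    if (node->num_loaded > 0)
        return node->loaded_info[0];
    return node->unloaded_info;
}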
/kernel/locking/
mcs_spinlock.c
     11:  * Using a single mcs node per CPU is safe because sleeping locks should not be ...
     34:  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
     39:                struct optimistic_spin_node *node,
     47:  * If there is a prev node in queue, then the 'old' value will be
     48:  * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if ...
     65:  * We must xchg() the @node->next value, because if we were to ...
     67:  * @node->next might complete Step-A and think its @prev is ...
     72:  * wait for a new @node->next from its Step-C.
     74:     if (node->next) {
     75:         next = xchg(&node ...
     38: osq_wait_next(struct optimistic_spin_queue *lock, struct optimistic_spin_node *node, struct optimistic_spin_node *prev)  [argument]
     88:     struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);  [local]
    183:     struct optimistic_spin_node *node, *next;  [local]
    ... (more matches not shown)
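The comment block at lines 65-75 argues that the successor pointer must be taken with xchg() rather than a plain read, so that exactly one of a racing unlock() and unqueue() claims it. Below is a hedged C11 sketch of that claim-by-exchange idiom under an assumed struct qnode; it is not the kernel's osq code, which adds per-CPU node encoding and the full Step-A/B/C unqueue protocol.

#include <stdatomic.h>
#include <stddef.h>

struct qnode {
    _Atomic(struct qnode *) next;
};

/*
 * Claim the successor exactly once: the thread whose exchange returns
 * a non-NULL pointer owns it. A plain load would let an unlocker and a
 * concurrently unqueueing waiter both act on the same successor.
 */
static struct qnode *claim_next(struct qnode *node)
{
    if (atomic_load_explicit(&node->next, memory_order_relaxed))
        return atomic_exchange_explicit(&node->next, NULL,
                                        memory_order_acq_rel);
    return NULL;
}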
mcs_spinlock.h
     54:  * In order to acquire the lock, the caller should declare a local node and
     55:  * pass a reference of the node to this function in addition to the lock.
     57:  * on this node->locked until the previous lock holder sets the node->locked ...
     61: void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)  [argument]
     65:     /* Init node */
     66:     node->locked = 0;
     67:     node->next = NULL;
     69:     prev = xchg(lock, node);
     72:      * Lock acquired, don't need to set node ...
     92: mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)  [argument]
    ... (more matches not shown)
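Lines 54-69 describe the MCS protocol: each contender brings its own queue node, swaps itself into the lock's tail pointer, and spins only on its private node->locked flag. A self-contained user-space rendering with C11 atomics is sketched below; struct mcs_node and the chosen memory orders are my assumptions, not the kernel's mcs_spinlock, which uses kernel barrier primitives instead.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
    _Atomic(struct mcs_node *) next;
    atomic_bool locked;  /* set to true by the predecessor at handoff */
};

/* lock is the tail pointer of the waiter queue (NULL when free). */
static void mcs_lock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
    struct mcs_node *prev;

    atomic_store_explicit(&node->next, NULL, memory_order_relaxed);
    atomic_store_explicit(&node->locked, false, memory_order_relaxed);

    prev = atomic_exchange_explicit(lock, node, memory_order_acq_rel);
    if (!prev)
        return;  /* queue was empty: lock acquired immediately */

    /* Publish ourselves to the predecessor, then spin on our own flag. */
    atomic_store_explicit(&prev->next, node, memory_order_release);
    while (!atomic_load_explicit(&node->locked, memory_order_acquire))
        ;
}

static void mcs_unlock(_Atomic(struct mcs_node *) *lock, struct mcs_node *node)
{
    struct mcs_node *next =
        atomic_load_explicit(&node->next, memory_order_acquire);

    if (!next) {
        /* No known successor: try to swing the tail back to NULL. */
        struct mcs_node *expected = node;

        if (atomic_compare_exchange_strong_explicit(
                lock, &expected, NULL,
                memory_order_acq_rel, memory_order_acquire))
            return;
        /* A waiter is mid-enqueue; wait for it to link itself in. */
        while (!(next = atomic_load_explicit(&node->next,
                                             memory_order_acquire)))
            ;
    }
    atomic_store_explicit(&next->locked, true, memory_order_release);
}

Because each thread spins on its own node, handoff costs one cache-line transfer per waiter instead of a coherence storm on a single shared flag.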
/kernel/power/
wakelock.c
     27:     struct rb_node node;  [member of struct wakelock]
     38:     struct rb_node *node;  [local]
     45:     for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
     46:         wl = rb_entry(node, struct wakelock, node);
    122:     rb_erase(&wl->node, &wakelocks_tree);
    140:     struct rb_node **node ...  [local]
    ... (more matches not shown)
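rb_entry() at line 46 is container_of(): the rb_node is embedded inside struct wakelock, and the containing structure is recovered by subtracting the member's offset. A runnable user-space sketch of the same idiom; the "_sketch" names are mine.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node_sketch { struct rb_node_sketch *left, *right; };

struct wakelock_sketch {
    const char *name;
    struct rb_node_sketch node;  /* embedded, as in struct wakelock */
};

int main(void)
{
    struct wakelock_sketch wl = { .name = "demo" };
    struct rb_node_sketch *n = &wl.node;

    /* rb_entry(n, struct wakelock, node) recovers the wakelock. */
    struct wakelock_sketch *back =
        container_of(n, struct wakelock_sketch, node);

    printf("%s\n", back->name);
    return 0;
}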
qos.c
     20:  * subsystem by opening the device node /dev/... and writing there request to
     21:  * the node. As long as the process holds a file handle open to the node the ...
    149:     struct plist_node *node;  [local]
    163:     plist_for_each(node, &c->list)
    164:         total_value += node->prio;
    189:  * @node: request to add to the list, to update or to remove
    196: int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,  [argument]
    212:     plist_del(node, &c->list);
    220:     plist_del(node, ...
    ... (more matches not shown)
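Lines 163-164 show the sum-type aggregation: walk the constraint's priority list and total every request's prio. A minimal sketch, with a plain singly linked list standing in for the kernel's plist.

struct plist_node_sketch {
    int prio;                        /* the request's value */
    struct plist_node_sketch *next;
};

/* Sum every request on the list, as the loop at lines 163-164 does
 * with plist_for_each(). */
static int pm_qos_sum(const struct plist_node_sketch *head)
{
    int total_value = 0;
    const struct plist_node_sketch *node;

    for (node = head; node; node = node->next)
        total_value += node->prio;
    return total_value;
}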
snapshot.c
    261:  * The struct rtree_node represents one node of the radix tree.
    299:     struct rtree_node *node;  [member of struct bm_position]
    324:  * alloc_rtree_node - Allocate a new node and add it to the radix tree.
    327:  * leave nodes of the radix tree. It also adds the node to the ...
    334:     struct rtree_node *node;  [local]
    336:     node = chain_alloc(ca, sizeof(struct rtree_node));
    337:     if (!node)
    340:     node->data = get_image_page(gfp_mask, safe_needed);
    341:     if (!node->data)
    344:     list_add_tail(&node ...
    359:     struct rtree_node *node, *block, **dst;  [local]
    466:     struct rtree_node *node;  [local]
    637:     struct rtree_node *node;  [local]
    ... (more matches not shown)
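alloc_rtree_node() (lines 334-344) follows a two-step pattern: allocate the node, allocate its backing data page, and chain the node onto a list so the whole radix tree can later be torn down by walking that list. A user-space sketch under assumed types; a plain pointer chain replaces the kernel's list_head.

#include <stdlib.h>

struct rtree_node_sketch {
    void *data;                      /* backing page */
    struct rtree_node_sketch *next;  /* stands in for the list_head */
};

/* Allocate a node plus its data page and chain it for later teardown. */
static struct rtree_node_sketch *
alloc_rtree_node_sketch(struct rtree_node_sketch **chain, size_t page_size)
{
    struct rtree_node_sketch *node = calloc(1, sizeof(*node));

    if (!node)
        return NULL;
    node->data = calloc(1, page_size);
    if (!node->data) {
        free(node);
        return NULL;
    }
    node->next = *chain;
    *chain = node;
    return node;
}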
swap.c
    114:     struct rb_node node;  [member of struct swsusp_extent]
    127:     /* Figure out where to put the new node */
    129:         ext = rb_entry(*new, struct swsusp_extent, node);
    150:     /* Add the new node and rebalance the tree. */
    157:     rb_link_node(&ext->node, parent, new);
    158:     rb_insert_color(&ext->node, &swsusp_extents);
    189:     struct rb_node *node;  [local]
    191:     while ((node = swsusp_extents.rb_node)) {
    195:         ext = container_of(node, struct swsusp_extent, node);
    ... (more matches not shown)
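The comments at lines 127 and 150 mark the two halves of the standard kernel rbtree insertion idiom: descend through a pointer-to-pointer until an empty slot is found, then rb_link_node() plus rb_insert_color(). The descent reads naturally as plain binary-search-tree insertion; a sketch follows, with no rebalancing and assumed types. Duplicate offsets are simply rejected here, where the kernel instead merges adjacent extents.

#include <stdlib.h>

struct extent_sketch {
    unsigned long start, end;
    struct extent_sketch *left, *right;
};

/* Descend comparing keys, then link the new node at the empty slot
 * found; the kernel then rebalances with rb_insert_color(). */
static int extent_insert(struct extent_sketch **root, unsigned long offset)
{
    struct extent_sketch **new = root, *ext;

    while (*new) {
        ext = *new;
        if (offset < ext->start)
            new = &ext->left;
        else if (offset > ext->end)
            new = &ext->right;
        else
            return -1;  /* offset already covered */
    }
    ext = calloc(1, sizeof(*ext));
    if (!ext)
        return -1;
    ext->start = ext->end = offset;
    *new = ext;  /* rb_link_node() equivalent */
    return 0;
}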
/kernel/trace/
trace_stat.c
     26:     struct rb_node node;  [member of struct stat_node]
     50:     rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) {
     87:      * Figure out where to put new node ...
     94:     this = container_of(*new, struct stat_node, node);
    104:     rb_link_node(&data->node, parent, new);
    105:     rb_insert_color(&data->node, root);
    111:      * This one will force an insertion as right-most node ...
    175:     struct rb_node *node;  [local]
    189:     node = rb_first(&session->stat_root);
    190:     for (i = 0; node ...
    199:     struct rb_node *node = p;  [local]
    304:     struct stat_session *session, *node;  [local]
    348:     struct stat_session *node, *tmp;  [local]
    ... (more matches not shown)
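Line 50's rbtree_postorder_for_each_entry_safe() frees every node of the tree without rebalancing: visiting both children before their parent guarantees no freed node is ever touched again. The same property in a short recursive sketch; the kernel macro is iterative, but the visit order is the point.

#include <stdlib.h>

struct stat_sketch {
    struct stat_sketch *left, *right;
};

/* Post-order disposal: free the subtrees, then the node itself. */
static void free_postorder(struct stat_sketch *n)
{
    if (!n)
        return;
    free_postorder(n->left);
    free_postorder(n->right);
    free(n);
}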
/kernel/irq/
irqdesc.c
     39: static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)  [argument]
     41:     if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
     45:     if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
     53: static void desc_smp_init(struct irq_desc *desc, int node)  [argument]
     55:     desc->irq_data.node = node;
     64:     return desc->irq_data.node;
     69: alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }  [argument]
     70: static inline void desc_smp_init(struct irq_desc *desc, int node) { }  [argument]
     74: static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,  [argument]
    135: alloc_desc(int irq, int node, struct module *owner)  [argument]
    180: alloc_descs(unsigned int start, unsigned int cnt, int node, struct module *owner)  [argument]
    216:     int i, initcnt, node = first_online_node;  [local]
    254:     int count, i, node = first_online_node;  [local]
    290: alloc_descs(unsigned int start, unsigned int cnt, int node, struct module *owner)  [argument]
    415: __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, struct module *owner)  [argument]
    468: irq_alloc_hwirqs(int cnt, int node)  [argument]
    ... (more matches not shown)
internals.h
     85: extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
irqdomain.c
     28:  * @of_node: optional device-tree node of the interrupt controller
     83:  * node when all entries are removed. Shout if there are ...
    107:  * @of_node: pointer to interrupt controller's device tree node.
    153:  * @of_node: pointer to interrupt controller's device tree node.
    187:  * irq_find_host() - Locates a domain for a given device node
    188:  * @node: device-tree node of the interrupt controller
    190: struct irq_domain *irq_find_host(struct device_node *node)  [argument]
    197:  * the absence of a device node. This isn't a problem so far ...
    203:     rc = h->ops->match(h, node);
    ... (more matches not shown)
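irq_find_host() (lines 187-203) walks the registered domains and asks each one, via its ops->match() callback, whether it serves the given device-tree node. A user-space sketch of that callback-table walk; types are assumed, and absent callbacks simply never match here, whereas the kernel falls back to comparing the domain's own of_node.

struct device_node_sketch;

struct domain_sketch {
    struct domain_sketch *next;
    int (*match)(struct domain_sketch *d, struct device_node_sketch *np);
};

/* First registered domain whose match() claims the node wins. */
static struct domain_sketch *
find_host(struct domain_sketch *list, struct device_node_sketch *np)
{
    struct domain_sketch *d;

    for (d = list; d; d = d->next)
        if (d->match && d->match(d, np))
            return d;
    return 0;
}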
/kernel/
workqueue_internal.h
     40:     struct list_head node;    /* A: anchored at pool->workers */  [member of struct worker]
     41:                               /* A: runs through worker->node */
audit_tree.c
     31:     struct node {  [struct, defined in struct audit_chunk]
     65:  * node.index allows to get from node.list to containing chunk.
    143:     size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
    210: static struct audit_chunk *find_chunk(struct node *p)
    217: static void untag_chunk(struct node *p)
    360:     struct node *p;
    496:     struct node *p;
    498:     p = list_entry(victim->chunks.next, struct node, list);
    518:     struct node *node;  [local]
    527:     struct node *node;  [local]
    592:     struct node *node;  [local]
    692:     struct node *node;  [local]
    794:     struct node *node;  [local]
    ... (more matches not shown)
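Line 143 sizes a structure that ends in a flexible array member: the header bytes up to owners[], plus count trailing elements. A runnable sketch of the same computation; the struct names are mine. Calling alloc_chunk(4) reserves room for exactly four owners in one allocation.

#include <stddef.h>
#include <stdlib.h>

struct owner_sketch { int index; };

struct chunk_sketch {
    int count;
    struct owner_sketch owners[];  /* flexible array member */
};

/* Header plus count trailing elements, as at line 143. */
static struct chunk_sketch *alloc_chunk(int count)
{
    struct chunk_sketch *c =
        calloc(1, offsetof(struct chunk_sketch, owners) +
                  count * sizeof(struct owner_sketch));

    if (c)
        c->count = count;
    return c;
}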
kthread.c
     32:     int node;  [member of struct kthread_create_info]
    213: /* called from do_fork() to get node information for about to be created task */
    228:     current->pref_node_fork = create->node;
    249:  * @node: memory node number.
    256:  * If thread is going to be bound on a particular cpu, give its node
    257:  * in @node, to get NUMA affinity for kthread stack, or else give -1.
    268:     void *data, int node,
    281:     create->node = node;
    267: kthread_create_on_node(int (*threadfn)(void *data), void *data, int node, const char namefmt[], ...)  [argument]
    ... (more matches not shown)
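The doc comment at lines 256-257 is the usage contract: pass the NUMA node of the CPU the thread will be bound to, so its stack is allocated near where it runs, or -1 for no preference. A hedged kernel-context usage sketch; my_thread_fn and spawn_on are hypothetical names, while kthread_create_on_node(), kthread_bind() and cpu_to_node() are the real interfaces.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/topology.h>

static int my_thread_fn(void *data)
{
    return 0;
}

static struct task_struct *spawn_on(int cpu)
{
    struct task_struct *t;

    /* Allocate the thread's stack on the node of its future CPU. */
    t = kthread_create_on_node(my_thread_fn, NULL, cpu_to_node(cpu),
                               "my_worker/%d", cpu);
    if (!IS_ERR(t))
        kthread_bind(t, cpu);  /* caller later wakes it with wake_up_process() */
    return t;
}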
profile.c
    333:     int node, cpu = (unsigned long)__cpu;  [local]
    339:     node = cpu_to_mem(cpu);
    342:     page = alloc_pages_exact_node(node, ...
    350:     page = alloc_pages_exact_node(node, ...
    544:     int node = cpu_to_mem(cpu);  [local]
    547:     page = alloc_pages_exact_node(node, ...
    554:     page = alloc_pages_exact_node(node, ...
workqueue.c
    142:     int node;  /* I: the associated node ID */  [member of struct worker_pool]
    169:     struct hlist_node hash_node;  /* PL: unbound_pool_hash node */
    203:     struct list_head pwqs_node;  /* WR: node on wq->pwqs */
    204:     struct list_head mayday_node;  /* MD: node on wq->maydays */
    263:     struct pool_workqueue __rcu *numa_pwq_tbl[];  /* FR: unbound pwqs indexed by node */
    269: /* possible CPUs of each node */
    374:     list_for_each_entry((worker), &(pool)->workers, node) \
    541:  * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
    543:  * @node ...
    551: unbound_pwq_by_node(struct workqueue_struct *wq, int node)  [argument]
   1584: alloc_worker(int node)  [argument]
   3006:     int node, written = 0;  [local]
   3460:     int node;  [local]
   3697: wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, int cpu_going_down, cpumask_t *cpumask)  [argument]
   3721: numa_pwq_tbl_install(struct workqueue_struct *wq, int node, struct pool_workqueue *pwq)  [argument]
   3758:     int node, ret;  [local]
   3882:     int node = cpu_to_node(cpu);  [local]
   4115:     int node;  [local]
   4784:     int node, cpu;  [local]
    ... (more matches not shown)
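Line 263 declares numa_pwq_tbl[] as a per-node pointer table, and unbound_pwq_by_node() at line 551 is the lookup into it. A tiny sketch of the shape of that lookup; the explicit dfl_pwq fallback is my simplification, since the kernel instead pre-installs the default pwq in every slot and wraps the load in rcu_dereference().

#define NR_NODES_SKETCH 8

struct pwq_sketch { int pool_id; };

struct wq_sketch {
    struct pwq_sketch *dfl_pwq;                        /* default pwq */
    struct pwq_sketch *numa_pwq_tbl[NR_NODES_SKETCH];  /* per-node table */
};

/* Per-node lookup with an illustrative fallback to the default. */
static struct pwq_sketch *pwq_by_node(struct wq_sketch *wq, int node)
{
    struct pwq_sketch *pwq = wq->numa_pwq_tbl[node];

    return pwq ? pwq : wq->dfl_pwq;
}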
pid.c
    386:     hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
    399:     hlist_del_rcu(&link->node);
    426:     hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
    437:     result = hlist_entry(first, struct task_struct, pids[(type)].node);
cpuset.c
    244:  * iteration and the first node to be visited.
   1009:  * read_mems_allowed_begin(). If at least one node remains unchanged and ...
   2099:  * removing that CPU or node from all cpusets. If this removes the
   2100:  * last CPU or node from a cpuset, then move the tasks in the empty ...
   2341:  * Description: Finish top cpuset after cpu, node maps are initialized
   2453:  * cpuset_node_allowed_softwall - Can we allocate on a memory node?
   2454:  * @node: is this an allowed node?
   2458:  * set, yes, we can always allocate. If node is in our task's mems_allowed,
   2459:  * yes. If it's not a __GFP_HARDWALL request and this node i...
   2513: __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)  [argument]
   2570: __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)  [argument]
   2614:     int node;  [local]
    ... (more matches not shown)
fork.c
    128: static inline struct task_struct *alloc_task_struct_node(int node)  [argument]
    130:     return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
    151:     int node)
    153:     struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP, ...
    167:     int node)
    169:     return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
    325:     int node = tsk_fork_get_node(orig);  [local]
    328:     tsk = alloc_task_struct_node(node);
    332:     ti = alloc_thread_info_node(tsk, node);
   1618:     INIT_HLIST_NODE(&links[type].node);  /* no ...
    150: alloc_thread_info_node(struct task_struct *tsk, int node)  [argument]
    166: alloc_thread_info_node(struct task_struct *tsk, int node)  [argument]
    ... (more matches not shown)
/kernel/time/
alarmtimer.c
    141:     timerqueue_del(&base->timerqueue, &alarm->node);
    143:     timerqueue_add(&base->timerqueue, &alarm->node);
    161:     timerqueue_del(&base->timerqueue, &alarm->node);
    192:     hrtimer_set_expires(&alarm->timer, alarm->node.expires);
    205:     return ktime_sub(alarm->node.expires, base->gettime());
    305:     timerqueue_init(&alarm->node);
    327:     alarm->node.expires = start;
    329:     ret = hrtimer_start(&alarm->timer, alarm->node.expires, ...
    356:     hrtimer_set_expires(&alarm->timer, alarm->node.expires);
    409:     delta = ktime_sub(now, alarm->node ...
    ... (more matches not shown)
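Lines 141-143 show how an armed alarm is rescheduled: it is deleted from the timerqueue and re-added rather than edited in place, so the queue stays sorted by expiry. A sketch of that invariant with a sorted singly linked list standing in for the kernel's rbtree-backed timerqueue; names are mine.

struct tq_node_sketch {
    long long expires;
    struct tq_node_sketch *next;
};

/* Insert in expiry order; equal expiries keep FIFO order. */
static void tq_add(struct tq_node_sketch **head, struct tq_node_sketch *n)
{
    while (*head && (*head)->expires <= n->expires)
        head = &(*head)->next;
    n->next = *head;
    *head = n;
}

/* Unlink n; to change its expiry, callers delete, update, re-add. */
static void tq_del(struct tq_node_sketch **head, struct tq_node_sketch *n)
{
    while (*head && *head != n)
        head = &(*head)->next;
    if (*head)
        *head = n->next;
}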
hrtimer.c
    504:     timer = container_of(next, struct hrtimer, node);
    835:     timerqueue_add(&base->active, &timer->node);
    844:     return (&timer->node == base->active.next);
    866:     timerqueue_del(&base->active, &timer->node);
    867:     if (&timer->node == next_timer) {
   1123:     timer = container_of(next, struct hrtimer, node);
   1154:     timerqueue_init(&timer->node);
   1268:     struct timerqueue_node *node;  [local]
   1277:     while ((node = timerqueue_getnext(&base->active))) {
   1280:         timer = container_of(node, struct hrtimer, node);
   1442:     struct timerqueue_node *node;  [local]
   1643:     struct timerqueue_node *node;  [local]
    ... (more matches not shown)
/kernel/rcu/
tree.h
    122:  * Definition for node within the RCU grace-period-detection hierarchy.
    127:     unsigned long gpnum;  /* Current grace period for this node. */
    130:     unsigned long completed;  /* Last GP completed for this node. */
    224:     for ((rnp) = &(rsp)->node[0]; \
    225:          (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
    233:     for ((rnp) = &(rsp)->node[0]; \
    240:  * It is still a leaf node, even if it is also the root node.
    244:          (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
    415:  * RCU global state, including node hierarchy ...
    425:     struct rcu_node node[NUM_RCU_NODES];  /* Hierarchy. */  [member of struct rcu_state]
    ... (more matches not shown)
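The iterators at lines 224-225 walk the rcu_node hierarchy as a pointer range over the embedded node[] array: no child pointers are chased, because breadth-first order is simply the array layout. A runnable sketch of that pointer-range macro idiom; the "_sketch" names and node count are mine.

#include <stdio.h>

#define NUM_NODES_SKETCH 4

struct rnode_sketch { int level; };

struct rstate_sketch {
    struct rnode_sketch node[NUM_NODES_SKETCH];  /* hierarchy as flat array */
};

/* Pointer-range walk over the embedded array, as at lines 224-225. */
#define for_each_node_sketch(rsp, rnp) \
    for ((rnp) = &(rsp)->node[0]; \
         (rnp) < &(rsp)->node[NUM_NODES_SKETCH]; (rnp)++)

int main(void)
{
    struct rstate_sketch rs = { 0 };
    struct rnode_sketch *rnp;

    for_each_node_sketch(&rs, rnp)
        printf("node %td\n", rnp - rs.node);
    return 0;
}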
tree_trace.c
    277:     for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
    319:     struct rcu_node *rnp = &rsp->node[0];
/kernel/sched/
debug.c
    512:     int node, i;  [local]
    526:     for_each_online_node(node) {
    532:         nr_faults = p->numa_faults_memory[2*node + i];
    534:         cpu_current = !i ? (task_node(p) == node) :
    535:             (pol && node_isset(node, pol->v.nodes));
    537:         home_node = (p->numa_preferred_nid == node);
    540:             i, node, cpu_current, home_node, nr_faults);
/kernel/events/
ring_buffer.c
    266:     int node;  [local]
    268:     node = (cpu == -1) ? cpu : cpu_to_node(cpu);
    269:     page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
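Lines 268-269 are the NUMA-aware allocation pattern: pick the node of the CPU that will touch the page, or pass -1 (NUMA_NO_NODE) when there is no target CPU, and let alloc_pages_node() fall back to no preference. A hedged kernel-context sketch; alloc_near_cpu is a hypothetical helper name, while alloc_pages_node(), cpu_to_node() and NUMA_NO_NODE are the real interfaces.

#include <linux/gfp.h>
#include <linux/numa.h>
#include <linux/topology.h>

/* Allocate a zeroed page near the given CPU, or anywhere for cpu == -1. */
static struct page *alloc_near_cpu(int cpu)
{
    int node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);

    return alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
}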