/net/rds/
ib_stats.c
    81  int cpu;
    86  for_each_online_cpu(cpu) {
    87          src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
iw_stats.c
    79  int cpu;
    84  for_each_online_cpu(cpu) {
    85          src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu));
tcp_stats.c
    58  int cpu;
    63  for_each_online_cpu(cpu) {
    64          src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
page.c
    35  #include <linux/cpu.h>
    91   * page in a per-cpu structure. Future partial-page allocations may be
   186  long cpu = (long)hcpu;
   188  rem = &per_cpu(rds_page_remainders, cpu);
   190  rdsdebug("cpu %ld action 0x%lx\n", cpu, action);
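page.c hooks CPU hotplug so a dead CPU's cached partial page is not leaked: the notifier receives the CPU number as hcpu and indexes that CPU's per-cpu remainder. A sketch of that (pre-cpuhp) notifier shape, with hypothetical demo_* names standing in for the rds_page_remainders machinery; modern kernels would use cpuhp_setup_state() instead:

#include <linux/cpu.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

struct demo_remainder {
        struct page *r_page;    /* cached partial page, may be NULL */
};

static DEFINE_PER_CPU(struct demo_remainder, demo_remainders);

static int demo_remainder_cpu_notify(struct notifier_block *self,
                                     unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct demo_remainder *rem = &per_cpu(demo_remainders, cpu);

        /* Mask off the _FROZEN variants used during suspend/resume. */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DEAD:
                /* Release the dead CPU's cached partial allocation. */
                if (rem->r_page) {
                        __free_page(rem->r_page);
                        rem->r_page = NULL;
                }
                break;
        }
        return NOTIFY_OK;
}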
stats.c
   116  int cpu;
   126  for_each_online_cpu(cpu) {
   127          src = (uint64_t *)&(per_cpu(rds_stats, cpu));
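ib_stats.c, iw_stats.c, tcp_stats.c and stats.c above all share one fold: each CPU owns a struct of u64 counters, and a reader walks the online CPUs, treating each per-cpu copy as a flat u64 array. A minimal sketch of the idiom, with hypothetical demo_* names standing in for the rds_* ones:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_stats {
        uint64_t s_conn_reset;
        uint64_t s_recv_drop;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_stats_fold(struct demo_stats *sum)
{
        uint64_t *dst = (uint64_t *)sum;
        unsigned int cpu, i;

        memset(sum, 0, sizeof(*sum));
        for_each_online_cpu(cpu) {
                /* The struct is laid out as consecutive u64 counters,
                 * so each CPU's copy can be walked as a flat array. */
                uint64_t *src = (uint64_t *)&per_cpu(demo_stats, cpu);

                for (i = 0; i < sizeof(*sum) / sizeof(uint64_t); i++)
                        dst[i] += src[i];
        }
}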
ib_rdma.c
   283  int cpu;
   286  for_each_online_cpu(cpu) {
   287          flag = &per_cpu(clean_list_grace, cpu);
ib_recv.c
   103  int cpu;
   109  for_each_possible_cpu(cpu) {
   110          head = per_cpu_ptr(cache->percpu, cpu);
   138  int cpu;
   140  for_each_possible_cpu(cpu) {
   141          head = per_cpu_ptr(cache->percpu, cpu);
   441   * Return our per-cpu first list to the cache's xfer by atomically
   442   * grabbing the current xfer list, appending it to our per-cpu list,
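ib_recv.c iterates possible (not merely online) CPUs because per-cpu cache slots must exist before a CPU is brought up and survive after it goes down. A sketch of the alloc_percpu()/per_cpu_ptr() initialization walk, using hypothetical demo_* types:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct demo_cache_head {
        struct list_head list;
        atomic_t count;
};

struct demo_cache {
        struct demo_cache_head __percpu *percpu;
};

static int demo_cache_init(struct demo_cache *cache)
{
        struct demo_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct demo_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        /* Initialize every possible CPU's slot, so hotplugged CPUs
         * find a valid head waiting for them. */
        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                INIT_LIST_HEAD(&head->list);
                atomic_set(&head->count, 0);
        }
        return 0;
}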
/net/netfilter/
xt_NFQUEUE.c
    96  int cpu = smp_processor_id();
    98  queue = info->queuenum + cpu % info->queues_total;
nft_queue.c
    40  int cpu = smp_processor_id();
    42  queue = priv->queuenum + cpu % priv->queues_total;
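xt_NFQUEUE.c and nft_queue.c use the same CPU-keyed fanout: when several userspace queues are configured, the base queue number is offset by the current CPU modulo the queue count, so packets processed on different CPUs land on different queues. A sketch (demo_pick_queue is a hypothetical name):

#include <linux/smp.h>
#include <linux/types.h>

static u16 demo_pick_queue(u16 queuenum, u16 queues_total)
{
        if (queues_total > 1) {
                /* Safe here: netfilter hooks run with preemption
                 * disabled, so the CPU cannot change under us. */
                int cpu = smp_processor_id();

                return queuenum + cpu % queues_total;
        }
        return queuenum;
}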
nf_conntrack_ecache.c
    84  int cpu, delay = -1;
    89  for_each_possible_cpu(cpu) {
    92          pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
nf_conntrack_helper.c
   397  int cpu;
   418  for_each_possible_cpu(cpu) {
   419          struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
nf_synproxy_core.c
   247  int cpu;
   252  for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
   253          if (!cpu_possible(cpu))
   255          *pos = cpu + 1;
   256          return per_cpu_ptr(snet->stats, cpu);
   265  int cpu;
   267  for (cpu = *pos; cpu < nr_cpu_id [all...]
nf_conntrack_standalone.c
   276  int cpu;
   281  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
   282          if (!cpu_possible(cpu))
   284          *pos = cpu + 1;
   285          return per_cpu_ptr(net->ct.stat, cpu);
   294  int cpu;
   296  for (cpu = *pos; cpu < nr_cpu_id [all...]
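nf_synproxy_core.c and nf_conntrack_standalone.c share a seq_file pattern for dumping per-cpu statistics: *pos is biased by one so that position 0 can emit a header line, and the scan skips holes in the possible-CPU map. A sketch under hypothetical demo_* names:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/seq_file.h>

struct demo_stat {
        unsigned int found;
        unsigned int drop;
};

/* Would live in per-net state in the real files. */
static struct demo_stat __percpu *demo_stat;

static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)
                return SEQ_START_TOKEN;         /* header row */

        /* *pos is the CPU number plus one, so resuming after CPU n
         * continues the scan at n + 1. */
        for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(demo_stat, cpu);
        }
        return NULL;
}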
nf_conntrack_core.c
   251  /* add this conntrack to the (per cpu) dying list */
   252  ct->cpu = smp_processor_id();
   253  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
   266  /* add this conntrack to the (per cpu) unconfirmed list */
   267  ct->cpu = smp_processor_id();
   268  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
   282  pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
   552  /* add this conntrack to the (per cpu) tmpl list */
   554  tmpl->cpu = smp_processor_id();
   555  pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
  1359  int cpu;
  1440  int cnt = 0, cpu;
  1615  int cpu;
  1625  int i, ret, cpu;
  1745  int cpu;
  [all...]
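nf_conntrack_core.c records the owning CPU in the object itself: a conntrack added to one CPU's pcpu_lists remembers ct->cpu, so it can later be unlinked from the same list even if that happens on another CPU (the ecache and helper walks above rely on the same layout). A simplified sketch with hypothetical types:

#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_pcpu {
        spinlock_t lock;
        struct list_head dying;
};

struct demo_obj {
        u16 cpu;                        /* CPU whose list we sit on */
        struct list_head node;
};

static void demo_add_dying(struct demo_obj *obj,
                           struct demo_pcpu __percpu *pcpu_lists)
{
        struct demo_pcpu *pcpu;

        /* Remember which CPU's list we join; removal later looks the
         * list up via obj->cpu, not via the then-current CPU. */
        obj->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(pcpu_lists, obj->cpu);

        spin_lock(&pcpu->lock);
        list_add(&obj->node, &pcpu->dying);
        spin_unlock(&pcpu->lock);
}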
/net/core/
gen_stats.c
   102          struct gnet_stats_basic_cpu __percpu *cpu)
   107  struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
   125          struct gnet_stats_basic_cpu __percpu *cpu,
   128  if (cpu) {
   129          __gnet_stats_copy_basic_cpu(bstats, cpu);
   150          struct gnet_stats_basic_cpu __percpu *cpu,
   155  __gnet_stats_copy_basic(&bstats, cpu, b);
   236          const struct gnet_stats_queue __percpu *cpu,
   240  if (cpu) {
   241          __gnet_stats_copy_queue_cpu(qstats, cpu);
   101  __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
                struct gnet_stats_basic_cpu __percpu *cpu)
   124  __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
                struct gnet_stats_basic_cpu __percpu *cpu,
                struct gnet_stats_basic_packed *b)
   149  gnet_stats_copy_basic(struct gnet_dump *d,
                struct gnet_stats_basic_cpu __percpu *cpu,
                struct gnet_stats_basic_packed *b)
   235  __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
                const struct gnet_stats_queue __percpu *cpu,
                const struct gnet_stats_queue *q, __u32 qlen)
  [all...]
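gen_stats.c's per-cpu fold has a wrinkle the RDS folds above lack: on 32-bit hosts a u64 byte/packet pair cannot be read atomically, so each per-cpu copy is read under a u64_stats seqcount and retried if a writer intervened. A sketch with hypothetical demo_* names:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct demo_bstats_cpu {
        u64 bytes;
        u64 packets;
        struct u64_stats_sync syncp;
};

static void demo_fold_bstats(u64 *bytes_out, u64 *packets_out,
                             struct demo_bstats_cpu __percpu *pcpu)
{
        int i;

        *bytes_out = 0;
        *packets_out = 0;
        for_each_possible_cpu(i) {
                struct demo_bstats_cpu *b = per_cpu_ptr(pcpu, i);
                unsigned int start;
                u64 bytes, packets;

                /* Retry if a writer updated the pair mid-read; on
                 * 64-bit this loop compiles down to plain loads. */
                do {
                        start = u64_stats_fetch_begin(&b->syncp);
                        bytes = b->bytes;
                        packets = b->packets;
                } while (u64_stats_fetch_retry(&b->syncp, start));

                *bytes_out += bytes;
                *packets_out += packets;
        }
}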
drop_monitor.c
   360  int cpu, rc;
   385  for_each_possible_cpu(cpu) {
   386          data = &per_cpu(dm_cpu_data, cpu);
   407  int cpu;
   418  for_each_possible_cpu(cpu) {
   419          data = &per_cpu(dm_cpu_data, cpu);
flow.c
    21  #include <linux/cpu.h>
   313   * Return whether a cpu needs flushing. Conservatively, we assume
   316   * on the same core as the per-cpu cache component.
   318  static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
   323  fcp = per_cpu_ptr(fc->percpu, cpu);
   393  static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
   395  struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
   399  fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
   417  int res, cpu = (unsigned long) hcpu;
   418  struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
  [all...]
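flow.c's hotplug-prepare step is worth noting for its NUMA placement: each CPU's hash table is allocated with kzalloc_node() on that CPU's home node, so later lookups from that CPU stay node-local. A sketch (demo_* names and the size parameter are hypothetical):

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

struct demo_percpu {
        void *hash_table;
};

static int demo_cpu_prepare(struct demo_percpu __percpu *percpu,
                            size_t sz, int cpu)
{
        struct demo_percpu *fcp = per_cpu_ptr(percpu, cpu);

        if (!fcp->hash_table) {
                /* Allocate on the node that owns this CPU, not on
                 * whichever node runs the hotplug callback. */
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL,
                                               cpu_to_node(cpu));
                if (!fcp->hash_table)
                        return -ENOMEM;
        }
        return 0;
}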
/net/xfrm/
xfrm_ipcomp.c
    48  const int cpu = get_cpu();
    49  u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
    50  struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
   250  int cpu;
   268  for_each_possible_cpu(cpu) {
   269          struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
   279  int cpu;
   306  for_each_possible_cpu(cpu) {
   311          *per_cpu_ptr(tfms, cpu) = tfm;
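xfrm_ipcomp.c pins the caller with get_cpu() before touching the current CPU's scratch buffer and transform, since preemption followed by migration would hand the buffer to another task mid-use. A sketch of the pin/use/unpin shape, with a hypothetical demo_scratches table:

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

/* One scratch-buffer pointer per CPU, set up at init time. */
static u8 * __percpu *demo_scratches;

static void demo_use_scratch(void (*fn)(u8 *scratch))
{
        const int cpu = get_cpu();      /* disables preemption */
        u8 *scratch = *per_cpu_ptr(demo_scratches, cpu);

        /* The scratch buffer is exclusively ours until put_cpu(). */
        fn(scratch);
        put_cpu();                      /* re-enables preemption */
}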
/net/bridge/
br_device.c
   144  unsigned int cpu;
   146  for_each_possible_cpu(cpu) {
   149          = per_cpu_ptr(br->stats, cpu);
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c
   327  int cpu;
   332  for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
   333          if (!cpu_possible(cpu))
   335          *pos = cpu+1;
   336          return per_cpu_ptr(net->ct.stat, cpu);
   345  int cpu;
   347  for (cpu = *pos; cpu < nr_cpu_id [all...]
/net/batman-adv/
main.h
   323  /* Stop preemption on local cpu while incrementing the counter */
   332  /* Sum and return the cpu-local counters for index 'idx' */
   337  int cpu;
   339  for_each_possible_cpu(cpu) {
   340          counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
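main.h's summing helper treats the per-cpu area as a flat u64 array and folds a single index across all possible CPUs, while the increment side only needs to stop preemption locally. A sketch of the fold's shape (demo_sum_counter is a hypothetical name):

#include <linux/percpu.h>
#include <linux/types.h>

static u64 demo_sum_counter(u64 __percpu *counters_pcpu, size_t idx)
{
        u64 sum = 0, *counters;
        int cpu;

        /* Each CPU's area is an array of u64 counters; sum one slot
         * across all of them. */
        for_each_possible_cpu(cpu) {
                counters = per_cpu_ptr(counters_pcpu, cpu);
                sum += counters[idx];
        }
        return sum;
}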
/net/sched/
cls_u32.c
   927  int cpu;
   986  for_each_possible_cpu(cpu) {
   988          struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
/net/sunrpc/
svc.c
    44  SVC_POOL_PERCPU,                /* one pool per cpu */
    59  unsigned int *pool_to;          /* maps pool id to cpu or node */
    60  unsigned int *to_pool;          /* maps cpu or node to pool id */
   143   * want to divide the pools on cpu boundaries.
   184  unsigned int cpu;
   191  for_each_online_cpu(cpu) {
   193          m->to_pool[cpu] = pidx;
   194          m->pool_to[pidx] = cpu;
   350  svc_pool_for_cpu(struct svc_serv *serv, int cpu)
   363  pidx = m->to_pool[cpu];
  [all...]
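svc.c's per-CPU pool mode builds two translation arrays at init time, one pool per online CPU, so svc_pool_for_cpu() later reduces to an array lookup. A sketch with a hypothetical demo_pool_map:

#include <linux/cpumask.h>

struct demo_pool_map {
        unsigned int npools;
        unsigned int *pool_to;          /* pool id -> cpu */
        unsigned int *to_pool;          /* cpu -> pool id */
};

static void demo_map_init_percpu(struct demo_pool_map *m)
{
        unsigned int pidx = 0;
        unsigned int cpu;

        /* One pool per online CPU, with both directions recorded so
         * lookups never have to scan. */
        for_each_online_cpu(cpu) {
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        m->npools = pidx;
}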
/net/ipv4/
af_inet.c
  1485  int cpu;
  1487  for_each_possible_cpu(cpu) {
  1493          bhptr = per_cpu_ptr(mib, cpu);
  1789   * Initialise per-cpu ipv4 mibs
fib_semantics.c
   188  int cpu;
   193  for_each_possible_cpu(cpu) {
   196          rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
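fib_semantics.c frees per-cpu cached routes with rcu_dereference_protected(..., 1): each possible CPU's slot holds an RCU-managed pointer, and the constant-true condition documents that teardown holds exclusive access with no readers left. A sketch under hypothetical demo_* names:

#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct demo_rt {
        int placeholder;        /* hypothetical cached-route object */
};

static void demo_free_percpu_rts(struct demo_rt __rcu * __percpu *rtp_pcpu,
                                 void (*free_rt)(struct demo_rt *))
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct demo_rt *rt;

                /* '1' asserts exclusive access: no RCU readers can
                 * still see these slots at teardown time. */
                rt = rcu_dereference_protected(*per_cpu_ptr(rtp_pcpu, cpu), 1);
                if (rt)
                        free_rt(rt);
        }
}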