Searched defs:cpu (Results 1 - 25 of 43) sorted by relevance

/net/rds/
ib_stats.c 81 int cpu; local
86 for_each_online_cpu(cpu) {
87 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
iw_stats.c 79 int cpu; local
84 for_each_online_cpu(cpu) {
85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu));
tcp_stats.c 58 int cpu; local
63 for_each_online_cpu(cpu) {
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
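
The three RDS stats files above (ib_stats.c, iw_stats.c, tcp_stats.c) share one pattern: a DEFINE_PER_CPU statistics struct is treated as a flat array of uint64_t counters and folded together across the online CPUs. Below is a minimal sketch of that summation, using a hypothetical rds_example_statistics struct rather than the real RDS counter layout.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical counters; the real code uses rds_ib_statistics etc. */
struct rds_example_statistics {
        uint64_t s_send_count;
        uint64_t s_recv_count;
};

static DEFINE_PER_CPU(struct rds_example_statistics, rds_example_stats);

/* Fold every online CPU's copy into one totals struct, counter by counter. */
static void rds_example_stats_sum(struct rds_example_statistics *sum)
{
        unsigned int nr = sizeof(*sum) / sizeof(uint64_t);
        uint64_t *dst = (uint64_t *)sum;
        uint64_t *src;
        unsigned int i;
        int cpu;

        memset(sum, 0, sizeof(*sum));
        for_each_online_cpu(cpu) {
                src = (uint64_t *)&per_cpu(rds_example_stats, cpu);
                for (i = 0; i < nr; i++)
                        dst[i] += src[i];
        }
}
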
page.c 35 #include <linux/cpu.h>
91 * page in a per-cpu structure. Future partial-page allocations may be
186 long cpu = (long)hcpu; local
188 rem = &per_cpu(rds_page_remainders, cpu);
190 rdsdebug("cpu %ld action 0x%lx\n", cpu, action);
stats.c 116 int cpu; local
126 for_each_online_cpu(cpu) {
127 src = (uint64_t *)&(per_cpu(rds_stats, cpu));
ib_rdma.c 283 int cpu; local
286 for_each_online_cpu(cpu) {
287 flag = &per_cpu(clean_list_grace, cpu);
ib_recv.c 103 int cpu; local
109 for_each_possible_cpu(cpu) {
110 head = per_cpu_ptr(cache->percpu, cpu);
138 int cpu; local
140 for_each_possible_cpu(cpu) {
141 head = per_cpu_ptr(cache->percpu, cpu);
441 * Return our per-cpu first list to the cache's xfer by atomically
442 * grabbing the current xfer list, appending it to our per-cpu list,
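
ib_recv.c walks every possible CPU rather than just the online ones, so the per-cpu state is initialized (and later drained) even for CPUs that are currently offline. Here is a minimal sketch of that initialization pattern, with a hypothetical example_cache_head type standing in for the RDS cache structures.

#include <linux/percpu.h>
#include <linux/llist.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical per-cpu cache head; the real RDS structures differ. */
struct example_cache_head {
        struct llist_head list;
        unsigned long count;
};

struct example_refill_cache {
        struct example_cache_head __percpu *percpu;
        struct llist_head xfer;   /* shared list the per-cpu lists drain into */
        struct llist_head ready;
};

static int example_cache_init(struct example_refill_cache *cache)
{
        struct example_cache_head *head;
        int cpu;

        cache->percpu = alloc_percpu(struct example_cache_head);
        if (!cache->percpu)
                return -ENOMEM;

        /* Possible, not online: entries may outlive a CPU going offline. */
        for_each_possible_cpu(cpu) {
                head = per_cpu_ptr(cache->percpu, cpu);
                init_llist_head(&head->list);
                head->count = 0;
        }
        init_llist_head(&cache->xfer);
        init_llist_head(&cache->ready);
        return 0;
}
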
/net/netfilter/
xt_NFQUEUE.c 96 int cpu = smp_processor_id(); local
98 queue = info->queuenum + cpu % info->queues_total;
nft_queue.c 40 int cpu = smp_processor_id(); local
42 queue = priv->queuenum + cpu % priv->queues_total;
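
xt_NFQUEUE.c and nft_queue.c both spread packets over a range of nfnetlink queues by offsetting the base queue number with the current CPU id modulo the number of queues, so each CPU tends to feed its own queue. A sketch of just that selection, mirroring the queuenum/queues_total fields visible in the snippets above but with hypothetical types:

#include <linux/smp.h>
#include <linux/types.h>

/* Hypothetical balancing parameters, not the real netfilter structs. */
struct example_queue_info {
        u16 queuenum;       /* first queue in the range */
        u16 queues_total;   /* how many queues to spread across */
};

/* Called from a netfilter hook, i.e. with preemption already disabled,
 * so smp_processor_id() is stable here.
 */
static u32 example_select_queue(const struct example_queue_info *info)
{
        u32 queue = info->queuenum;

        if (info->queues_total > 1)
                queue += smp_processor_id() % info->queues_total;

        return queue;
}
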
nf_conntrack_ecache.c 84 int cpu, delay = -1; local
89 for_each_possible_cpu(cpu) {
92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
nf_conntrack_helper.c 397 int cpu; local
418 for_each_possible_cpu(cpu) {
419 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
nf_synproxy_core.c 247 int cpu; local
252 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
253 if (!cpu_possible(cpu))
255 *pos = cpu + 1;
256 return per_cpu_ptr(snet->stats, cpu);
265 int cpu; local
267 for (cpu = *pos; cpu < nr_cpu_id
[all...]
nf_conntrack_standalone.c 276 int cpu; local
281 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
282 if (!cpu_possible(cpu))
284 *pos = cpu + 1;
285 return per_cpu_ptr(net->ct.stat, cpu);
294 int cpu; local
296 for (cpu = *pos; cpu < nr_cpu_id
[all...]
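
nf_synproxy_core.c and nf_conntrack_standalone.c (and the ipv4 compat proc code further down) use the same seq_file idiom for per-cpu statistics: position 0 is the header line, positions 1..nr_cpu_ids map onto possible CPUs, and holes in the possible mask are skipped. A sketch of the ->start callback against a hypothetical per-cpu stats pointer:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/seq_file.h>

/* Hypothetical per-cpu statistics block exported through /proc. */
struct example_cpu_stat {
        unsigned int searched;
        unsigned int found;
};

static struct example_cpu_stat __percpu *example_stats;

static void *example_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        int cpu;

        if (*pos == 0)                  /* position 0 prints the header */
                return SEQ_START_TOKEN;

        for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
                if (!cpu_possible(cpu))
                        continue;       /* skip holes in the possible mask */
                *pos = cpu + 1;         /* resume after this CPU next time */
                return per_cpu_ptr(example_stats, cpu);
        }

        return NULL;
}
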
nf_conntrack_core.c 251 /* add this conntrack to the (per cpu) dying list */
252 ct->cpu = smp_processor_id();
253 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
266 /* add this conntrack to the (per cpu) unconfirmed list */
267 ct->cpu = smp_processor_id();
268 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
282 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
552 /* add this conntrack to the (per cpu) tmpl list */
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
1359 int cpu; local
1440 int cnt = 0, cpu; local
1615 int cpu; local
1625 int i, ret, cpu; local
1745 int cpu; local
[all...]
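
In nf_conntrack_core.c the conntrack remembers which CPU's pcpu_lists it was queued on (ct->cpu), so later removal can lock exactly that CPU's list instead of searching all of them. A minimal sketch of the "record the CPU, then lock that CPU's list" pattern, using simplified hypothetical types (plain hlist instead of the nulls lists the real code uses):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/types.h>

/* Hypothetical per-cpu list bundle, in the spirit of struct ct_pcpu. */
struct example_pcpu {
        spinlock_t lock;
        struct hlist_head unconfirmed;
        struct hlist_head dying;
};

struct example_ct {
        u16 cpu;                        /* CPU whose list holds us */
        struct hlist_node node;
};

static void example_add_to_unconfirmed(struct example_pcpu __percpu *lists,
                                       struct example_ct *ct)
{
        struct example_pcpu *pcpu;

        /* Runs in softirq context, so the CPU cannot change under us;
         * remember it so removal can find the right list later.
         */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_add_head(&ct->node, &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}
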
/net/core/
gen_stats.c 102 struct gnet_stats_basic_cpu __percpu *cpu)
107 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
125 struct gnet_stats_basic_cpu __percpu *cpu,
128 if (cpu) {
129 __gnet_stats_copy_basic_cpu(bstats, cpu);
150 struct gnet_stats_basic_cpu __percpu *cpu,
155 __gnet_stats_copy_basic(&bstats, cpu, b);
236 const struct gnet_stats_queue __percpu *cpu,
240 if (cpu) {
241 __gnet_stats_copy_queue_cpu(qstats, cpu);
101 __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu) argument
124 __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) argument
149 gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) argument
235 __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, const struct gnet_stats_queue __percpu *cpu, const struct gnet_stats_queue *q, __u32 qlen) argument
[all...]
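
gen_stats.c only folds the per-cpu counters when a per-cpu pointer was actually passed in (the "if (cpu)" branches above); otherwise it copies the single shared struct. A simplified sketch of the per-cpu fold, reading each CPU's byte/packet pair under a u64_stats sequence so 32-bit readers see a consistent snapshot (the exact fetch helpers vary across kernel versions, and the struct here is hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical per-cpu byte/packet counters with a 32-bit-safe seqcount. */
struct example_bstats_cpu {
        u64 bytes;
        u64 packets;
        struct u64_stats_sync syncp;
};

static void example_sum_bstats(u64 *bytes, u64 *packets,
                               struct example_bstats_cpu __percpu *cpu_stats)
{
        int cpu;

        *bytes = 0;
        *packets = 0;
        for_each_possible_cpu(cpu) {
                struct example_bstats_cpu *bcpu = per_cpu_ptr(cpu_stats, cpu);
                unsigned int start;
                u64 b, p;

                /* Retry if a writer updates the pair while we read it. */
                do {
                        start = u64_stats_fetch_begin(&bcpu->syncp);
                        b = bcpu->bytes;
                        p = bcpu->packets;
                } while (u64_stats_fetch_retry(&bcpu->syncp, start));

                *bytes += b;
                *packets += p;
        }
}
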
drop_monitor.c 360 int cpu, rc; local
385 for_each_possible_cpu(cpu) {
386 data = &per_cpu(dm_cpu_data, cpu);
407 int cpu; local
418 for_each_possible_cpu(cpu) {
419 data = &per_cpu(dm_cpu_data, cpu);
flow.c 21 #include <linux/cpu.h>
313 * Return whether a cpu needs flushing. Conservatively, we assume
316 * on the same core as the per-cpu cache component.
318 static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) argument
323 fcp = per_cpu_ptr(fc->percpu, cpu);
393 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) argument
395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
399 fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
417 int res, cpu = (unsigned long) hcpu; local
418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
[all...]
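
flow.c sizes each CPU's hash table separately and allocates it with kzalloc_node(..., cpu_to_node(cpu)) so the table lands on that CPU's NUMA node. A sketch of such a prepare step for a hypothetical per-cpu cache slice:

#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/errno.h>

/* Hypothetical per-cpu slice of a flow-cache-like structure. */
struct example_cache_percpu {
        struct hlist_head *hash_table;
        unsigned int hash_count;
};

struct example_cache {
        unsigned int hash_size;
        struct example_cache_percpu __percpu *percpu;
};

static int example_cache_cpu_prepare(struct example_cache *fc, int cpu)
{
        struct example_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = fc->hash_size * sizeof(struct hlist_head);

        if (!fcp->hash_table) {
                /* Allocate on the CPU's own NUMA node to keep lookups local. */
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table)
                        return -ENOMEM;
                fcp->hash_count = 0;
        }
        return 0;
}
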
/net/xfrm/
xfrm_ipcomp.c 48 const int cpu = get_cpu(); local
49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
250 int cpu; local
268 for_each_possible_cpu(cpu) {
269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
279 int cpu; local
306 for_each_possible_cpu(cpu) {
311 *per_cpu_ptr(tfms, cpu) = tfm;
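
xfrm_ipcomp.c pins the caller to one CPU with get_cpu() for as long as it uses that CPU's scratch buffer and crypto transform, then drops the pin with put_cpu(). A minimal sketch of that pin/use/unpin pattern around a hypothetical per-cpu scratch pointer (the buffer size and names are assumptions, not the IPComp values):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_SCRATCH_SIZE 65536

/* Hypothetical per-cpu scratch buffers, one pointer per CPU,
 * populated elsewhere (e.g. via alloc_percpu() plus vmalloc per CPU).
 */
static u8 * __percpu *example_scratches;

static int example_use_scratch(const u8 *in, int len)
{
        const int cpu = get_cpu();      /* disable preemption, pin to this CPU */
        u8 *scratch = *per_cpu_ptr(example_scratches, cpu);
        int err = 0;

        if (len > EXAMPLE_SCRATCH_SIZE) {
                err = -EINVAL;
                goto out;
        }

        /* Work on this CPU's private buffer; no locking needed. */
        memcpy(scratch, in, len);

out:
        put_cpu();                      /* re-enable preemption */
        return err;
}
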
/net/bridge/
br_device.c 144 unsigned int cpu; local
146 for_each_possible_cpu(cpu) {
149 = per_cpu_ptr(br->stats, cpu);
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c 327 int cpu; local
332 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
333 if (!cpu_possible(cpu))
335 *pos = cpu+1;
336 return per_cpu_ptr(net->ct.stat, cpu);
345 int cpu; local
347 for (cpu = *pos; cpu < nr_cpu_id
[all...]
/net/batman-adv/
main.h 323 /* Stop preemption on local cpu while incrementing the counter */
332 /* Sum and return the cpu-local counters for index 'idx' */
337 int cpu; local
339 for_each_possible_cpu(cpu) {
340 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
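
The batman-adv helper in main.h shows the counter side of the per-cpu pattern: increments only need preemption disabled on the local CPU, and reading back is a sum of one index across all possible CPUs. A sketch of that summation over a hypothetical per-cpu counter array:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/* Per-cpu array of u64 counters, allocated elsewhere with
 * __alloc_percpu(n * sizeof(u64), __alignof__(u64)).
 */
struct example_priv {
        u64 __percpu *counters;
};

/* Sum and return the cpu-local counters for index 'idx'. */
static u64 example_sum_counter(struct example_priv *priv, size_t idx)
{
        u64 *counters, sum = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                counters = per_cpu_ptr(priv->counters, cpu);
                sum += counters[idx];
        }

        return sum;
}
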
/net/sched/
cls_u32.c 927 int cpu; local
986 for_each_possible_cpu(cpu) {
988 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
/net/sunrpc/
svc.c 44 SVC_POOL_PERCPU, /* one pool per cpu */
59 unsigned int *pool_to; /* maps pool id to cpu or node */
60 unsigned int *to_pool; /* maps cpu or node to pool id */
143 * want to divide the pools on cpu boundaries.
184 unsigned int cpu; local
191 for_each_online_cpu(cpu) {
193 m->to_pool[cpu] = pidx;
194 m->pool_to[pidx] = cpu;
350 svc_pool_for_cpu(struct svc_serv *serv, int cpu) argument
363 pidx = m->to_pool[cpu];
[all...]
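
svc.c's per-cpu pool mode builds two lookup arrays: to_pool[] maps a CPU id to its pool index and pool_to[] maps a pool index back to its CPU, so svc_pool_for_cpu() reduces to a single array lookup. A simplified sketch of building that mapping (the real code also supports a per-node mode and sizes the arrays differently):

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical pool map: one pool per online CPU. */
struct example_pool_map {
        unsigned int npools;
        unsigned int *pool_to;  /* pool id -> cpu */
        unsigned int *to_pool;  /* cpu -> pool id */
};

static int example_pool_map_init_percpu(struct example_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;

        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool || !m->pool_to) {
                kfree(m->to_pool);
                kfree(m->pool_to);
                return -ENOMEM;
        }

        for_each_online_cpu(cpu) {
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        m->npools = pidx;
        return 0;
}
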
/net/ipv4/
af_inet.c 1485 int cpu; local
1487 for_each_possible_cpu(cpu) {
1493 bhptr = per_cpu_ptr(mib, cpu);
1789 * Initialise per-cpu ipv4 mibs
fib_semantics.c 188 int cpu; local
193 for_each_possible_cpu(cpu) {
196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
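
fib_semantics.c tears down per-cpu cached routes by dereferencing each CPU's slot with rcu_dereference_protected(..., 1): the constant 1 stands in for the usual lockdep condition because the object is already unreachable and no concurrent updaters remain. A generic sketch of freeing a per-cpu array of RCU-managed pointers the same way (types are hypothetical, and the real code releases the routes through the dst API rather than plain kfree()):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
        int payload;
};

static void example_free_percpu_objs(struct example_obj __rcu * __percpu *objp)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct example_obj *obj;

                /* Teardown path: no readers or updaters left, so pass 1. */
                obj = rcu_dereference_protected(*per_cpu_ptr(objp, cpu), 1);
                if (obj)
                        kfree(obj);
        }
}
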

Completed in 509 milliseconds
