Searched refs:cpu (Results 1 - 25 of 44) sorted by relevance


/net/rds/
ib_stats.c            81  int cpu; local
86 for_each_online_cpu(cpu) {
87 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu));
iw_stats.c            79  int cpu; local
84 for_each_online_cpu(cpu) {
85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu));
tcp_stats.c           58  int cpu; local
63 for_each_online_cpu(cpu) {
64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
stats.c              116  int cpu; local
126 for_each_online_cpu(cpu) {
127 src = (uint64_t *)&(per_cpu(rds_stats, cpu));
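
The four RDS hits above (ib_stats.c, iw_stats.c, tcp_stats.c, stats.c) all use the same idiom: a DEFINE_PER_CPU statistics struct viewed as a flat array of 64-bit counters and summed across online CPUs. A minimal sketch of that pattern, using illustrative rds_demo_* names rather than the real RDS symbols:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>
    #include <linux/types.h>

    struct rds_demo_statistics {
            uint64_t s_send;
            uint64_t s_recv;
    };

    static DEFINE_PER_CPU(struct rds_demo_statistics, rds_demo_stats);

    /* Fold every online CPU's counters into one flat uint64_t array by
     * treating the per-CPU struct as an array of 64-bit counters. */
    static void rds_demo_stats_sum(uint64_t *sums, size_t nr)
    {
            uint64_t *src;
            int cpu;
            size_t i;

            for_each_online_cpu(cpu) {
                    src = (uint64_t *)&(per_cpu(rds_demo_stats, cpu));
                    for (i = 0; i < nr; i++)
                            sums[i] += src[i];
            }
    }
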
page.c                35  #include <linux/cpu.h>
91 * page in a per-cpu structure. Future partial-page allocations may be
186 long cpu = (long)hcpu; local
188 rem = &per_cpu(rds_page_remainders, cpu);
190 rdsdebug("cpu %ld action 0x%lx\n", cpu, action);
/net/netfilter/
nf_synproxy_core.c   247  int cpu; local
252 for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
253 if (!cpu_possible(cpu))
255 *pos = cpu + 1;
256 return per_cpu_ptr(snet->stats, cpu);
265 int cpu; local
267 for (cpu = *pos; cpu < nr_cpu_id
[all...]
nf_conntrack_standalone.c  276  int cpu; local
281 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
282 if (!cpu_possible(cpu))
284 *pos = cpu + 1;
285 return per_cpu_ptr(net->ct.stat, cpu);
294 int cpu; local
296 for (cpu = *pos; cpu < nr_cpu_id
[all...]
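
Both netfilter hits above (nf_synproxy_core.c and nf_conntrack_standalone.c) expose per-CPU statistics through a seq_file whose ->start/->next callbacks walk the possible CPUs, encoding "cpu + 1" in *pos and skipping holes in the CPU map. A minimal sketch of that iterator, with a hypothetical demo_stats per-CPU pointer standing in for snet->stats / net->ct.stat:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/seq_file.h>

    struct demo_stat {
            unsigned long found;
            unsigned long invalid;
    };

    /* Assumed to be set up elsewhere with alloc_percpu(struct demo_stat). */
    static struct demo_stat __percpu *demo_stats;

    static void *demo_seq_start(struct seq_file *seq, loff_t *pos)
    {
            int cpu;

            if (*pos == 0)
                    return SEQ_START_TOKEN;       /* emit a header line first */

            for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
                    if (!cpu_possible(cpu))
                            continue;
                    *pos = cpu + 1;               /* resume after this CPU */
                    return per_cpu_ptr(demo_stats, cpu);
            }
            return NULL;
    }

    static void *demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            int cpu;

            for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
                    if (!cpu_possible(cpu))
                            continue;
                    *pos = cpu + 1;
                    return per_cpu_ptr(demo_stats, cpu);
            }
            return NULL;
    }
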
xt_cpu.c              41  return (info->cpu == smp_processor_id()) ^ info->invert;
45 .name = "cpu",
nft_queue.c           40  int cpu = smp_processor_id(); local
42 queue = priv->queuenum + cpu % priv->queues_total;
x_tables.c           662  int cpu; local
674 for_each_possible_cpu(cpu) {
676 newinfo->entries[cpu] = kmalloc_node(size,
678 cpu_to_node(cpu));
680 newinfo->entries[cpu] = vmalloc_node(size,
681 cpu_to_node(cpu));
683 if (newinfo->entries[cpu] == NULL) {
695 int cpu; local
697 for_each_possible_cpu(cpu)
698 kvfree(info->entries[cpu]);
753 int cpu; local
[all...]
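
x_tables.c allocates one copy of the rule blob per possible CPU, placed on that CPU's NUMA node, using kmalloc for small tables and vmalloc for large ones, and frees both kinds with kvfree(). A hedged sketch of that allocation strategy (demo_* names are illustrative; entries[] must be zeroed by the caller so the error path can kvfree() unconditionally, kvfree(NULL) being a no-op):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* entries[] has nr_cpu_ids slots and is zero-initialised by the caller. */
    static int demo_alloc_entries(void **entries, size_t size)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    if (size <= PAGE_SIZE)
                            entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                                        cpu_to_node(cpu));
                    else
                            entries[cpu] = vmalloc_node(size,
                                                        cpu_to_node(cpu));
                    if (entries[cpu] == NULL)
                            goto err;
            }
            return 0;

    err:
            for_each_possible_cpu(cpu)
                    kvfree(entries[cpu]);   /* frees kmalloc and vmalloc alike */
            return -ENOMEM;
    }
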
xt_NFQUEUE.c          96  int cpu = smp_processor_id(); local
98 queue = info->queuenum + cpu % info->queues_total;
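
nft_queue.c and xt_NFQUEUE.c both spread packets over a contiguous range of NFQUEUE queues by adding the executing CPU, modulo the number of queues, to the base queue number. A one-function sketch of that selection (demo_pick_queue is a hypothetical name; callers run in softirq context, so smp_processor_id() is stable here, and queues_total is at least 1):

    #include <linux/smp.h>
    #include <linux/types.h>

    /* Base queue plus a per-CPU offset, wrapping at queues_total. */
    static u16 demo_pick_queue(u16 queuenum, u16 queues_total)
    {
            int cpu = smp_processor_id();

            return queuenum + cpu % queues_total;
    }
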
nf_conntrack_core.c  251  /* add this conntrack to the (per cpu) dying list */
252 ct->cpu = smp_processor_id();
253 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
266 /* add this conntrack to the (per cpu) unconfirmed list */
267 ct->cpu = smp_processor_id();
268 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
282 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
552 /* add this conntrack to the (per cpu) tmpl list */
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
1359 int cpu; local
1440 int cnt = 0, cpu; local
1615 int cpu; local
1625 int i, ret, cpu; local
1745 int cpu; local
[all...]
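
nf_conntrack_core.c keeps per-CPU dying/unconfirmed/template lists; each conntrack records the CPU it was queued on (ct->cpu = smp_processor_id()) so it can later be unlinked from the same per-CPU list regardless of which CPU does the unlinking. A simplified sketch of the add path, with demo_* stand-ins for struct ct_pcpu and struct nf_conn (the real lists use hlist_nulls and the per-CPU area comes from alloc_percpu()):

    #include <linux/list.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct demo_pcpu {
            spinlock_t        lock;
            struct hlist_head dying;
            struct hlist_head unconfirmed;
    };

    struct demo_conn {
            u16               cpu;        /* CPU whose list holds this entry */
            struct hlist_node node;
    };

    /* Assumed to be allocated with alloc_percpu() and each lock initialised. */
    static struct demo_pcpu __percpu *demo_pcpu_lists;

    static void demo_add_unconfirmed(struct demo_conn *ct)
    {
            struct demo_pcpu *pcpu;

            ct->cpu = smp_processor_id();
            pcpu = per_cpu_ptr(demo_pcpu_lists, ct->cpu);

            spin_lock(&pcpu->lock);
            hlist_add_head(&ct->node, &pcpu->unconfirmed);
            spin_unlock(&pcpu->lock);
    }
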
nf_conntrack_ecache.c  84  int cpu, delay = -1; local
89 for_each_possible_cpu(cpu) {
92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
nf_conntrack_helper.c  397  int cpu; local
418 for_each_possible_cpu(cpu) {
419 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
nf_conntrack_netlink.c  1161  int cpu; local
1170 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1173 if (!cpu_possible(cpu))
1176 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1198 cb->args[0] = cpu;
1855 __u16 cpu, const struct ip_conntrack_stat *st)
1869 nfmsg->res_id = htons(cpu);
1900 int cpu; local
1854 ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, __u16 cpu, const struct ip_conntrack_stat *st) argument
3020 ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, const struct ip_conntrack_stat *st) argument
3054 int cpu; local
[all...]
/net/core/
gen_stats.c          102  struct gnet_stats_basic_cpu __percpu *cpu)
107 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
125 struct gnet_stats_basic_cpu __percpu *cpu,
128 if (cpu) {
129 __gnet_stats_copy_basic_cpu(bstats, cpu);
150 struct gnet_stats_basic_cpu __percpu *cpu,
155 __gnet_stats_copy_basic(&bstats, cpu, b);
236 const struct gnet_stats_queue __percpu *cpu,
240 if (cpu) {
241 __gnet_stats_copy_queue_cpu(qstats, cpu);
101 __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu) argument
124 __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) argument
149 gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b) argument
235 __gnet_stats_copy_queue(struct gnet_stats_queue *qstats, const struct gnet_stats_queue __percpu *cpu, const struct gnet_stats_queue *q, __u32 qlen) argument
[all...]
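
gen_stats.c folds per-CPU qdisc byte/packet counters into a single gnet_stats_basic_packed before dumping them to user space. A simplified sketch of that copy loop, with demo_* stand-ins for the gnet_stats structures and without the u64_stats sequence counter the real code uses to read 64-bit counters safely on 32-bit hosts:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct demo_basic {
            __u64 bytes;
            __u32 packets;
    };

    struct demo_basic_cpu {
            struct demo_basic bstats;
    };

    /* Sum every possible CPU's counters into one snapshot. */
    static void demo_copy_basic_cpu(struct demo_basic *bstats,
                                    struct demo_basic_cpu __percpu *cpu)
    {
            int i;

            for_each_possible_cpu(i) {
                    struct demo_basic_cpu *bcpu = per_cpu_ptr(cpu, i);

                    bstats->bytes   += bcpu->bstats.bytes;
                    bstats->packets += bcpu->bstats.packets;
            }
    }
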
drop_monitor.c       360  int cpu, rc; local
385 for_each_possible_cpu(cpu) {
386 data = &per_cpu(dm_cpu_data, cpu);
407 int cpu; local
418 for_each_possible_cpu(cpu) {
419 data = &per_cpu(dm_cpu_data, cpu);
flow.c                21  #include <linux/cpu.h>
313 * Return whether a cpu needs flushing. Conservatively, we assume
316 * on the same core as the per-cpu cache component.
318 static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu) argument
323 fcp = per_cpu_ptr(fc->percpu, cpu);
393 static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu) argument
395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
399 fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
417 int res, cpu = (unsigned long) hcpu; local
418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
[all...]
dev.c                 78  #include <linux/cpu.h>
1343 * can be even on different cpu. So just clear netif_running().
1850 int cpu, u16 index)
1856 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1863 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1877 int cpu, i; local
1886 for_each_possible_cpu(cpu) {
1888 if (!remove_xps_queue(dev_maps, cpu, i))
1909 int cpu, u16 index)
1931 cpu_to_node(cpu));
1849 remove_xps_queue(struct xps_dev_maps *dev_maps, int cpu, u16 index) argument
1908 expand_xps_map(struct xps_map *map, int cpu, u16 index) argument
1949 int cpu, numa_node_id = -2; local
2949 int cpu = smp_processor_id(); /* ok because BHs are off */ local
3093 int cpu = -1; local
3197 int cpu; local
3293 enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail) argument
3346 int cpu; local
3789 int cpu, ret; local
6958 unsigned int cpu, oldcpu = (unsigned long)ocpu; local
[all...]
/net/iucv/
iucv.c                51  #include <linux/cpu.h>
206 * iucv_active_cpu: contains the number of the cpu executing the tasklet
309 * Anchor for per-cpu IUCV command parameter block.
375 * Allow iucv interrupts on this cpu.
379 int cpu = smp_processor_id(); local
391 parm = iucv_param_irq[cpu];
408 /* Set indication that iucv interrupts are allowed for this cpu. */
409 cpumask_set_cpu(cpu, &iucv_irq_cpumask);
416 * Block iucv interrupts on this cpu.
420 int cpu local
440 int cpu = smp_processor_id(); local
465 int cpu = smp_processor_id(); local
520 int cpu = smp_processor_id(); local
544 int cpu; local
564 int cpu; local
584 int cpu, rc; local
624 free_iucv_data(int cpu) argument
634 alloc_iucv_data(int cpu) argument
664 long cpu = (long) hcpu; local
1899 int cpu; local
2020 int cpu; local
2095 int cpu; local
[all...]
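
iucv.c keeps a per-CPU command parameter block and a cpumask (iucv_irq_cpumask) recording which CPUs currently have IUCV interrupts enabled; the enable/disable helpers run on the target CPU and update that mask. A rough sketch of the bookkeeping only (demo_* names are illustrative and the actual hardware programming is omitted):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(void *, demo_param_irq);   /* per-CPU parameter block */
    static cpumask_t demo_irq_cpumask;               /* CPUs with IRQs allowed  */

    /* Meant to run on the target CPU, e.g. via smp_call_function_single(). */
    static void demo_allow_cpu(void *data)
    {
            int cpu = smp_processor_id();
            void *parm = per_cpu(demo_param_irq, cpu);

            (void)parm;   /* the real code fills parm and issues the command */
            cpumask_set_cpu(cpu, &demo_irq_cpumask);
    }
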
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c  327  int cpu; local
332 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
333 if (!cpu_possible(cpu))
335 *pos = cpu+1;
336 return per_cpu_ptr(net->ct.stat, cpu);
345 int cpu; local
347 for (cpu = *pos; cpu < nr_cpu_id
[all...]
/net/xfrm/
xfrm_ipcomp.c         48  const int cpu = get_cpu(); local
49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
250 int cpu; local
268 for_each_possible_cpu(cpu) {
269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu);
279 int cpu; local
306 for_each_possible_cpu(cpu) {
311 *per_cpu_ptr(tfms, cpu) = tfm;
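
xfrm_ipcomp.c gives every CPU its own scratch buffer and crypto transform; get_cpu() disables preemption so the buffer cannot be handed to another task mid-use, and put_cpu() re-enables it afterwards. A minimal sketch of that access pattern (demo_scratches is a hypothetical per-CPU array of buffers, assumed to be populated elsewhere with alloc_percpu() and per-CPU allocations):

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static u8 * __percpu *demo_scratches;   /* one scratch buffer per CPU */

    static int demo_use_scratch(const u8 *in, int len)
    {
            const int cpu = get_cpu();      /* disable preemption, get CPU id */
            u8 *scratch = *per_cpu_ptr(demo_scratches, cpu);

            memcpy(scratch, in, len);       /* stand-in for real (de)compression */

            put_cpu();                      /* re-enable preemption */
            return 0;
    }
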
/net/batman-adv/
main.h               323  /* Stop preemption on local cpu while incrementing the counter */
332 /* Sum and return the cpu-local counters for index 'idx' */
337 int cpu; local
339 for_each_possible_cpu(cpu) {
340 counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
/net/bridge/
br_device.c          144  unsigned int cpu; local
146 for_each_possible_cpu(cpu) {
149 = per_cpu_ptr(br->stats, cpu);
/net/sunrpc/
svc.c                 44  SVC_POOL_PERCPU, /* one pool per cpu */
59 unsigned int *pool_to; /* maps pool id to cpu or node */
60 unsigned int *to_pool; /* maps cpu or node to pool id */
143 * want to divide the pools on cpu boundaries.
184 unsigned int cpu; local
191 for_each_online_cpu(cpu) {
193 m->to_pool[cpu] = pidx;
194 m->pool_to[pidx] = cpu;
350 svc_pool_for_cpu(struct svc_serv *serv, int cpu) argument
363 pidx = m->to_pool[cpu];
[all...]
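
svc.c's SVC_POOL_PERCPU mode builds a bidirectional map between CPUs and thread pools: to_pool[] turns a CPU number into a pool index (used by svc_pool_for_cpu() at line 350) and pool_to[] records which CPU backs each pool. A hedged sketch of building and using that map with illustrative demo_* names:

    #include <linux/cpumask.h>

    struct demo_pool_map {
            unsigned int  npools;
            unsigned int *to_pool;   /* cpu  -> pool id, nr_cpu_ids entries */
            unsigned int *pool_to;   /* pool -> cpu                         */
    };

    /* One pool per online CPU; both arrays are assumed preallocated. */
    static void demo_pool_map_init_percpu(struct demo_pool_map *m)
    {
            unsigned int pidx = 0;
            unsigned int cpu;

            for_each_online_cpu(cpu) {
                    m->to_pool[cpu] = pidx;
                    m->pool_to[pidx] = cpu;
                    pidx++;
            }
            m->npools = pidx;
    }

    /* Pick the pool serving a given CPU, as the per-CPU mode does. */
    static unsigned int demo_pool_for_cpu(const struct demo_pool_map *m, int cpu)
    {
            return m->to_pool[cpu];
    }
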

Completed in 7931 milliseconds
