/net/sunrpc/

svc_xprt.c
    344: int cpu;  [local]
    360: cpu = get_cpu();
    361: pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
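
The svc_xprt.c hits above show the sunrpc idiom for picking a per-CPU worker pool: pin the current CPU with get_cpu() just long enough to index the pool array. A minimal sketch of that pattern, with hypothetical my_* names (not the sunrpc code itself):

    #include <linux/smp.h>

    struct my_pool { int id; };
    struct my_server { struct my_pool *pools; unsigned int nr_pools; };

    /* Hypothetical lookup in the spirit of svc_pool_for_cpu(). */
    static struct my_pool *my_pool_for_cpu(struct my_server *srv, int cpu)
    {
        return &srv->pools[cpu % srv->nr_pools];
    }

    static struct my_pool *my_pick_pool(struct my_server *srv)
    {
        struct my_pool *pool;
        int cpu;

        cpu = get_cpu();    /* disables preemption, so cpu stays accurate */
        pool = my_pool_for_cpu(srv, cpu);
        put_cpu();          /* re-enable preemption */
        return pool;
    }

Once put_cpu() runs the task may migrate again, but the pool is only a locality hint, so a stale choice is harmless.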

/net/bridge/netfilter/

ebtables.c
    39:   * Each cpu has its own set of counters, so there is no need for write_lock in
    48:   #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
    49:       COUNTER_OFFSET(n) * cpu))
    958:  int i, cpu;  [local]
    961:  /* counters of cpu 0 */
    965:  /* add other counters to those of cpu 0 */
    966:  for_each_possible_cpu(cpu) {
    967:      if (cpu == 0)
    969:  counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
    1312: /* we add to the counters of the first cpu */
    [all...]
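
ebtables lays out one counter block per CPU at a fixed stride from a single base pointer, then folds every other CPU's block into CPU 0's copy at read time (the COUNTER_BASE hits above). A sketch of that arithmetic under hypothetical names; the real stride and alignment rules differ:

    #include <linux/cache.h>
    #include <linux/cpumask.h>
    #include <linux/kernel.h>

    struct my_counter { u64 pcnt, bcnt; };

    #define MY_COUNTER_OFFSET(n) \
        (ALIGN((n) * sizeof(struct my_counter), SMP_CACHE_BYTES))
    #define MY_COUNTER_BASE(c, n, cpu) \
        ((struct my_counter *)((char *)(c) + MY_COUNTER_OFFSET(n) * (cpu)))

    /* Fold all CPUs' counters into the CPU-0 block at offset zero. */
    static void my_fold_counters(struct my_counter *counters,
                                 unsigned int nentries)
    {
        struct my_counter *base;
        unsigned int i;
        int cpu;

        for_each_possible_cpu(cpu) {
            if (cpu == 0)
                continue;    /* CPU 0 is the accumulator */
            base = MY_COUNTER_BASE(counters, nentries, cpu);
            for (i = 0; i < nentries; i++) {
                counters[i].pcnt += base[i].pcnt;
                counters[i].bcnt += base[i].bcnt;
            }
        }
    }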

/net/core/

neighbour.c
    1892: int cpu;  [local]
    1897: for_each_possible_cpu(cpu) {
    1900: st = per_cpu_ptr(tbl->stats, cpu);
    2685: int cpu;  [local]
    2690: for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
    2691: if (!cpu_possible(cpu))
    2693: *pos = cpu+1;
    2694: return per_cpu_ptr(tbl->stats, cpu);
    2702: int cpu;  [local]
    [all...]
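
The second group of neighbour.c hits is a seq_file ->start handler: it walks the possible-CPU mask by hand and stores "cpu + 1" in *pos so the next read can resume after the CPU it just emitted. A sketch of that cursor convention, assuming a hypothetical per-CPU my_stats variable:

    #include <linux/percpu.h>
    #include <linux/seq_file.h>

    struct my_stat { unsigned long lookups, hits; };
    static DEFINE_PER_CPU(struct my_stat, my_stats);

    /* *pos == 0 emits a header line; *pos - 1 is the first CPU to try. */
    static void *my_stat_seq_start(struct seq_file *seq, loff_t *pos)
    {
        int cpu;

        if (*pos == 0)
            return SEQ_START_TOKEN;

        for (cpu = *pos - 1; cpu < nr_cpu_ids; ++cpu) {
            if (!cpu_possible(cpu))
                continue;
            *pos = cpu + 1;    /* ->next resumes from here */
            return &per_cpu(my_stats, cpu);
        }
        return NULL;    /* past the last possible CPU: end of file */
    }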

net-sysfs.c
    606: int err, cpu, i;  [local]
    630: for_each_cpu_and(cpu, mask, cpu_online_mask)
    631: map->cpus[i++] = cpu;
    722: table->flows[count].cpu = RPS_NO_CPU;
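
The net-sysfs.c hits build an RPS map from a user-supplied cpumask, keeping only CPUs that are currently online. A sketch of that fill loop; my_map and my_build_map are hypothetical, and the real code sizes and publishes the map under locking not shown here:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    struct my_map {
        unsigned int len;
        u16 cpus[];    /* compact list of CPU ids */
    };

    static struct my_map *my_build_map(const struct cpumask *mask)
    {
        struct my_map *map;
        int cpu, i = 0;

        /* cpumask_weight() is an upper bound: offline CPUs get skipped */
        map = kzalloc(sizeof(*map) + cpumask_weight(mask) * sizeof(u16),
                      GFP_KERNEL);
        if (!map)
            return NULL;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
            map->cpus[i++] = cpu;
        map->len = i;
        return map;
    }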

sock.c
    2609: int cpu, idx = prot->inuse_idx;  [local]
    2612: for_each_possible_cpu(cpu)
    2613: res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
    2655: int cpu, idx = prot->inuse_idx;  [local]
    2658: for_each_possible_cpu(cpu)
    2659: res += per_cpu(prot_inuse, cpu).val[idx];
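
sock.c shows both halves of the protocol "inuse" accounting: the hot path bumps only this CPU's slot with no lock, and the /proc reader sums every possible CPU's slot, accepting a fuzzy snapshot. Sketch with hypothetical names; the 64-slot array mirrors the inuse_idx indexing, but the size is made up:

    #include <linux/percpu.h>

    struct my_inuse { int val[64]; };
    static DEFINE_PER_CPU(struct my_inuse, my_inuse);

    /* Hot path: lock-free increment of this CPU's slot. */
    static void my_inuse_add(int idx, int delta)
    {
        this_cpu_add(my_inuse.val[idx], delta);
    }

    /* Slow path: a snapshot; concurrent updates may be missed. */
    static int my_inuse_get(int idx)
    {
        int cpu, res = 0;

        for_each_possible_cpu(cpu)
            res += per_cpu(my_inuse, cpu).val[idx];
        return res;
    }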

pktgen.c
    431:  int cpu;  [member of struct pktgen_thread]
    3421: int cpu = t->cpu;  [local]
    3423: BUG_ON(smp_processor_id() != cpu);
    3428: pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
    3559: int node = cpu_to_node(t->cpu);
    3641: static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)  [argument]
    3648: cpu_to_node(cpu));
    3655: t->cpu = cpu;
    3751: int cpu, ret = 0;  [local]
    [all...]
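
pktgen gives each CPU its own worker: the thread struct records its CPU, memory comes from that CPU's NUMA node, and kthread_bind() is what makes a check like the BUG_ON(smp_processor_id() != cpu) above hold. A sketch of that setup with hypothetical names and minimal error unwinding:

    #include <linux/err.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/slab.h>
    #include <linux/topology.h>

    struct my_thread {
        int cpu;
        struct task_struct *task;
    };

    static int my_thread_fn(void *arg)
    {
        while (!kthread_should_stop()) {
            /* ... per-CPU work elided ... */
            set_current_state(TASK_INTERRUPTIBLE);
            schedule();
        }
        return 0;
    }

    static struct my_thread *my_create_thread(int cpu)
    {
        struct my_thread *t;
        struct task_struct *p;

        t = kzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
        if (!t)
            return ERR_PTR(-ENOMEM);
        t->cpu = cpu;

        p = kthread_create_on_node(my_thread_fn, t, cpu_to_node(cpu),
                                   "my_worker/%d", cpu);
        if (IS_ERR(p)) {
            kfree(t);
            return ERR_CAST(p);
        }
        kthread_bind(p, cpu);    /* worker never leaves its CPU */
        t->task = p;
        wake_up_process(p);
        return t;
    }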

dev.c
    78:   #include <linux/cpu.h>
    1343: * can be even on different cpu. So just clear netif_running().
    1849: remove_xps_queue(struct xps_dev_maps *dev_maps, int cpu, u16 index)  [argument]
    1850: int cpu, u16 index)
    1856: map = xmap_dereference(dev_maps->cpu_map[cpu]);
    1863: RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
    1877: int cpu, i;  [local]
    1886: for_each_possible_cpu(cpu) {
    1888: if (!remove_xps_queue(dev_maps, cpu, i))
    1908: expand_xps_map(struct xps_map *map, int cpu, u16 index)  [argument]
    1909: int cpu, u16 index)
    1931: cpu_to_node(cpu));
    1949: int cpu, numa_node_id = -2;  [local]
    2949: int cpu = smp_processor_id(); /* ok because BHs are off */  [local]
    3093: int cpu = -1;  [local]
    3197: int cpu;  [local]
    3293: enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail)  [argument]
    3346: int cpu;  [local]
    3789: int cpu, ret;  [local]
    6958: unsigned int cpu, oldcpu = (unsigned long)ocpu;  [local]
    [all...]
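
The dev.c hit at line 2949 carries the key invariant in its own comment: smp_processor_id() is only trustworthy while the task cannot migrate, and disabled bottom halves are one way to guarantee that. Spelled out as a small sketch:

    #include <linux/interrupt.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);

    static void my_count_hit(void)
    {
        local_bh_disable();
        /* ok because BHs are off: no migration until local_bh_enable() */
        per_cpu(my_hits, smp_processor_id())++;
        local_bh_enable();
    }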

/net/ipv4/netfilter/

arp_tables.c
    719:  unsigned int cpu;  [local]
    722:  for_each_possible_cpu(cpu) {
    723:  seqcount_t *s = &per_cpu(xt_recseq, cpu);
    726:  xt_entry_foreach(iter, t->entries[cpu], t->size) {
    1085: /* choose the copy that is on our node/cpu */
    1509: /* choose the copy that is on our node/cpu */
    1615: /* choose the copy on our node/cpu */
    1788: /* choose the copy on our node/cpu */
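
The arp_tables hits pair each CPU's rule copy with per_cpu(xt_recseq, cpu): writers bump the sequence count around counter updates, and readers retry so they never fold a torn 64-bit value into the total. A reader-side sketch, with a hypothetical my_recseq standing in for xt_recseq:

    #include <linux/percpu.h>
    #include <linux/seqlock.h>

    static DEFINE_PER_CPU(seqcount_t, my_recseq);

    struct my_counter { u64 bcnt, pcnt; };

    /* Snapshot one CPU's counter, retrying if a writer was mid-update. */
    static void my_read_counter(int cpu, const struct my_counter *src,
                                struct my_counter *sum)
    {
        seqcount_t *s = &per_cpu(my_recseq, cpu);
        unsigned int start;
        u64 bcnt, pcnt;

        do {
            start = read_seqcount_begin(s);
            bcnt = src->bcnt;
            pcnt = src->pcnt;
        } while (read_seqcount_retry(s, start));

        sum->bcnt += bcnt;
        sum->pcnt += pcnt;
    }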

ip_tables.c
    302: unsigned int *stackptr, origptr, cpu;  [local]
    329: cpu = smp_processor_id();
    335: table_base = private->entries[cpu];
    336: jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
    337: stackptr = per_cpu_ptr(private->stackptr, cpu);
    884: unsigned int cpu;  [local]
    887: for_each_possible_cpu(cpu) {
    888: seqcount_t *s = &per_cpu(xt_recseq, cpu);
    891: xt_entry_foreach(iter, t->entries[cpu], t->size) {
    943: /* choose the copy that is on our node/cpu,
    [all...]
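
The first group of ip_tables.c hits is the rule-walk prologue: the table copy, jump stack, and stack-depth pointer all belong to the CPU the packet is handled on, which is stable because the walk runs in softirq context. A sketch of the scratch selection under a hypothetical private struct; the traversal itself is elided:

    #include <linux/percpu.h>
    #include <linux/smp.h>

    struct my_entry;

    struct my_table_private {
        void **entries;                  /* one rule-blob copy per CPU */
        struct my_entry ***jumpstack;    /* one jump stack per CPU */
        unsigned int __percpu *stackptr; /* per-CPU stack depth */
    };

    static void my_do_table(struct my_table_private *private,
                            struct my_entry *back)
    {
        unsigned int cpu = smp_processor_id();
        struct my_entry **jumpstack = private->jumpstack[cpu];
        unsigned int *stackptr = per_cpu_ptr(private->stackptr, cpu);
        unsigned int origptr = *stackptr;    /* restore point */

        jumpstack[(*stackptr)++] = back;    /* push a return target */
        /* ... walk rules in private->entries[cpu], popping on return ... */
        *stackptr = origptr;    /* unwind before leaving */
    }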

/net/ipv4/

route.c
    245: int cpu;  [local]
    250: for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
    251: if (!cpu_possible(cpu))
    253: *pos = cpu+1;
    254: return &per_cpu(rt_cache_stat, cpu);
    261: int cpu;  [local]
    263: for (cpu = *pos; cpu < nr_cpu_id...
    [all...]

tcp.c
    2894: int cpu;  [local]
    2896: for_each_possible_cpu(cpu) {
    2897: if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
    2903: per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
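
tcp.c populates one MD5 transform per possible CPU so the signing hot path never contends on a shared tfm. The md5_desc.tfm field in the hits belongs to the older crypto_hash API; this sketch uses the shash API instead, and error unwinding is elided:

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct crypto_shash *, my_md5_tfm);

    static int my_md5_pool_alloc(void)
    {
        int cpu;

        for_each_possible_cpu(cpu) {
            struct crypto_shash *tfm;

            if (per_cpu(my_md5_tfm, cpu))
                continue;    /* slot already populated */
            tfm = crypto_alloc_shash("md5", 0, 0);
            if (IS_ERR(tfm))
                return PTR_ERR(tfm);    /* caller frees the rest */
            per_cpu(my_md5_tfm, cpu) = tfm;
        }
        return 0;
    }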

/net/ipv6/netfilter/

ip6_tables.c
    327: unsigned int *stackptr, origptr, cpu;  [local]
    357: cpu = smp_processor_id();
    358: table_base = private->entries[cpu];
    359: jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
    360: stackptr = per_cpu_ptr(private->stackptr, cpu);
    894: unsigned int cpu;  [local]
    897: for_each_possible_cpu(cpu) {
    898: seqcount_t *s = &per_cpu(xt_recseq, cpu);
    901: xt_entry_foreach(iter, t->entries[cpu], t->size) {
    953: /* choose the copy that is on our node/cpu,
    [all...]

/net/iucv/

iucv.c
    51:   #include <linux/cpu.h>
    206:  * iucv_active_cpu: contains the number of the cpu executing the tasklet
    309:  * Anchor for per-cpu IUCV command parameter block.
    375:  * Allow iucv interrupts on this cpu.
    379:  int cpu = smp_processor_id();  [local]
    391:  parm = iucv_param_irq[cpu];
    408:  /* Set indication that iucv interrupts are allowed for this cpu. */
    409:  cpumask_set_cpu(cpu, &iucv_irq_cpumask);
    416:  * Block iucv interrupts on this cpu.
    420:  int cpu  [local]
    440:  int cpu = smp_processor_id();  [local]
    465:  int cpu = smp_processor_id();  [local]
    520:  int cpu = smp_processor_id();  [local]
    544:  int cpu;  [local]
    564:  int cpu;  [local]
    584:  int cpu, rc;  [local]
    624:  free_iucv_data(int cpu)  [argument]
    634:  alloc_iucv_data(int cpu)  [argument]
    664:  long cpu = (long) hcpu;  [local]
    1899: int cpu;  [local]
    2020: int cpu;  [local]
    2095: int cpu;  [local]
    [all...]
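
iucv.c keeps a cpumask recording which CPUs currently have IUCV interrupts enabled; the enable and disable routines run on the target CPU and flip that CPU's bit. A sketch of the bookkeeping with the actual s390 commands elided; the names are hypothetical, and the older get_online_cpus() hotplug guard is assumed:

    #include <linux/cpu.h>
    #include <linux/cpumask.h>
    #include <linux/smp.h>

    static cpumask_t my_irq_cpumask;    /* CPUs accepting our interrupts */

    /* Both callbacks run on the target CPU via smp_call_function_single(). */
    static void my_allow_cpu(void *data)
    {
        /* ... issue the per-CPU enable command here ... */
        cpumask_set_cpu(smp_processor_id(), &my_irq_cpumask);
    }

    static void my_block_cpu(void *data)
    {
        /* ... issue the per-CPU disable command here ... */
        cpumask_clear_cpu(smp_processor_id(), &my_irq_cpumask);
    }

    static void my_allow_all(void)
    {
        int cpu;

        get_online_cpus();    /* keep the online mask stable */
        for_each_online_cpu(cpu)
            smp_call_function_single(cpu, my_allow_cpu, NULL, 1);
        put_online_cpus();
    }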

/net/netfilter/

nf_conntrack_netlink.c
    1161: int cpu;  [local]
    1170: for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
    1173: if (!cpu_possible(cpu))
    1176: pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
    1198: cb->args[0] = cpu;
    1854: ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq, __u16 cpu, const struct ip_conntrack_stat *st)  [argument]
    1855: __u16 cpu, const struct ip_conntrack_stat *st)
    1869: nfmsg->res_id = htons(cpu);
    1900: int cpu;  [local]
    3020: ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 portid, u32 seq, int cpu, const struct ip_conntrack_stat *st)  [argument]
    3054: int cpu;  [local]
    [all...]
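
The conntrack dump hits show how a netlink dump survives running out of skb space: cb->args[0] holds the next CPU to emit, so the callback can be re-entered and resume mid-walk. A sketch; my_fill_one_cpu is a hypothetical helper that returns a negative value when the skb is full:

    #include <linux/cpumask.h>
    #include <linux/netlink.h>
    #include <linux/skbuff.h>

    static int my_fill_one_cpu(struct sk_buff *skb,
                               struct netlink_callback *cb, int cpu);

    static int my_stat_cpu_dump(struct sk_buff *skb,
                                struct netlink_callback *cb)
    {
        int cpu;

        for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
            if (!cpu_possible(cpu))
                continue;
            if (my_fill_one_cpu(skb, cb, cpu) < 0)
                break;    /* skb full: resume here on the next call */
        }
        cb->args[0] = cpu;    /* cursor for the next invocation */
        return skb->len;
    }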

nf_tables_api.c
    889:  int cpu;  [local]
    892:  for_each_possible_cpu(cpu) {
    893:  cpu_stats = per_cpu_ptr(stats, cpu);
    1130: /* Restore old counters on this cpu, no problem. Per-cpu statistics
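
The nf_tables_api.c hits sum per-CPU stats blocks. What the snippet does not show is how 64-bit counters stay consistent on 32-bit hosts: readers typically wrap each CPU's copy in a u64_stats retry loop. A sketch assuming a hypothetical struct my_stats whose syncp was set up with u64_stats_init():

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct my_stats {
        u64 bytes, pkts;
        struct u64_stats_sync syncp;
    };

    static void my_sum_stats(struct my_stats __percpu *stats,
                             u64 *bytes, u64 *pkts)
    {
        const struct my_stats *cpu_stats;
        unsigned int start;
        u64 b, p;
        int cpu;

        *bytes = *pkts = 0;
        for_each_possible_cpu(cpu) {
            cpu_stats = per_cpu_ptr(stats, cpu);
            do {    /* retry if this CPU's writer was mid-update */
                start = u64_stats_fetch_begin(&cpu_stats->syncp);
                b = cpu_stats->bytes;
                p = cpu_stats->pkts;
            } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));
            *bytes += b;
            *pkts += p;
        }
    }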

x_tables.c
    662: int cpu;  [local]
    674: for_each_possible_cpu(cpu) {
    676: newinfo->entries[cpu] = kmalloc_node(size,
    678: cpu_to_node(cpu));
    680: newinfo->entries[cpu] = vmalloc_node(size,
    681: cpu_to_node(cpu));
    683: if (newinfo->entries[cpu] == NULL) {
    695: int cpu;  [local]
    697: for_each_possible_cpu(cpu)
    698: kvfree(info->entries[cpu]);
    753: int cpu;  [local]
    [all...]
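
x_tables.c allocates one rule blob per possible CPU, on that CPU's NUMA node, falling back from kmalloc to vmalloc for large tables; kvfree() releases either kind. A sketch with a hypothetical wrapper and a made-up PAGE_SIZE threshold:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void **my_alloc_entries(size_t size)
    {
        void **entries;
        int cpu;

        entries = kcalloc(nr_cpu_ids, sizeof(void *), GFP_KERNEL);
        if (!entries)
            return NULL;

        for_each_possible_cpu(cpu) {
            if (size <= PAGE_SIZE)
                entries[cpu] = kmalloc_node(size, GFP_KERNEL,
                                            cpu_to_node(cpu));
            else
                entries[cpu] = vmalloc_node(size, cpu_to_node(cpu));
            if (!entries[cpu])
                goto fail;
        }
        return entries;

    fail:
        for_each_possible_cpu(cpu)    /* kvfree(NULL) is a no-op */
            kvfree(entries[cpu]);
        kfree(entries);
        return NULL;
    }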

/net/packet/

af_packet.c
    687:  * timer fires on other cpu:
    936:  * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
    1192: int cpu;  [local]
    1198: for_each_possible_cpu(cpu)
    1199: refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
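
af_packet.c counts pending transmissions with a per-CPU refcount: increments and decrements touch only the local CPU, and the true total is computed lazily by summing all CPUs, which is only meaningful once new updates are excluded. A sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/percpu.h>

    static unsigned int __percpu *my_pending;

    static int my_pending_init(void)
    {
        my_pending = alloc_percpu(unsigned int);
        return my_pending ? 0 : -ENOMEM;
    }

    /* Hot path: local-CPU only, no shared cacheline. */
    static void my_pending_inc(void)
    {
        this_cpu_inc(*my_pending);
    }

    /* Slow path: sum all CPUs; caller must fence off new increments. */
    static unsigned int my_pending_total(void)
    {
        unsigned int refcnt = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            refcnt += *per_cpu_ptr(my_pending, cpu);
        return refcnt;
    }

socket.c below uses the same possible-CPU summation for its sockets_in_use counter.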

/net/

socket.c
    2714: int cpu;  [local]
    2717: for_each_possible_cpu(cpu)
    2718: counter += per_cpu(sockets_in_use, cpu);