Searched refs:bucket (Results 1 - 25 of 38) sorted by relevance

/net/ceph/crush/
mapper.c:53 * bucket choose methods
55 * For each bucket algorithm, we have a "choose" method that, given a
57 * will produce an item in the bucket.
61 * Choose based on a random permutation of the bucket.
65 * calculate an actual random permutation of the bucket members.
69 static int bucket_perm_choose(struct crush_bucket *bucket, argument
72 unsigned int pr = r % bucket->size;
76 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
77 dprintk("bucket
129 bucket_uniform_choose(struct crush_bucket_uniform *bucket, int x, int r) argument
136 bucket_list_choose(struct crush_bucket_list *bucket, int x, int r) argument
189 bucket_tree_choose(struct crush_bucket_tree *bucket, int x, int r) argument
220 bucket_straw_choose(struct crush_bucket_straw *bucket, int x, int r) argument
293 crush_choose(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int x, int numrep, int type, int *out, int outpos, int firstn, int recurse_to_leaf, int descend_once, int *out2) argument
[all...]
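
The mapper.c comments above describe CRUSH's per-bucket "choose" methods: given an input value x and a replica number r, each bucket type deterministically maps (x, r) to one of its items, e.g. via a hash or a pseudo-random permutation of the members. A minimal user-space sketch of the uniform-bucket case follows; the mix() function and the bucket layout are illustrative stand-ins, not the kernel's crush_hash32_3() or crush_bucket structs.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the CRUSH hash mixer (illustrative only). */
static uint32_t mix(uint32_t a, uint32_t b, uint32_t c)
{
        uint32_t h = a * 0x9e3779b1u ^ b * 0x85ebca6bu ^ c * 0xc2b2ae35u;
        h ^= h >> 16;
        return h;
}

/*
 * "Uniform bucket" style choose: all items have equal weight, so the
 * pick is just a hash of (x, bucket id, r) reduced mod the size.
 */
static int bucket_uniform_choose(const int *items, unsigned size,
                                 uint32_t bucket_id, int x, int r)
{
        return items[mix((uint32_t)x, bucket_id, (uint32_t)r) % size];
}

int main(void)
{
        int items[] = { 10, 11, 12, 13 };
        int r;

        /* The same x always maps replicas to the same sequence of items. */
        for (r = 0; r < 3; r++)
                printf("replica %d -> item %d\n", r,
                       bucket_uniform_choose(items, 4, 7, 12345, r));
        return 0;
}
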
/net/9p/
error.c:196 int bucket; local
199 for (bucket = 0; bucket < ERRHASHSZ; bucket++)
200 INIT_HLIST_HEAD(&hash_errmap[bucket]);
205 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
207 hlist_add_head(&c->list, &hash_errmap[bucket]);
225 int bucket; local
229 bucket = jhash(errstr, len, 0) % ERRHASHSZ;
230 hlist_for_each_entry(c, &hash_errmap[bucket], lis
[all...]
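
error.c builds a fixed-size chained hash map from 9P error strings to errno values: every head is initialised with INIT_HLIST_HEAD(), entries land in bucket jhash(name) % ERRHASHSZ, and lookup walks the matching chain comparing names. A self-contained sketch of the same shape, with a plain singly linked list and a simple string hash standing in for hlist and jhash():

#include <stdio.h>
#include <string.h>

#define ERRHASHSZ 32

struct errmap {
        const char *name;
        int errno_val;
        struct errmap *next;
};

static struct errmap *hash_errmap[ERRHASHSZ];

/* Stand-in for jhash(): any decent string hash works here. */
static unsigned hash_str(const char *s)
{
        unsigned h = 5381;
        while (*s)
                h = h * 33 + (unsigned char)*s++;
        return h;
}

static void errmap_add(struct errmap *e)
{
        unsigned bucket = hash_str(e->name) % ERRHASHSZ;
        e->next = hash_errmap[bucket];       /* add at chain head, like hlist_add_head() */
        hash_errmap[bucket] = e;
}

static int errstr_to_errno(const char *errstr)
{
        unsigned bucket = hash_str(errstr) % ERRHASHSZ;
        struct errmap *c;

        for (c = hash_errmap[bucket]; c; c = c->next)
                if (!strcmp(c->name, errstr))
                        return c->errno_val;
        return -1;
}

int main(void)
{
        struct errmap eperm = { "Operation not permitted", 1, NULL };

        errmap_add(&eperm);
        printf("%d\n", errstr_to_errno("Operation not permitted"));
        return 0;
}
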
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c:29 unsigned int bucket; member in struct:ct_iter_state
38 for (st->bucket = 0;
39 st->bucket < net->ct.htable_size;
40 st->bucket++) {
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
57 if (likely(get_nulls_value(head) == st->bucket)) {
58 if (++st->bucket >= net->ct.htable_size)
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
215 unsigned int bucket; member in struct:ct_expect_iter_state
224 for (st->bucket
[all...]
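
This is the classic seq_file idiom for dumping a hash table (the nf_conntrack_standalone.c and nf_conntrack_expect.c hits below repeat it): a get_first() helper scans buckets until it finds a non-empty chain, and a get_next() helper follows the chain, bumping st->bucket when the chain runs out; the get_nulls_value(head) == st->bucket test additionally detects that an RCU traversal wandered onto another chain. A simplified sketch of the first/next pair, without RCU or nulls markers:

#include <stddef.h>
#include <stdio.h>

#define HTABLE_SIZE 8

struct entry { int id; struct entry *next; };

static struct entry *htable[HTABLE_SIZE];

struct iter_state { unsigned int bucket; };

/* Find the first entry: scan buckets until a chain is non-empty. */
static struct entry *ct_get_first(struct iter_state *st)
{
        for (st->bucket = 0; st->bucket < HTABLE_SIZE; st->bucket++)
                if (htable[st->bucket])
                        return htable[st->bucket];
        return NULL;
}

/* Advance within the chain; on chain end, move to the next bucket. */
static struct entry *ct_get_next(struct iter_state *st, struct entry *e)
{
        e = e->next;
        while (!e) {
                if (++st->bucket >= HTABLE_SIZE)
                        return NULL;
                e = htable[st->bucket];
        }
        return e;
}

int main(void)
{
        struct entry a = { 1, NULL }, b = { 2, NULL };
        struct iter_state st;
        struct entry *e;

        htable[2] = &a;
        htable[5] = &b;
        for (e = ct_get_first(&st); e; e = ct_get_next(&st, e))
                printf("bucket %u: id %d\n", st.bucket, e->id);
        return 0;
}
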
/net/atm/
proc.c:69 int bucket; member in struct:vcc_state
79 static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) argument
84 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
85 struct hlist_head *head = &vcc_hash[*bucket];
99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
100 sk = sk_head(&vcc_hash[*bucket]);
111 return __vcc_walk(&state->sk, state->family, &state->bucket, l) ?
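
__vcc_walk() is a resumable variant of the same walk: it positions the cursor on the l-th socket matching the requested address family, recording which bucket it stopped in. The sketch below captures the offset-skip idea but, unlike the kernel code, always rescans from bucket 0; the types are toy stand-ins:

#include <stdio.h>

#define VCC_HTABLE_SIZE 4

struct sock { int family; struct sock *next; };

static struct sock *vcc_hash[VCC_HTABLE_SIZE];

/*
 * Position *sock on the l-th matching socket, scanning buckets in
 * order and filtering by family. Returns 1 on success, 0 if the
 * offset is past the end of the table.
 */
static int vcc_walk(struct sock **sock, int family, int *bucket, long l)
{
        struct sock *sk;

        for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
                for (sk = vcc_hash[*bucket]; sk; sk = sk->next) {
                        if (sk->family != family)
                                continue;
                        if (l-- == 0) {  /* consumed the offset: found it */
                                *sock = sk;
                                return 1;
                        }
                }
        }
        return 0;
}

int main(void)
{
        struct sock a = { 2, NULL }, b = { 2, NULL };
        struct sock *sk;
        int bucket;

        vcc_hash[0] = &a;
        vcc_hash[3] = &b;
        if (vcc_walk(&sk, 2, &bucket, 1))
                printf("offset 1 is in bucket %d\n", bucket);
        return 0;
}
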
/net/openvswitch/
vport.c:87 struct hlist_head *bucket = hash_bucket(net, name); local
90 hlist_for_each_entry_rcu(vport, bucket, hash_node)
174 struct hlist_head *bucket; local
182 bucket = hash_bucket(ovs_dp_get_net(vport->dp),
184 hlist_add_head_rcu(&vport->hash_node, bucket);
flow.h:180 struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
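
vport.c keys its table by device name, and both lookup and insert go through the same hash_bucket() helper to pick the chain head, with hlist_for_each_entry_rcu() for the scan and hlist_add_head_rcu() for the insert. A sketch of that shared-helper structure, minus RCU and with an illustrative string hash:

#include <stdio.h>
#include <string.h>

#define VPORT_HASH_BUCKETS 16

struct vport { char name[16]; struct vport *next; };

static struct vport *dev_table[VPORT_HASH_BUCKETS];

/* Shared helper: lookup and insert derive the chain head the same way. */
static struct vport **hash_bucket(const char *name)
{
        unsigned h = 0;

        while (*name)
                h = h * 31 + (unsigned char)*name++;
        return &dev_table[h % VPORT_HASH_BUCKETS];
}

static struct vport *vport_locate(const char *name)
{
        struct vport *v;

        for (v = *hash_bucket(name); v; v = v->next)
                if (!strcmp(v->name, name))
                        return v;
        return NULL;
}

static void vport_add(struct vport *v)
{
        struct vport **head = hash_bucket(v->name);

        v->next = *head;
        *head = v;
}

int main(void)
{
        struct vport v = { "vport0", NULL };

        vport_add(&v);
        printf("%s\n", vport_locate("vport0") ? "found" : "missing");
        return 0;
}
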
/net/netfilter/
nf_conntrack_standalone.c:50 unsigned int bucket; member in struct:ct_iter_state
60 for (st->bucket = 0;
61 st->bucket < net->ct.htable_size;
62 st->bucket++) {
63 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
78 if (likely(get_nulls_value(head) == st->bucket)) {
79 if (++st->bucket >= net->ct.htable_size)
84 &net->ct.hash[st->bucket]));
xt_hashlimit.c:42 MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
747 unsigned int *bucket; variable
753 bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
754 if (!bucket)
757 *bucket = *pos;
758 return bucket;
764 unsigned int *bucket = (unsigned int *)v; local
766 *pos = ++(*bucket);
771 return bucket;
778 unsigned int *bucket variable
781 kfree(bucket); variable
829 unsigned int *bucket = (unsigned int *)v; local
[all...]
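
xt_hashlimit's proc code uses a different cursor style: ->start() kmalloc()s a single unsigned int holding the bucket index, ->next() advances it and mirrors it into *pos, and ->stop() kfree()s it, so the iterator state lives on the heap rather than in a private struct. A user-space sketch of those three callbacks (the names and the SIZE constant are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define SIZE 8   /* number of hash buckets (illustrative) */

/* ->start(): allocate the cursor and position it at *pos. */
static unsigned int *dl_seq_start(long *pos)
{
        unsigned int *bucket;

        if (*pos >= SIZE)
                return NULL;
        bucket = malloc(sizeof(*bucket));
        if (!bucket)
                return NULL;
        *bucket = (unsigned int)*pos;
        return bucket;
}

/* ->next(): advance the cursor and mirror it into *pos. */
static unsigned int *dl_seq_next(unsigned int *bucket, long *pos)
{
        *pos = ++(*bucket);
        if (*bucket >= SIZE) {
                free(bucket);
                return NULL;
        }
        return bucket;
}

/* ->stop(): release the cursor if iteration ended early. */
static void dl_seq_stop(unsigned int *bucket)
{
        free(bucket);   /* free(NULL) is a safe no-op */
}

int main(void)
{
        long pos = 0;
        unsigned int *b;

        for (b = dl_seq_start(&pos); b; b = dl_seq_next(b, &pos))
                printf("show bucket %u\n", *b);
        dl_seq_stop(NULL);
        return 0;
}
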
nf_conntrack_expect.c:440 unsigned int bucket; member in struct:ct_expect_iter_state
449 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
450 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
465 if (++st->bucket >= nf_ct_expect_hsize)
467 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
xt_recent.c:458 unsigned int bucket; member in struct:recent_iter_state
471 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
472 list_for_each_entry(e, &t->iphash[st->bucket], list)
485 while (head == &t->iphash[st->bucket]) {
486 if (++st->bucket >= ip_list_hash_size)
488 head = t->iphash[st->bucket].next;
nfnetlink_log.c:926 unsigned int bucket; member in struct:iter_state
937 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
938 struct hlist_head *head = &log->instance_table[st->bucket];
954 if (++st->bucket >= INSTANCE_BUCKETS)
958 head = &log->instance_table[st->bucket];
nf_conntrack_core.c:327 unsigned int bucket = hash_bucket(hash, net); local
334 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
348 if (get_nulls_value(n) != bucket) {
1216 void *data, unsigned int *bucket)
1223 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1224 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1250 unsigned int bucket = 0; local
1252 while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
1439 int i, bucket, r local
1215 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) argument
[all...]
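
get_next_corpse() combines a callback filter with a resume cursor: it scans from *bucket onward, returns the first entry the iter() callback accepts, and leaves *bucket pointing at the hit so the caller can kill the entry and continue the sweep. A sketch under the same contract (the demo keeps one entry per chain so unlinking at the head is safe):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define HTABLE_SIZE 4

struct conn { int mark; struct conn *next; };

static struct conn *hash[HTABLE_SIZE];

/* Scan from *bucket, return the first entry iter() accepts. */
static struct conn *get_next_corpse(bool (*iter)(struct conn *, void *),
                                    void *data, unsigned int *bucket)
{
        struct conn *c;

        for (; *bucket < HTABLE_SIZE; (*bucket)++)
                for (c = hash[*bucket]; c; c = c->next)
                        if (iter(c, data))
                                return c;
        return NULL;
}

static bool mark_matches(struct conn *c, void *data)
{
        return c->mark == *(int *)data;
}

int main(void)
{
        struct conn a = { 1, NULL }, b = { 2, NULL };
        unsigned int bucket = 0;
        int want = 2;
        struct conn *c;

        hash[0] = &a;
        hash[3] = &b;
        while ((c = get_next_corpse(mark_matches, &want, &bucket)) != NULL) {
                printf("kill entry in bucket %u\n", bucket);
                hash[bucket] = c->next;  /* unlink it so the sweep progresses */
        }
        return 0;
}
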
nfnetlink_queue_core.c:1164 unsigned int bucket; member in struct:iter_state
1178 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1179 if (!hlist_empty(&q->instance_table[st->bucket]))
1180 return q->instance_table[st->bucket].first;
1194 if (++st->bucket >= INSTANCE_BUCKETS)
1198 h = q->instance_table[st->bucket].first;
/net/netfilter/ipvs/
ip_vs_lblc.c:107 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblc_table
174 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
187 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
278 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
334 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
373 INIT_HLIST_HEAD(&tbl->bucket[i]);
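
ip_vs_lblc.c embeds the bucket array directly in its table struct and flushes or expires entries with hlist_for_each_entry_safe(), which tolerates unlinking the current node mid-walk. The sketch below gets the same safety from a pointer-to-pointer walk over singly linked chains; the struct names and timeout arithmetic are illustrative:

#include <stdio.h>
#include <stdlib.h>

#define TAB_SIZE 8

struct entry { int lastuse; struct entry *next; };

struct lblc_table {
        struct entry *bucket[TAB_SIZE];   /* hash chains, as in the kernel struct */
};

/* Drop every entry older than now - timeout, unlinking in place. */
static void table_expire(struct lblc_table *tbl, int now, int timeout)
{
        int i;

        for (i = 0; i < TAB_SIZE; i++) {
                struct entry **pp = &tbl->bucket[i];

                while (*pp) {
                        struct entry *e = *pp;

                        if (now - e->lastuse > timeout) {
                                *pp = e->next;   /* unlink; do not advance */
                                free(e);
                        } else {
                                pp = &e->next;
                        }
                }
        }
}

int main(void)
{
        struct lblc_table tbl = { { NULL } };
        struct entry *e = malloc(sizeof(*e));

        e->lastuse = 0;
        e->next = NULL;
        tbl.bucket[3] = e;
        table_expire(&tbl, 100, 30);
        printf("bucket 3 %s\n", tbl.bucket[3] ? "kept" : "expired");
        return 0;
}
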
ip_vs_lblcr.c:287 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblcr_table
348 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
361 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
418 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
447 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
502 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
540 INIT_HLIST_HEAD(&tbl->bucket[i]);
/net/mac80211/
mesh_pathtbl.c:336 struct hlist_head *bucket; local
339 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
340 hlist_for_each_entry_rcu(node, bucket, list) {
504 struct hlist_head *bucket; local
523 bucket = &tbl->hash_buckets[hash_idx];
527 hlist_for_each_entry(node, bucket, list) {
556 hlist_add_head_rcu(&new_node->list, bucket);
638 struct hlist_head *bucket; local
673 bucket = &tbl->hash_buckets[hash_idx];
678 hlist_for_each_entry(node, bucket, lis
859 struct hlist_head *bucket; local
[all...]
/net/core/
net-procfs.c:34 unsigned int bucket; local
41 bucket = get_bucket(*pos) + 1;
42 *pos = set_bucket_offset(bucket, 1);
43 } while (bucket < NETDEV_HASHENTRIES);
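
net-procfs.c packs two coordinates into the one loff_t that seq_file gives it: get_bucket() recovers the hash bucket from *pos and set_bucket_offset() re-encodes a (bucket, in-chain offset) pair, so a resumed read lands on the exact device. A sketch of such an encode/decode pair, assuming a simple high-bits/low-bits split (the kernel's exact bit budget differs):

#include <stdio.h>

#define BUCKET_BITS 8   /* low bits hold the in-chain offset (illustrative split) */

static unsigned int get_bucket(long long pos)
{
        return (unsigned int)(pos >> BUCKET_BITS);
}

static unsigned int get_offset(long long pos)
{
        return (unsigned int)(pos & ((1 << BUCKET_BITS) - 1));
}

static long long set_bucket_offset(unsigned int bucket, unsigned int offset)
{
        return ((long long)bucket << BUCKET_BITS) | offset;
}

int main(void)
{
        long long pos = set_bucket_offset(12, 3);

        printf("bucket=%u offset=%u\n", get_bucket(pos), get_offset(pos));
        return 0;
}
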
neighbour.c:2403 int bucket = state->bucket; local
2406 for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
2407 n = rcu_dereference_bh(nht->hash_buckets[bucket]);
2431 state->bucket = bucket;
2473 if (++state->bucket >= (1 << nht->hash_shift))
2476 n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
2505 int bucket = state->bucket; local
[all...]
/net/llc/
llc_proc.c:67 static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) argument
72 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
/net/ipv4/
tcp_ipv4.c:119 held not per host, but per port pair and TW bucket is used as state
122 If TW bucket has been already destroyed we fall back to VJ's scheme
1548 * timewait bucket, so that all the necessary checks
2206 /* Clean up a referenced TCP bind bucket. */
2237 * starting from bucket given in st->bucket; when st->bucket is zero the
2250 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2256 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2311 if (++st->bucket < INET_LHTABLE_SIZ
[all...]
raw.c:914 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
915 ++state->bucket) {
916 sk_for_each(sk, &state->h->ht[state->bucket])
935 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
936 sk = sk_head(&state->h->ht[state->bucket]);
1008 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
ping.c:1019 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
1020 ++state->bucket) {
1024 hslot = &ping_table.hash[state->bucket];
1049 return ping_get_first(seq, state->bucket + 1);
1066 state->bucket = 0;
1092 int bucket, int *len)
1102 bucket, src, srcp, dest, destp, sp->sk_state,
1123 ping_format_sock(v, seq, state->bucket, &len);
1091 ping_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) argument
udp.c:2024 for (state->bucket = start; state->bucket <= state->udp_table->mask;
2025 ++state->bucket) {
2027 struct udp_hslot *hslot = &state->udp_table->hash[state->bucket];
2056 if (state->bucket <= state->udp_table->mask)
2057 spin_unlock_bh(&state->udp_table->hash[state->bucket].lock);
2058 return udp_get_first(seq, state->bucket + 1);
2076 state->bucket = MAX_UDP_PORTS;
2098 if (state->bucket <= state->udp_table->mask)
2099 spin_unlock_bh(&state->udp_table->hash[state->bucket]
2145 udp4_format_sock(struct sock *sp, struct seq_file *f, int bucket, int *len) argument
[all...]
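
udp.c adds per-bucket locking to the walk: each udp_hslot has its own spinlock, taken before scanning that chain and dropped before moving to the next bucket, so a long /proc/net/udp read never holds more than one bucket's lock at a time. A pthread-based sketch of the same discipline (the struct layout and MASK are illustrative):

#include <pthread.h>
#include <stdio.h>

#define MASK 7   /* table size 8, a power-of-two mask like udp_table->mask */

struct sock { int port; struct sock *next; };

struct hslot {
        struct sock *head;
        pthread_mutex_t lock;   /* per-bucket lock, like udp_hslot */
};

static struct hslot table[MASK + 1];

/* Visit every socket, holding only one bucket lock at a time. */
static void walk_all(void (*fn)(struct sock *))
{
        unsigned int bucket;

        for (bucket = 0; bucket <= MASK; bucket++) {
                struct hslot *h = &table[bucket];
                struct sock *sk;

                pthread_mutex_lock(&h->lock);
                for (sk = h->head; sk; sk = sk->next)
                        fn(sk);
                pthread_mutex_unlock(&h->lock);
        }
}

static void show(struct sock *sk)
{
        printf("port %d\n", sk->port);
}

int main(void)
{
        struct sock s = { 53, NULL };
        unsigned int i;

        for (i = 0; i <= MASK; i++)
                pthread_mutex_init(&table[i].lock, NULL);
        table[53 & MASK].head = &s;
        walk_all(show);
        return 0;
}
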
/net/ipv6/
ip6_flowlabel.c:664 int bucket; member in struct:ip6fl_iter_state
675 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
676 for_each_fl_rcu(state->bucket, fl) {
697 if (++state->bucket <= FL_HASH_MASK) {
698 for_each_fl_rcu(state->bucket, fl) {
/net/appletalk/
aarp.c:910 int bucket; member in struct:aarp_iter_state
922 int ct = iter->bucket;
932 iter->bucket = ct;
959 iter->bucket = 0;
975 /* next entry in current bucket */
979 /* next bucket or table */
981 ++iter->bucket;

Completed in 2941 milliseconds
