Searched refs:bucket (Results 1 - 25 of 43) sorted by relevance

/net/ceph/crush/
mapper.c
53 * bucket choose methods
55 * For each bucket algorithm, we have a "choose" method that, given a
57 * will produce an item in the bucket.
61 * Choose based on a random permutation of the bucket.
65 * calculate an actual random permutation of the bucket members.
69 static int bucket_perm_choose(struct crush_bucket *bucket, argument
72 unsigned int pr = r % bucket->size;
76 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
77 dprintk("bucket
129 bucket_uniform_choose(struct crush_bucket_uniform *bucket, int x, int r) argument
136 bucket_list_choose(struct crush_bucket_list *bucket, int x, int r) argument
189 bucket_tree_choose(struct crush_bucket_tree *bucket, int x, int r) argument
221 bucket_straw_choose(struct crush_bucket_straw *bucket, int x, int r) argument
302 crush_choose_firstn(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, unsigned int local_retries, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, int *out2, int parent_r) argument
477 crush_choose_indep(const struct crush_map *map, struct crush_bucket *bucket, const __u32 *weight, int weight_max, int x, int left, int numrep, int type, int *out, int outpos, unsigned int tries, unsigned int recurse_tries, int recurse_to_leaf, int *out2, int parent_r) argument
[all...]
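
Note: the mapper.c comments above describe "choose" methods that, given a value x and a replica position r, deterministically pick an item from the bucket via an x-keyed pseudo-random permutation. Below is a minimal userspace sketch of that idea, not the kernel code: a toy LCG stands in for CRUSH's crush_hash32() mixing, the permutation is built eagerly rather than incrementally as in bucket_perm_choose(), and the bucket size is assumed to be at most 64.

    #include <stdio.h>

    static unsigned int lcg_next(unsigned int *state)
    {
        *state = *state * 1664525u + 1013904223u;   /* simple LCG step */
        return *state;
    }

    /* Return the item at position r of a pseudo-random permutation of
     * items[0..size-1]; the permutation depends only on x, so the same
     * x always maps replica slots r = 0..size-1 to distinct items. */
    static int perm_choose(const int *items, unsigned int size,
                           unsigned int x, unsigned int r)
    {
        unsigned int seed = x ^ 0x9e3779b9u;
        unsigned int perm[64];          /* sketch assumes 1 <= size <= 64 */
        unsigned int i;

        for (i = 0; i < size; i++)
            perm[i] = i;
        /* Fisher-Yates shuffle driven by the x-seeded generator */
        for (i = size - 1; i > 0; i--) {
            unsigned int j = lcg_next(&seed) % (i + 1);
            unsigned int tmp = perm[i];

            perm[i] = perm[j];
            perm[j] = tmp;
        }
        return items[perm[r % size]];
    }

    int main(void)
    {
        int osds[] = { 10, 11, 12, 13 };
        unsigned int r;

        for (r = 0; r < 4; r++)
            printf("x=42 r=%u -> item %d\n", r, perm_choose(osds, 4, 42, r));
        return 0;
    }
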
/net/9p/
error.c
196 int bucket; local
199 for (bucket = 0; bucket < ERRHASHSZ; bucket++)
200 INIT_HLIST_HEAD(&hash_errmap[bucket]);
205 bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ;
207 hlist_add_head(&c->list, &hash_errmap[bucket]);
225 int bucket; local
229 bucket = jhash(errstr, len, 0) % ERRHASHSZ;
230 hlist_for_each_entry(c, &hash_errmap[bucket], list) {
[all...]
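
Note: error.c above computes bucket = jhash(name) % ERRHASHSZ and chains entries into hash_errmap[bucket]. A hedged userspace sketch of the same pattern follows; a djb2-style hash stands in for the kernel's jhash(), and a plain singly linked list stands in for hlist.

    #include <stdio.h>
    #include <string.h>

    #define ERRHASHSZ 32                    /* fixed bucket count */

    struct errmap {
        const char *name;
        int errno_val;
        struct errmap *next;
    };

    static struct errmap *hash_errmap[ERRHASHSZ];

    static unsigned int str_hash(const char *s)
    {
        unsigned int h = 5381;              /* djb2, stand-in for jhash */

        while (*s)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static void err_insert(struct errmap *c)
    {
        unsigned int bucket = str_hash(c->name) % ERRHASHSZ;

        c->next = hash_errmap[bucket];      /* head insert, like hlist_add_head */
        hash_errmap[bucket] = c;
    }

    static int err_lookup(const char *name)
    {
        unsigned int bucket = str_hash(name) % ERRHASHSZ;
        struct errmap *c;

        for (c = hash_errmap[bucket]; c; c = c->next)
            if (!strcmp(c->name, name))
                return c->errno_val;
        return -1;
    }

    int main(void)
    {
        struct errmap e1 = { "ENOENT", 2, NULL };
        struct errmap e2 = { "EPERM", 1, NULL };

        err_insert(&e1);
        err_insert(&e2);
        printf("ENOENT -> %d\n", err_lookup("ENOENT"));
        return 0;
    }
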
/net/sched/
sch_hhf.c
21 * as heavy-hitter, it is immediately switched to the heavy-hitter bucket.
23 * in which the heavy-hitter bucket is served with less weight.
61 * dispatched to the heavy-hitter bucket accordingly.
68 * bucket.
71 * to the non-heavy-hitter bucket.
74 * send p to the heavy-hitter bucket.
105 WDRR_BUCKET_FOR_HH = 0, /* bucket id for heavy-hitters */
106 WDRR_BUCKET_FOR_NON_HH = 1 /* bucket id for non-heavy-hitters */
344 /* Removes one skb from head of bucket. */
345 static struct sk_buff *dequeue_head(struct wdrr_bucket *bucket) argument
355 bucket_add(struct wdrr_bucket *bucket, struct sk_buff *skb) argument
368 struct wdrr_bucket *bucket; local
392 struct wdrr_bucket *bucket; local
436 struct wdrr_bucket *bucket; local
661 struct wdrr_bucket *bucket = q->buckets + i; local
[all...]
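
Note: the sch_hhf.c comments describe steering a flow to WDRR_BUCKET_FOR_HH once it is detected as a heavy hitter, and to WDRR_BUCKET_FOR_NON_HH otherwise. The sketch below shows only that classification step; an exact per-flow byte counter replaces the qdisc's multi-stage filter, and HH_THRESHOLD and MAX_FLOWS are illustrative values, not the scheduler's defaults.

    #include <stdio.h>

    enum {
        WDRR_BUCKET_FOR_HH = 0,         /* bucket id for heavy-hitters */
        WDRR_BUCKET_FOR_NON_HH = 1,     /* bucket id for non-heavy-hitters */
    };

    #define MAX_FLOWS     16
    #define HH_THRESHOLD  4096          /* bytes before a flow is a heavy hitter */

    static unsigned long flow_bytes[MAX_FLOWS];

    /* Account pkt_len to the flow and return the bucket it belongs in. */
    static int classify(unsigned int flow_id, unsigned int pkt_len)
    {
        unsigned long *bytes = &flow_bytes[flow_id % MAX_FLOWS];

        *bytes += pkt_len;
        return *bytes >= HH_THRESHOLD ? WDRR_BUCKET_FOR_HH
                                      : WDRR_BUCKET_FOR_NON_HH;
    }

    int main(void)
    {
        unsigned int i;

        /* flow 7 keeps sending and eventually crosses the threshold */
        for (i = 0; i < 4; i++)
            printf("flow 7 pkt %u -> bucket %d\n", i, classify(7, 1500));
        printf("flow 3 pkt   -> bucket %d\n", classify(3, 64));
        return 0;
    }
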
/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c
29 unsigned int bucket; member in struct:ct_iter_state
38 for (st->bucket = 0;
39 st->bucket < net->ct.htable_size;
40 st->bucket++) {
42 hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
57 if (likely(get_nulls_value(head) == st->bucket)) {
58 if (++st->bucket >= net->ct.htable_size)
62 hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
215 unsigned int bucket; member in struct:ct_expect_iter_state
224 for (st->bucket
[all...]
/net/atm/
proc.c
69 int bucket; member in struct:vcc_state
79 static int __vcc_walk(struct sock **sock, int family, int *bucket, loff_t l) argument
84 for (*bucket = 0; *bucket < VCC_HTABLE_SIZE; ++*bucket) {
85 struct hlist_head *head = &vcc_hash[*bucket];
99 if (!sk && ++*bucket < VCC_HTABLE_SIZE) {
100 sk = sk_head(&vcc_hash[*bucket]);
111 return __vcc_walk(&state->sk, state->family, &state->bucket, l) ?
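
Note: __vcc_walk() above, like the conntrack and expectation iterators elsewhere in these results, resumes a scan of a bucketed table from a saved bucket index and falls through to the next non-empty bucket when a chain runs out. A userspace sketch of that walk, with plain singly linked lists standing in for sk/hlist chains:

    #include <stdio.h>

    #define HTABLE_SIZE 8

    struct entry {
        int value;
        struct entry *next;
    };

    static struct entry *htable[HTABLE_SIZE];

    /* Return the first entry at or after *bucket, updating *bucket so the
     * caller can resume the walk where it stopped. */
    static struct entry *walk_first(unsigned int *bucket)
    {
        for (; *bucket < HTABLE_SIZE; (*bucket)++)
            if (htable[*bucket])
                return htable[*bucket];
        return NULL;
    }

    /* Step from e to the next entry, rolling over into later buckets. */
    static struct entry *walk_next(struct entry *e, unsigned int *bucket)
    {
        if (e->next)
            return e->next;
        (*bucket)++;
        return walk_first(bucket);
    }

    int main(void)
    {
        struct entry a = { 1, NULL }, b = { 2, &a }, c = { 3, NULL };
        unsigned int bucket = 0;
        struct entry *e;

        htable[1] = &b;                 /* bucket 1 chain: 2 -> 1 */
        htable[5] = &c;

        for (e = walk_first(&bucket); e; e = walk_next(e, &bucket))
            printf("bucket %u: %d\n", bucket, e->value);
        return 0;
    }
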
/net/netfilter/
nf_conntrack_standalone.c
50 unsigned int bucket; member in struct:ct_iter_state
60 for (st->bucket = 0;
61 st->bucket < net->ct.htable_size;
62 st->bucket++) {
63 n = rcu_dereference(hlist_nulls_first_rcu(&net->ct.hash[st->bucket]));
78 if (likely(get_nulls_value(head) == st->bucket)) {
79 if (++st->bucket >= net->ct.htable_size)
84 &net->ct.hash[st->bucket]));
xt_hashlimit.c
42 MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
751 unsigned int *bucket; variable
757 bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
758 if (!bucket)
761 *bucket = *pos;
762 return bucket;
768 unsigned int *bucket = (unsigned int *)v; local
770 *pos = ++(*bucket);
775 return bucket;
782 unsigned int *bucket variable
785 kfree(bucket); variable
833 unsigned int *bucket = (unsigned int *)v; local
[all...]
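
Note: the xt_hashlimit sequence above allocates an unsigned int cursor holding the bucket index in the seq_file ->start() hook, advances it and mirrors it back into *pos in ->next(), and frees it in ->stop(). A userspace sketch of that cursor lifecycle, with malloc/free in place of kmalloc/kfree and a fixed bucket count:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_BUCKETS 4

    static unsigned int *dl_seq_start(long *pos)
    {
        unsigned int *bucket;

        if (*pos >= NUM_BUCKETS)
            return NULL;
        bucket = malloc(sizeof(*bucket));
        if (!bucket)
            return NULL;
        *bucket = (unsigned int)*pos;
        return bucket;
    }

    static unsigned int *dl_seq_next(unsigned int *bucket, long *pos)
    {
        *pos = ++(*bucket);
        if (*bucket >= NUM_BUCKETS) {
            free(bucket);               /* walked off the end */
            return NULL;
        }
        return bucket;
    }

    static void dl_seq_stop(unsigned int *bucket)
    {
        free(bucket);                   /* like kfree, a no-op on NULL */
    }

    int main(void)
    {
        long pos = 0;
        unsigned int *bucket;

        for (bucket = dl_seq_start(&pos); bucket;
             bucket = dl_seq_next(bucket, &pos))
            printf("show bucket %u\n", *bucket);
        dl_seq_stop(bucket);            /* NULL here; stop covers early exits */
        return 0;
    }
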
nf_conntrack_expect.c
462 unsigned int bucket; member in struct:ct_expect_iter_state
471 for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
472 n = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
487 if (++st->bucket >= nf_ct_expect_hsize)
489 head = rcu_dereference(hlist_first_rcu(&net->ct.expect_hash[st->bucket]));
xt_recent.c
455 unsigned int bucket; member in struct:recent_iter_state
468 for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
469 list_for_each_entry(e, &t->iphash[st->bucket], list)
482 while (head == &t->iphash[st->bucket]) {
483 if (++st->bucket >= ip_list_hash_size)
485 head = t->iphash[st->bucket].next;
nf_conntrack_core.c
414 unsigned int bucket = hash_bucket(hash, net); local
421 hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
434 if (get_nulls_value(n) != bucket) {
1354 void *data, unsigned int *bucket)
1362 for (; *bucket < net->ct.htable_size; (*bucket)++) {
1363 lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
1366 if (*bucket < net->ct.htable_size) {
1367 hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
1403 unsigned int bucket local
1353 get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data, unsigned int *bucket) argument
1551 int i, bucket, rc; local
[all...]
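
Note: get_next_corpse() above takes neither one lock per bucket nor a single global lock; it indexes a small array of locks with bucket % CONNTRACK_LOCKS (lock striping). A pthreads sketch of that striping; the iter() callback mirrors the kernel's shape, but the toy int table is illustrative, and the real code also handles reference counting and per-cpu unconfirmed lists.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define HTABLE_SIZE     64
    #define CONNTRACK_LOCKS 8

    static pthread_mutex_t locks[CONNTRACK_LOCKS];
    static int table[HTABLE_SIZE];      /* 0 means an empty slot */

    /* Scan the table, holding only the stripe lock that covers the current
     * bucket; return the first entry for which iter() returns true. */
    static int *get_next_match(bool (*iter)(int *entry), unsigned int *bucket)
    {
        for (; *bucket < HTABLE_SIZE; (*bucket)++) {
            pthread_mutex_t *lockp = &locks[*bucket % CONNTRACK_LOCKS];

            pthread_mutex_lock(lockp);
            if (table[*bucket] && iter(&table[*bucket])) {
                pthread_mutex_unlock(lockp);
                return &table[*bucket];
            }
            pthread_mutex_unlock(lockp);
        }
        return NULL;
    }

    static bool is_odd(int *entry)
    {
        return *entry % 2 != 0;
    }

    int main(void)
    {
        unsigned int i, bucket = 0;
        int *match;

        for (i = 0; i < CONNTRACK_LOCKS; i++)
            pthread_mutex_init(&locks[i], NULL);
        table[3] = 4;
        table[9] = 7;
        match = get_next_match(is_odd, &bucket);
        printf("first odd entry: %d (bucket %u)\n",
               match ? *match : -1, bucket);
        return 0;
    }
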
nfnetlink_log.c
930 unsigned int bucket; member in struct:iter_state
941 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
942 struct hlist_head *head = &log->instance_table[st->bucket];
958 if (++st->bucket >= INSTANCE_BUCKETS)
962 head = &log->instance_table[st->bucket];
/net/netfilter/ipvs/
ip_vs_lblc.c
107 struct hlist_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblc_table
175 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
188 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
242 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
272 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
328 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
367 INIT_HLIST_HEAD(&tbl->bucket[i]);
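
Note: the ip_vs_lblc sweeps above walk tbl->bucket[i] with hlist_for_each_entry_safe() so that stale entries can be freed while the chain is being traversed. A userspace sketch of deletion-safe iteration: the next pointer is cached before the current node is freed, with singly linked lists standing in for hlist.

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    struct lblc_entry {
        time_t lastuse;
        struct lblc_entry *next;
    };

    /* Unlink and free every entry older than timeout seconds. */
    static void bucket_expire(struct lblc_entry **head, time_t now,
                              time_t timeout)
    {
        struct lblc_entry **pprev = head;
        struct lblc_entry *en, *next;   /* 'next' is the safe cached pointer */

        for (en = *head; en; en = next) {
            next = en->next;
            if (now - en->lastuse > timeout) {
                *pprev = next;          /* unlink first, then free is safe */
                free(en);
            } else {
                pprev = &en->next;
            }
        }
    }

    int main(void)
    {
        time_t now = time(NULL);
        struct lblc_entry *old = malloc(sizeof(*old));
        struct lblc_entry *fresh = malloc(sizeof(*fresh));
        struct lblc_entry *head;

        old->lastuse = now - 600;  old->next = NULL;
        fresh->lastuse = now;      fresh->next = old;
        head = fresh;

        bucket_expire(&head, now, 300);  /* drops 'old', keeps 'fresh' */
        printf("%s\n", head && !head->next ? "one entry left" : "unexpected");
        return 0;
    }
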
ip_vs_lblcr.c
277 struct hlist_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ member in struct:ip_vs_lblcr_table
338 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
351 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
408 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
437 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
492 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
530 INIT_HLIST_HEAD(&tbl->bucket[i]);
/net/openvswitch/
vport.c
100 struct hlist_head *bucket = hash_bucket(net, name); local
103 hlist_for_each_entry_rcu(vport, bucket, hash_node)
193 struct hlist_head *bucket; local
201 bucket = hash_bucket(ovs_dp_get_net(vport->dp),
203 hlist_add_head_rcu(&vport->hash_node, bucket);
flow_table.h
73 u32 *bucket, u32 *idx);
flow_table.c
261 u32 *bucket, u32 *last)
269 while (*bucket < ti->n_buckets) {
271 head = flex_array_get(ti->buckets, *bucket);
280 (*bucket)++;
260 ovs_flow_tbl_dump_next(struct table_instance *ti, u32 *bucket, u32 *last) argument
/net/mac80211/
mesh_pathtbl.c
338 struct hlist_head *bucket; local
341 bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
342 hlist_for_each_entry_rcu(node, bucket, list) {
506 struct hlist_head *bucket; local
525 bucket = &tbl->hash_buckets[hash_idx];
529 hlist_for_each_entry(node, bucket, list) {
558 hlist_add_head_rcu(&new_node->list, bucket);
640 struct hlist_head *bucket; local
675 bucket = &tbl->hash_buckets[hash_idx];
680 hlist_for_each_entry(node, bucket, list) {
860 struct hlist_head *bucket; local
[all...]
/net/ipv6/
ping.c
208 int bucket = ((struct ping_iter_state *) seq->private)->bucket; local
212 ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
ip6_flowlabel.c
689 int bucket; member in struct:ip6fl_iter_state
700 for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
701 for_each_fl_rcu(state->bucket, fl) {
722 if (++state->bucket <= FL_HASH_MASK) {
723 for_each_fl_rcu(state->bucket, fl) {
/net/llc/
llc_proc.c
67 static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket) argument
72 while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
/net/batman-adv/
fragmentation.c
133 uint8_t bucket; local
146 bucket = seqno % BATADV_FRAG_BUFFER_COUNT;
159 chain = &orig_node->fragments[bucket];
/net/core/
net-procfs.c
34 unsigned int bucket; local
41 bucket = get_bucket(*pos) + 1;
42 *pos = set_bucket_offset(bucket, 1);
43 } while (bucket < NETDEV_HASHENTRIES);
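
Note: net-procfs above packs a bucket index and an in-bucket offset into the single seq_file position, so get_bucket() and set_bucket_offset() can split and rebuild the cursor. A sketch of that encoding; OFFSET_BITS is an assumed split, not the kernel's actual bit layout.

    #include <stdio.h>

    #define OFFSET_BITS 8   /* assumed: low bits offset, high bits bucket */

    static unsigned long set_bucket_offset(unsigned int bucket,
                                           unsigned int offset)
    {
        return ((unsigned long)bucket << OFFSET_BITS) | offset;
    }

    static unsigned int get_bucket(unsigned long pos)
    {
        return (unsigned int)(pos >> OFFSET_BITS);
    }

    static unsigned int get_offset(unsigned long pos)
    {
        return (unsigned int)(pos & ((1UL << OFFSET_BITS) - 1));
    }

    int main(void)
    {
        unsigned long pos = set_bucket_offset(5, 3);

        printf("bucket=%u offset=%u\n", get_bucket(pos), get_offset(pos));
        return 0;
    }
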
/net/ipv4/
tcp_ipv4.c
118 held not per host, but per port pair and TW bucket is used as state
121 If TW bucket has been already destroyed we fall back to VJ's scheme
1839 /* Clean up a referenced TCP bind bucket. */
1858 * starting from bucket given in st->bucket; when st->bucket is zero the
1871 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1877 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1932 if (++st->bucket < INET_LHTABLE_SIZE) {
1933 ilb = &tcp_hashinfo.listening_hash[st->bucket];
[all...]
raw.c
924 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
925 ++state->bucket) {
926 sk_for_each(sk, &state->h->ht[state->bucket])
945 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
946 sk = sk_head(&state->h->ht[state->bucket]);
1018 raw_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
ping.c
1018 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
1019 ++state->bucket) {
1023 hslot = &ping_table.hash[state->bucket];
1049 return ping_get_first(seq, state->bucket + 1);
1066 state->bucket = 0;
1101 int bucket)
1111 bucket, src, srcp, dest, destp, sp->sk_state,
1131 ping_v4_format_sock(v, seq, state->bucket);
1100 ping_v4_format_sock(struct sock *sp, struct seq_file *f, int bucket) argument
