Searched refs:head (Results 76 - 100 of 204) sorted by relevance


/net/irda/
wrapper.c 251 rx_buff->head = newskb->data; /* NOT newskb->head */
269 rx_buff->data = rx_buff->head;
308 rx_buff->data = rx_buff->head;
/net/sunrpc/
sched.c 162 * Swapper tasks always get inserted at the head of the queue.
578 struct list_head *head; local
581 head = &queue->tasks[queue->maxpriority];
583 while (!list_empty(head)) {
585 task = list_first_entry(head,
590 if (head == &queue->tasks[0])
592 head--;
607 struct list_head *head; local
610 head = &queue->tasks[queue->maxpriority];
612 while (!list_empty(head)) {
[all...]
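
The sched.c hits above show the RPC wait-queue scan: head starts at &queue->tasks[queue->maxpriority] and is decremented toward &queue->tasks[0] until a non-empty list turns up. A minimal userspace sketch of the same top-down walk, using invented task/wait_queue types rather than the real sunrpc structures:

    #include <stdio.h>

    /* Simplified stand-ins; not the real sunrpc types. */
    struct task {
        struct task *next;        /* per-priority singly linked queue */
        const char *name;
    };

    struct wait_queue {
        unsigned int maxpriority; /* highest valid index into tasks[] */
        struct task *tasks[4];    /* tasks[0] is the lowest priority level */
    };

    /* Scan from the highest-priority list down to tasks[0] and return the
     * first queued task, mirroring the loop in the sched.c hits. */
    static struct task *find_first_task(struct wait_queue *q)
    {
        struct task **head = &q->tasks[q->maxpriority];

        for (;;) {
            if (*head != NULL)
                return *head;
            if (head == &q->tasks[0])
                return NULL;      /* nothing queued at any priority */
            head--;
        }
    }

    int main(void)
    {
        struct task t = { NULL, "low-prio task" };
        struct wait_queue q = { .maxpriority = 3 };

        q.tasks[0] = &t;          /* only the lowest level has work queued */
        struct task *found = find_first_task(&q);
        printf("found: %s\n", found ? found->name : "none");
        return 0;
    }
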
svcsock.c 190 /* send head */
191 if (slen == xdr->head[0].iov_len)
194 xdr->head[0].iov_len, flags);
195 if (len != xdr->head[0].iov_len)
197 slen -= xdr->head[0].iov_len;
271 svsk, xdr->head[0].iov_base, xdr->head[0].iov_len,
590 rqstp->rq_arg.head[0].iov_base = skb->data +
592 rqstp->rq_arg.head[0].iov_len = len;
599 if (len <= rqstp->rq_arg.head[
[all...]
cache.c 62 struct cache_head **head, **hp; local
65 head = &detail->hash_table[hash];
69 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
96 for (hp=head; *hp != NULL ; hp = &(*hp)->next) {
112 new->next = *head;
113 *head = new;
127 static void cache_fresh_locked(struct cache_head *head, time_t expiry) argument
129 head->expiry_time = expiry;
130 head->last_refresh = seconds_since_boot();
132 set_bit(CACHE_VALID, &head
135 cache_fresh_unlocked(struct cache_head *head, struct cache_detail *detail) argument
151 struct cache_head **head; local
[all...]
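
The cache.c hits walk a hash chain through a pointer-to-pointer (hp = &(*hp)->next) and push new entries on the front of the bucket (new->next = *head; *head = new;). A small standalone sketch of that idiom, with a simplified cache_entry type standing in for struct cache_head:

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct cache_head: one singly linked hash chain. */
    struct cache_entry {
        struct cache_entry *next;
        int key;
    };

    /* Walking through a pointer-to-pointer means an entry can be inserted or
     * unlinked without special-casing the bucket head. */
    static struct cache_entry *lookup_or_insert(struct cache_entry **head, int key)
    {
        struct cache_entry **hp;

        for (hp = head; *hp != NULL; hp = &(*hp)->next) {
            if ((*hp)->key == key)
                return *hp;               /* already cached */
        }

        struct cache_entry *new = calloc(1, sizeof(*new));
        if (!new)
            return NULL;
        new->key = key;
        new->next = *head;                /* push onto the front of the chain */
        *head = new;
        return new;
    }

    int main(void)
    {
        struct cache_entry *bucket = NULL;

        lookup_or_insert(&bucket, 7);
        lookup_or_insert(&bucket, 7);     /* second call finds the cached entry */
        printf("head of chain holds key %d\n", bucket->key);
        return 0;
    }
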
svc_xprt.c 435 space += rqstp->rq_res.head[0].iov_len;
470 rqstp->rq_res.head[0].iov_len = 0;
602 /* Make arg->head point to first page and arg->pages point to rest */
604 arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
605 arg->head[0].iov_len = PAGE_SIZE;
837 xb->len = xb->head[0].iov_len +
1095 /* back up head to the start of the buffer and copy */
1096 skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1097 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
1116 rqstp->rq_arg.head[
[all...]
auth_generic.c 117 generic_free_cred_callback(struct rcu_head *head) argument
119 struct rpc_cred *cred = container_of(head, struct rpc_cred, cr_rcu);
auth_unix.c 99 unx_free_cred_callback(struct rcu_head *head) argument
101 struct unx_cred *unx_cred = container_of(head, struct unx_cred, uc_base.cr_rcu);
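
Both callbacks above receive only the embedded rcu_head and use container_of() to recover the enclosing credential before freeing it. A userspace sketch of the idiom, with a local container_of macro and the call_rcu() machinery reduced to a direct callback invocation (an illustration, not the real RCU API):

    #include <stdio.h>
    #include <stddef.h>

    /* Local stand-ins for the kernel helpers. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
        void (*func)(struct rcu_head *head);
    };

    struct cred {
        int uid;
        struct rcu_head rcu;      /* embedded callback handle, like cr_rcu */
    };

    /* The callback walks back from the embedded member to the whole object,
     * exactly as generic_free_cred_callback()/unx_free_cred_callback() do. */
    static void free_cred_callback(struct rcu_head *head)
    {
        struct cred *cred = container_of(head, struct cred, rcu);
        printf("freeing cred for uid %d\n", cred->uid);
    }

    int main(void)
    {
        struct cred c = { .uid = 1000 };

        /* In the kernel this would be call_rcu(&c.rcu, free_cred_callback);
         * here the callback simply runs once the "grace period" is over. */
        c.rcu.func = free_cred_callback;
        c.rcu.func(&c.rcu);
        return 0;
    }
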
/net/wimax/
op-msg.c 186 struct nlmsghdr *nlh = (void *) msg->head;
208 struct nlmsghdr *nlh = (void *) msg->head;
229 struct nlmsghdr *nlh = (void *) msg->head;
/net/batman-adv/
network-coding.c 288 struct hlist_head *head; local
297 head = &hash->table[i];
300 hlist_for_each_entry_rcu(orig_node, head, hash_entry)
322 struct hlist_head *head; local
329 head = &hash->table[i];
334 hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
442 struct hlist_head *head; local
450 head = &hash->table[index];
453 hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
570 struct hlist_head *head; local
1750 struct hlist_head *head; local
[all...]
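
The network-coding.c hits visit every bucket of a hash table and then every entry hanging off each bucket head (hlist_for_each_entry_rcu under rcu_read_lock in the real code). A simplified sketch of the two-level walk using plain singly linked buckets:

    #include <stdio.h>

    #define HASH_SIZE 4

    /* Simplified bucket entries; the real code chains struct hlist_node. */
    struct node {
        struct node *next;
        int id;
    };

    /* Visit each bucket head, then each entry on its chain. */
    static void dump_buckets(struct node *table[HASH_SIZE])
    {
        for (int i = 0; i < HASH_SIZE; i++)
            for (struct node *n = table[i]; n != NULL; n = n->next)
                printf("bucket %d: node %d\n", i, n->id);
    }

    int main(void)
    {
        struct node a = { NULL, 1 }, b = { &a, 2 };
        struct node *table[HASH_SIZE] = { [2] = &b };

        dump_buckets(table);
        return 0;
    }
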
/net/ipv4/
inet_connection_sock.c 107 struct inet_bind_hashbucket *head; local
127 head = &hashinfo->bhash[inet_bhashfn(net, rover,
129 spin_lock(&head->lock);
130 inet_bind_bucket_for_each(tb, &head->chain)
155 spin_unlock(&head->lock);
181 head = &hashinfo->bhash[inet_bhashfn(net, snum,
183 spin_lock(&head->lock);
184 inet_bind_bucket_for_each(tb, &head->chain)
208 spin_unlock(&head->lock);
219 net, head, snu
[all...]
fib_frontend.c 115 struct hlist_head *head; local
123 head = &net->ipv4.fib_table_hash[h];
124 hlist_for_each_entry_rcu(tb, head, tb_hlist) {
139 struct hlist_head *head; local
143 head = &net->ipv4.fib_table_hash[h];
144 hlist_for_each_entry(tb, head, tb_hlist)
658 struct hlist_head *head; local
670 head = &net->ipv4.fib_table_hash[h];
671 hlist_for_each_entry(tb, head, tb_hlist) {
1117 struct hlist_head *head; local
[all...]
udp_diag.c 107 if (hlist_nulls_empty(&hslot->head))
111 sk_nulls_for_each(sk, node, &hslot->head) {
xfrm4_mode_tunnel.c 130 #define for_each_input_rcu(head, handler) \
131 for (handler = rcu_dereference(head); \
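
for_each_input_rcu() is a small macro that walks a singly linked handler chain, loading each pointer through rcu_dereference() so it is safe under rcu_read_lock(). A userspace approximation without the RCU accessors (the handler type and names here are invented for illustration):

    #include <stdio.h>

    struct handler {
        struct handler *next;
        int (*func)(int pkt);     /* returns nonzero if the packet was consumed */
    };

    /* Simplified version of the for_each_input_rcu idiom, minus rcu_dereference(). */
    #define for_each_input(head, h) \
        for ((h) = (head); (h) != NULL; (h) = (h)->next)

    static int ignore_pkt(int pkt) { printf("ignore %d\n", pkt); return 0; }
    static int accept_pkt(int pkt) { printf("accept %d\n", pkt); return 1; }

    int main(void)
    {
        struct handler second = { NULL, accept_pkt };
        struct handler first  = { &second, ignore_pkt };
        struct handler *h;

        for_each_input(&first, h)  /* walk until a handler claims the packet */
            if (h->func(42))
                break;
        return 0;
    }
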
fib_trie.c 350 static void __alias_free_mem(struct rcu_head *head) argument
352 struct fib_alias *fa = container_of(head, struct fib_alias, rcu);
361 static void __leaf_free_rcu(struct rcu_head *head) argument
363 struct leaf *l = container_of(head, struct leaf, rcu);
385 static void __tnode_free_rcu(struct rcu_head *head) argument
387 struct tnode *tn = container_of(head, struct tnode, rcu);
913 struct hlist_head *head = &l->list; local
916 hlist_for_each_entry_rcu(li, head, hlist)
933 static void insert_leaf_info(struct hlist_head *head, struct leaf_info *new) argument
937 if (hlist_empty(head)) {
1709 trie_flush_list(struct list_head *head) argument
2188 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; local
2232 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; local
2284 struct hlist_head *head = &net->ipv4.fib_table_hash[h]; local
[all...]
/net/netlink/
af_netlink.c 274 ring->head = 0;
435 return netlink_lookup_frame(ring, ring->head, status);
444 prev = ring->head ? ring->head - 1 : ring->frame_max;
450 ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0;
455 unsigned int head = ring->head, pos = head; local
860 struct hlist_head *head; local
980 struct hlist_head *head; local
1187 struct hlist_head *head; local
[all...]
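
The af_netlink.c hits advance the mmap ring's head index with wraparound (head + 1, or back to 0 once frame_max is reached) and compute the previous slot the opposite way. A self-contained sketch of just that index arithmetic (struct ring here is a reduced stand-in for the netlink ring bookkeeping):

    #include <stdio.h>

    struct ring {
        unsigned int head;        /* next frame to use */
        unsigned int frame_max;   /* last valid frame index */
    };

    /* ring->head = ring->head != ring->frame_max ? ring->head + 1 : 0; */
    static void ring_advance(struct ring *r)
    {
        r->head = r->head != r->frame_max ? r->head + 1 : 0;
    }

    /* prev = ring->head ? ring->head - 1 : ring->frame_max; */
    static unsigned int ring_prev(const struct ring *r)
    {
        return r->head ? r->head - 1 : r->frame_max;
    }

    int main(void)
    {
        struct ring r = { .head = 0, .frame_max = 3 };

        for (int i = 0; i < 6; i++) {
            printf("head=%u prev=%u\n", r.head, ring_prev(&r));
            ring_advance(&r);
        }
        return 0;
    }
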
/net/sched/
act_api.c 575 struct tc_action *head = NULL, *act, *act_prev = NULL; local
589 if (head == NULL)
590 head = act;
595 return head;
598 if (head != NULL)
599 tcf_action_destroy(head, bind);
850 struct tc_action *head = NULL, *act, *act_prev = NULL; local
871 if (head == NULL)
872 head = act;
879 ret = act_get_notify(net, portid, n, head, even
[all...]
/net/core/
dst.c 68 struct dst_entry *dst, *next, head; local
69 struct dst_entry *last = &head;
114 dst_busy_list = head.next;
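
dst.c uses a dummy head declared on the stack and appends surviving entries after it, so the rebuilt list can be published as head.next without special-casing an empty result. A sketch of the pattern with an invented entry type (the real gc logic around dst_busy_list is much more involved):

    #include <stdio.h>

    struct entry {
        struct entry *next;
        int busy;
    };

    static struct entry *busy_list;   /* list being rebuilt, like dst_busy_list */

    static void collect_busy(struct entry *work)
    {
        struct entry head, *last = &head;   /* on-stack dummy head */

        for (struct entry *e = work, *next; e != NULL; e = next) {
            next = e->next;
            if (e->busy) {
                last->next = e;       /* keep: append after the dummy head */
                last = e;
            } else {
                printf("dropping idle entry\n");
            }
        }
        last->next = NULL;
        busy_list = head.next;        /* publish the result, skipping the dummy */
    }

    int main(void)
    {
        struct entry c = { NULL, 1 }, b = { &c, 0 }, a = { &b, 1 };

        collect_busy(&a);
        for (struct entry *e = busy_list; e != NULL; e = e->next)
            printf("still busy: %d\n", e->busy);
        return 0;
    }
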
/net/ipv6/
ip6_fib.c 226 struct hlist_head *head; local
233 head = &net->ipv6.fib_table_hash[h];
234 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
365 struct hlist_head *head; local
398 head = &net->ipv6.fib_table_hash[h];
399 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
1521 struct hlist_head *head; local
1526 head = &net->ipv6.fib_table_hash[h];
1527 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
1540 struct hlist_head *head; local
[all...]
/net/bridge/
br_if.c 113 static void destroy_nbp_rcu(struct rcu_head *head) argument
116 container_of(head, struct net_bridge_port, rcu);
166 void br_dev_delete(struct net_device *dev, struct list_head *head) argument
178 unregister_netdevice_queue(br->dev, head);
/net/netfilter/
nf_conntrack_helper.c 288 list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
296 list_del_rcu(&n->head);
308 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
326 list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
core.c 127 unsigned int nf_iterate(struct list_head *head, argument
142 list_for_each_entry_continue_rcu((*elemp), head, list) {
/net/sunrpc/auth_gss/
auth_gss.c 1026 gss_free_ctx_callback(struct rcu_head *head) argument
1028 struct gss_cl_ctx *ctx = container_of(head, struct gss_cl_ctx, gc_rcu);
1046 gss_free_cred_callback(struct rcu_head *head) argument
1048 struct gss_cred *gss_cred = container_of(head, struct gss_cred, gc_base.cr_rcu);
1189 req->rq_snd_buf.head[0].iov_base);
1356 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1366 /* guess whether we're in the head or the tail: */
1370 iov = snd_buf->head;
1448 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1462 * head whe
1620 struct kvec *head = ((struct rpc_rqst *)rqstp)->rq_rcv_buf.head; local
[all...]
gss_krb5_crypto.c 410 /* Worst case is 4 fragments: head, end of page 1, start
414 page_pos = desc->pos - outbuf->head[0].iov_len;
502 /* Worst case is 4 fragments: head, end of page 1, start
564 * The server svcauth_gss code ensures that both the head and the
586 p = buf->head[0].iov_base + base;
588 memmove(p + shiftlen, p, buf->head[0].iov_len - base);
590 buf->head[0].iov_len += shiftlen;
675 gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
681 buf->tail[0].iov_base = buf->head[0].iov_base
682 + buf->head[
[all...]
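
The gss_krb5_crypto.c hits open a gap inside the head kvec by memmove-ing the existing bytes right by shiftlen and then growing iov_len, which relies on the backing buffer having spare room past the current length. A standalone sketch of that shift (kvec_like and shift_right are illustrative names, not the kernel API):

    #include <stdio.h>
    #include <string.h>

    struct kvec_like {
        char  *iov_base;
        size_t iov_len;
    };

    /* Slide the bytes at [base, iov_len) right by shiftlen to open a gap,
     * then grow iov_len -- the memmove pattern shown above. */
    static void shift_right(struct kvec_like *v, size_t base, size_t shiftlen)
    {
        char *p = v->iov_base + base;

        memmove(p + shiftlen, p, v->iov_len - base);
        memset(p, '.', shiftlen);     /* filler; the caller overwrites the gap */
        v->iov_len += shiftlen;
    }

    int main(void)
    {
        char storage[32] = "rpc-header|payload";
        struct kvec_like head = { storage, strlen(storage) };

        shift_right(&head, 11, 4);    /* make 4 bytes of room before "payload" */
        printf("%.*s\n", (int)head.iov_len, head.iov_base);
        return 0;
    }
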
/net/llc/
llc_proc.c 43 struct hlist_nulls_head *head = &sap->sk_laddr_hash[i]; local
46 sk_nulls_for_each(sk, node, head) {
/net/openvswitch/
flow.c 301 struct hlist_head *head = flex_array_get(table->buckets, i); local
305 hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
334 struct hlist_head *head; local
341 head = flex_array_get(table->buckets, *bucket);
342 hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
368 struct hlist_head *head; local
370 head = flex_array_get(old->buckets, i);
372 hlist_for_each_entry(flow, head, hash_node[old_ver])
771 struct hlist_head *head; local
776 head
789 struct hlist_head *head; local
[all...]

