Searched refs:node (Results 26 - 50 of 111) sorted by relevance

/net/tipc/
Makefile    10 netlink.o node.o node_subscr.o \
name_table.c    47 * @node_list: circular list of publications made by own node
56 * (The cluster and node lists may be empty.)
104 * @local_publ_count: number of publications issued by this node
123 u32 scope, u32 node, u32 port_ref,
136 publ->node = node;
250 u32 scope, u32 node, u32 port, u32 key)
272 (!publ->node || (publ->node == node)))
122 publ_create(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port_ref, u32 key) argument
248 tipc_nameseq_insert_publ(struct name_seq *nseq, u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) argument
369 tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, u32 node, u32 ref, u32 key) argument
481 tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, u32 scope, u32 node, u32 port, u32 key) argument
502 tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node, u32 ref, u32 key) argument
536 u32 node = 0; local
[all...]
bcast.h    45 * struct tipc_node_map - set of node identifiers
47 * @map: bitmap of node identifiers that are in the set
57 * struct tipc_port_list - set of node local destination ports
74 * tipc_nmap_equal - test for equality of node maps
99 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
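
The bcast.h hits describe a set type: a member count plus a bitmap of node identifiers. A rough illustration of that idea using generic kernel bitmap helpers (hypothetical demo_* names and capacity; TIPC's own struct lays its bitmap out differently):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_MAX_NODES 256	/* assumed capacity, not TIPC's limit */

/* Hypothetical node set: a count plus a bitmap of node identifiers. */
struct demo_node_map {
	u32 count;
	DECLARE_BITMAP(map, DEMO_MAX_NODES);
};

static void demo_nmap_add(struct demo_node_map *nm, u32 node)
{
	if (node < DEMO_MAX_NODES && !test_and_set_bit(node, nm->map))
		nm->count++;
}

static void demo_nmap_remove(struct demo_node_map *nm, u32 node)
{
	if (node < DEMO_MAX_NODES && test_and_clear_bit(node, nm->map))
		nm->count--;
}

static bool demo_nmap_equal(const struct demo_node_map *a,
			    const struct demo_node_map *b)
{
	/* Cheap count check first, then compare the bitmaps themselves. */
	return a->count == b->count &&
	       bitmap_equal(a->map, b->map, DEMO_MAX_NODES);
}
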
subscr.c    94 u32 node)
105 sub->evt.port.node = htohl(node, sub->swap);
134 u32 node, int must)
141 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
92 subscr_send_event(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, u32 event, u32 port_ref, u32 node) argument
132 tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, u32 found_upper, u32 event, u32 port_ref, u32 node, int must) argument
/net/caif/
cfmuxl.c    70 list_add_rcu(&dn->node, &muxl->frml_list);
78 list_for_each_entry_rcu(lyr, list, node) {
96 list_del_rcu(&old->node);
98 list_add_rcu(&up->node, &muxl->srvl_list);
116 list_del_rcu(&dn->node);
168 list_del_rcu(&up->node);
253 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
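
The cfmuxl.c hits are the standard RCU-protected list idiom: writers serialize with a lock and use list_add_rcu()/list_del_rcu(), readers walk with list_for_each_entry_rcu(), and freeing is deferred past a grace period. A minimal sketch of that idiom with hypothetical demo_* names rather than CAIF's own types:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical layer type; the embedded list_head is the "node"
 * member these searches keep matching. */
struct demo_layer {
	int id;
	struct list_head node;		/* linkage into demo_list */
	struct rcu_head rcu;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

static void demo_add(struct demo_layer *lyr)
{
	spin_lock(&demo_lock);
	list_add_rcu(&lyr->node, &demo_list);	/* publish to lockless readers */
	spin_unlock(&demo_lock);
}

static void demo_del(struct demo_layer *lyr)
{
	spin_lock(&demo_lock);
	list_del_rcu(&lyr->node);	/* unlink; readers may still see it */
	spin_unlock(&demo_lock);
	kfree_rcu(lyr, rcu);		/* free only after a grace period */
}

static bool demo_has_id(int id)
{
	struct demo_layer *lyr;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(lyr, &demo_list, node) {
		if (lyr->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}
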
/net/irda/
discovery.c    62 discovery_t *discovery, *node; local
65 /* Set time of first discovery if node is new (see below) */
78 node = discovery;
83 if ((node->data.saddr == new->data.saddr) &&
84 ((node->data.daddr == new->data.daddr) ||
85 (strcmp(node->data.info, new->data.info) == 0)))
90 hashbin_remove_this(cachelog, (irda_queue_t *) node);
92 if (get_unaligned((__u16 *)node->data.hints) == get_unaligned((__u16 *)new->data.hints))
93 /* Set time of first discovery for this node */
94 new->firststamp = node
[all...]
irias_object.c    129 struct ias_object *node; local
135 node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj);
136 if (!node)
157 struct ias_attrib *node; local
164 node = hashbin_remove_this(obj->attribs, (irda_queue_t *) attrib);
165 if (!node)
169 __irias_delete_attrib(node);
176 node = (struct ias_attrib *) hashbin_get_first(obj->attribs);
177 if (cleanobject && !node)
/net/802/
psnap.c    37 list_for_each_entry_rcu(p, &snap_list, node) {
145 list_add_rcu(&proto->node, &snap_list);
159 list_del_rcu(&proto->node);
garp.c    158 attr = rb_entry(parent, struct garp_attr, node);
179 attr = rb_entry(parent, struct garp_attr, node);
198 rb_link_node(&attr->node, parent, p);
199 rb_insert_color(&attr->node, &app->gid);
205 rb_erase(&attr->node, &app->gid);
385 struct rb_node *node, *next; local
388 for (node = rb_first(&app->gid);
389 next = node ? rb_next(node) : NULL, node !
[all...]
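
The garp.c hits (like the gen_estimator.c and rxrpc ones further down) are the usual hand-rolled rbtree insert: walk down to a leaf comparing keys via rb_entry(), then rb_link_node() plus rb_insert_color(); teardown uses the rb_first()/rb_next()/rb_erase() loop shown above. A self-contained sketch under the same conventions, with a hypothetical demo_attr type:

#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical keyed attribute with an embedded rb_node. */
struct demo_attr {
	struct rb_node node;
	u32 key;
};

static struct rb_root demo_root = RB_ROOT;

static int demo_insert(struct demo_attr *new)
{
	struct rb_node **p = &demo_root.rb_node, *parent = NULL;
	struct demo_attr *attr;

	/* Walk down to the insertion point, ordered on ->key. */
	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct demo_attr, node);
		if (new->key < attr->key)
			p = &parent->rb_left;
		else if (new->key > attr->key)
			p = &parent->rb_right;
		else
			return -EEXIST;		/* duplicate key */
	}
	rb_link_node(&new->node, parent, p);	/* hook into the tree */
	rb_insert_color(&new->node, &demo_root);/* rebalance */
	return 0;
}

static void demo_flush(void)
{
	struct rb_node *node, *next;

	/* Grab the successor before erasing the current node, the same
	 * teardown idiom the garp.c snippet above is taken from. */
	for (node = rb_first(&demo_root); node; node = next) {
		next = rb_next(node);
		rb_erase(node, &demo_root);
		kfree(rb_entry(node, struct demo_attr, node));
	}
}
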
/net/llc/
llc_core.c    53 list_for_each_entry(sap, &llc_sap_list, node)
107 list_add_tail_rcu(&sap->node, &llc_sap_list);
127 list_del_rcu(&sap->node);
llc_proc.c    40 list_for_each_entry_rcu(sap, &llc_sap_list, node) {
44 struct hlist_nulls_node *node; local
46 sk_nulls_for_each(sk, node, head) {
69 struct hlist_nulls_node *node; local
73 sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
103 list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
/net/hsr/
Kconfig    9 DANH ("Doubly attached node implementing HSR"). For this to work,
11 and it must be connected as a node in a ring network together with
16 instant fail-over network. Each HSR node in the ring acts like a
/net/ipv6/
inet6_hashtables.c    110 const struct hlist_nulls_node *node; local
122 sk_nulls_for_each_rcu(sk, node, &head->chain) {
136 if (get_nulls_value(node) != slot)
177 const struct hlist_nulls_node *node; local
188 sk_nulls_for_each(sk, node, &ilb->head) {
211 if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
258 const struct hlist_nulls_node *node; local
264 sk_nulls_for_each(sk2, node, &head->chain) {
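
The inet6_hashtables.c hits are the lockless nulls-list lookup: walk the chain under RCU and, if the terminating nulls value names a different slot, restart, because the entry being walked was rehashed onto another chain mid-lookup. A generic sketch of that lookup with plain hlist_nulls helpers instead of the sk_nulls wrappers (hypothetical demo_* names):

#include <linux/list_nulls.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#define DEMO_HASH_BITS	8
#define DEMO_HASH_SIZE	(1U << DEMO_HASH_BITS)

/* Hypothetical hash-table entry; the real code iterates struct sock
 * chains, but the nulls mechanics are the same. */
struct demo_ent {
	u32 key;
	struct hlist_nulls_node node;
};

static struct hlist_nulls_head demo_hash[DEMO_HASH_SIZE];

static void demo_hash_init(void)
{
	unsigned int i;

	/* Each chain's nulls marker encodes that chain's slot number. */
	for (i = 0; i < DEMO_HASH_SIZE; i++)
		INIT_HLIST_NULLS_HEAD(&demo_hash[i], i);
}

static struct demo_ent *demo_lookup(u32 key)
{
	unsigned int slot = key & (DEMO_HASH_SIZE - 1);
	const struct hlist_nulls_node *pos;
	struct demo_ent *ent;

	/* Caller must hold rcu_read_lock(). */
begin:
	hlist_nulls_for_each_entry_rcu(ent, pos, &demo_hash[slot], node) {
		if (ent->key == key)
			return ent;
	}
	/*
	 * The chain ends in a nulls marker encoding its slot number.
	 * Ending on a different slot's marker means the entry we were
	 * walking moved to another chain under us, so restart -- the
	 * same re-check the snippets above perform.
	 */
	if (get_nulls_value(pos) != slot)
		goto begin;
	return NULL;
}
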
ip6_fib.c    14 * Yuji SEKIYA @USAGI: Support default route on router node;
333 w->node = w->root;
416 * return the appropriate node for a routing tree "add" operation
418 * node.
434 /* insert node in tree */
461 /* clean up an intermediate node */
484 /* We should not create new node because
501 * Create new leaf node without children.
525 * we've to insert an intermediate node on the list
526 * this new node wil
1902 struct hlist_node *node; local
[all...]
/net/core/
gen_estimator.c    93 struct rb_node node; member in struct:gen_estimator
158 e = rb_entry(parent, struct gen_estimator, node);
165 rb_link_node(&est->node, parent, p);
166 rb_insert_color(&est->node, &est_root);
178 e = rb_entry(p, struct gen_estimator, node);
273 rb_erase(&e->node, &est_root);
/net/sunrpc/
svc.c    45 SVC_POOL_PERNODE /* one pool per numa node */
59 unsigned int *pool_to; /* maps pool id to cpu or node */
60 unsigned int *to_pool; /* maps cpu or node to pool id */
127 unsigned int node; local
132 * so split pools on NUMA node boundaries
137 node = first_online_node;
138 if (nr_cpus_node(node) > 2) {
212 unsigned int node; local
219 for_each_node_with_cpus(node) {
222 m->to_pool[node]
320 unsigned int node = m->pool_to[pidx]; local
568 svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) argument
607 svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) argument
716 int node; local
[all...]
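
The svc.c hits set up per-NUMA-node thread pools and keep two arrays mapping pool id to node and back. A hedged sketch of that mapping step only (hypothetical demo_* names; the real svc_pool_map supports other modes, such as per-CPU pools):

#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Hypothetical pool map: one pool per NUMA node that owns CPUs. */
struct demo_pool_map {
	unsigned int npools;
	unsigned int *pool_to;	/* maps pool id to node */
	unsigned int *to_pool;	/* maps node to pool id */
};

static int demo_pool_map_init_pernode(struct demo_pool_map *m)
{
	unsigned int pidx = 0;
	int node;

	m->pool_to = kcalloc(nr_node_ids, sizeof(*m->pool_to), GFP_KERNEL);
	m->to_pool = kcalloc(nr_node_ids, sizeof(*m->to_pool), GFP_KERNEL);
	if (!m->pool_to || !m->to_pool) {
		kfree(m->pool_to);
		kfree(m->to_pool);
		return -ENOMEM;
	}

	/* for_each_node_with_cpus() only visits nodes that own CPUs,
	 * so CPU-less memory-only nodes never get a pool of their own. */
	for_each_node_with_cpus(node) {
		m->pool_to[pidx] = node;
		m->to_pool[node] = pidx;
		pidx++;
	}
	m->npools = pidx;
	return 0;
}
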
/net/netfilter/
xt_qtaguid_internal.h    203 struct rb_node node; member in struct:tag_node
315 struct rb_node node; member in struct:uid_tag_data
341 struct rb_node node; member in struct:proc_qtu_data
/net/x25/
x25_link.c    272 list_add(&nb->node, &x25_neigh_list);
288 if (nb->node.next) {
289 list_del(&nb->node);
305 nb = list_entry(entry, struct x25_neigh, node);
326 nb = list_entry(entry, struct x25_neigh, node);
405 nb = list_entry(entry, struct x25_neigh, node);
/net/netlink/
af_netlink.h    52 struct rhash_head node; member in struct:netlink_sock
/net/openvswitch/
flow.c    70 int node = numa_node_id(); local
72 stats = rcu_dereference(flow->stats[node]);
74 /* Check if already have node-specific stats. */
78 if (node == 0 && unlikely(flow->stats_last_writer != node))
79 flow->stats_last_writer = node;
84 /* If the current NUMA-node is the only writer on the
87 if (unlikely(flow->stats_last_writer != node)) {
89 * stats, so we need to check again. If node-specific
94 && likely(!rcu_access_pointer(flow->stats[node]))) {
132 int node; local
159 int node; local
[all...]
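
The flow.c hits keep one stats block per NUMA node, fall back to the node 0 slot until a node-local one exists, and track stats_last_writer so a node-local block can be allocated later. A rough sketch of the read/update side only (hypothetical demo_* names; allocation and the writer-promotion logic are omitted):

#include <linux/numa.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/topology.h>
#include <linux/types.h>

struct demo_stats {
	u64 packets;
	spinlock_t lock;
};

/* Hypothetical flow with one stats slot per NUMA node; this sketch
 * assumes the node 0 slot is always allocated at flow creation. */
struct demo_flow {
	int stats_last_writer;
	struct demo_stats __rcu *stats[MAX_NUMNODES];
};

static void demo_flow_hit(struct demo_flow *flow)
{
	int node = numa_node_id();
	struct demo_stats *stats;

	/* Caller is on the packet path and already holds rcu_read_lock(). */
	stats = rcu_dereference(flow->stats[node]);
	if (!stats) {
		/* No node-local slot yet: fall back to slot 0 and remember
		 * which node wrote, so a later allocation can promote it. */
		stats = rcu_dereference(flow->stats[0]);
		if (flow->stats_last_writer != node)
			flow->stats_last_writer = node;
	}

	spin_lock(&stats->lock);
	stats->packets++;
	spin_unlock(&stats->lock);
}
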
/net/rxrpc/
ar-connection.c    92 bundle = rb_entry(p, struct rxrpc_conn_bundle, node);
121 bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);
135 rb_link_node(&bundle->node, parent, pp);
136 rb_insert_color(&bundle->node, &trans->bundles);
182 rb_erase(&bundle->node, &trans->bundles);
252 xconn = rb_entry(parent, struct rxrpc_connection, node);
268 rb_link_node(&conn->node, parent, p);
269 rb_insert_color(&conn->node, &conn->trans->client_conns);
292 xconn = rb_entry(parent, struct rxrpc_connection, node);
644 conn = rb_entry(p, struct rxrpc_connection, node);
[all...]
/net/ipx/
af_ipx.c    87 struct ipx_interface, node);
157 list_for_each_entry(i, &ipx_interfaces, node)
184 list_for_each_entry(i, &ipx_interfaces, node)
254 !memcmp(ipx_node, ipxs->node, IPX_NODE_LEN))
289 list_del(&intrfc->node);
328 list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
346 list_for_each_entry_safe(i, tmp, &ipx_interfaces, node)
368 int is_broadcast = !memcmp(ipx->ipx_dest.node, ipx_broadcast_node,
379 (is_broadcast || !memcmp(ipx->ipx_dest.node,
380 ipxs->node, IPX_NODE_LE
562 ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node) argument
[all...]
/net/ipv4/
inet_hashtables.c    70 hlist_add_head(&tb->node, &head->chain);
81 __hlist_del(&tb->node);
209 struct hlist_nulls_node *node; local
219 sk_nulls_for_each_rcu(sk, node, &ilb->head) {
242 if (get_nulls_value(node) != hash + LISTENING_NULLS_BASE)
280 const struct hlist_nulls_node *node; local
290 sk_nulls_for_each_rcu(sk, node, &head->chain) {
310 if (get_nulls_value(node) != slot)
338 const struct hlist_nulls_node *node; local
344 sk_nulls_for_each(sk2, node,
[all...]
/net/rose/
rose_route.c    52 * Add a new route to a node, and in the process add the node and the
135 * This is a new node to be inserted into the list. Find where it needs
154 /* create new node */
176 if (rose_tmpp == NULL) { /* 1st node */
293 * "Delete" a node. Strictly speaking remove a route to a node. The node
404 * Add a loopback node.
449 * Delete a loopback node
680 struct rose_node *node; local
[all...]
/net/nfc/
llcp_commands.c    140 INIT_HLIST_NODE(&sdres->node);
177 INIT_HLIST_NODE(&sdreq->node);
193 hlist_for_each_entry_safe(sdp, n, head, node) {
194 hlist_del(&sdp->node);
550 hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
553 hlist_del(&sdp->node);
580 hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
586 hlist_del(&sdreq->node);
588 hlist_add_head(&sdreq->node, &local->pending_sdreqs);
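
The llcp_commands.c hits are plain hlist bookkeeping: INIT_HLIST_NODE() before queuing, hlist_add_head() to queue, and hlist_for_each_entry_safe() so entries can be unlinked and freed during the walk. A minimal sketch with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical service-discovery record; the hlist_node member is
 * what INIT_HLIST_NODE()/hlist_add_head() above operate on. */
struct demo_sdreq {
	u8 tid;
	struct hlist_node node;
};

static HLIST_HEAD(demo_pending);

static void demo_queue(struct demo_sdreq *req)
{
	INIT_HLIST_NODE(&req->node);
	hlist_add_head(&req->node, &demo_pending);
}

static void demo_flush(void)
{
	struct demo_sdreq *req;
	struct hlist_node *n;

	/* The _safe variant keeps a cursor (n) so the current entry can
	 * be unlinked and freed while walking, as llcp_commands.c does. */
	hlist_for_each_entry_safe(req, n, &demo_pending, node) {
		hlist_del(&req->node);
		kfree(req);
	}
}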
