/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */

/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};

/**
 * struct tipc_bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bclink {
	struct tipc_link link;
	struct tipc_node node;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};

static struct tipc_bcbearer bcast_bearer;
static struct tipc_bclink bcast_link;

static struct tipc_bcbearer *bcbearer = &bcast_bearer;
static struct tipc_bclink *bclink = &bcast_link;
static struct tipc_link *bcl = &bcast_link.link;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);

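/*
 * The skb control block's "handle" field is reused by the broadcast link
 * to hold the number of nodes that have yet to acknowledge a sent buffer.
 */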
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_add(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}

void tipc_bclink_remove_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}

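/*
 * The broadcast link does not run the per-link FSM, so the "fsm_msg_cnt"
 * field of struct tipc_link is repurposed here to record the sequence
 * number of the last sent broadcast packet; mod(), less() and less_eq()
 * are the wraparound-aware sequence number helpers used by all links.
 */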
static void bclink_set_last_sent(void)
{
	if (bcl->next_out)
		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
	else
		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
	return bcl->fsm_msg_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}


/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
	return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
	struct sk_buff *buf;

	buf = bcl->first_out;
	while (buf && less_eq(buf_seqno(buf), after))
		buf = buf->next;
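	/* mod(to - after) is the (wraparound-safe) count of packets in the
	 * range (after, to]
	 */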
	tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	spin_lock_bh(&bc_lock);

	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(crs)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;

		if (crs != bcl->next_out)
			bcbuf_decr_acks(crs);
		else {
			bcbuf_set_acks(crs, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			kfree_skb(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
exit:
	spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * tipc_net_lock and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
	struct sk_buff *buf;

	/* Ignore "stale" link state info */

	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */

	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */

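	/* (a gap is treated as confirmed loss only once at least half a
	 * minimum link window of packets has accumulated behind it)
	 */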
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */

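	/* (an odd oos_state means a NACK covering this gap has already been
	 * sent by this node, or seen from another via bclink_peek_nack())
	 */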
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);

		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
				 : n_ptr->bclink.last_sent);

		spin_lock_bh(&bc_lock);
		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		spin_unlock_bh(&bc_lock);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 *
 * Only tipc_net_lock set.
 */
static void bclink_peek_nack(struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);

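	/* Starting oos_state at 2 means the increment performed next time
	 * tipc_bclink_update_link_state() runs makes it odd, suppressing
	 * this node's own NACK for the gap another node already flagged
	 */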
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;

	tipc_node_unlock(n_ptr);
}

/*
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
		kfree_skb(buf);
		goto exit;
	}

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res >= 0)) {
		bclink_set_last_sent();
		bcl->stats.queue_sz_counts++;
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
exit:
	spin_unlock_bh(&bc_lock);
	return res;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bc_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
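	/*
	 * (offsetting the count by the node's own address staggers the
	 * sequence numbers at which different nodes hit the modulus; the
	 * "node->addr & 1" index picks one of the two active unicast
	 * links back to the sending node)
	 */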

	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_send_proto_msg(
			node->active_links[node->addr & 1],
			STATE_MSG, 0, 0, 0, 0, 0);
		bcl->stats.sent_acks++;
	}
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */

		if (likely(msg_isdata(msg))) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			int ret = tipc_link_recv_fragment(&node->bclink.defragm,
							  &buf, &msg);
			if (ret < 0)
				goto unlock;
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (ret > 0)
				bcl->stats.recv_fragmented++;
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */

		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (!node->bclink.deferred_head) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(node->bclink.deferred_head);
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */

		buf = node->bclink.deferred_head;
		node->bclink.deferred_head = buf->next;
		node->bclink.deferred_size--;
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */

	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
					       &node->bclink.deferred_tail,
					       buf);
		node->bclink.deferred_size += deferred;
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	} else
		deferred = 0;

	spin_lock_bh(&bc_lock);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	spin_unlock_bh(&bc_lock);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

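	/* bpairs[] is ordered by decreasing priority (see
	 * tipc_bcbearer_sort()); each iteration subtracts the nodes a
	 * bearer pair can reach from "remains" until none are left
	 */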
	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		if (tipc_bearer_blocked(p)) {
			if (!s || tipc_bearer_blocked(s))
				continue; /* Can't use either bearer */
			b = s;
		}

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(void)
{
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	int b_index;
	int pri;

	spin_lock_bh(&bc_lock);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		struct tipc_bearer *b = &tipc_bearers[b_index];

		if (!b->active || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	spin_unlock_bh(&bc_lock);
}


int tipc_bclink_stats(char *buf, const u32 buf_size)
{
	int ret;
	struct tipc_stats *s;

	if (!bcl)
		return 0;

	spin_lock_bh(&bc_lock);

	s = &bcl->stats;

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  Window:%u packets\n",
			    bcl->name, bcl->queue_limit[0]);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX naks:%u defs:%u dups:%u\n",
			     s->recv_nacks, s->deferred_recv, s->duplicates);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX naks:%u acks:%u dups:%u\n",
			     s->sent_nacks, s->sent_acks, s->retransmitted);
	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue max:%u avg:%u\n",
			     s->link_congs, s->max_queue_sz,
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	spin_unlock_bh(&bc_lock);
	return ret;
}

int tipc_bclink_reset_stats(void)
{
	if (!bcl)
		return -ENOPROTOOPT;

	spin_lock_bh(&bc_lock);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	spin_unlock_bh(&bc_lock);
	return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	spin_lock_bh(&bc_lock);
	tipc_link_set_queue_limits(bcl, limit);
	spin_unlock_bh(&bc_lock);
	return 0;
}

void tipc_bclink_init(void)
{
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	spin_lock_init(&bcbearer->bearer.lock);
	bcl->b_ptr = &bcbearer->bearer;
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
}

void tipc_bclink_stop(void)
{
	spin_lock_bh(&bc_lock);
	tipc_link_stop(bcl);
	spin_unlock_bh(&bc_lock);

	memset(bclink, 0, sizeof(*bclink));
	memset(bcbearer, 0, sizeof(*bcbearer));
}

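/*
 * Node map helpers: a tipc_node_map is a fixed-size bit map in which
 * node n occupies bit (n % WSIZE) of word (n / WSIZE), with "count"
 * caching the number of bits currently set.
 */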

/**
 * tipc_nmap_add - add a node to a node map
 */
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
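		/* a ^ (a & b) is equivalent to a & ~b: keep only the bits
		 * set in A but not in B
		 */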
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}

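/*
 * Port list helpers: a tipc_port_list holds up to PLSIZE port numbers
 * per entry; the head entry lives in the caller's own storage, while
 * overflow entries are chained on dynamically and must later be
 * released with tipc_port_list_free().
 */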
/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
{
	struct tipc_port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;
	int cnt = pl_ptr->count;

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				pr_warn("Incomplete multicast delivery, no memory\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */
void tipc_port_list_free(struct tipc_port_list *pl_ptr)
{
	struct tipc_port_list *item;
	struct tipc_port_list *next;

	for (item = pl_ptr->next; item; item = next) {
		next = item->next;
		kfree(item);
	}
}