/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"
#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	50	/* bcast link window size (default) */
#define BCLINK_WIN_MIN		32	/* bcast minimum link window size */

const char tipc_bclink_name[] = "broadcast-link";
/**
 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct tipc_bcbearer_pair {
	struct tipc_bearer *primary;
	struct tipc_bearer *secondary;
};

#define	BCBEARER		MAX_BEARERS
/**
 * struct tipc_bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
 * prevented through use of the spinlock "bcast_lock".
 */
struct tipc_bcbearer {
	struct tipc_bearer bearer;
	struct tipc_media media;
	struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
	struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
	struct tipc_node_map remains;
	struct tipc_node_map remains_new;
};
/**
 * struct tipc_bc_base - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @arrvq: queue of arriving multicast messages awaiting delivery
 * @inputq: queue of multicast messages ready for delivery to local sockets
 * @namedq: queue of name table update messages
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct tipc_bc_base {
	struct tipc_link *link;
	struct tipc_node node;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;
	struct sk_buff_head namedq;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
};
static struct tipc_bc_base *tipc_bc_base(struct net *net)
{
	return tipc_net(net)->bcbase;
}
/**
 * tipc_nmap_equal - test for equality of node maps
 */
static int tipc_nmap_equal(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b)
{
	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
}
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);

static void tipc_bclink_lock(struct net *net)
{
	tipc_bcast_lock(net);
}

static void tipc_bclink_unlock(struct net *net)
{
	tipc_bcast_unlock(net);
}
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
}

uint tipc_bcast_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}
static u16 bcbuf_acks(struct sk_buff *skb)
{
	return TIPC_SKB_CB(skb)->ackers;
}

static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
{
	TIPC_SKB_CB(buf)->ackers = ackers;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
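
/* Example of the ack bookkeeping above, with a hypothetical peer count
 * of three: when a packet is first broadcast, tipc_bcbearer_send()
 * seeds its counter with the number of reachable peers,
 *
 *	bcbuf_set_acks(skb, 3);
 *
 * and each peer acknowledgement handled in tipc_bclink_acknowledge()
 * calls bcbuf_decr_acks(skb). Once bcbuf_acks(skb) reaches zero, every
 * peer has seen the packet, so the buffer can be unlinked from the
 * transmit queue and freed.
 */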
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcbase->retransmit_to;
}
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}
/**
 * bclink_prepare_wakeup - prepare users for wakeup after congestion
 * @bcl: broadcast link
 * @resultq: queue for users which can be woken up
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to specified queue for wakeup
 */
static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = bcl->window + bcl->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
			continue;
		skb_unlink(skb, &bcl->wakeupq);
		skb_queue_tail(resultq, skb);
	}
}
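
/* Worked example for the budget test above, with hypothetical numbers:
 * say window = 50 and backlog[imp].limit = 50, giving lim = 100. Each
 * waiting user first adds its pending chain size to pnd[imp]; a user
 * with chain_sz = 10 is moved to resultq only if that running total
 * plus the current backlog length still falls below 100. Users walked
 * later at the same importance level see the budget already consumed
 * through pnd[imp] and stay on the wait queue until the next pass.
 */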
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct sk_buff_head resultq;

	skb_queue_head_init(&resultq);
	bclink_prepare_wakeup(bcl, &resultq);
	tipc_sk_rcv(net, &resultq);
}
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bcbase->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	/* This is a good location for statistical profiling */
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}
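
/* The NACK pacing above hinges on the parity of bclink.oos_state: even
 * values permit sending a NACK, and the final "oos_state++" after a
 * send makes the counter odd, muting further NACKs until new state
 * info arrives. bclink_peek_nack() below cooperates by setting
 * oos_state to 2 when a peer has already requested the same missing
 * packet, so this node's next pass sees an odd count and stays quiet.
 */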
void tipc_bclink_sync_state(struct tipc_node *n, struct tipc_msg *hdr)
{
	u16 last = msg_last_bcast(hdr);
	int mtyp = msg_type(hdr);

	if (unlikely(msg_user(hdr) != LINK_PROTOCOL))
		return;
	if (mtyp == STATE_MSG) {
		tipc_bclink_update_link_state(n, last);
		return;
	}
	/* Compatibility: older nodes don't know BCAST_PROTOCOL synchronization,
	 * and transfer synch info in LINK_PROTOCOL messages.
	 */
	if (tipc_node_is_up(n))
		return;
	if ((mtyp != RESET_MSG) && (mtyp != ACTIVATE_MSG))
		return;
	n->bclink.last_sent = last;
	n->bclink.last_in = last;
	n->bclink.oos_state = 0;
}
/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}
/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
 *                   and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_link *l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq, inputq, rcvq;
	int rc = 0;

	__skb_queue_head_init(&rcvq);
	__skb_queue_head_init(&xmitq);
	skb_queue_head_init(&inputq);

	/* Prepare message clone for local node */
	if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
		return -EHOSTUNREACH;

	tipc_bcast_lock(net);
	if (tipc_link_bc_peers(l))
		rc = tipc_link_xmit(l, list, &xmitq);
	bclink_set_last_sent(net);
	tipc_bcast_unlock(net);

	/* Don't send to local node if adding to link failed */
	if (unlikely(rc)) {
		__skb_queue_purge(&rcvq);
		return rc;
	}

	/* Broadcast to all nodes, including local node */
	tipc_bcbearer_xmit(net, &xmitq);
	tipc_sk_mcast_rcv(net, &rcvq, &inputq);
	__skb_queue_purge(list);
	return 0;
}
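
/* A minimal sketch of a caller of tipc_bcast_xmit(), assuming a chain
 * already built into a hypothetical queue "pkts" (e.g. via
 * tipc_msg_build()):
 *
 *	struct sk_buff_head pkts;
 *	int rc;
 *
 *	__skb_queue_head_init(&pkts);
 *	...
 *	rc = tipc_bcast_xmit(net, &pkts);
 *
 * On success the chain is consumed: a reassembled clone reaches local
 * sockets through tipc_sk_mcast_rcv(), while the original goes out on
 * the wire through the broadcast pseudo-bearer.
 */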
/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
 *
 * RCU is locked, no other locks set
 */
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;
	int rc;

	__skb_queue_head_init(&xmitq);

	if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
		kfree_skb(skb);
		return 0;
	}

	tipc_bcast_lock(net);
	if (msg_user(hdr) == BCAST_PROTOCOL)
		rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
	else
		rc = tipc_link_rcv(l, skb, NULL);
	tipc_bcast_unlock(net);

	if (!skb_queue_empty(&xmitq))
		tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
	return rc;
}
/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	tipc_link_bc_ack_rcv(l, acked, &xmitq);
	tipc_bcast_unlock(net);

	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
 *
 * RCU is locked, no other locks set
 */
void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
			 struct tipc_msg *hdr)
{
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bcast_lock(net);
	if (msg_type(hdr) == STATE_MSG) {
		tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
		tipc_link_bc_sync_rcv(l, hdr, &xmitq);
	} else {
		tipc_link_bc_init_rcv(l, hdr);
	}
	tipc_bcast_unlock(net);

	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_add_peer(struct net *net, u32 addr, struct tipc_link *uc_l,
			 struct sk_buff_head *xmitq)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *snd_l = tipc_bc_sndlink(net);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
	tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
	tipc_bclink_unlock(net);
}
/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
 *
 * RCU is locked, node lock is set
 */
void tipc_bcast_remove_peer(struct net *net, u32 addr,
			    struct tipc_link *rcv_l)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
	struct tipc_link *snd_l = tipc_bc_sndlink(net);
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);
	tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
	tipc_bclink_unlock(net);

	tipc_bcbearer_xmit(net, &xmitq);

	/* Any socket wakeup messages ? */
	if (!skb_queue_empty(inputq))
		tipc_sk_rcv(net, inputq);
}
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}
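
/* Example of the ACK staggering above: a node whose own address is
 * congruent to 3 modulo TIPC_MIN_LINK_WIN acknowledges at seqno 3,
 * 3 + TIPC_MIN_LINK_WIN, 3 + 2 * TIPC_MIN_LINK_WIN, and so on, while
 * its neighbours use offsets derived from their own addresses, so the
 * sender never absorbs a synchronized ACK burst after every window.
 */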
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bcbase->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bcbase->arrvq;
	inputq = &tn->bcbase->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bc_base *bclink = tn->bcbase;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}
	msg_set_mc_netid(msg, tn->net_id);

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
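
/* Worked example for the bearer loop above, with a hypothetical
 * topology: suppose bcast_nodes = {n1, n2, n3} and the highest-priority
 * pair's bearer reaches nodes = {n1, n2}. tipc_nmap_diff() then leaves
 * remains_new = {n3}; since the count dropped, the packet is sent on
 * that bearer and the loop continues with remains = {n3}. A
 * lower-priority bearer whose node set covers n3 finishes the job,
 * remains_new.count hits zero, and the loop breaks.
 */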
static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(xmitq, skb, tmp) {
		__skb_dequeue(xmitq);
		tipc_bcbearer_send(net, skb, NULL, NULL);
		/* Until we remove cloning in tipc_l2_send_msg(): */
		kfree_skb(skb);
	}
}
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr = bp_curr + 1;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}
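
/* Example of the pairing rules above, with hypothetical bearers: two
 * priority-10 bearers covering identical node sets become one
 * primary/secondary pair, over which tipc_bcbearer_send() spreads
 * traffic via msg_link_selector(). If their node sets differ, the
 * second bearer instead becomes the primary of its own pair entry,
 * since a secondary is only useful when it can stand in for the
 * primary completely.
 */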
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if (limit < BCLINK_WIN_MIN)
		limit = BCLINK_WIN_MIN;
	if (limit > TIPC_MAX_LINK_WIN)
		return -EINVAL;
	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}
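
/* Example of the clamping above: a request for a window of 20 is
 * silently raised to BCLINK_WIN_MIN (32), while a value above
 * TIPC_MAX_LINK_WIN is rejected with -EINVAL rather than clamped.
 */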
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}
int tipc_bcast_init(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_bcbearer *bcb = NULL;
	struct tipc_bc_base *bb = NULL;
	struct tipc_link *l = NULL;

	bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
	if (!bcb)
		goto enomem;
	tn->bcbearer = bcb;

	bcb->bearer.window = BCLINK_WIN_DEFAULT;
	bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
	bcb->bearer.identity = MAX_BEARERS;

	bcb->bearer.media = &bcb->media;
	bcb->media.send_msg = tipc_bcbearer_send;
	sprintf(bcb->media.name, "tipc-broadcast");
	strcpy(bcb->bearer.name, bcb->media.name);

	bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
	if (!bb)
		goto enomem;
	tn->bcbase = bb;
	__skb_queue_head_init(&bb->arrvq);
	spin_lock_init(&tipc_net(net)->bclock);

	if (!tipc_link_bc_create(&bb->node, 0, 0,
				 MAX_PKT_DEFAULT_MCAST,
				 BCLINK_WIN_DEFAULT,
				 0,
				 &bb->inputq,
				 &bb->namedq,
				 NULL,
				 &l))
		goto enomem;
	bb->link = l;
	tn->bcl = l;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
	return 0;
enomem:
	kfree(bcb);
	kfree(bb);
	return -ENOMEM;
}
void tipc_bcast_reinit(struct net *net)
{
	struct tipc_bc_base *b = tipc_bc_base(net);

	msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
}
void tipc_bcast_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);
	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bcbase);
}
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}
/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}
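
/* Example of the bitmap arithmetic above: with WSIZE = 32, node number
 * 37 lands in word w = 37 / 32 = 1 under mask 1 << (37 % 32) = 1 << 5.
 * The membership test before each update keeps count consistent, since
 * add and remove only touch it when the bit actually flips.
 */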
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}