/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/tipc_config.h>
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_unlock_bh(&tn->bclink->lock);
}
void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}
uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}
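
/* The bcbuf_* helpers below reuse the TIPC skb control block's 'handle'
 * field as a destination counter: each buffer on the broadcast transmit
 * queue carries the number of nodes that have not yet acknowledged it,
 * and is released once that count reaches zero (see
 * tipc_bclink_acknowledge()).
 */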
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);

	/* Last node? => reset backlog queue */
	if (!tn->bclink->bcast_nodes.count)
		tipc_link_purge_backlog(&tn->bclink->link);

	tipc_bclink_unlock(net);
}
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}
u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}
static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
				 seqno : node->bclink.last_sent;
}
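
/* Note: mod(), less(), less_eq() and more() operate on TIPC's 16-bit link
 * sequence numbers, so the comparisons throughout this file stay correct
 * across sequence number wrap-around.
 */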
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}
/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}
/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}
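
/* NACK throttling convention used below: bclink.oos_state is incremented
 * on each state update while a reception gap exists. After the increment,
 * an odd value suppresses the NACK (one was sent recently, or a peer's
 * request for the same gap was observed in bclink_peek_nack()), while an
 * even value permits sending one, which bumps the counter to odd again.
 */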
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}
/* bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb))
		return -EHOSTUNREACH;

	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}
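
/* Note on the local path above: the sending node's own sockets never see
 * the message on the wire; a reassembled clone of the chain is fed
 * straight into tipc_sk_mcast_rcv() instead.
 */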
/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node_active_link(node, node->addr),
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}
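
/* Example of the ACK staggering above: a node ACKs only packets where
 * (seqno - own_addr) % TIPC_MIN_LINK_WIN is zero. Since own_addr differs
 * between nodes, the ACKs for any given window of packets are spread
 * across the cluster rather than all arriving at the sender at once.
 */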
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
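
/* tipc_bcbearer_send() depends on the bpairs array built below: entries
 * are ordered by decreasing bearer priority, each holding a primary
 * bearer plus an optional secondary that reaches the same set of nodes,
 * so the send loop can stop at the first empty primary slot.
 */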
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}
int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}
int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}
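
/* The broadcast link is registered as a pseudo-bearer in slot MAX_BEARERS
 * of tn->bearer_list above, which is why tipc_bclink_update_link_state()
 * broadcasts NACKs via tipc_bearer_send() with bearer id MAX_BEARERS.
 */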
void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}
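
/* A node map is a bitmap over cluster node numbers: for an address, the
 * node number n = tipc_node(addr) maps to bit (n % WSIZE) of word
 * map[n / WSIZE], with 'count' tracking the number of set bits.
 * A minimal usage sketch (addr1 and addr2 are hypothetical in-cluster
 * node addresses):
 *
 *	struct tipc_node_map nmap = {0};
 *
 *	tipc_nmap_add(&nmap, addr1);
 *	tipc_nmap_add(&nmap, addr2);	-- nmap.count is now 2 if distinct
 *	tipc_nmap_remove(&nmap, addr1);	-- count drops back to 1
 */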
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}
/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}