tipc: let broadcast packet reception use new link receive function
[cascardo/linux.git] / net / tipc / bcast.c
index 8b010c9..ea28c29 100644 (file)
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <linux/tipc_config.h>
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
-#include "core.h"
+#include "link.h"
+#include "node.h"
 
 #define        MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
-#define        BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */
+#define        BCLINK_WIN_DEFAULT      50      /* bcast link window size (default) */
+#define        BCLINK_WIN_MIN          32      /* bcast minimum link window size */
 
 const char tipc_bclink_name[] = "broadcast-link";
 
+/**
+ * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ *
+ * Bearers must have same priority and same set of reachable destinations
+ * to be paired.
+ */
+
+struct tipc_bcbearer_pair {
+       struct tipc_bearer *primary;
+       struct tipc_bearer *secondary;
+};
+
+#define        BCBEARER                MAX_BEARERS
+
+/**
+ * struct tipc_bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
+ * @remains: temporary node map used by tipc_bcbearer_send()
+ * @remains_new: temporary node map used by tipc_bcbearer_send()
+ *
+ * Note: The fields labelled "temporary" are incorporated into the bearer
+ * to avoid consuming potentially limited stack space through the use of
+ * large local variables within multicast routines.  Concurrent access is
+ * prevented through use of the spinlock "bcast_lock".
+ */
+struct tipc_bcbearer {
+       struct tipc_bearer bearer;
+       struct tipc_media media;
+       struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
+       struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
+       struct tipc_node_map remains;
+       struct tipc_node_map remains_new;
+};
+
+/**
+ * struct tipc_bc_base - link used for broadcast messages
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * @bcast_nodes: map of broadcast-capable nodes
+ * @retransmit_to: node that most recently requested a retransmit
+ *
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+struct tipc_bc_base {
+       struct tipc_link *link;
+       struct tipc_node node;
+       struct sk_buff_head arrvq;
+       struct sk_buff_head inputq;
+       struct sk_buff_head namedq;
+       struct tipc_node_map bcast_nodes;
+       struct tipc_node *retransmit_to;
+};
+
+static struct tipc_bc_base *tipc_bc_base(struct net *net)
+{
+       return tipc_net(net)->bcbase;
+}
+
+/**
+ * tipc_nmap_equal - test for equality of node maps
+ */
+static int tipc_nmap_equal(struct tipc_node_map *nm_a,
+                          struct tipc_node_map *nm_b)
+{
+       return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+}
+
+static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq);
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);
 static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
 static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
 static void tipc_bclink_lock(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       spin_lock_bh(&tn->bclink->lock);
+       tipc_bcast_lock(net);
 }
 
 static void tipc_bclink_unlock(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       spin_unlock_bh(&tn->bclink->lock);
+       tipc_bcast_unlock(net);
 }
 
 void tipc_bclink_input(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
+       tipc_sk_mcast_rcv(net, &tn->bcbase->arrvq, &tn->bcbase->inputq);
 }
 
-uint  tipc_bclink_get_mtu(void)
+uint  tipc_bcast_get_mtu(void)
 {
        return MAX_PKT_DEFAULT_MCAST;
 }
 
-static u32 bcbuf_acks(struct sk_buff *buf)
+static u16 bcbuf_acks(struct sk_buff *skb)
 {
-       return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
+       return TIPC_SKB_CB(skb)->ackers;
 }
 
-static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+static void bcbuf_set_acks(struct sk_buff *buf, u16 ackers)
 {
-       TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
+       TIPC_SKB_CB(buf)->ackers = ackers;
 }
 
 static void bcbuf_decr_acks(struct sk_buff *buf)
@@ -93,29 +164,6 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
 }
 
-void tipc_bclink_add_node(struct net *net, u32 addr)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_bclink_lock(net);
-       tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
-       tipc_bclink_unlock(net);
-}
-
-void tipc_bclink_remove_node(struct net *net, u32 addr)
-{
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-
-       tipc_bclink_lock(net);
-       tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
-
-       /* Last node? => reset backlog queue */
-       if (!tn->bclink->bcast_nodes.count)
-               tipc_link_purge_backlog(&tn->bclink->link);
-
-       tipc_bclink_unlock(net);
-}
-
 static void bclink_set_last_sent(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -146,7 +194,7 @@ struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-       return tn->bclink->retransmit_to;
+       return tn->bcbase->retransmit_to;
 }
 
 /**
@@ -169,6 +217,30 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
        }
 }
 
+/**
+ * bclink_prepare_wakeup - prepare users for wakeup after congestion
+ * @bcl: broadcast link
+ * @resultq: queue for users which can be woken up
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from link wait queue to specified queue for wakeup
+ */
+static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+{
+       int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+       int imp, lim;
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
+               imp = TIPC_SKB_CB(skb)->chain_imp;
+               lim = bcl->window + bcl->backlog[imp].limit;
+               pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+               if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+                       continue;
+               skb_unlink(skb, &bcl->wakeupq);
+               skb_queue_tail(resultq, skb);
+       }
+}
+
 /**
  * tipc_bclink_wakeup_users - wake up pending users
  *
@@ -177,8 +249,12 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 void tipc_bclink_wakeup_users(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+       struct sk_buff_head resultq;
 
-       tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
+       skb_queue_head_init(&resultq);
+       bclink_prepare_wakeup(bcl, &resultq);
+       tipc_sk_rcv(net, &resultq);
 }
 
 /**
@@ -197,7 +273,6 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 
        if (unlikely(!n_ptr->bclink.recv_permitted))
                return;
-
        tipc_bclink_lock(net);
 
        /* Bail out if tx queue is empty (no clean up is required) */
@@ -212,7 +287,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                 * acknowledge sent messages only (if other nodes still exist)
                 * or both sent and unsent messages (otherwise)
                 */
-               if (tn->bclink->bcast_nodes.count)
+               if (tn->bcbase->bcast_nodes.count)
                        acked = tn->bcl->silent_intv_cnt;
                else
                        acked = tn->bcl->snd_nxt;
@@ -226,13 +301,11 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
                    less_eq(acked, n_ptr->bclink.acked))
                        goto exit;
        }
-
        /* Skip over packets that node has previously acknowledged */
        skb_queue_walk(&tn->bcl->transmq, skb) {
                if (more(buf_seqno(skb), n_ptr->bclink.acked))
                        break;
        }
-
        /* Update packets that node is now acknowledging */
        skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
@@ -269,6 +342,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
        struct sk_buff *buf;
        struct net *net = n_ptr->net;
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
 
        /* Ignore "stale" link state info */
        if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -277,6 +351,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
        /* Update link synchronization state; quit if in sync */
        bclink_update_last_sent(n_ptr, last_sent);
 
+       /* This is a good location for statistical profiling */
+       bcl->stats.queue_sz_counts++;
+       bcl->stats.accu_queue_sz += skb_queue_len(&bcl->transmq);
+
        if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
                return;
 
@@ -361,61 +439,173 @@ static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
        tipc_node_put(n_ptr);
 }
 
-/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
+/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
  *                    and to identified node local sockets
  * @net: the applicable net namespace
  * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
+int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_link *bcl = tn->bcl;
-       struct tipc_bclink *bclink = tn->bclink;
+       struct tipc_link *l = tipc_bc_sndlink(net);
+       struct sk_buff_head xmitq, inputq, rcvq;
        int rc = 0;
-       int bc = 0;
-       struct sk_buff *skb;
-       struct sk_buff_head arrvq;
-       struct sk_buff_head inputq;
 
-       /* Prepare clone of message for local node */
-       skb = tipc_msg_reassemble(list);
-       if (unlikely(!skb))
+       __skb_queue_head_init(&rcvq);
+       __skb_queue_head_init(&xmitq);
+       skb_queue_head_init(&inputq);
+
+       /* Prepare message clone for local node */
+       if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
                return -EHOSTUNREACH;
 
-       /* Broadcast to all nodes */
-       if (likely(bclink)) {
-               tipc_bclink_lock(net);
-               if (likely(bclink->bcast_nodes.count)) {
-                       rc = __tipc_link_xmit(net, bcl, list);
-                       if (likely(!rc)) {
-                               u32 len = skb_queue_len(&bcl->transmq);
-
-                               bclink_set_last_sent(net);
-                               bcl->stats.queue_sz_counts++;
-                               bcl->stats.accu_queue_sz += len;
-                       }
-                       bc = 1;
-               }
-               tipc_bclink_unlock(net);
+       tipc_bcast_lock(net);
+       if (tipc_link_bc_peers(l))
+               rc = tipc_link_xmit(l, list, &xmitq);
+       bclink_set_last_sent(net);
+       tipc_bcast_unlock(net);
+
+       /* Don't send to local node if adding to link failed */
+       if (unlikely(rc)) {
+               __skb_queue_purge(&rcvq);
+               return rc;
        }
 
-       if (unlikely(!bc))
-               __skb_queue_purge(list);
+       /* Broadcast to all nodes, including local node */
+       tipc_bcbearer_xmit(net, &xmitq);
+       tipc_sk_mcast_rcv(net, &rcvq, &inputq);
+       __skb_queue_purge(list);
+       return 0;
+}
 
-       if (unlikely(rc)) {
+/* tipc_bcast_rcv - receive a broadcast packet, and deliver to rcv link
+ *
+ * RCU is locked, no other locks set
+ */
+int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb)
+{
+       struct tipc_msg *hdr = buf_msg(skb);
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
+       int rc;
+
+       __skb_queue_head_init(&xmitq);
+
+       if (msg_mc_netid(hdr) != tipc_netid(net) || !tipc_link_is_up(l)) {
                kfree_skb(skb);
-               return rc;
+               return 0;
        }
-       /* Deliver message clone */
-       __skb_queue_head_init(&arrvq);
-       skb_queue_head_init(&inputq);
-       __skb_queue_tail(&arrvq, skb);
-       tipc_sk_mcast_rcv(net, &arrvq, &inputq);
+
+       tipc_bcast_lock(net);
+       if (msg_user(hdr) == BCAST_PROTOCOL)
+               rc = tipc_link_bc_nack_rcv(l, skb, &xmitq);
+       else
+               rc = tipc_link_rcv(l, skb, NULL);
+       tipc_bcast_unlock(net);
+
+       if (!skb_queue_empty(&xmitq))
+               tipc_bcbearer_xmit(net, &xmitq);
+
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
+
        return rc;
 }
 
+/* tipc_bcast_ack_rcv - receive and handle a broadcast acknowledge
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, u32 acked)
+{
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
+
+       __skb_queue_head_init(&xmitq);
+
+       tipc_bcast_lock(net);
+       tipc_link_bc_ack_rcv(l, acked, &xmitq);
+       tipc_bcast_unlock(net);
+
+       tipc_bcbearer_xmit(net, &xmitq);
+
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_bcast_sync_rcv - check and update rcv link with peer's send state
+ *
+ * RCU is locked, no other locks set
+ */
+void tipc_bcast_sync_rcv(struct net *net, struct tipc_link *l,
+                        struct tipc_msg *hdr)
+{
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct sk_buff_head xmitq;
+
+       __skb_queue_head_init(&xmitq);
+
+       tipc_bcast_lock(net);
+       if (msg_type(hdr) == STATE_MSG) {
+               tipc_link_bc_ack_rcv(l, msg_bcast_ack(hdr), &xmitq);
+               tipc_link_bc_sync_rcv(l, hdr, &xmitq);
+       } else {
+               tipc_link_bc_init_rcv(l, hdr);
+       }
+       tipc_bcast_unlock(net);
+
+       tipc_bcbearer_xmit(net, &xmitq);
+
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
+}
+
+/* tipc_bcast_add_peer - add a peer node to broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_add_peer(struct net *net, u32 addr, struct tipc_link *uc_l,
+                        struct sk_buff_head *xmitq)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *snd_l = tipc_bc_sndlink(net);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_add(&tn->bcbase->bcast_nodes, addr);
+       tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
+       tipc_bclink_unlock(net);
+}
+
+/* tipc_bcast_remove_peer - remove a peer node from broadcast link and bearer
+ *
+ * RCU is locked, node lock is set
+ */
+void tipc_bcast_remove_peer(struct net *net, u32 addr,
+                           struct tipc_link *rcv_l)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct sk_buff_head *inputq = &tipc_bc_base(net)->inputq;
+       struct tipc_link *snd_l = tipc_bc_sndlink(net);
+       struct sk_buff_head xmitq;
+
+       __skb_queue_head_init(&xmitq);
+
+       tipc_bclink_lock(net);
+       tipc_nmap_remove(&tn->bcbase->bcast_nodes, addr);
+       tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
+       tipc_bclink_unlock(net);
+
+       tipc_bcbearer_xmit(net, &xmitq);
+
+       /* Any socket wakeup messages ? */
+       if (!skb_queue_empty(inputq))
+               tipc_sk_rcv(net, inputq);
+}
+
 /**
  * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
  *
@@ -466,7 +656,6 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
        node = tipc_node_find(net, msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;
-
        tipc_node_lock(node);
        if (unlikely(!node->bclink.recv_permitted))
                goto unlock;
@@ -479,7 +668,7 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_bclink_lock(net);
                        bcl->stats.recv_nacks++;
-                       tn->bclink->retransmit_to = node;
+                       tn->bcbase->retransmit_to = node;
                        bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        tipc_bclink_unlock(net);
@@ -491,12 +680,11 @@ void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
                tipc_node_put(node);
                goto exit;
        }
-
        /* Handle in-sequence broadcast message */
        seqno = msg_seqno(msg);
        next_in = mod(node->bclink.last_in + 1);
-       arrvq = &tn->bclink->arrvq;
-       inputq = &tn->bclink->inputq;
+       arrvq = &tn->bcbase->arrvq;
+       inputq = &tn->bcbase->inputq;
 
        if (likely(seqno == next_in)) {
 receive:
@@ -622,7 +810,7 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_bcbearer *bcbearer = tn->bcbearer;
-       struct tipc_bclink *bclink = tn->bclink;
+       struct tipc_bc_base *bclink = tn->bcbase;
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
@@ -639,6 +827,7 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
                        return 0;
                }
        }
+       msg_set_mc_netid(msg, tn->net_id);
 
        /* Send buffer over bearers until all targets reached */
        bcbearer->remains = bclink->bcast_nodes;
@@ -680,6 +869,19 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
        return 0;
 }
 
+static void tipc_bcbearer_xmit(struct net *net, struct sk_buff_head *xmitq)
+{
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(xmitq, skb, tmp) {
+               __skb_dequeue(xmitq);
+               tipc_bcbearer_send(net, skb, NULL, NULL);
+
+               /* Until we remove cloning in tipc_l2_send_msg(): */
+               kfree_skb(skb);
+       }
+}
+
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
@@ -880,9 +1082,10 @@ int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
 
        if (!bcl)
                return -ENOPROTOOPT;
-       if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+       if (limit < BCLINK_WIN_MIN)
+               limit = BCLINK_WIN_MIN;
+       if (limit > TIPC_MAX_LINK_WIN)
                return -EINVAL;
-
        tipc_bclink_lock(net);
        tipc_link_set_queue_limits(bcl, limit);
        tipc_bclink_unlock(net);
@@ -910,64 +1113,74 @@ int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
        return tipc_bclink_set_queue_limits(net, win);
 }
 
-int tipc_bclink_init(struct net *net)
+int tipc_bcast_init(struct net *net)
 {
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct tipc_bcbearer *bcbearer;
-       struct tipc_bclink *bclink;
-       struct tipc_link *bcl;
-
-       bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
-       if (!bcbearer)
-               return -ENOMEM;
-
-       bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
-       if (!bclink) {
-               kfree(bcbearer);
-               return -ENOMEM;
-       }
-
-       bcl = &bclink->link;
-       bcbearer->bearer.media = &bcbearer->media;
-       bcbearer->media.send_msg = tipc_bcbearer_send;
-       sprintf(bcbearer->media.name, "tipc-broadcast");
-
-       spin_lock_init(&bclink->lock);
-       __skb_queue_head_init(&bcl->transmq);
-       __skb_queue_head_init(&bcl->backlogq);
-       __skb_queue_head_init(&bcl->deferdq);
-       skb_queue_head_init(&bcl->wakeupq);
-       bcl->snd_nxt = 1;
-       spin_lock_init(&bclink->node.lock);
-       __skb_queue_head_init(&bclink->arrvq);
-       skb_queue_head_init(&bclink->inputq);
-       bcl->owner = &bclink->node;
-       bcl->owner->net = net;
-       bcl->mtu = MAX_PKT_DEFAULT_MCAST;
-       tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-       bcl->bearer_id = MAX_BEARERS;
-       rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
-       bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
-       msg_set_prevnode(bcl->pmsg, tn->own_addr);
-       strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
-       tn->bcbearer = bcbearer;
-       tn->bclink = bclink;
-       tn->bcl = bcl;
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bcbearer *bcb = NULL;
+       struct tipc_bc_base *bb = NULL;
+       struct tipc_link *l = NULL;
+
+       bcb = kzalloc(sizeof(*bcb), GFP_ATOMIC);
+       if (!bcb)
+               goto enomem;
+       tn->bcbearer = bcb;
+
+       bcb->bearer.window = BCLINK_WIN_DEFAULT;
+       bcb->bearer.mtu = MAX_PKT_DEFAULT_MCAST;
+       bcb->bearer.identity = MAX_BEARERS;
+
+       bcb->bearer.media = &bcb->media;
+       bcb->media.send_msg = tipc_bcbearer_send;
+       sprintf(bcb->media.name, "tipc-broadcast");
+       strcpy(bcb->bearer.name, bcb->media.name);
+
+       bb = kzalloc(sizeof(*bb), GFP_ATOMIC);
+       if (!bb)
+               goto enomem;
+       tn->bcbase = bb;
+       __skb_queue_head_init(&bb->arrvq);
+       spin_lock_init(&tipc_net(net)->bclock);
+       bb->node.net = net;
+
+       if (!tipc_link_bc_create(&bb->node, 0, 0,
+                                MAX_PKT_DEFAULT_MCAST,
+                                BCLINK_WIN_DEFAULT,
+                                0,
+                                &bb->inputq,
+                                &bb->namedq,
+                                NULL,
+                                &l))
+               goto enomem;
+       bb->link = l;
+       tn->bcl = l;
+       rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcb->bearer);
        return 0;
+enomem:
+       kfree(bcb);
+       kfree(bb);
+       kfree(l);
+       return -ENOMEM;
 }
 
-void tipc_bclink_stop(struct net *net)
+void tipc_bcast_reinit(struct net *net)
+{
+       struct tipc_bc_base *b = tipc_bc_base(net);
+
+       msg_set_prevnode(b->link->pmsg, tipc_own_addr(net));
+}
+
+void tipc_bcast_stop(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
 
        tipc_bclink_lock(net);
        tipc_link_purge_queues(tn->bcl);
        tipc_bclink_unlock(net);
-
        RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
        synchronize_net();
        kfree(tn->bcbearer);
-       kfree(tn->bclink);
+       kfree(tn->bcbase);
+       kfree(tn->bcl);
 }
 
 /**