/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

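/*
 * Worked example (added note): assuming TIPC_MIN_LINK_WIN is 16, its usual
 * value in link.h, TIPC_NACK_INTV evaluates to 32, i.e. tipc_link_rcv()
 * emits at most one NACK-carrying STATE message per 32 out-of-order
 * packets queued in the deferred queue.
 */
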
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* State value stored in 'failover_pkts'
 */
#define FIRST_FAILOVER 0xffffu

/* Link FSM states and events:
 */
enum {
	TIPC_LINK_WORKING,
	TIPC_LINK_PROBING,
	TIPC_LINK_RESETTING,
	TIPC_LINK_ESTABLISHING
};

enum {
	PEER_RESET_EVT = RESET_MSG,
	ACTIVATE_EVT   = ACTIVATE_MSG,
	TRAFFIC_EVT,	/* Any other valid msg from peer */
	SILENCE_EVT	/* Peer was silent during last timer interval */
};

/* Link FSM state checking routines
 */
static int link_working(struct tipc_link *l)
{
	return l->state == TIPC_LINK_WORKING;
}

static int link_probing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_PROBING;
}

static int link_resetting(struct tipc_link *l)
{
	return l->state == TIPC_LINK_RESETTING;
}

static int link_establishing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_ESTABLISHING;
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

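/*
 * Worked example (added note): align() rounds up to the next multiple of
 * four, e.g. align(1) == 4, align(4) == 4, align(5) == 8. This matches the
 * 4-byte alignment used when walking bundled messages in
 * tipc_link_failover_send_queue().
 */
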
static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working(l_ptr) || link_probing(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr), if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->state = TIPC_LINK_RESETTING;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);

	return l_ptr;
}

/**
 * tipc_link_delete - Delete a link
 * @l: link to be deleted
 */
void tipc_link_delete(struct tipc_link *l)
{
	tipc_link_reset_fragments(l);
	tipc_node_detach_link(l->owner, l);
}

void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 * @xmitq: queue to prepend created protocol message, if any
 */
static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
			     struct sk_buff_head *xmitq)
{
	int mtyp = 0, rc = 0;
	struct tipc_link *pl;
	enum {
		LINK_RESET     = 1,
		LINK_ACTIVATE  = (1 << 1),
		SND_PROBE      = (1 << 2),
		SND_STATE      = (1 << 3),
		SND_RESET      = (1 << 4),
		SND_ACTIVATE   = (1 << 5),
		SND_BCAST_SYNC = (1 << 6)
	} actions = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return rc;

	switch (l->state) {
	case TIPC_LINK_WORKING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			break;
		case SILENCE_EVT:
			l->state = TIPC_LINK_PROBING;
			actions |= SND_PROBE;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		default:
			pr_debug("%s%u WORKING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_PROBING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			l->state = TIPC_LINK_WORKING;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			if (l->silent_intv_cnt <= l->abort_limit) {
				actions |= SND_PROBE;
				break;
			}
			actions |= LINK_RESET | SND_RESET;
			break;
		default:
			pr_err("%s%u PROBING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_RESETTING:
		switch (evt) {
		case TRAFFIC_EVT:
			break;
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			l->state = TIPC_LINK_ESTABLISHING;
			actions |= SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			actions |= SND_RESET;
			break;
		default:
			pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_ESTABLISHING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			break;
		case SILENCE_EVT:
			actions |= SND_ACTIVATE;
			break;
		default:
			pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l->state, evt);
	}

	/* Perform actions as decided by FSM */
	if (actions & LINK_RESET) {
		l->exec_mode = TIPC_LINK_BLOCKED;
		rc |= TIPC_LINK_DOWN_EVT;
	}
	if (actions & LINK_ACTIVATE) {
		l->exec_mode = TIPC_LINK_OPEN;
		rc |= TIPC_LINK_UP_EVT;
	}
	if (actions & (SND_STATE | SND_PROBE))
		mtyp = STATE_MSG;
	if (actions & SND_RESET)
		mtyp = RESET_MSG;
	if (actions & SND_ACTIVATE)
		mtyp = ACTIVATE_MSG;
	if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
		tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
					  0, 0, 0, xmitq);
	if (actions & SND_BCAST_SYNC)
		tipc_link_build_bcast_sync_msg(l, xmitq);
	return rc;
}

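/*
 * Worked example of the FSM above (added note): a link in
 * TIPC_LINK_WORKING that saw no traffic during a timer interval receives
 * SILENCE_EVT, moves to TIPC_LINK_PROBING and sets SND_PROBE, so a probing
 * STATE_MSG is built onto @xmitq. If the peer answers, the next
 * TRAFFIC_EVT returns the link to TIPC_LINK_WORKING; if it stays silent
 * past l->abort_limit intervals, LINK_RESET | SND_RESET fires, the caller
 * sees TIPC_LINK_DOWN_EVT and a RESET_MSG is queued for transmission.
 */
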
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

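/*
 * Added note: the buckets above form a seven-bin histogram of sent message
 * sizes: <= 64, <= 256, <= 1024, <= 4096, <= 16384, <= 32768 bytes and
 * larger, exported as TIPC_NLA_STATS_MSG_LEN_P0..P6 by
 * __tipc_nl_add_stats().
 */
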
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;

	link_profile_stats(l);
	if (l->silent_intv_cnt)
		rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
	else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	l->silent_intv_cnt++;
	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}

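/*
 * Added note: the SOCK_WAKEUP pseudo message never goes on the wire. It is
 * parked on link->wakeupq carrying the blocked chain's length and
 * importance in its control block; link_prepare_wakeup() later moves it to
 * the input queue once the backlog for that importance level has drained
 * sufficiently.
 */
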
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = TIPC_LINK_RESETTING;

	if ((prev_state == TIPC_LINK_RESETTING) ||
	    (prev_state == TIPC_LINK_ESTABLISHING))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);

	owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stats.recv_info = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	link->snd_nxt = seqno;
	return 0;
}

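/*
 * Added note on the send path above: once the transmit window is full,
 * each packet is tried in three ways, in order: piggy-backed onto the
 * bundle at the backlog tail (tipc_msg_bundle), made the first member of
 * a new bundle (tipc_msg_make_bundle), and finally the remaining chain is
 * appended to the backlog unbundled. Only messages small enough to share
 * an MTU-sized buffer are ever bundled.
 */
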
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return 0;
}

static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i].link) {
			link_print(node->links[i].link, "Resetting link\n");
			tipc_link_reset(node->links[i].link);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d, Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		return TIPC_LINK_DOWN_EVT;
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}

/* link_synch(): check if all packets arrived before the synch
 * point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->exec_mode = TIPC_LINK_OPEN;
	return true;
}

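/*
 * Worked example (added note): sequence numbers are 16 bit, so if the
 * parallel link has pl->rcv_nxt == 105 and the tunnel set
 * l->synch_point == 100, then post_synch = mod(105 - 100) - 1 = 4. The
 * links count as synched once no more than four packets (all received
 * after the synch point) remain in pl->inputq.
 */
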
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			link->exec_mode = TIPC_LINK_TUNNEL;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

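/*
 * Added note: buf_seqno() values are 16 bit, and more()/less() compare
 * them modulo 2^16, so e.g. more(2, 65500) is true because 2 lies ahead
 * of 65500 after wrap-around. This keeps packet release correct across
 * sequence number wrap.
 */
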
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff *tmp;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	skb_queue_walk_safe(arrvq, skb, tmp) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc |= tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_working(l))) {
			rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
			if (!link_working(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				return rc;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			return rc;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			continue;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
			if (!msg_dup(hdr) && !link_synch(l)) {
				kfree_skb(skb);
				return rc;
			}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb)))
			tipc_link_input(l, skb);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
	return rc;
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}

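/*
 * Worked example (added note): with list = [5, 6, 9] and an arriving skb
 * carrying seqno 8, the walk stops at 9 (less(8, 9)), skb is linked in
 * before it, the queue becomes [5, 6, 8, 9] and 1 is returned. A second
 * copy of 8 would instead be freed and 0 returned.
 */
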
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}

/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->failover_checkpt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_head(xmitq, skb);
}

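/*
 * Added note: protocol messages are deliberately numbered far outside the
 * data flow (snd_nxt + U16_MAX / 2, i.e. 32767 ahead of the next data
 * packet), presumably so that legacy peers treating them as sequenced
 * packets will neither accept them as in-order data nor NACK them as a
 * gap.
 */
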
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = node_active_link(l_ptr->owner, selector & 1);
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}

/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 1));

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order are preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}

/* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 * Owner node is locked.
 */
static bool tipc_link_failover_rcv(struct tipc_link *link,
				   struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;
	if (pl && tipc_link_is_up(pl))
		tipc_link_reset(pl);

	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		tipc_buf_append(&link->failover_skb, &iskb);
	}
exit:
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return *skb;
}

/* tipc_link_proto_rcv(): receive link level protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
		if (!tipc_link_is_up(l))
			break;

		/* Has peer sent packets we haven't received yet ? */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc |= tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
}

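/*
 * Worked example (added note): for a window of, say, 50 packets the
 * limits above come out as LOW = 25, MEDIUM = 50, HIGH = 75 and
 * CRITICAL = 100, while SYSTEM importance is capped at max_bulk, the
 * number of MTU-sized bulk messages needed to carry
 * TIPC_MAX_PUBLICATIONS name table items of ITEM_SIZE each.
 */
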
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s>:", str, l->name);

	if (link_probing(l))
		pr_cont(":P\n");
	else if (link_establishing(l))
		pr_cont(":E\n");
	else if (link_resetting(l))
		pr_cont(":R\n");
	else if (link_working(l))
		pr_cont(":W\n");
	else
		pr_cont("\n");

	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

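/*
 * Usage note (added): callers such as tipc_nl_link_set() rely on this
 * helper, so that after a 0 return any present TIPC_NLA_PROP_PRIO, _TOL
 * or _WIN attribute is already within [0, TIPC_MAX_LINK_PRI],
 * [TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL] or
 * [TIPC_MIN_LINK_WIN, TIPC_MAX_LINK_WIN] respectively and can be applied
 * without re-checking.
 */
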
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node)
			return -EINVAL;

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}