/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};
/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]	= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]	= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]	= { .type = NLA_U32 }
};
/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* State value stored in 'failover_pkts' */
#define FIRST_FAILOVER 0xffffu
/* Link FSM states and events:
 */
enum {
	TIPC_LINK_WORKING,
	TIPC_LINK_PROBING,
	TIPC_LINK_RESETTING,
	TIPC_LINK_ESTABLISHING
};

enum {
	PEER_RESET_EVT = RESET_MSG,
	ACTIVATE_EVT   = ACTIVATE_MSG,
	TRAFFIC_EVT,	/* Any other valid msg from peer */
	SILENCE_EVT	/* Peer was silent during last timer interval */
};
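/* Illustrative overview (added here for orientation, not in the original
 * file): how the states react to events, as implemented by
 * tipc_link_fsm_evt() further down. A summary sketch derived from that
 * code, not an authoritative table:
 *
 *   WORKING      + SILENCE_EVT      -> PROBING, send probe
 *   WORKING      + PEER_RESET_EVT   -> reset link, send ACTIVATE_MSG
 *   PROBING      + TRAFFIC_EVT      -> WORKING
 *   PROBING      + SILENCE_EVT      -> probe again, or reset and send
 *                                      RESET_MSG once the abort limit
 *                                      is exceeded
 *   RESETTING    + ACTIVATE_EVT     -> WORKING (unless the parallel
 *                                      link is still probing)
 *   RESETTING    + PEER_RESET_EVT   -> ESTABLISHING, send ACTIVATE_MSG
 *   ESTABLISHING + TRAFFIC/ACTIVATE -> WORKING
 */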
/* Link FSM state checking routines
 */
static int link_working(struct tipc_link *l)
{
	return l->state == TIPC_LINK_WORKING;
}

static int link_probing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_PROBING;
}

static int link_resetting(struct tipc_link *l)
{
	return l->state == TIPC_LINK_RESETTING;
}

static int link_establishing(struct tipc_link *l)
{
	return l->state == TIPC_LINK_ESTABLISHING;
}
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
static int tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
/*
 *  Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
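/* Worked example (added for illustration): align() rounds a length up to
 * the next 4-byte boundary, so align(61) = (61 + 3) & ~3 = 64, while an
 * already aligned value is unchanged: align(64) = 64.
 */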
static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	if (node_active_link(n, 0) != l)
		return node_active_link(n, 0);
	return node_active_link(n, 1);
}
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working(l_ptr) || link_probing(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l)
{
	struct tipc_node *n = l->owner;

	return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr,
				   struct sk_buff_head *inputq,
				   struct sk_buff_head *namedq)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity].link) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->peer_session = WILDCARD_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	l_ptr->tolerance = b_ptr->tolerance;
	l_ptr->state = TIPC_LINK_RESETTING;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
	l_ptr->net_plane = b_ptr->net_plane;
	l_ptr->advertised_mtu = b_ptr->mtu;
	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
	l_ptr->snd_nxt = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	l_ptr->inputq = inputq;
	l_ptr->namedq = namedq;
	skb_queue_head_init(l_ptr->inputq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);

	return l_ptr;
}
/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
					   struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct sk_buff_head list;
	u16 last_sent;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, link_own_addr(l), 0, 0, 0);
	if (!skb)
		return;
	last_sent = tipc_bclink_get_last_sent(l->owner->net);
	msg_set_last_bcast(buf_msg(skb), last_sent);
	__skb_queue_head_init(&list);
	__skb_queue_tail(&list, skb);
	tipc_link_xmit(l, &list, xmitq);
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 * @xmitq: queue to prepend created protocol message, if any
 */
static int tipc_link_fsm_evt(struct tipc_link *l, int evt,
			     struct sk_buff_head *xmitq)
{
	int mtyp = 0, rc = 0;
	struct tipc_link *pl;
	enum {
		LINK_RESET     = 1,
		LINK_ACTIVATE  = (1 << 1),
		SND_PROBE      = (1 << 2),
		SND_STATE      = (1 << 3),
		SND_RESET      = (1 << 4),
		SND_ACTIVATE   = (1 << 5),
		SND_BCAST_SYNC = (1 << 6)
	} actions = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return rc;

	switch (l->state) {
	case TIPC_LINK_WORKING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			break;
		case SILENCE_EVT:
			l->state = TIPC_LINK_PROBING;
			actions |= SND_PROBE;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		default:
			pr_debug("%s%u WORKING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_PROBING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			l->state = TIPC_LINK_WORKING;
			break;
		case PEER_RESET_EVT:
			actions |= LINK_RESET | SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			if (l->silent_intv_cnt <= l->abort_limit) {
				actions |= SND_PROBE;
				break;
			}
			actions |= LINK_RESET | SND_RESET;
			break;
		default:
			pr_err("%s%u PROBING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_RESETTING:
		switch (evt) {
		case TRAFFIC_EVT:
			break;
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			l->state = TIPC_LINK_ESTABLISHING;
			actions |= SND_ACTIVATE;
			break;
		case SILENCE_EVT:
			actions |= SND_RESET;
			break;
		default:
			pr_err("%s%u in RESETTING\n", link_unk_evt, evt);
		}
		break;
	case TIPC_LINK_ESTABLISHING:
		switch (evt) {
		case TRAFFIC_EVT:
		case ACTIVATE_EVT:
			pl = node_active_link(l->owner, 0);
			if (pl && link_probing(pl))
				break;
			l->state = TIPC_LINK_WORKING;
			actions |= LINK_ACTIVATE;
			if (!l->owner->working_links)
				actions |= SND_BCAST_SYNC;
			break;
		case PEER_RESET_EVT:
			break;
		case SILENCE_EVT:
			actions |= SND_ACTIVATE;
			break;
		default:
			pr_err("%s%u ESTABLISHING\n", link_unk_evt, evt);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l->state, evt);
	}

	/* Perform actions as decided by FSM */
	if (actions & LINK_RESET) {
		l->exec_mode = TIPC_LINK_BLOCKED;
		rc |= TIPC_LINK_DOWN_EVT;
	}
	if (actions & LINK_ACTIVATE) {
		l->exec_mode = TIPC_LINK_OPEN;
		rc |= TIPC_LINK_UP_EVT;
	}
	if (actions & (SND_STATE | SND_PROBE))
		mtyp = STATE_MSG;
	if (actions & SND_RESET)
		mtyp = RESET_MSG;
	if (actions & SND_ACTIVATE)
		mtyp = ACTIVATE_MSG;
	if (actions & (SND_PROBE | SND_STATE | SND_RESET | SND_ACTIVATE))
		tipc_link_build_proto_msg(l, mtyp, actions & SND_PROBE,
					  0, 0, 0, xmitq);
	if (actions & SND_BCAST_SYNC)
		tipc_link_build_bcast_sync_msg(l, xmitq);
	return rc;
}
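/* Worked trace (added for illustration): a link in TIPC_LINK_PROBING that
 * receives SILENCE_EVT with l->silent_intv_cnt above l->abort_limit sets
 * actions = LINK_RESET | SND_RESET. The action block after the switch then
 * blocks the link, adds TIPC_LINK_DOWN_EVT to the return code, and queues
 * a RESET_MSG onto 'xmitq' via tipc_link_build_proto_msg().
 */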
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int rc = 0;

	link_profile_stats(l);
	if (l->silent_intv_cnt)
		rc = tipc_link_fsm_evt(l, SILENCE_EVT, xmitq);
	else if (link_working(l) && tipc_bclink_acks_missing(l->owner))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
	l->silent_intv_cnt++;
	return rc;
}
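/* Caller sketch (added for illustration; the caller lives outside this
 * file, so the exact pattern is an assumption): the per-node timer is
 * expected to drive this function once per supervision interval and then
 * flush any protocol messages it queued, roughly:
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_timeout(l, &xmitq);
 *	... transmit the contents of 'xmitq' on the bearer and act on
 *	    TIPC_LINK_DOWN_EVT/TIPC_LINK_UP_EVT bits in rc ...
 */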
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	int imp = msg_importance(msg);
	u32 oport = msg_origport(msg);
	u32 addr = link_own_addr(link);
	struct sk_buff *skb;

	/* This really cannot happen...  */
	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		return -ENOBUFS;
	}
	/* Non-blocking sender: */
	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
		return -ELINKCONG;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      addr, addr, oport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
	TIPC_SKB_CB(skb)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
	return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
	int imp, lim;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		lim = l->window + l->backlog[imp].limit;
		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
		skb_unlink(skb, &l->wakeupq);
		skb_queue_tail(l->inputq, skb);
		l->owner->inputq = l->inputq;
		l->owner->action_flags |= TIPC_MSG_EVT;
	}
}
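/* Worked example (added for illustration): suppose window = 50 and the
 * TIPC_LOW_IMPORTANCE backlog limit is 25, so lim = 75 for that level.
 * Waiting senders whose pseudo messages carry chain_sz = 10 are woken one
 * by one while pnd[imp] + backlog len stays below 75; once the accumulated
 * pending chains cross that bound, the walk stops and the remaining
 * senders stay on the wakeup queue for a later round.
 */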
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}
/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = WILDCARD_SESSION;

	/* Prepare for renewed mtu size negotiation */
	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = TIPC_LINK_RESETTING;

	if ((prev_state == TIPC_LINK_RESETTING) ||
	    (prev_state == TIPC_LINK_ESTABLISHING))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr->bearer_id);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
		l_ptr->exec_mode = TIPC_LINK_BLOCKED;
		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_pkts = FIRST_FAILOVER;
		pl->failover_checkpt = l_ptr->rcv_nxt;
		pl->failover_skb = l_ptr->reasm_buf;
	} else {
		kfree_skb(l_ptr->reasm_buf);
	}
	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	tipc_link_purge_backlog(l_ptr);
	l_ptr->reasm_buf = NULL;
	l_ptr->rcv_unacked = 0;
	l_ptr->snd_nxt = 1;
	l_ptr->silent_intv_cnt = 0;
	l_ptr->stats.recv_info = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	unsigned int i, imp = msg_importance(msg);
	uint mtu = link->mtu;
	u16 ack = mod(link->rcv_nxt - 1);
	u16 seqno = link->snd_nxt;
	u16 bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
			return link_schedule_user(link, list);
	}
	if (unlikely(msg_size(msg) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
			kfree_skb(__skb_dequeue(list));
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			link->backlog[msg_importance(buf_msg(bskb))].len++;
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
			continue;
		}
		link->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
		break;
	}
	link->snd_nxt = seqno;
	return 0;
}
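/* Flow summary (added for illustration): each packet peeled off 'list'
 * takes one of three paths above. With window = 50, a packet arriving
 * while transmq holds fewer than 50 buffers is sent on the bearer at
 * once; otherwise it is first offered to the last bundle on backlogq,
 * and only if it fits in neither is it appended (possibly wrapped into a
 * new bundle) on backlogq, where tipc_link_push_packets() picks it up
 * once the send window reopens.
 */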
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	unsigned int i, imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_last_in = l->owner->bclink.last_in;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;

	/* Match msg importance against this and all higher backlog limits: */
	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
			return link_schedule_user(l, list);
	}
	if (unlikely(msg_size(hdr) > mtu))
		return -EMSGSIZE;

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb)
				return -ENOBUFS;
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			l->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
		break;
	}
	l->snd_nxt = seqno;
	return 0;
}
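/* Usage sketch (added for illustration; the helper that builds the chain
 * is an assumption about callers outside this file). The caller holds the
 * node lock, passes the message chain in 'list', and transmits whatever
 * this function clones into 'xmitq':
 *
 *	struct sk_buff_head list, xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&list);
 *	__skb_queue_head_init(&xmitq);
 *	... fragment user data into 'list', e.g. via tipc_msg_build() ...
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	... send each skb on 'xmitq' over the bearer; -ELINKCONG means
 *	    the sender was queued for wakeup and may retry later ...
 */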
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}
/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Called with node locked.
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	u16 seqno = link->snd_nxt;
	u16 ack = mod(link->rcv_nxt - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		link->backlog[msg_importance(msg)].len--;
		msg_set_ack(msg, ack);
		msg_set_seqno(msg, seqno);
		seqno = mod(seqno + 1);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
	link->snd_nxt = seqno;
}
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		msg_set_ack(hdr, ack);
		msg_set_seqno(hdr, seqno);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		l->rcv_unacked = 0;
		seqno++;
	}
	l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link ");
		pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
			msg_user(msg), msg_type(msg), msg_size(msg),
			msg_errcode(msg));
		pr_info("sqno %u, prev: %x, src: %x\n",
			msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
}
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransm == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransm = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}
static int tipc_link_retransm(struct tipc_link *l, int retransm,
			      struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (likely(l->last_retransm != buf_seqno(skb))) {
		l->last_retransm = buf_seqno(skb);
		l->stale_count = 1;
	} else if (++l->stale_count > 100) {
		link_retransmit_failure(l, skb);
		l->exec_mode = TIPC_LINK_BLOCKED;
		return TIPC_LINK_DOWN_EVT;
	}
	skb_queue_walk(&l->transmq, skb) {
		if (!retransm)
			return 0;
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, l->rcv_nxt - 1);
		msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		retransm--;
		l->stats.retransmitted++;
	}
	return 0;
}
/* link_synch(): check if all packets arrived before the synch
 *               point have been consumed
 * Returns true if the parallel links are synched, otherwise false
 */
static bool link_synch(struct tipc_link *l)
{
	unsigned int post_synch;
	struct tipc_link *pl;

	pl = tipc_parallel_link(l);
	if (pl == l)
		goto synched;

	/* Was last pre-synch packet added to input queue ? */
	if (less_eq(pl->rcv_nxt, l->synch_point))
		return false;

	/* Is it still in the input queue ? */
	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
	if (skb_queue_len(pl->inputq) > post_synch)
		return false;
synched:
	l->exec_mode = TIPC_LINK_OPEN;
	return true;
}
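/* Worked example (added for illustration): assume the synch point was
 * seqno 100 and the parallel link has pl->rcv_nxt = 104. Then
 * post_synch = mod(104 - 100) - 1 = 3 packets arrived after the synch
 * point; the links count as synched only once pl->inputq holds at most
 * those 3 packets, i.e. everything up to seqno 100 has been consumed.
 */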
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
			node->inputq = link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = link->namedq;
		skb_queue_tail(link->namedq, skb);
		if (skb_queue_len(link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	}
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static int tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;
	int rc = 0;

	switch (msg_user(msg)) {
	case TUNNEL_PROTOCOL:
		if (msg_dup(msg)) {
			link->exec_mode = TIPC_LINK_TUNNEL;
			link->synch_point = msg_seqno(msg_get_wrapped(msg));
			kfree_skb(skb);
			break;
		}
		rc |= tipc_link_failover_rcv(link, &skb);
		if (!skb)
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
		/* fall through: extracted packet is a bundle */
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			link->exec_mode = TIPC_LINK_BLOCKED;
			rc |= TIPC_LINK_DOWN_EVT;
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	}
	return rc;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @link: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *arrvq = &l->deferdq;
	struct sk_buff *tmp;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt;
	int rc = 0;

	if (unlikely(!__tipc_skb_queue_sorted(arrvq, skb))) {
		if (!(skb_queue_len(arrvq) % TIPC_NACK_INTV))
			tipc_link_build_proto_msg(l, STATE_MSG, 0,
						  0, 0, 0, xmitq);
		return rc;
	}

	skb_queue_walk_safe(arrvq, skb, tmp) {
		hdr = buf_msg(skb);

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL)) {
			__skb_dequeue(arrvq);
			rc |= tipc_link_proto_rcv(l, skb, xmitq);
			continue;
		}

		if (unlikely(!link_working(l))) {
			rc |= tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
			if (!link_working(l)) {
				kfree_skb(__skb_dequeue(arrvq));
				return rc;
			}
		}

		l->silent_intv_cnt = 0;

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer reception if there is a gap in the sequence */
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		if (unlikely(less(rcv_nxt, seqno))) {
			l->stats.deferred_recv++;
			return rc;
		}

		__skb_dequeue(arrvq);

		/* Drop if packet already received */
		if (unlikely(more(rcv_nxt, seqno))) {
			l->stats.duplicates++;
			kfree_skb(skb);
			continue;
		}

		/* Synchronize with parallel link if applicable */
		if (unlikely(l->exec_mode == TIPC_LINK_TUNNEL))
			if (!msg_dup(hdr) && !link_synch(l)) {
				kfree_skb(skb);
				return rc;
			}

		/* Packet can be delivered */
		l->rcv_nxt++;
		l->stats.recv_info++;
		if (unlikely(!tipc_data_input(l, skb)))
			rc |= tipc_link_input(l, skb);

		/* Ack at regular intervals */
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l->rcv_unacked = 0;
			l->stats.sent_acks++;
			tipc_link_build_proto_msg(l, STATE_MSG,
						  0, 0, 0, 0, xmitq);
		}
	}
	return rc;
}
/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u16 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u16 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}
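/* Worked example (added for illustration): with the deferred queue holding
 * seqnos [5, 6, 9], an arriving seqno 8 walks past 5 and 6, stops at 9,
 * and is inserted before it, giving [5, 6, 8, 9] and a return value of 1.
 * A second copy of seqno 8 would match an existing entry, be freed, and
 * return 0.
 */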
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority)
{
	struct sk_buff *skb = NULL;
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
				  tolerance, priority, &xmitq);
	skb = __skb_dequeue(&xmitq);
	if (!skb)
		return;
	tipc_bearer_send(l->owner->net, l->bearer_id, skb, &l->media_addr);
	l->rcv_unacked = 0;
	kfree_skb(skb);
}
/* tipc_link_build_proto_msg: prepare link protocol message for transmission
 */
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      u16 rcvgap, int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb = NULL;
	struct tipc_msg *hdr = l->pmsg;
	u16 snd_nxt = l->snd_nxt;
	u16 rcv_nxt = l->rcv_nxt;
	u16 rcv_last = rcv_nxt - 1;
	int node_up = l->owner->bclink.recv_permitted;

	/* Don't send protocol message during reset or link failover */
	if (l->exec_mode == TIPC_LINK_BLOCKED)
		return;

	msg_set_type(hdr, mtyp);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
	msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);

	/* Compatibility: created msg must not be in sequence with pkt flow */
	msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (!tipc_link_is_up(l))
			return;
		msg_set_next_sent(hdr, snd_nxt);

		/* Override rcvgap if there are packets in deferred queue */
		if (!skb_queue_empty(&l->deferdq))
			rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
		if (rcvgap) {
			msg_set_seq_gap(hdr, rcvgap);
			l->stats.sent_nacks++;
		}
		msg_set_ack(hdr, rcv_last);
		msg_set_probe(hdr, probe);
		if (probe)
			l->stats.sent_probes++;
		l->stats.sent_states++;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		msg_set_ack(hdr, l->failover_checkpt - 1);
		msg_set_next_sent(hdr, 1);
	}
	skb = tipc_buf_acquire(msg_size(hdr));
	if (!skb)
		return;
	skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_head(xmitq, skb);
}
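/* Note with example (added for illustration): the seqno offset above keeps
 * protocol messages out of the data sequence. If snd_nxt = 7, the header
 * gets seqno 7 + U16_MAX / 2 = 32774, so a peer that feeds protocol
 * messages through sequence checking never confuses them with in-flow
 * data packets.
 */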
/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = node_active_link(l_ptr->owner, selector & 1);
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}
/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = node_active_link(l_ptr->owner, 0);
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);

	skb_queue_walk(&l_ptr->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
	}
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	tipc_link_purge_backlog(l_ptr);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (node_active_link(l_ptr->owner, 0) !=
			 node_active_link(l_ptr->owner, 1));

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}
/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;
	u16 seqno;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
		      SYNCH_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->rcv_nxt - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	seqno = link->snd_nxt;
	skb_queue_walk(&link->backlogq, skb) {
		msg_set_seqno(buf_msg(skb), seqno);
		seqno = mod(seqno + 1);
	}
	queue = &link->backlogq;
	goto tunnel_queue;
}
/*  tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
 *  Owner node is locked.
 */
static int tipc_link_failover_rcv(struct tipc_link *link,
				  struct sk_buff **skb)
{
	struct tipc_msg *msg = buf_msg(*skb);
	struct sk_buff *iskb = NULL;
	struct tipc_link *pl = NULL;
	int bearer_id = msg_bearer_id(msg);
	int pos = 0;
	int rc = 0;

	if (msg_type(msg) != FAILOVER_MSG) {
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
		goto exit;
	}
	if (bearer_id >= MAX_BEARERS)
		goto exit;

	if (bearer_id == link->bearer_id)
		goto exit;

	pl = link->owner->links[bearer_id].link;

	if (link->failover_pkts == FIRST_FAILOVER)
		link->failover_pkts = msg_msgcnt(msg);

	/* Should we expect an inner packet? */
	if (!link->failover_pkts)
		goto exit;

	if (!tipc_msg_extract(*skb, &iskb, &pos)) {
		pr_warn("%sno inner failover pkt\n", link_co_err);
		goto exit;
	}
	link->failover_pkts--;
	*skb = NULL;

	/* Was this packet already delivered? */
	if (less(buf_seqno(iskb), link->failover_checkpt)) {
		kfree_skb(iskb);
		iskb = NULL;
		goto exit;
	}
	if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) {
		link->stats.recv_fragments++;
		if (!tipc_buf_append(&link->failover_skb, &iskb) &&
		    !link->failover_skb) {
			link->exec_mode = TIPC_LINK_BLOCKED;
			rc |= TIPC_LINK_DOWN_EVT;
		}
	}
exit:
	if (!link->failover_pkts && pl)
		pl->exec_mode = TIPC_LINK_OPEN;
	kfree_skb(*skb);
	*skb = iskb;
	return rc;
}
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 nacked_gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	char *if_name;
	int rc = 0;

	if (l->exec_mode == TIPC_LINK_BLOCKED)
		goto exit;

	if (link_own_addr(l) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	switch (msg_type(hdr)) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != WILDCARD_SESSION))
			break;
		/* fall thru' */
	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		rc = tipc_link_fsm_evt(l, msg_type(hdr), xmitq);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;
	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;
		rc = tipc_link_fsm_evt(l, TRAFFIC_EVT, xmitq);
		if (!tipc_link_is_up(l))
			break;

		/* Has peer sent packets we haven't received yet ? */
		if (more(peers_snd_nxt, l->rcv_nxt))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || (msg_probe(hdr)))
			tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
						  0, 0, xmitq);
		tipc_link_release_pkts(l, msg_ack(hdr));

		/* If NACK, retransmit will now start at right position */
		if (nacked_gap) {
			rc |= tipc_link_retransm(l, nacked_gap, xmitq);
			l->stats.recv_nacks++;
		}
		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
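/* Worked example (added for illustration): with win = 50 the limits are
 * LOW 25, MEDIUM 50, HIGH 75 and CRITICAL 100 backlog buffers. The
 * SYSTEM bound is instead sized for bulk name-table distribution: under
 * the illustrative assumption that ITEM_SIZE is 20 bytes and
 * TIPC_MAX_PUBLICATIONS is 65536 (both defined in other headers), an MTU
 * of 1500 fits 75 items per packet, giving 65536 / 75 = 873 buffers.
 */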
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i].link;
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->snd_nxt;
	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
}
static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s>:", str, l->name);

	if (link_probing(l))
		pr_cont(":P\n");
	else if (link_establishing(l))
		pr_cont(":E\n");
	else if (link_resetting(l))
		pr_cont(":R\n");
	else if (link_working(l))
		pr_cont(":W\n");
	else
		pr_cont("\n");

	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
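/* Attribute layout sketch (added for illustration; the concrete name and
 * values are hypothetical): properties arrive as a nested attribute,
 * conceptually
 *
 *	TIPC_NLA_LINK
 *	  TIPC_NLA_LINK_NAME  "1.1.1:eth0-1.1.2:eth0"
 *	  TIPC_NLA_LINK_PROP
 *	    TIPC_NLA_PROP_TOL  1500
 *	    TIPC_NLA_PROP_WIN  50
 *
 * tipc_nl_link_set() below hands attrs[TIPC_NLA_LINK_PROP] to this parser
 * and then applies each property that validated.
 */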
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(name, tipc_bclink_name) == 0)
		return tipc_nl_bc_link_set(net, attrs);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link->tolerance = tol;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;
	struct nla_map {
		u32 key;
		u32 val;
	};
	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
/* Caller should hold node lock  */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i].link)
			continue;

		err = __tipc_nl_add_link(net, msg,
					 node->links[i].link, NLM_F_MULTI);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}
int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistence check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}
		tipc_node_put(node);

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}
int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct tipc_nl_msg msg;
	char *name;
	int err;

	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;
	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);

	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!msg.skb)
		return -ENOMEM;

	if (strcmp(name, tipc_bclink_name) == 0) {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	} else {
		int bearer_id;
		struct tipc_node *node;
		struct tipc_link *link;

		node = tipc_link_find_owner(net, name, &bearer_id);
		if (!node) {
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		tipc_node_lock(node);
		link = node->links[bearer_id].link;
		if (!link) {
			tipc_node_unlock(node);
			nlmsg_free(msg.skb);
			return -EINVAL;
		}

		err = __tipc_nl_add_link(net, &msg, link, 0);
		tipc_node_unlock(node);
		if (err) {
			nlmsg_free(msg.skb);
			return err;
		}
	}

	return genlmsg_reply(msg.skb, info);
}
int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id].link;
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}