/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
	[TIPC_NLA_LINK_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_LINK_NAME] = {
		.type = NLA_STRING,
		.len = TIPC_MAX_LINK_NAME
	},
	[TIPC_NLA_LINK_MTU]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_BROADCAST]	= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_UP]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_ACTIVE]		= { .type = NLA_FLAG },
	[TIPC_NLA_LINK_PROP]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_STATS]		= { .type = NLA_NESTED },
	[TIPC_NLA_LINK_RX]		= { .type = NLA_U32 },
	[TIPC_NLA_LINK_TX]		= { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
	[TIPC_NLA_PROP_UNSPEC]		= { .type = NLA_UNSPEC },
	[TIPC_NLA_PROP_PRIO]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_TOL]		= { .type = NLA_U32 },
	[TIPC_NLA_PROP_WIN]		= { .type = NLA_U32 }
};

/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000
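
/*
 * Illustrative note: the session number travels in a 16-bit header field
 * (it is always masked with 0xffff when set, see tipc_link_create() and
 * tipc_link_reset() below), so 0x10000 can never match a real session and
 * safely means "no peer session learned yet".
 */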
/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */

/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define ORIGINAL_MSG		1
#define DUPLICATE_MSG		2

/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u

static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
static int  tipc_link_tunnel_rcv(struct tipc_node *node,
				 struct sk_buff **skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_xmit(struct tipc_link *l);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);

/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
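
/*
 * Illustrative only: a minimal, compiled-out userspace sketch of the
 * 4-byte alignment arithmetic used by align() above. Not part of this
 * file; kept here purely as documentation.
 */
#if 0
#include <assert.h>

int main(void)
{
	/* (i + 3) & ~3u rounds i up to the next multiple of 4 */
	assert(((0u + 3) & ~3u) == 0);
	assert(((1u + 3) & ~3u) == 4);
	assert(((4u + 3) & ~3u) == 4);
	assert(((5u + 3) & ~3u) == 8);
	return 0;
}
#endif
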
static void tipc_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct tipc_link, ref));
}

static void tipc_link_get(struct tipc_link *l_ptr)
{
	kref_get(&l_ptr->ref);
}

static void tipc_link_put(struct tipc_link *l_ptr)
{
	kref_put(&l_ptr->ref, tipc_link_release);
}

static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	struct tipc_node *node = l_ptr->owner;
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
	struct tipc_bearer *b_ptr;
	u32 max_pkt;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (!b_ptr) {
		rcu_read_unlock();
		return;
	}
	max_pkt = (b_ptr->mtu & ~3);
	rcu_read_unlock();

	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}

/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 */
static void link_timeout(unsigned long data)
{
	struct tipc_link *l_ptr = (struct tipc_link *)data;
	struct sk_buff *skb;

	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
	tipc_link_put(l_ptr);
}
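
/*
 * Illustrative only: the message length profile above is a plain
 * histogram with fixed bucket boundaries. A compiled-out sketch of the
 * same bucketing logic (boundaries taken from the code above):
 */
#if 0
static int length_bucket(unsigned int length)
{
	if (length <= 64)
		return 0;
	if (length <= 256)
		return 1;
	if (length <= 1024)
		return 2;
	if (length <= 4096)
		return 3;
	if (length <= 16384)
		return 4;
	if (length <= 32768)
		return 5;
	return 6;
}
#endif
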
static void link_set_timer(struct tipc_link *link, unsigned long time)
{
	if (!mod_timer(&link->timer, jiffies + time))
		tipc_link_get(link);
}

/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= MAX_BEARERS) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}
	kref_init(&l_ptr->ref);
	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
		tipc_node(tn->own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->bearer_id = b_ptr->identity;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
		      l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	l_ptr->net_plane = b_ptr->net_plane;
	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	__skb_queue_head_init(&l_ptr->transmq);
	__skb_queue_head_init(&l_ptr->backlogq);
	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
	link_reset_statistics(l_ptr);
	tipc_node_attach_link(n_ptr, l_ptr);
	setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
	link_state_event(l_ptr, STARTING_EVT);

	return l_ptr;
}

/**
 * tipc_link_delete - Conditional deletion of link.
 *                    If timer still running, real delete is done when it expires
 * @link: link to be deleted
 */
void tipc_link_delete(struct tipc_link *link)
{
	tipc_link_reset_fragments(link);
	tipc_node_detach_link(link->owner, link);
	tipc_link_put(link);
}

void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
			   bool shutting_down)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *link;
	struct tipc_node *node;
	bool del_link;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
		if (!link) {
			tipc_node_unlock(node);
			continue;
		}
		del_link = !tipc_link_is_up(link) && !link->exp_msg_count;
		tipc_link_reset(link);
		if (del_timer(&link->timer))
			tipc_link_put(link);
		link->flags |= LINK_STOPPED;
		/* Delete link now, or when failover is finished: */
		if (shutting_down || !tipc_node_is_up(node) || del_link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
	rcu_read_unlock();
}

/**
 * link_schedule_user - schedule user for wakeup after congestion
 * @link: congested link
 * @oport: sending port
 * @chain_sz: size of buffer chain that was attempted sent
 * @imp: importance of message attempted sent
 * Create pseudo msg to send back to user when congestion abates
 */
static bool link_schedule_user(struct tipc_link *link, u32 oport,
			       uint chain_sz, uint imp)
{
	struct sk_buff *buf;

	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      link_own_addr(link), link_own_addr(link),
			      oport, 0, 0);
	if (!buf)
		return false;
	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
	TIPC_SKB_CB(buf)->chain_imp = imp;
	skb_queue_tail(&link->wakeupq, buf);
	link->stats.link_congs++;
	return true;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *link)
{
	uint pend_qsz = skb_queue_len(&link->backlogq);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
			break;
		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
		skb_unlink(skb, &link->wakeupq);
		skb_queue_tail(&link->inputq, skb);
		link->owner->inputq = &link->inputq;
		link->owner->action_flags |= TIPC_MSG_EVT;
	}
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	kfree_skb(l_ptr->reasm_buf);
	l_ptr->reasm_buf = NULL;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
	__skb_queue_purge(&l_ptr->deferdq);
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->backlogq);
	tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l_ptr)
{
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues, except inputq: */
	__skb_queue_purge(&l_ptr->transmq);
	__skb_queue_purge(&l_ptr->backlogq);
	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
	l_ptr->rcv_unacked = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}

void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;

	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->links[bearer_id];
		if (l_ptr)
			tipc_link_reset(l_ptr);
		tipc_node_unlock(n_ptr);
	}
	rcu_read_unlock();
}

static void link_activate(struct tipc_link *link)
{
	struct tipc_node *node = link->owner;

	link->next_in_no = 1;
	link->stats.recv_info = 1;
	tipc_node_link_up(node, link);
	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
}

/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	unsigned long cont_intv = l_ptr->cont_intv;

	if (l_ptr->flags & LINK_STOPPED)
		return;

	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;	/* Not yet. */

	/* Check whether changeover is going on */
	if (l_ptr->exp_msg_count) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_debug("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_debug("%s<%s>, requested by peer while probing\n",
				 link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
							     0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
						     1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_debug("%s<%s>, peer not responding\n",
					 link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
						     0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->flags |= LINK_STARTED;
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
					     0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
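
/*
 * Illustrative summary of the state machine above, reconstructed from the
 * cases it handles (events abridged; "WW" = WORKING_WORKING, etc.):
 *
 *   state            TRAFFIC/ACTIVATE      RESET_MSG       TIMEOUT_EVT
 *   WORKING_WORKING  stay                  -> RESET_RESET  probe -> WU
 *   WORKING_UNKNOWN  -> WORKING_WORKING    -> RESET_RESET  retry, else reset
 *   RESET_UNKNOWN    activate -> WW        -> RESET_RESET  send RESET_MSG
 *   RESET_RESET      activate -> WW        ignore          send ACTIVATE_MSG
 */
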
/* tipc_link_cong: determine return value and how to treat the
 * sent buffer during link congestion.
 * - For plain, errorless user data messages we keep the buffer and
 *   return -ELINKCONG
 * - For all other messages we discard the buffer and return -EHOSTUNREACH
 * - For TIPC internal messages we also reset the link
 */
static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	struct tipc_msg *msg = buf_msg(skb);
	uint imp = tipc_msg_tot_importance(msg);
	u32 oport = msg_tot_origport(msg);

	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
		tipc_link_reset(link);
		goto drop;
	}
	if (unlikely(msg_errcode(msg)))
		goto drop;
	if (unlikely(msg_reroute_cnt(msg)))
		goto drop;
	if (TIPC_SKB_CB(skb)->wakeup_pending)
		return -ELINKCONG;
	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
		return -ELINKCONG;
drop:
	__skb_queue_purge(list);
	return -EHOSTUNREACH;
}

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
 * user data messages) or -EHOSTUNREACH (all other messages/senders)
 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
 * to act on the return value, since they may need to do more send attempts.
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
{
	struct tipc_msg *msg = buf_msg(skb_peek(list));
	unsigned int maxwin = link->window;
	uint imp = tipc_msg_tot_importance(msg);
	uint mtu = link->max_pkt;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
	struct sk_buff_head *transmq = &link->transmq;
	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *tmp;

	/* Match queue limits against msg importance: */
	if (unlikely(skb_queue_len(backlogq) >= link->queue_limit[imp]))
		return tipc_link_cong(link, list);

	/* Has valid packet limit been used ? */
	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}
	/* Prepare each packet for sending, and add to relevant queue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
		msg_set_seqno(msg, seqno);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			__skb_queue_tail(transmq, skb);
			tipc_bearer_send(net, link->bearer_id, skb, addr);
			link->rcv_unacked = 0;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
		}
		__skb_queue_tail(backlogq, skb);
		seqno++;
	}
	link->next_out_no = seqno;
	return 0;
}
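
/*
 * Illustrative only: the send path above makes a three-way choice per
 * packet: transmit immediately while the transmit queue is below the send
 * window, otherwise try to bundle into the newest backlog buffer, and
 * failing that append to the backlog. A compiled-out decision sketch:
 */
#if 0
enum xmit_action { XMIT_NOW, XMIT_BUNDLE, XMIT_BACKLOG };

static enum xmit_action classify(unsigned int transmq_len,
				 unsigned int window, int fits_last_bundle)
{
	if (transmq_len < window)
		return XMIT_NOW;	/* within send window: send at once */
	if (fits_last_bundle)
		return XMIT_BUNDLE;	/* piggy-back on last backlog buffer */
	return XMIT_BACKLOG;		/* queue until congestion abates */
}
#endif
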
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}

static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return __tipc_link_xmit(link->owner->net, link, &head);
}

int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb2list(skb, &head);
	return tipc_link_xmit(net, &head, dnode, selector);
}

/**
 * tipc_link_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dsz: amount of user data to be sent
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		   u32 selector)
{
	struct tipc_link *link = NULL;
	struct tipc_node *node;
	int rc = -EHOSTUNREACH;

	node = tipc_node_find(net, dnode);
	if (node) {
		tipc_node_lock(node);
		link = node->active_links[selector & 1];
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
	}
	if (link)
		return rc;

	if (likely(in_own_node(net, dnode)))
		return tipc_sk_rcv(net, list);

	__skb_queue_purge(list);
	return rc;
}

/*
 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_xmit(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;

	skb = tipc_buf_acquire(INT_H_SIZE);
	if (!skb)
		return;

	msg = buf_msg(skb);
	tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
		      INT_H_SIZE, link->addr);
	msg_set_last_bcast(msg, link->owner->bclink.acked);
	__tipc_link_xmit_skb(link, skb);
}

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	unsigned int ack = mod(link->next_in_no - 1);

	while (skb_queue_len(&link->transmq) < link->window) {
		skb = __skb_dequeue(&link->backlogq);
		if (!skb)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		link->rcv_unacked = 0;
		__skb_queue_tail(&link->transmq, skb);
		tipc_bearer_send(link->owner->net, link->bearer_id,
				 skb, &link->media_addr);
	}
}

void tipc_link_reset_all(struct tipc_node *node)
{
	char addr_string[16];
	u32 i;

	tipc_node_lock(node);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, node->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (node->links[i]) {
			link_print(node->links[i], "Resetting link\n");
			tipc_link_reset(node->links[i]);
		}
	}

	tipc_node_unlock(node);
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct net *net = l_ptr->owner->net;

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u,  ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_node_unlock(n_ptr);

		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
		l_ptr->stale_count = 0;
	}
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!skb)
		return;

	msg = buf_msg(skb);

	/* Detect repeated retransmit failures */
	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
		if (++l_ptr->stale_count > 100) {
			link_retransmit_failure(l_ptr, skb);
			return;
		}
	} else {
		l_ptr->last_retransmitted = msg_seqno(msg);
		l_ptr->stale_count = 1;
	}

	skb_queue_walk_from(&l_ptr->transmq, skb) {
		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
				 &l_ptr->media_addr);
		retransmits--;
		l_ptr->stats.retransmitted++;
	}
}

static void link_retrieve_defq(struct tipc_link *link,
			       struct sk_buff_head *list)
{
	u32 seq_no;

	if (skb_queue_empty(&link->deferdq))
		return;

	seq_no = buf_seqno(skb_peek(&link->deferdq));
	if (seq_no == mod(link->next_in_no))
		skb_queue_splice_tail_init(&link->deferdq, list);
}

/**
 * tipc_rcv - process TIPC packets/messages arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @b_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sk_buff_head head;
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *skb1, *tmp;
	struct tipc_msg *msg;
	u32 seq_no;
	u32 ackd;
	u32 released;

	skb2list(skb, &head);

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
			else
				tipc_bclink_rcv(net, skb);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tn->own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;

		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
			goto unlock;

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
		    msg_user(msg) == LINK_PROTOCOL &&
		    (msg_type(msg) == RESET_MSG ||
		    msg_type(msg) == ACTIVATE_MSG) &&
		    !msg_redundant_link(msg))
			n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;

		if (tipc_node_blocked(n_ptr))
			goto unlock;

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
			if (more(buf_seqno(skb1), ackd))
				break;
			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->wakeupq))
			link_prepare_wakeup(l_ptr);

		/* Process the incoming packet */
		if (unlikely(!link_working_working(l_ptr))) {
			if (msg_user(msg) == LINK_PROTOCOL) {
				tipc_link_proto_rcv(l_ptr, skb);
				link_retrieve_defq(l_ptr, &head);
				goto unlock;
			}

			/* Traffic message. Conditionally activate link */
			link_state_event(l_ptr, TRAFFIC_MSG_EVT);

			if (link_working_working(l_ptr)) {
				/* Re-insert buffer in front of queue */
				__skb_queue_head(&head, skb);
				skb = NULL;
				goto unlock;
			}
			goto unlock;
		}

		/* Link is now in state WORKING_WORKING */
		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
			link_handle_out_of_seq_msg(l_ptr, skb);
			link_retrieve_defq(l_ptr, &head);
			goto unlock;
		}
		l_ptr->next_in_no++;
		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
unlock:
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(skb);
	}
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport = msg_destport(msg);

	switch (msg_user(msg)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
	case CONN_MANAGER:
		if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
			node->inputq = &link->inputq;
			node->action_flags |= TIPC_MSG_EVT;
		}
		return true;
	case NAME_DISTRIBUTOR:
		node->bclink.recv_permitted = true;
		node->namedq = &link->namedq;
		skb_queue_tail(&link->namedq, skb);
		if (skb_queue_len(&link->namedq) == 1)
			node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
	case CHANGEOVER_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 * Node lock must be held
 */
static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
{
	struct tipc_node *node = link->owner;
	struct tipc_msg *msg = buf_msg(skb);
	struct sk_buff *iskb;
	int pos = 0;

	if (likely(tipc_data_input(link, skb)))
		return;

	switch (msg_user(msg)) {
	case CHANGEOVER_PROTOCOL:
		if (!tipc_link_tunnel_rcv(node, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
			break;
		}
	case MSG_BUNDLER:
		link->stats.recv_bundles++;
		link->stats.recv_bundled += msg_msgcnt(msg);

		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(link, iskb);
		break;
	case MSG_FRAGMENTER:
		link->stats.recv_fragments++;
		if (tipc_buf_append(&link->reasm_buf, &skb)) {
			link->stats.recv_fragmented++;
			tipc_data_input(link, skb);
		} else if (!link->reasm_buf) {
			tipc_link_reset(link);
		}
		break;
	case BCAST_PROTOCOL:
		tipc_link_sync_rcv(node, skb);
		break;
	default:
		break;
	};
}

/**
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
{
	struct sk_buff *skb1;
	u32 seq_no = buf_seqno(skb);

	/* Empty queue ? */
	if (skb_queue_empty(list)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
		__skb_queue_tail(list, skb);
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	skb_queue_walk(list, skb1) {
		u32 curr_seqno = buf_seqno(skb1);

		if (seq_no == curr_seqno) {
			kfree_skb(skb);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;
	}

	__skb_queue_before(list, skb1, skb);
	return 1;
}
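
/*
 * Illustrative only: TIPC sequence numbers are 16 bits, so the less()/more()
 * comparisons used above must be wraparound-aware. A compiled-out userspace
 * sketch of the idea (the kernel's actual less()/more() helpers live in
 * msg.h; this version expresses the same ordering via signed distance):
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* a is "less than" b if the signed 16-bit distance from a to b is positive */
static int seq_less(uint16_t a, uint16_t b)
{
	return (int16_t)(b - a) > 0;
}

int main(void)
{
	assert(seq_less(1, 2));
	assert(seq_less(65535, 0));	/* 0 follows 65535 after wraparound */
	assert(!seq_less(2, 1));
	return 0;
}
#endif
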
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		tipc_link_proto_rcv(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
}

/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Don't send protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (skb_queue_len(&l_ptr->backlogq))
			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
		msg_set_next_sent(msg, next_sent);
		if (!skb_queue_empty(&l_ptr->deferdq)) {
			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
}

/*
 * Receive protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules
 */
static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	/* Discard protocol message during link changeover */
	if (l_ptr->exp_msg_count)
		goto exit;

	if (l_ptr->net_plane != msg_net_plane(msg))
		if (link_own_addr(l_ptr) > msg_prevnode(msg))
			l_ptr->net_plane = msg_net_plane(msg);

	switch (msg_type(msg)) {
	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_debug("%s<%s>, priority change %u->%u\n",
				 link_rst_msg, l_ptr->name,
				 l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}

		/* Record reception; force mismatch at next timeout: */
		l_ptr->checkpoint--;

		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
					     0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq),
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}

/* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
 * a different bearer. Owner node is locked.
 */
static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
				  struct tipc_msg *tunnel_hdr,
				  struct tipc_msg *msg,
				  u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *skb;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	skb = tipc_buf_acquire(length + INT_H_SIZE);
	if (!skb) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
	__tipc_link_xmit_skb(tunnel, skb);
}

/* tipc_link_failover_send_queue(): A link has gone down, but a second
 * link is still active. We can do failover. Tunnel the failing link's
 * whole send queue via the remaining link. This way, we don't lose
 * any packets, and sequence order is preserved for subsequent traffic
 * sent over the remaining link. Owner node is locked.
 */
void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
{
	int msgcount;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	struct sk_buff *skb;
	int split_bundles;

	if (!tunnel)
		return;

	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
	msgcount = skb_queue_len(&l_ptr->transmq);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (skb_queue_empty(&l_ptr->transmq)) {
		skb = tipc_buf_acquire(INT_H_SIZE);
		if (skb) {
			skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			__tipc_link_xmit_skb(tunnel, skb);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	skb_queue_walk(&l_ptr->transmq, skb) {
		struct tipc_msg *msg = buf_msg(skb);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, m,
						      msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
					      msg_link_selector(msg));
		}
	}
}

/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
 * duplicate of the first link's send queue via the new link. This way, we
 * are guaranteed that currently queued packets from a socket are delivered
 * before future traffic from the same socket, even if this is using the
 * new link. The last arriving copy of each duplicate packet is dropped at
 * the receiving end by the regular protocol check, so packet cardinality
 * and sequence order is preserved per sender/receiver socket pair.
 * Owner node is locked.
 */
void tipc_link_dup_queue_xmit(struct tipc_link *link,
			      struct tipc_link *tnl)
{
	struct sk_buff *skb;
	struct tipc_msg tnl_hdr;
	struct sk_buff_head *queue = &link->transmq;
	int mcnt;

	tipc_msg_init(link_own_addr(link), &tnl_hdr, CHANGEOVER_PROTOCOL,
		      DUPLICATE_MSG, INT_H_SIZE, link->addr);
	mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq);
	msg_set_msgcnt(&tnl_hdr, mcnt);
	msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id);

tunnel_queue:
	skb_queue_walk(queue, skb) {
		struct sk_buff *outskb;
		struct tipc_msg *msg = buf_msg(skb);
		u32 len = msg_size(msg);

		msg_set_ack(msg, mod(link->next_in_no - 1));
		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
		outskb = tipc_buf_acquire(len + INT_H_SIZE);
		if (outskb == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outskb, INT_H_SIZE,
					       skb->data, len);
		__tipc_link_xmit_skb(tnl, outskb);
		if (!tipc_link_is_up(link))
			return;
	}
	if (queue == &link->backlogq)
		return;
	queue = &link->backlogq;
	goto tunnel_queue;
}

/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
 * Owner node is locked.
 */
static void tipc_link_dup_rcv(struct tipc_link *link,
			      struct sk_buff *skb)
{
	struct sk_buff *iskb;
	int pos = 0;

	if (!tipc_link_is_up(link))
		return;

	if (!tipc_msg_extract(skb, &iskb, &pos)) {
		pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
		return;
	}
	/* Append buffer to deferred queue, if applicable: */
	link_handle_out_of_seq_msg(link, iskb);
}

/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
 * Owner node is locked.
 */
static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
					      struct sk_buff *t_buf)
{
	struct tipc_msg *t_msg = buf_msg(t_buf);
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg;
	int pos = 0;

	if (tipc_link_is_up(l_ptr))
		tipc_link_reset(l_ptr);

	/* First failover packet? */
	if (l_ptr->exp_msg_count == START_CHANGEOVER)
		l_ptr->exp_msg_count = msg_msgcnt(t_msg);

	/* Should there be an inner packet? */
	if (l_ptr->exp_msg_count) {
		l_ptr->exp_msg_count--;
		if (!tipc_msg_extract(t_buf, &buf, &pos)) {
			pr_warn("%sno inner failover pkt\n", link_co_err);
			goto exit;
		}
		msg = buf_msg(buf);

		if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
			kfree_skb(buf);
			buf = NULL;
			goto exit;
		}
		if (msg_user(msg) == MSG_FRAGMENTER) {
			l_ptr->stats.recv_fragments++;
			tipc_buf_append(&l_ptr->reasm_buf, &buf);
		}
	}
exit:
	if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
		tipc_link_delete(l_ptr);
	return buf;
}

/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
 * via other link as result of a failover (ORIGINAL_MSG) or
 * a new active link (DUPLICATE_MSG). Failover packets are
 * returned to the active link for delivery upwards.
 * Owner node is locked.
 */
static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
				struct sk_buff **buf)
{
	struct sk_buff *t_buf = *buf;
	struct tipc_link *l_ptr;
	struct tipc_msg *t_msg = buf_msg(t_buf);
	u32 bearer_id = msg_bearer_id(t_msg);

	*buf = NULL;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	l_ptr = n_ptr->links[bearer_id];
	if (!l_ptr)
		goto exit;

	if (msg_type(t_msg) == DUPLICATE_MSG)
		tipc_link_dup_rcv(l_ptr, t_buf);
	else if (msg_type(t_msg) == ORIGINAL_MSG)
		*buf = tipc_link_failover_rcv(l_ptr, t_buf);
	else
		pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
	kfree_skb(t_buf);
	return *buf != NULL;
}

static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
{
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tol;
	l_ptr->cont_intv = msecs_to_jiffies(intv);
	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
}
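
/*
 * Illustrative only: worked numbers for the supervision arithmetic above,
 * assuming TIPC's default link tolerance of 1500 ms. The continuity
 * interval is tol/4 capped at 500 ms, and abort_limit is how many probe
 * periods may pass unanswered before the link is declared failed:
 *
 *   tol = 1500  ->  intv = min(1500 / 4, 500) = 375 ms
 *                   abort_limit = 1500 / (375 / 4) = 1500 / 93 ~= 16
 */
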
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	l_ptr->window = window;

	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
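
/*
 * Illustrative only: assuming the default link window of 50 packets, the
 * per-importance limits above work out (with integer division) to
 * 50 / 64 / 80 / 96 buffers for LOW / MEDIUM / HIGH / CRITICAL traffic,
 * i.e. more important messages may queue deeper in the backlog before
 * their senders are pushed back with -ELINKCONG (see tipc_link_cong()).
 */
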
/* tipc_link_find_owner - locate owner node of link by link's name
 * @net: the applicable net namespace
 * @name: pointer to link name string
 * @bearer_id: pointer to index in 'node->links' array where the link was found.
 *
 * Returns pointer to node owning the link, or NULL if no matching link is found.
 */
static struct tipc_node *tipc_link_find_owner(struct net *net,
					      const char *link_name,
					      unsigned int *bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	struct tipc_node *found_node = NULL;
	int i;

	*bearer_id = 0;
	rcu_read_lock();
	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
		tipc_node_lock(n_ptr);
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, link_name)) {
				*bearer_id = i;
				found_node = n_ptr;
				break;
			}
		}
		tipc_node_unlock(n_ptr);
		if (found_node)
			break;
	}
	rcu_read_unlock();

	return found_node;
}

/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

static void link_print(struct tipc_link *l_ptr, const char *str)
{
	struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
	struct tipc_bearer *b_ptr;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
	rcu_read_unlock();

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
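
/*
 * Illustrative only: a compiled-out sketch of the same range checks in
 * plain C. The concrete numbers are assumptions of this sketch, mirroring
 * the TIPC_MAX_LINK_PRI and TIPC_{MIN,MAX}_LINK_{TOL,WIN} limits used by
 * the parser above:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* Assumed bounds, for illustration only */
#define MAX_LINK_PRI	31
#define MIN_LINK_TOL	50	/* ms */
#define MAX_LINK_TOL	30000	/* ms */
#define MIN_LINK_WIN	16	/* packets */
#define MAX_LINK_WIN	8191	/* packets */

static bool prop_in_range(uint32_t prio, uint32_t tol, uint32_t win)
{
	return prio <= MAX_LINK_PRI &&
	       tol >= MIN_LINK_TOL && tol <= MAX_LINK_TOL &&
	       win >= MIN_LINK_WIN && win <= MAX_LINK_WIN;
}
#endif
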
int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	int res = 0;
	unsigned int bearer_id;
	char *name;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		res = -EINVAL;
		goto out;
	}

	if (attrs[TIPC_NLA_LINK_PROP]) {
		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
					      props);
		if (err) {
			res = err;
			goto out;
		}

		if (props[TIPC_NLA_PROP_TOL]) {
			u32 tol;

			tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
			link_set_supervision_props(link, tol);
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
		}
		if (props[TIPC_NLA_PROP_PRIO]) {
			u32 prio;

			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
			link->priority = prio;
			tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
		}
		if (props[TIPC_NLA_PROP_WIN]) {
			u32 win;

			win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
			tipc_link_set_queue_limits(link, win);
		}
	}

out:
	tipc_node_unlock(node);

	return res;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, s->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, s->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
			      struct tipc_link *link)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (tipc_link_is_active(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->queue_limit[TIPC_LOW_IMPORTANCE]))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/* Caller should hold node lock */
static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
				    struct tipc_node *node, u32 *prev_link)
{
	u32 i;
	int err;

	for (i = *prev_link; i < MAX_BEARERS; i++) {
		*prev_link = i;

		if (!node->links[i])
			continue;

		err = __tipc_nl_add_link(net, msg, node->links[i]);
		if (err)
			return err;
	}
	*prev_link = 0;

	return 0;
}

int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;
	struct tipc_nl_msg msg;
	u32 prev_node = cb->args[0];
	u32 prev_link = cb->args[1];
	int done = cb->args[2];
	int err;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();

	if (prev_node) {
		node = tipc_node_find(net, prev_node);
		if (!node) {
			/* We never set seq or call nl_dump_check_consistent()
			 * this means that setting prev_seq here will cause the
			 * consistency check to fail in the netlink callback
			 * handler. Resulting in the last NLMSG_DONE message
			 * having the NLM_F_DUMP_INTR flag set.
			 */
			cb->prev_seq = 1;
			goto out;
		}

		list_for_each_entry_continue_rcu(node, &tn->node_list,
						 list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	} else {
		err = tipc_nl_add_bc_link(net, &msg);
		if (err)
			goto out;

		list_for_each_entry_rcu(node, &tn->node_list, list) {
			tipc_node_lock(node);
			err = __tipc_nl_add_node_links(net, &msg, node,
						       &prev_link);
			tipc_node_unlock(node);
			if (err)
				goto out;

			prev_node = node->addr;
		}
	}
	done = 1;
out:
	rcu_read_unlock();

	cb->args[0] = prev_node;
	cb->args[1] = prev_link;
	cb->args[2] = done;

	return skb->len;
}

int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	struct sk_buff *ans_skb;
	struct tipc_nl_msg msg;
	struct tipc_link *link;
	struct tipc_node *node;
	char *name;
	unsigned int bearer_id;
	int err;

	if (!info->attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
	node = tipc_link_find_owner(net, name, &bearer_id);
	if (!node)
		return -EINVAL;

	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!ans_skb)
		return -ENOMEM;

	msg.skb = ans_skb;
	msg.portid = info->snd_portid;
	msg.seq = info->snd_seq;

	tipc_node_lock(node);
	link = node->links[bearer_id];
	if (!link) {
		err = -EINVAL;
		goto err_out;
	}

	err = __tipc_nl_add_link(net, &msg, link);
	if (err)
		goto err_out;

	tipc_node_unlock(node);

	return genlmsg_reply(ans_skb, info);

err_out:
	tipc_node_unlock(node);
	nlmsg_free(ans_skb);

	return err;
}

int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
{
	int err;
	char *link_name;
	unsigned int bearer_id;
	struct tipc_link *link;
	struct tipc_node *node;
	struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
	struct net *net = sock_net(skb->sk);

	if (!info->attrs[TIPC_NLA_LINK])
		return -EINVAL;

	err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
			       info->attrs[TIPC_NLA_LINK],
			       tipc_nl_link_policy);
	if (err)
		return err;

	if (!attrs[TIPC_NLA_LINK_NAME])
		return -EINVAL;

	link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);

	if (strcmp(link_name, tipc_bclink_name) == 0) {
		err = tipc_bclink_reset_stats(net);
		if (err)
			return err;
		return 0;
	}

	node = tipc_link_find_owner(net, link_name, &bearer_id);
	if (!node)
		return -EINVAL;

	tipc_node_lock(node);

	link = node->links[bearer_id];
	if (!link) {
		tipc_node_unlock(node);
		return -EINVAL;
	}

	link_reset_statistics(link);

	tipc_node_unlock(node);

	return 0;
}