2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
24 #include "byte-order.h"
25 #include "connectivity.h"
26 #include "dynamic-string.h"
33 #include "poll-loop.h"
41 VLOG_DEFINE_THIS_MODULE(cfm);
/* Upper bound on the number of remote maintenance points (RMPs) tracked per
 * CFM instance; enforced when inserting into 'remote_mps' (see the heartbeat
 * handler, which raises CFM_FAULT_OVERFLOW past this limit). */
43 #define CFM_MAX_RMPS 256
45 /* Ethernet destination address of CCM packets. */
46 static const uint8_t eth_addr_ccm[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x30 };
/* Destination MAC used instead of 'eth_addr_ccm' when the CFM instance is in
 * extended mode (selected by cfm_ccm_addr() below). */
47 static const uint8_t eth_addr_ccm_x[6] = {
48 0x01, 0x23, 0x20, 0x00, 0x00, 0x30
/* NOTE(review): extraction gap — the closing "};" of 'eth_addr_ccm_x'
 * (original line 49/50) is not visible in this chunk. */
/* EtherType identifying CFM PDUs (IEEE 802.1ag). */
51 #define ETH_TYPE_CFM 0x8902
/* NOTE(review): extraction gap — the original line numbers embedded at the
 * start of each line are non-contiguous, so the "struct ccm {" opening and
 * several fields referenced elsewhere in this file (opcode, flags, tlv_offset,
 * seq, mpid, zero[], end_tlv, and the CCM_LEN definition) are missing from
 * this view.  Treat this span as an incomplete definition. */
53 /* A 'ccm' represents a Continuity Check Message from the 802.1ag
54 * specification. Continuity Check Messages are broadcast periodically so that
55 * hosts can determine whom they have connectivity to.
57 * The minimum length of a CCM as specified by IEEE 802.1ag is 75 bytes.
58 * Previous versions of Open vSwitch generated 74-byte CCM messages, so we
59 * accept such messages too. */
61 #define CCM_ACCEPT_LEN 74
62 #define CCM_MAID_LEN 48
63 #define CCM_OPCODE 1 /* CFM message opcode meaning CCM. */
64 #define CCM_RDI_MASK 0x80
/* Health is recomputed once every CFM_HEALTH_INTERVAL fault intervals. */
65 #define CFM_HEALTH_INTERVAL 6
69 uint8_t mdlevel_version; /* MD Level and Version */
/* Maintenance Association ID; filled from cfm->maid by cfm_compose_ccm(). */
75 uint8_t maid[CCM_MAID_LEN];
/* The following fields occupy the region 802.1ag reserves and ITU-T Y.1731
 * requires to be zero; OVS reuses them for its extended mode. */
77 /* Defined by ITU-T Y.1731 should be zero */
78 ovs_be16 interval_ms_x; /* Transmission interval in ms. */
79 ovs_be64 mpid64; /* MPID in extended mode. */
80 uint8_t opdown; /* Operationally down. */
/* Compile-time check that the wire format size matches the struct. */
86 BUILD_ASSERT_DECL(CCM_LEN == sizeof(struct ccm));
/* Per-interface CFM state.
 * NOTE(review): extraction gap — the "struct cfm {" opening and several
 * members used elsewhere in this file ('mpid', 'opup' comment tail,
 * 'status_changed', the closing brace) are not visible here; the embedded
 * original line numbers are non-contiguous. */
89 const char *name; /* Name of this CFM object. */
90 struct hmap_node hmap_node; /* Node in all_cfms list. */
92 struct netdev *netdev;
93 uint64_t rx_packets; /* Packets received by 'netdev'. */
96 bool demand; /* Demand mode. */
97 bool booted; /* A full fault interval has occurred. */
98 enum cfm_fault_reason fault; /* Connectivity fault status. */
99 enum cfm_fault_reason recv_fault; /* Bit mask of faults occurring on
101 bool opup; /* Operational State. */
102 bool remote_opup; /* Remote Operational State. */
104 int fault_override; /* Manual override of 'fault' status.
105 Ignored if negative. */
107 uint32_t seq; /* The sequence number of our last CCM. */
108 uint8_t ccm_interval; /* The CCM transmission interval. */
109 int ccm_interval_ms; /* 'ccm_interval' in milliseconds. */
110 uint16_t ccm_vlan; /* Vlan tag of CCM PDUs. CFM_RANDOM_VLAN if
112 uint8_t ccm_pcp; /* Priority of CCM PDUs. */
113 uint8_t maid[CCM_MAID_LEN]; /* The MAID of this CFM. */
115 struct timer tx_timer; /* Send CCM when expired. */
116 struct timer fault_timer; /* Check for faults when expired. */
118 struct hmap remote_mps; /* Remote MPs. */
120 /* Result of cfm_get_remote_mpids(). Updated only during fault check to
122 uint64_t *rmps_array; /* Cache of remote_mps. */
123 size_t rmps_array_len; /* Number of rmps in 'rmps_array'. */
125 int health; /* Percentage of the number of CCM frames
127 int health_interval; /* Number of fault_intervals since health was
129 long long int last_tx; /* Last CCM transmission time. */
131 /* These bools are atomic to allow readers to check their values
132 * without taking 'mutex'. Such readers do not assume the values they
133 * read are synchronized with any other members. */
134 atomic_bool check_tnl_key; /* Verify the tunnel key of inbound packets? */
135 atomic_bool extended; /* Extended mode. */
136 struct ovs_refcount ref_cnt;
138 uint64_t flap_count; /* Count the flaps since boot. */
140 /* True when the variables returned by cfm_get_*() are changed
141 * since last check. */
144 /* When 'cfm->demand' is set, at least one ccm is required to be received
145 * every 100 * cfm_interval. If ccm is not received within this interval,
146 * even if data packets are received, the cfm fault will be set. */
147 struct timer demand_rx_ccm_t;
/* NOTE(review): extraction gap — the "struct remote_mp {" opening and
 * closing brace are not visible in this chunk. */
150 /* Remote MPs represent foreign network entities that are configured to have
151 * the same MAID as this CFM instance. */
153 uint64_t mpid; /* The Maintenance Point ID of this 'remote_mp'. */
154 struct hmap_node node; /* Node in 'remote_mps' map. */
156 bool recv; /* CCM was received since last fault check. */
157 bool opup; /* Operational State. */
158 uint32_t seq; /* Most recently received sequence number. */
159 uint8_t num_health_ccm; /* Number of received ccm frames every
160 CFM_HEALTH_INTERVAL * 'fault_interval'. */
161 long long int last_rx; /* Last CCM reception time. */
/* Rate limiter shared by this module's warning/info logs (20 msgs / 30 s
 * burst, per VLOG_RATE_LIMIT_INIT argument order — confirm against
 * vlog.h). */
165 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(20, 30);
/* Single global mutex protecting all CFM instances and 'all_cfms'. */
167 static struct ovs_mutex mutex = OVS_MUTEX_INITIALIZER;
168 static struct hmap all_cfms__ = HMAP_INITIALIZER(&all_cfms__);
169 static struct hmap *const all_cfms OVS_GUARDED_BY(mutex) = &all_cfms__;
/* Forward declarations for the unixctl command handlers at file bottom. */
171 static unixctl_cb_func cfm_unixctl_show;
172 static unixctl_cb_func cfm_unixctl_set_fault;
/* Returns the number of packets received on 'cfm''s netdev, per
 * netdev_get_stats().  Caller must hold 'mutex'.
 * NOTE(review): extraction gap — the return type line, opening brace, the
 * fallback return for a failed netdev_get_stats() call, and the closing
 * brace are missing from this view. */
175 cfm_rx_packets(const struct cfm *cfm) OVS_REQUIRES(mutex)
177 struct netdev_stats stats;
/* netdev_get_stats() returning 0 indicates success. */
179 if (!netdev_get_stats(cfm->netdev, &stats)) {
180 return stats.rx_packets;
/* Returns the Ethernet destination address 'cfm' should use for outgoing
 * CCMs: the extended-mode address when extended mode is enabled, otherwise
 * the standard 802.1ag multicast address.
 * NOTE(review): extraction gap — the opening brace and the local
 * 'bool extended;' declaration are missing from this view. */
186 static const uint8_t *
187 cfm_ccm_addr(struct cfm *cfm)
/* Relaxed read: callers only need the current flag value, not ordering. */
191 atomic_read_relaxed(&cfm->extended, &extended);
193 return extended ? eth_addr_ccm_x : eth_addr_ccm;
196 /* Returns the string representation of the given cfm_fault_reason 'reason'. */
/* NOTE(review): extraction gap — the return type, "switch (reason) {",
 * braces, and the CFM_FAULT_REASONS X-macro invocation are missing here. */
198 cfm_fault_reason_to_str(int reason)
201 #define CFM_FAULT_REASON(NAME, STR) case CFM_FAULT_##NAME: return #STR;
203 #undef CFM_FAULT_REASON
204 default: return "<unknown>";
/* Appends to 'ds' a space-separated list of the reason names set in the
 * 'fault' bit mask.  The loop header and 'reason' computation (presumably
 * 1 << i — confirm against original) are missing from this view. */
209 ds_put_cfm_fault(struct ds *ds, int fault)
213 for (i = 0; i < CFM_FAULT_N_REASONS; i++) {
216 if (fault & reason) {
217 ds_put_format(ds, "%s ", cfm_fault_reason_to_str(reason));
/* Fills in cfm->maid with the fixed OVS Maintenance Association ID:
 * MD name "ovs" (string format 4) followed by MA name "ovs" (string
 * format 2), zero-padded to CCM_MAID_LEN.  Caller must hold 'mutex'.
 * NOTE(review): extraction gap — the return type, opening brace, the
 * 'uint8_t *ma_p;' declaration, and the closing brace are missing. */
225 cfm_generate_maid(struct cfm *cfm) OVS_REQUIRES(mutex)
227 const char *ovs_md_name = "ovs";
228 const char *ovs_ma_name = "ovs";
230 size_t md_len, ma_len;
232 memset(cfm->maid, 0, CCM_MAID_LEN);
234 md_len = strlen(ovs_md_name);
235 ma_len = strlen(ovs_ma_name);
/* 4 = the two format/length header bytes for each of MD and MA. */
237 ovs_assert(md_len && ma_len && md_len + ma_len + 4 <= CCM_MAID_LEN);
239 cfm->maid[0] = 4; /* MD name string format. */
240 cfm->maid[1] = md_len; /* MD name size. */
241 memcpy(&cfm->maid[2], ovs_md_name, md_len); /* MD name. */
/* MA record starts immediately after the MD record. */
243 ma_p = cfm->maid + 2 + md_len;
244 ma_p[0] = 2; /* MA name string format. */
245 ma_p[1] = ma_len; /* MA name size. */
246 memcpy(&ma_p[2], ovs_ma_name, ma_len); /* MA name. */
/* Maps an 802.1ag CCM interval code (1..7) to its transmission period in
 * milliseconds.  Codes 0 and out-of-range values abort.
 * NOTE(review): extraction gap — the return type, "switch (interval) {",
 * cases 3 and 4 (100 ms and 1000 ms per 802.1ag — confirm), and closing
 * braces are missing from this view. */
250 ccm_interval_to_ms(uint8_t interval)
253 case 0: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
254 case 1: return 3; /* Not recommended due to timer resolution. */
255 case 2: return 10; /* Not recommended due to timer resolution. */
258 case 5: return 10000;
259 case 6: return 60000;
260 case 7: return 600000;
261 default: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
/* Returns the fault-check interval for 'cfm' in milliseconds.
 * NOTE(review): extraction gap — the return type, opening brace, and the
 * tail of the return expression (the "* 7 / 2" that implements the 3.5x
 * multiplier described below — confirm) are missing from this view. */
268 cfm_fault_interval(struct cfm *cfm) OVS_REQUIRES(mutex)
270 /* According to the 802.1ag specification we should assume every other MP
271 * with the same MAID has the same transmission interval that we have. If
272 * an MP has a different interval, cfm_process_heartbeat will register it
273 * as a fault (likely due to a configuration error). Thus we can check all
274 * MPs at once making this quite a bit simpler.
276 * When cfm is not in demand mode, we check when (ccm_interval_ms * 3.5) ms
277 * have passed. When cfm is in demand mode, we check when
278 * (MAX(ccm_interval_ms, 500) * 3.5) ms have passed. This ensures that
279 * ovs-vswitchd has enough time to pull statistics from the datapath. */
281 return (MAX(cfm->ccm_interval_ms, cfm->demand ? 500 : cfm->ccm_interval_ms)
/* Returns the largest 802.1ag interval code whose period does not exceed
 * 'interval_ms'.  NOTE(review): extraction gap — the return inside the
 * loop body, the fallback return after the loop, and braces are missing. */
286 ms_to_ccm_interval(int interval_ms)
290 for (i = 7; i > 0; i--) {
291 if (ccm_interval_to_ms(i) <= interval_ms) {
/* Hash function used to place remote MPs in the 'remote_mps' hmap. */
300 hash_mpid(uint64_t mpid)
302 return hash_uint64(mpid);
/* Returns true if 'mpid' is valid: 802.1ag restricts MPIDs to [1, 8191];
 * extended mode lifts the upper bound. */
306 cfm_is_valid_mpid(bool extended, uint64_t mpid)
308 /* 802.1ag specification requires MPIDs to be within the range [1, 8191].
309 * In extended mode we relax this requirement. */
310 return mpid >= 1 && (extended || mpid <= 8191);
/* Returns the remote MP with ID 'mpid' in 'cfm->remote_mps', or (per the
 * missing tail of this function — confirm) NULL if none.  Caller must hold
 * 'mutex'.  NOTE(review): extraction gap — the "return rmp;" / final
 * return and closing braces are not visible here. */
313 static struct remote_mp *
314 lookup_remote_mp(const struct cfm *cfm, uint64_t mpid) OVS_REQUIRES(mutex)
316 struct remote_mp *rmp;
318 HMAP_FOR_EACH_IN_BUCKET (rmp, node, hash_mpid(mpid), &cfm->remote_mps) {
319 if (rmp->mpid == mpid) {
/* Registers this module's unixctl commands.
 * NOTE(review): extraction gap — the enclosing function header (presumably
 * cfm_init — confirm) is not visible in this chunk. */
330 unixctl_command_register("cfm/show", "[interface]", 0, 1, cfm_unixctl_show,
332 unixctl_command_register("cfm/set-fault", "[interface] normal|false|true",
333 1, 2, cfm_unixctl_set_fault, NULL);
336 /* Records the status change and changes the global connectivity seq. */
338 cfm_status_changed(struct cfm *cfm) OVS_REQUIRES(mutex)
/* Wakes anyone waiting on the global connectivity seq, then marks this
 * instance so cfm_check_status_change() reports the change once. */
340 seq_change(connectivity_seq_get());
341 cfm->status_changed = true;
344 /* Allocates a 'cfm' object called 'name'. 'cfm' should be initialized by
345 * cfm_configure() before use. */
/* NOTE(review): extraction gap — the return type line, opening brace,
 * 'struct cfm *cfm;' declaration, some initialization lines, and the
 * "return cfm;" / closing brace are missing from this view. */
347 cfm_create(const struct netdev *netdev) OVS_EXCLUDED(mutex)
351 cfm = xzalloc(sizeof *cfm);
/* Take our own reference on the netdev; released in cfm_unref(). */
352 cfm->netdev = netdev_ref(netdev);
353 cfm->name = netdev_get_name(cfm->netdev);
354 hmap_init(&cfm->remote_mps);
355 cfm->remote_opup = true;
/* Negative fault_override means "no manual override in effect". */
356 cfm->fault_override = -1;
360 atomic_init(&cfm->extended, false);
361 atomic_init(&cfm->check_tnl_key, false);
362 ovs_refcount_init(&cfm->ref_cnt);
364 ovs_mutex_lock(&mutex);
365 cfm_status_changed(cfm);
366 cfm_generate_maid(cfm);
367 hmap_insert(all_cfms, &cfm->hmap_node, hash_string(cfm->name, 0));
368 ovs_mutex_unlock(&mutex);
/* Drops a reference to 'cfm'; frees it when the last reference is gone.
 * NOTE(review): extraction gap — the return type, opening brace, NULL
 * check, the early "return;", the free(rmp) inside the loop, the final
 * free(cfm), and closing braces are missing from this view. */
374 cfm_unref(struct cfm *cfm) OVS_EXCLUDED(mutex)
376 struct remote_mp *rmp, *rmp_next;
/* ovs_refcount_unref_relaxed() returns the pre-decrement count; anything
 * other than 1 means other references remain. */
382 if (ovs_refcount_unref_relaxed(&cfm->ref_cnt) != 1) {
/* Unlink from the global table before tearing down state. */
386 ovs_mutex_lock(&mutex);
387 cfm_status_changed(cfm);
388 hmap_remove(all_cfms, &cfm->hmap_node);
389 ovs_mutex_unlock(&mutex);
391 HMAP_FOR_EACH_SAFE (rmp, rmp_next, node, &cfm->remote_mps) {
392 hmap_remove(&cfm->remote_mps, &rmp->node);
396 hmap_destroy(&cfm->remote_mps);
397 netdev_close(cfm->netdev);
398 free(cfm->rmps_array);
/* Takes an additional reference to 'cfm_' (CONST_CAST is safe: only the
 * refcount, which is designed for concurrent mutation, is touched). */
404 cfm_ref(const struct cfm *cfm_)
406 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
408 ovs_refcount_ref(&cfm->ref_cnt);
413 /* Should be run periodically to update fault statistics messages. */
/* When the fault timer has expired this performs one "fault check":
 * recomputes cfm->fault from received state, recomputes link health,
 * expires remote MPs that sent no CCM, rebuilds the rmps_array cache,
 * updates remote_opup, logs fault transitions, counts flaps, and signals
 * status changes — then re-arms the fault timer.
 * NOTE(review): extraction gap — the embedded original line numbers are
 * non-contiguous; among the missing lines are the function header/braces,
 * the recv_fault reset, the per-RMP 'recv' handling inside the main loop,
 * health/flap assignments, and several closing braces.  Comments below
 * describe only what the visible lines show. */
415 cfm_run(struct cfm *cfm) OVS_EXCLUDED(mutex)
417 ovs_mutex_lock(&mutex);
418 if (timer_expired(&cfm->fault_timer)) {
419 long long int interval = cfm_fault_interval(cfm);
420 struct remote_mp *rmp, *rmp_next;
/* Snapshot the externally-visible status so we can detect changes at
 * the end of the check. */
421 enum cfm_fault_reason old_cfm_fault = cfm->fault;
422 uint64_t old_flap_count = cfm->flap_count;
423 int old_health = cfm->health;
424 size_t old_rmps_array_len = cfm->rmps_array_len;
425 bool old_rmps_deleted = false;
426 bool old_rmp_opup = cfm->remote_opup;
427 bool demand_override;
428 bool rmp_set_opup = false;
429 bool rmp_set_opdown = false;
/* Promote faults recorded by cfm_process_heartbeat() since last check. */
431 cfm->fault = cfm->recv_fault;
/* Rebuild the cached RMP-id array sized for the current RMP count. */
434 cfm->rmps_array_len = 0;
435 free(cfm->rmps_array);
436 cfm->rmps_array = xmalloc(hmap_count(&cfm->remote_mps) *
437 sizeof *cfm->rmps_array);
439 if (cfm->health_interval == CFM_HEALTH_INTERVAL) {
440 /* Calculate the cfm health of the interface. If the number of
441 * remote_mpids of a cfm interface is > 1, the cfm health is
442 * undefined. If the number of remote_mpids is 1, the cfm health is
443 * the percentage of the ccm frames received in the
444 * (CFM_HEALTH_INTERVAL * 3.5)ms, else it is 0. */
445 if (hmap_count(&cfm->remote_mps) > 1) {
447 } else if (hmap_is_empty(&cfm->remote_mps)) {
/* Exactly one RMP: health = received / expected CCMs, capped. */
452 rmp = CONTAINER_OF(hmap_first(&cfm->remote_mps),
453 struct remote_mp, node);
454 exp_ccm_recvd = (CFM_HEALTH_INTERVAL * 7) / 2;
455 /* Calculate the percentage of healthy ccm frames received.
456 * Since the 'fault_interval' is (3.5 * cfm_interval), and
457 * 1 CCM packet must be received every cfm_interval,
458 * the 'remote_mpid' health reports the percentage of
459 * healthy CCM frames received every
460 * 'CFM_HEALTH_INTERVAL'th 'fault_interval'. */
461 cfm->health = (rmp->num_health_ccm * 100) / exp_ccm_recvd;
462 cfm->health = MIN(cfm->health, 100);
463 rmp->num_health_ccm = 0;
464 ovs_assert(cfm->health >= 0 && cfm->health <= 100);
466 cfm->health_interval = 0;
468 cfm->health_interval++;
/* Demand mode: data-plane traffic plus a recent CCM can override an
 * RMP expiry (see the expiry loop below). */
470 demand_override = false;
472 uint64_t rx_packets = cfm_rx_packets(cfm);
473 demand_override = hmap_count(&cfm->remote_mps) == 1
474 && rx_packets > cfm->rx_packets
475 && !timer_expired(&cfm->demand_rx_ccm_t);
476 cfm->rx_packets = rx_packets;
/* Expire RMPs that sent nothing this interval, then record the
 * survivors' opstate and IDs. */
479 HMAP_FOR_EACH_SAFE (rmp, rmp_next, node, &cfm->remote_mps) {
481 VLOG_INFO("%s: Received no CCM from RMP %"PRIu64" in the last"
482 " %lldms", cfm->name, rmp->mpid,
483 time_msec() - rmp->last_rx);
484 if (!demand_override) {
485 old_rmps_deleted = true;
486 hmap_remove(&cfm->remote_mps, &rmp->node);
495 rmp_set_opdown = true;
498 cfm->rmps_array[cfm->rmps_array_len++] = rmp->mpid;
/* Any operationally-down RMP forces remote_opup false; it wins over
 * rmp_set_opup. */
502 if (rmp_set_opdown) {
503 cfm->remote_opup = false;
505 else if (rmp_set_opup) {
506 cfm->remote_opup = true;
/* No live RMPs at all is itself a receive fault. */
509 if (hmap_is_empty(&cfm->remote_mps)) {
510 cfm->fault |= CFM_FAULT_RECV;
513 if (old_cfm_fault != cfm->fault) {
514 if (!VLOG_DROP_INFO(&rl)) {
515 struct ds ds = DS_EMPTY_INITIALIZER;
517 ds_put_cstr(&ds, "from [");
518 ds_put_cfm_fault(&ds, old_cfm_fault);
519 ds_put_cstr(&ds, "] to [");
520 ds_put_cfm_fault(&ds, cfm->fault);
521 ds_put_char(&ds, ']');
522 VLOG_INFO("%s: CFM faults changed %s.", cfm->name, ds_cstr(&ds));
526 /* If there is a flap, increments the counter. */
527 if (old_cfm_fault == 0 || cfm->fault == 0) {
532 /* These variables represent the cfm session status, it is desirable
533 * to update them to database immediately after change. */
534 if (old_health != cfm->health
535 || old_rmp_opup != cfm->remote_opup
536 || (old_rmps_array_len != cfm->rmps_array_len || old_rmps_deleted)
537 || old_cfm_fault != cfm->fault
538 || old_flap_count != cfm->flap_count) {
539 cfm_status_changed(cfm);
543 timer_set_duration(&cfm->fault_timer, interval);
544 VLOG_DBG("%s: new fault interval", cfm->name);
546 ovs_mutex_unlock(&mutex);
549 /* Should be run periodically to check if the CFM module has a CCM message it
/* Returns true when the transmit timer has expired, i.e. a CCM is due.
 * NOTE(review): extraction gap — the return type, 'bool ret;' declaration,
 * "return ret;", and braces are missing from this view. */
552 cfm_should_send_ccm(struct cfm *cfm) OVS_EXCLUDED(mutex)
556 ovs_mutex_lock(&mutex);
557 ret = timer_expired(&cfm->tx_timer);
558 ovs_mutex_unlock(&mutex);
562 /* Composes a CCM message into 'packet'. Messages generated with this function
563 * should be sent whenever cfm_should_send_ccm() indicates. */
/* NOTE(review): extraction gap — missing from this view: the return type
 * and opening brace, local declarations ('struct ccm *ccm', 'bool extended',
 * 'uint16_t ccm_vlan'), the random-VLAN branch of the ccm_vlan ternary,
 * the extended/non-extended "if (extended) { ... } else { ... }" framing
 * around the mpid assignments, and several closing braces. */
565 cfm_compose_ccm(struct cfm *cfm, struct ofpbuf *packet,
566 uint8_t eth_src[ETH_ADDR_LEN]) OVS_EXCLUDED(mutex)
572 ovs_mutex_lock(&mutex);
/* Re-arm the transmit timer for the next CCM before building this one. */
573 timer_set_duration(&cfm->tx_timer, cfm->ccm_interval_ms);
574 eth_compose(packet, cfm_ccm_addr(cfm), eth_src, ETH_TYPE_CFM, sizeof *ccm);
/* Pick the configured VLAN unless CFM_RANDOM_VLAN is requested (the
 * random branch is in the missing lines). */
576 ccm_vlan = (cfm->ccm_vlan != CFM_RANDOM_VLAN
579 ccm_vlan = ccm_vlan & VLAN_VID_MASK;
581 if (ccm_vlan || cfm->ccm_pcp) {
582 uint16_t tci = ccm_vlan | (cfm->ccm_pcp << VLAN_PCP_SHIFT);
583 eth_push_vlan(packet, htons(ETH_TYPE_VLAN), htons(tci));
586 atomic_read_relaxed(&cfm->extended, &extended);
588 ccm = ofpbuf_l3(packet);
589 ccm->mdlevel_version = 0;
590 ccm->opcode = CCM_OPCODE;
591 ccm->tlv_offset = 70;
592 ccm->seq = htonl(++cfm->seq);
593 ccm->flags = cfm->ccm_interval;
594 memcpy(ccm->maid, cfm->maid, sizeof ccm->maid);
595 memset(ccm->zero, 0, sizeof ccm->zero);
/* Extended mode: hash the 64-bit MPID into the 16-bit wire field and
 * carry the real value in mpid64 (branch framing in missing lines). */
599 ccm->mpid = htons(hash_mpid(cfm->mpid));
600 ccm->mpid64 = htonll(cfm->mpid);
601 ccm->opdown = !cfm->opup;
603 ccm->mpid = htons(cfm->mpid);
604 ccm->mpid64 = htonll(0);
/* Interval code 0 means "use the extended millisecond field" and is only
 * legal in extended mode. */
608 if (cfm->ccm_interval == 0) {
609 ovs_assert(extended);
610 ccm->interval_ms_x = htons(cfm->ccm_interval_ms);
612 ccm->interval_ms_x = htons(0);
/* With no live RMPs after boot, advertise RDI so the peer sees a fault. */
615 if (cfm->booted && hmap_is_empty(&cfm->remote_mps)) {
616 ccm->flags |= CCM_RDI_MASK;
/* Warn when transmission lags well past the configured interval. */
620 long long int delay = time_msec() - cfm->last_tx;
621 if (delay > (cfm->ccm_interval_ms * 3 / 2)) {
622 VLOG_INFO("%s: long delay of %lldms (expected %dms) sending CCM"
623 " seq %"PRIu32, cfm->name, delay, cfm->ccm_interval_ms,
627 cfm->last_tx = time_msec();
628 ovs_mutex_unlock(&mutex);
/* Arranges for the poll loop to wake at the next CFM event time. */
632 cfm_wait(struct cfm *cfm) OVS_EXCLUDED(mutex)
634 poll_timer_wait_until(cfm_wake_time(cfm));
638 /* Returns the next cfm wakeup time. */
/* The earlier of the transmit timer and the fault-check timer.
 * NOTE(review): extraction gap — the return type, braces, and the final
 * "return retval;" are missing from this view; lines 643-647 presumably
 * handled a NULL/disabled cfm — confirm against original. */
640 cfm_wake_time(struct cfm *cfm) OVS_EXCLUDED(mutex)
642 long long int retval;
648 ovs_mutex_lock(&mutex);
649 retval = MIN(cfm->tx_timer.t, cfm->fault_timer.t);
650 ovs_mutex_unlock(&mutex);
655 /* Configures 'cfm' with settings from 's'. */
/* NOTE(review): extraction gap — missing from this view: the return type
 * (apparently bool: the function validates settings first), opening brace,
 * local declarations ('uint8_t interval', 'int interval_ms'), assignments
 * of mpid/opup/demand, the demand-mode timer setup, the early-failure and
 * success returns, and closing braces. */
657 cfm_configure(struct cfm *cfm, const struct cfm_settings *s)
/* Reject invalid MPIDs and non-positive intervals up front. */
663 if (!cfm_is_valid_mpid(s->extended, s->mpid) || s->interval <= 0) {
667 ovs_mutex_lock(&mutex);
/* Snap the requested interval to the nearest 802.1ag interval code. */
670 interval = ms_to_ccm_interval(s->interval);
671 interval_ms = ccm_interval_to_ms(interval);
673 atomic_store_relaxed(&cfm->check_tnl_key, s->check_tnl_key);
674 atomic_store_relaxed(&cfm->extended, s->extended);
676 cfm->ccm_vlan = s->ccm_vlan;
/* Mask PCP to its 3 valid bits. */
677 cfm->ccm_pcp = s->ccm_pcp & (VLAN_PCP_MASK >> VLAN_PCP_SHIFT);
/* In extended mode an exact millisecond interval may be used (interval
 * code 0 + interval_ms_x on the wire), capped at UINT16_MAX. */
678 if (s->extended && interval_ms != s->interval) {
680 interval_ms = MIN(s->interval, UINT16_MAX);
/* Demand mode is only meaningful in extended mode. */
683 if (s->extended && s->demand) {
686 cfm->rx_packets = cfm_rx_packets(cfm);
/* On any interval change, restart both timers: transmit immediately and
 * schedule the next fault check. */
692 if (interval != cfm->ccm_interval || interval_ms != cfm->ccm_interval_ms) {
693 cfm->ccm_interval = interval;
694 cfm->ccm_interval_ms = interval_ms;
696 timer_set_expired(&cfm->tx_timer);
697 timer_set_duration(&cfm->fault_timer, cfm_fault_interval(cfm));
700 ovs_mutex_unlock(&mutex);
704 /* Must be called when the netdev owned by 'cfm' should change. */
/* Swaps cfm->netdev for 'netdev', dropping the old reference and taking a
 * new one.  NOTE(review): extraction gap — the return type, opening brace,
 * a line inside the if (presumably updating cfm->name — confirm), and the
 * closing brace are missing from this view. */
706 cfm_set_netdev(struct cfm *cfm, const struct netdev *netdev)
709 ovs_mutex_lock(&mutex);
710 if (cfm->netdev != netdev) {
711 netdev_close(cfm->netdev);
712 cfm->netdev = netdev_ref(netdev);
714 ovs_mutex_unlock(&mutex);
717 /* Returns true if 'cfm' should process packets from 'flow'. Sets
718 * fields in 'wc' that were used to make the determination. */
/* A flow is CFM-relevant when its EtherType is CFM, its destination MAC is
 * this instance's CCM address, and — when tunnel-key checking is enabled —
 * its tunnel ID is zero.  NOTE(review): extraction gap — the return type,
 * 'bool check_tnl_key' declaration, the "return false;" bodies of the two
 * early-outs, the dl_type wildcard masking, the non-tnl-key "return true;",
 * and braces are missing from this view. */
720 cfm_should_process_flow(const struct cfm *cfm_, const struct flow *flow,
721 struct flow_wildcards *wc)
723 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
726 /* Most packets are not CFM. */
727 if (OVS_LIKELY(flow->dl_type != htons(ETH_TYPE_CFM))) {
/* Record that dl_dst was examined, then require the CCM address. */
731 memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
732 if (OVS_UNLIKELY(!eth_addr_equals(flow->dl_dst, cfm_ccm_addr(cfm)))) {
736 atomic_read_relaxed(&cfm->check_tnl_key, &check_tnl_key);
/* Tunnel-key checking: only accept CCMs arriving with tun_id == 0. */
739 memset(&wc->masks.tunnel.tun_id, 0xff, sizeof wc->masks.tunnel.tun_id);
740 return flow->tunnel.tun_id == htonll(0);
745 /* Updates internal statistics relevant to packet 'p'. Should be called on
746 * every packet whose flow returned true when passed to
747 * cfm_should_process_flow. */
/* Parses the CCM in 'p', validates it against this instance's configuration
 * (opcode, MAID, interval), finds or creates the sending remote MP, and
 * records sequence/RDI/opstate/health bookkeeping.
 * NOTE(review): extraction gap — the embedded original line numbers are
 * non-contiguous; missing lines include the function's return type and
 * braces, the 'struct ccm *ccm' and 'eth' assignments, several local
 * declarations (ccm_mpid, ccm_opdown, ccm_seq), the extended/standard
 * branch framing, the "goto out" early exits, the rmp->recv/seq updates,
 * and the demand-mode condition guarding the demand_rx_ccm_t reset.
 * Comments below describe only the visible lines. */
749 cfm_process_heartbeat(struct cfm *cfm, const struct ofpbuf *p)
753 struct eth_header *eth;
756 ovs_mutex_lock(&mutex);
758 atomic_read_relaxed(&cfm->extended, &extended);
/* Pull at least CCM_ACCEPT_LEN bytes of L3 payload; NULL means the packet
 * is too short to be a CCM. */
761 ccm = ofpbuf_at(p, (uint8_t *)ofpbuf_l3(p) - (uint8_t *)ofpbuf_data(p),
765 VLOG_INFO_RL(&rl, "%s: Received an unparseable 802.1ag CCM heartbeat.",
/* Only the CCM opcode is supported; other CFM message types are logged
 * and ignored. */
770 if (ccm->opcode != CCM_OPCODE) {
771 VLOG_INFO_RL(&rl, "%s: Received an unsupported 802.1ag message. "
772 "(opcode %u)", cfm->name, ccm->opcode);
776 /* According to the 802.1ag specification, reception of a CCM with an
777 * incorrect ccm_interval, unexpected MAID, or unexpected MPID should
778 * trigger a fault. We ignore this requirement for several reasons.
780 * Faults can cause a controller or Open vSwitch to make potentially
781 * expensive changes to the network topology. It seems prudent to trigger
782 * them judiciously, especially when CFM is used to check slave status of
783 * bonds. Furthermore, faults can be maliciously triggered by crafting
784 * unexpected CCMs. */
785 if (memcmp(ccm->maid, cfm->maid, sizeof ccm->maid)) {
786 cfm->recv_fault |= CFM_FAULT_MAID;
787 VLOG_WARN_RL(&rl, "%s: Received unexpected remote MAID from MAC "
788 ETH_ADDR_FMT, cfm->name, ETH_ADDR_ARGS(eth->eth_src));
/* MAID matched: decode the flags byte (low 3 bits = interval code,
 * high bit = RDI) and the extended-interval field. */
790 uint8_t ccm_interval = ccm->flags & 0x7;
791 bool ccm_rdi = ccm->flags & CCM_RDI_MASK;
792 uint16_t ccm_interval_ms_x = ntohs(ccm->interval_ms_x);
794 struct remote_mp *rmp;
798 enum cfm_fault_reason cfm_fault = 0;
/* Extended mode reads the 64-bit MPID and opdown flag; standard mode
 * reads the 16-bit wire MPID (branch framing in missing lines). */
801 ccm_mpid = ntohll(ccm->mpid64);
802 ccm_opdown = ccm->opdown;
804 ccm_mpid = ntohs(ccm->mpid);
807 ccm_seq = ntohl(ccm->seq);
/* Interval mismatches are logged but, per the comment above, do not by
 * themselves raise a fault here. */
809 if (ccm_interval != cfm->ccm_interval) {
810 VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected interval"
811 " (%"PRIu8") from RMP %"PRIu64, cfm->name,
812 ccm_interval, ccm_mpid);
815 if (extended && ccm_interval == 0
816 && ccm_interval_ms_x != cfm->ccm_interval_ms) {
817 VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected extended"
818 " interval (%"PRIu16"ms) from RMP %"PRIu64, cfm->name,
819 ccm_interval_ms_x, ccm_mpid);
/* Find the sender's RMP record, creating one if under the limit;
 * otherwise flag overflow and drop the CCM. */
822 rmp = lookup_remote_mp(cfm, ccm_mpid);
824 if (hmap_count(&cfm->remote_mps) < CFM_MAX_RMPS) {
825 rmp = xzalloc(sizeof *rmp);
826 hmap_insert(&cfm->remote_mps, &rmp->node, hash_mpid(ccm_mpid));
828 cfm_fault |= CFM_FAULT_OVERFLOW;
830 "%s: dropped CCM with MPID %"PRIu64" from MAC "
831 ETH_ADDR_FMT, cfm->name, ccm_mpid,
832 ETH_ADDR_ARGS(eth->eth_src));
/* Peer set the Remote Defect Indication bit. */
837 cfm_fault |= CFM_FAULT_RDI;
838 VLOG_DBG("%s: RDI bit flagged from RMP %"PRIu64, cfm->name,
842 VLOG_DBG("%s: received CCM (seq %"PRIu32") (mpid %"PRIu64")"
843 " (interval %"PRIu8") (RDI %s)", cfm->name, ccm_seq,
844 ccm_mpid, ccm_interval, ccm_rdi ? "true" : "false");
/* A CCM carrying our own MPID indicates a loop back to ourselves. */
847 if (rmp->mpid == cfm->mpid) {
848 cfm_fault |= CFM_FAULT_LOOPBACK;
849 VLOG_WARN_RL(&rl,"%s: received CCM with local MPID"
850 " %"PRIu64, cfm->name, rmp->mpid);
/* Non-consecutive sequence numbers hint at lost connectivity. */
853 if (rmp->seq && ccm_seq != (rmp->seq + 1)) {
854 VLOG_WARN_RL(&rl, "%s: (mpid %"PRIu64") detected sequence"
855 " numbers which indicate possible connectivity"
856 " problems (previous %"PRIu32") (current %"PRIu32
857 ")", cfm->name, ccm_mpid, rmp->seq, ccm_seq);
860 rmp->mpid = ccm_mpid;
862 rmp->num_health_ccm++;
/* Demand mode: each received CCM grants another 100 intervals before a
 * missing-CCM fault (guard condition in missing lines). */
864 timer_set_duration(&cfm->demand_rx_ccm_t,
865 100 * cfm->ccm_interval_ms);
/* Accumulate this CCM's faults into recv_fault; cfm_run() promotes them. */
869 cfm->recv_fault |= cfm_fault;
871 rmp->opup = !ccm_opdown;
872 rmp->last_rx = time_msec();
877 ovs_mutex_unlock(&mutex);
880 /* Returns and resets the 'cfm->status_changed'. */
/* NOTE(review): extraction gap — throughout the getters below the return
 * types, local declarations ('bool ret', 'int fault', 'uint64_t
 * flap_count'), final "return ..." statements, and braces are missing. */
882 cfm_check_status_change(struct cfm *cfm) OVS_EXCLUDED(mutex)
886 ovs_mutex_lock(&mutex);
887 ret = cfm->status_changed;
888 cfm->status_changed = false;
889 ovs_mutex_unlock(&mutex);
/* Internal fault getter: a non-negative fault_override replaces the real
 * fault state with CFM_FAULT_OVERRIDE (or none).  Caller holds 'mutex'. */
895 cfm_get_fault__(const struct cfm *cfm) OVS_REQUIRES(mutex)
897 if (cfm->fault_override >= 0) {
898 return cfm->fault_override ? CFM_FAULT_OVERRIDE : 0;
903 /* Gets the fault status of 'cfm'. Returns a bit mask of 'cfm_fault_reason's
904 * indicating the cause of the connectivity fault, or zero if there is no
907 cfm_get_fault(const struct cfm *cfm) OVS_EXCLUDED(mutex)
911 ovs_mutex_lock(&mutex);
912 fault = cfm_get_fault__(cfm);
913 ovs_mutex_unlock(&mutex);
917 /* Gets the number of cfm fault flapping since start. */
919 cfm_get_flap_count(const struct cfm *cfm) OVS_EXCLUDED(mutex)
922 ovs_mutex_lock(&mutex);
923 flap_count = cfm->flap_count;
924 ovs_mutex_unlock(&mutex);
928 /* Gets the health of 'cfm'. Returns an integer between 0 and 100 indicating
929 * the health of the link as a percentage of ccm frames received in
930 * CFM_HEALTH_INTERVAL * 'fault_interval' if there is only 1 remote_mpid,
931 * returns 0 if there are no remote_mpids, and returns -1 if there are more
932 * than 1 remote_mpids. */
/* NOTE(review): extraction gap — return types, the 'int health' / 'bool
 * extended' / 'int opup' declarations, final returns, and braces are
 * missing throughout this span. */
934 cfm_get_health(const struct cfm *cfm) OVS_EXCLUDED(mutex)
938 ovs_mutex_lock(&mutex);
939 health = cfm->health;
940 ovs_mutex_unlock(&mutex);
/* Internal opup getter: -1 (no opstate) unless in extended mode, in which
 * case the last-known remote operational state.  Caller holds 'mutex'. */
945 cfm_get_opup__(const struct cfm *cfm_) OVS_REQUIRES(mutex)
947 struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
950 atomic_read_relaxed(&cfm->extended, &extended);
952 return extended ? cfm->remote_opup : -1;
955 /* Gets the operational state of 'cfm'. 'cfm' is considered operationally down
956 * if it has received a CCM with the operationally down bit set from any of its
957 * remote maintenance points. Returns 1 if 'cfm' is operationally up, 0 if
958 * 'cfm' is operationally down, or -1 if 'cfm' has no operational state
959 * (because it isn't in extended mode). */
961 cfm_get_opup(const struct cfm *cfm) OVS_EXCLUDED(mutex)
965 ovs_mutex_lock(&mutex);
966 opup = cfm_get_opup__(cfm);
967 ovs_mutex_unlock(&mutex);
/* Internal helper: copies the cached RMP-id array into a freshly allocated
 * '*rmps' (caller frees — note this contradicts the "retains ownership"
 * wording below; the xmemdup here transfers ownership) and its length into
 * '*n_rmps'.  NOTE(review): extraction gap — return types, braces, and the
 * OVS_REQUIRES annotation line are missing throughout this span. */
973 cfm_get_remote_mpids__(const struct cfm *cfm, uint64_t **rmps, size_t *n_rmps)
976 *rmps = xmemdup(cfm->rmps_array, cfm->rmps_array_len * sizeof **rmps);
977 *n_rmps = cfm->rmps_array_len;
980 /* Populates 'rmps' with an array of remote maintenance points reachable by
981 * 'cfm'. The number of remote maintenance points is written to 'n_rmps'.
982 * 'cfm' retains ownership of the array written to 'rmps' */
984 cfm_get_remote_mpids(const struct cfm *cfm, uint64_t **rmps, size_t *n_rmps)
987 ovs_mutex_lock(&mutex);
988 cfm_get_remote_mpids__(cfm, rmps, n_rmps);
989 ovs_mutex_unlock(&mutex);
992 /* Extracts the status of 'cfm' and fills in the 's'. */
/* One-shot snapshot of faults, remote opstate, flap count, health, and the
 * RMP-id array under a single lock acquisition. */
994 cfm_get_status(const struct cfm *cfm, struct cfm_status *s) OVS_EXCLUDED(mutex)
996 ovs_mutex_lock(&mutex);
997 s->faults = cfm_get_fault__(cfm);
998 s->remote_opstate = cfm_get_opup__(cfm);
999 s->flap_count = cfm->flap_count;
1000 s->health = cfm->health;
1001 cfm_get_remote_mpids__(cfm, &s->rmps, &s->n_rmps);
1002 ovs_mutex_unlock(&mutex);
/* Looks up the CFM instance named 'name' in the global table; caller must
 * hold 'mutex'.  NOTE(review): extraction gap — the return type, the
 * "return cfm;" / "return NULL;" tails, and braces are missing from this
 * view. */
1006 cfm_find(const char *name) OVS_REQUIRES(mutex)
1010 HMAP_FOR_EACH_WITH_HASH (cfm, hmap_node, hash_string(name, 0), all_cfms) {
1011 if (!strcmp(cfm->name, name)) {
/* Appends a human-readable dump of 'cfm' (used by the "cfm/show" unixctl
 * command) to 'ds': identity, fault state, health, opstate, timers, and
 * one stanza per remote MP.  Caller must hold 'mutex'.
 * NOTE(review): extraction gap — the return type, opening brace, the
 * 'bool extended' / 'int fault' declarations, the "if (fault)" guard
 * around the fault stanza, and closing braces are missing from this
 * view. */
1019 cfm_print_details(struct ds *ds, struct cfm *cfm) OVS_REQUIRES(mutex)
1021 struct remote_mp *rmp;
1025 atomic_read_relaxed(&cfm->extended, &extended);
1027 ds_put_format(ds, "---- %s ----\n", cfm->name);
1028 ds_put_format(ds, "MPID %"PRIu64":%s%s\n", cfm->mpid,
1029 extended ? " extended" : "",
1030 cfm->fault_override >= 0 ? " fault_override" : "");
1032 fault = cfm_get_fault__(cfm);
1034 ds_put_cstr(ds, "\tfault: ");
1035 ds_put_cfm_fault(ds, fault);
1036 ds_put_cstr(ds, "\n");
/* Health of -1 means undefined (more than one remote MP). */
1039 if (cfm->health == -1) {
1040 ds_put_format(ds, "\taverage health: undefined\n");
1042 ds_put_format(ds, "\taverage health: %d\n", cfm->health);
1044 ds_put_format(ds, "\topstate: %s\n", cfm->opup ? "up" : "down");
1045 ds_put_format(ds, "\tremote_opstate: %s\n",
1046 cfm->remote_opup ? "up" : "down");
1047 ds_put_format(ds, "\tinterval: %dms\n", cfm->ccm_interval_ms);
1048 ds_put_format(ds, "\tnext CCM tx: %lldms\n",
1049 timer_msecs_until_expired(&cfm->tx_timer));
1050 ds_put_format(ds, "\tnext fault check: %lldms\n",
1051 timer_msecs_until_expired(&cfm->fault_timer));
1053 HMAP_FOR_EACH (rmp, node, &cfm->remote_mps) {
1054 ds_put_format(ds, "Remote MPID %"PRIu64"\n", rmp->mpid);
1055 ds_put_format(ds, "\trecv since check: %s\n",
1056 rmp->recv ? "true" : "false");
1057 ds_put_format(ds, "\topstate: %s\n", rmp->opup? "up" : "down");
/* unixctl handler for "cfm/show [interface]": with an argument, dumps that
 * one CFM instance (error if not found); without, dumps all instances.
 * NOTE(review): extraction gap — the return type, opening brace, the
 * 'struct cfm *cfm' declaration, the "if (argc > 1)" framing and its
 * "goto out"-style error path, ds_destroy(), and closing braces are
 * missing from this view. */
1062 cfm_unixctl_show(struct unixctl_conn *conn, int argc, const char *argv[],
1063 void *aux OVS_UNUSED) OVS_EXCLUDED(mutex)
1065 struct ds ds = DS_EMPTY_INITIALIZER;
1068 ovs_mutex_lock(&mutex);
1070 cfm = cfm_find(argv[1]);
1072 unixctl_command_reply_error(conn, "no such CFM object");
1075 cfm_print_details(&ds, cfm);
/* No interface argument: show every registered CFM instance. */
1077 HMAP_FOR_EACH (cfm, hmap_node, all_cfms) {
1078 cfm_print_details(&ds, cfm);
1082 unixctl_command_reply(conn, ds_cstr(&ds));
1085 ovs_mutex_unlock(&mutex);
/* unixctl handler for "cfm/set-fault [interface] normal|false|true": sets
 * 'fault_override' (1 = force fault, 0 = force no fault, -1 = normal) on
 * one named instance or, with no interface argument, on all instances.
 * NOTE(review): extraction gap — the return type, opening brace, the
 * 'int fault_override' / 'struct cfm *cfm' declarations, the
 * "fault_override = 1;" assignment for "true", the "= 0;" for "false",
 * the error-path exits, and closing braces are missing from this view. */
1089 cfm_unixctl_set_fault(struct unixctl_conn *conn, int argc, const char *argv[],
1090 void *aux OVS_UNUSED) OVS_EXCLUDED(mutex)
/* The fault keyword is always the last argument. */
1092 const char *fault_str = argv[argc - 1];
1096 ovs_mutex_lock(&mutex);
1097 if (!strcasecmp("true", fault_str)) {
1099 } else if (!strcasecmp("false", fault_str)) {
1101 } else if (!strcasecmp("normal", fault_str)) {
1102 fault_override = -1;
1104 unixctl_command_reply_error(conn, "unknown fault string");
/* An interface name was supplied: apply to that instance only. */
1109 cfm = cfm_find(argv[1]);
1111 unixctl_command_reply_error(conn, "no such CFM object");
1114 cfm->fault_override = fault_override;
1115 cfm_status_changed(cfm);
/* No interface name: apply the override to every instance. */
1117 HMAP_FOR_EACH (cfm, hmap_node, all_cfms) {
1118 cfm->fault_override = fault_override;
1119 cfm_status_changed(cfm);
1123 unixctl_command_reply(conn, "OK");
1126 ovs_mutex_unlock(&mutex);