/*
- * Copyright (c) 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <string.h>
#include "byte-order.h"
+#include "connectivity.h"
+#include "dp-packet.h"
#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
#include "hmap.h"
#include "netdev.h"
-#include "ofpbuf.h"
+#include "ovs-atomic.h"
#include "packets.h"
#include "poll-loop.h"
#include "random.h"
+#include "seq.h"
#include "timer.h"
#include "timeval.h"
#include "unixctl.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(cfm);
#define CFM_MAX_RMPS 256
/* Ethernet destination address of CCM packets. */
-static const uint8_t eth_addr_ccm[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x30 };
-static const uint8_t eth_addr_ccm_x[6] = {
+static const uint8_t eth_addr_ccm[ETH_ADDR_LEN] = {
+ 0x01, 0x80, 0xC2, 0x00, 0x00, 0x30 };
+static const uint8_t eth_addr_ccm_x[ETH_ADDR_LEN] = {
0x01, 0x23, 0x20, 0x00, 0x00, 0x30
};
recomputed. */
long long int last_tx; /* Last CCM transmission time. */
+ /* These bools are atomic to allow readers to check their values
+ * without taking 'mutex'. Such readers do not assume the values they
+ * read are synchronized with any other members. */
atomic_bool check_tnl_key; /* Verify the tunnel key of inbound packets? */
atomic_bool extended; /* Extended mode. */
- atomic_int ref_cnt;
+ struct ovs_refcount ref_cnt;
+
+ uint64_t flap_count; /* Count the flaps since boot. */
+
+ /* True when the variables returned by cfm_get_*() are changed
+ * since last check. */
+ bool status_changed;
+
+ /* When 'cfm->demand' is set, at least one ccm is required to be received
+ * every 100 * cfm_interval. If ccm is not received within this interval,
+ * even if data packets are received, the cfm fault will be set. */
+ struct timer demand_rx_ccm_t;
};
/* Remote MPs represent foreign network entities that are configured to have
}
+/* Returns the Ethernet destination address CCMs for 'cfm' should use: the
+ * extended-mode multicast address ('eth_addr_ccm_x') when extended mode is
+ * enabled, otherwise the standard 802.1ag CCM address ('eth_addr_ccm').
+ * Reads 'cfm->extended' with a relaxed atomic load, so callers need not
+ * hold 'mutex' (per the comment on the atomic members of struct cfm). */
static const uint8_t *
-cfm_ccm_addr(const struct cfm *cfm)
+cfm_ccm_addr(struct cfm *cfm)
{
bool extended;
-    atomic_read(&cfm->extended, &extended);
+
+    atomic_read_relaxed(&cfm->extended, &extended);
+
return extended ? eth_addr_ccm_x : eth_addr_ccm;
}
ccm_interval_to_ms(uint8_t interval)
{
switch (interval) {
- case 0: NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
+ case 0: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
case 1: return 3; /* Not recommended due to timer resolution. */
case 2: return 10; /* Not recommended due to timer resolution. */
case 3: return 100;
case 5: return 10000;
case 6: return 60000;
case 7: return 600000;
- default: NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
+ default: OVS_NOT_REACHED(); /* Explicitly not supported by 802.1ag. */
}
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
static long long int
* as a fault (likely due to a configuration error). Thus we can check all
* MPs at once making this quite a bit simpler.
*
- * According to the specification we should check when (ccm_interval_ms *
- * 3.5)ms have passed. */
- return (cfm->ccm_interval_ms * 7) / 2;
+ * When cfm is not in demand mode, we check when (ccm_interval_ms * 3.5) ms
+ * have passed. When cfm is in demand mode, we check when
+ * (MAX(ccm_interval_ms, 500) * 3.5) ms have passed. This ensures that
+ * ovs-vswitchd has enough time to pull statistics from the datapath. */
+
+ return (MAX(cfm->ccm_interval_ms, cfm->demand ? 500 : cfm->ccm_interval_ms)
+ * 7) / 2;
}
static uint8_t
+/* Hashes the 64-bit maintenance point ID 'mpid' down to 32 bits (used both
+ * for hmap lookups and, in extended mode, to fill the 16/32-bit standard
+ * 'mpid' field of outgoing CCMs -- see cfm_compose_ccm()). */
static uint32_t
hash_mpid(uint64_t mpid)
{
-    return hash_bytes(&mpid, sizeof mpid, 0);
+    return hash_uint64(mpid);
}
static bool
1, 2, cfm_unixctl_set_fault, NULL);
}
+/* Marks 'cfm' as having a status change that cfm_check_status_change() will
+ * later report, and bumps the global connectivity seq so that threads
+ * waiting on it wake up and re-read the status.  Caller must hold the
+ * global 'mutex'. */
+static void
+cfm_status_changed(struct cfm *cfm) OVS_REQUIRES(mutex)
+{
+    seq_change(connectivity_seq_get());
+    cfm->status_changed = true;
+}
+
/* Allocates a 'cfm' object called 'name'. 'cfm' should be initialized by
* cfm_configure() before use. */
struct cfm *
cfm->fault_override = -1;
cfm->health = -1;
cfm->last_tx = 0;
+ cfm->flap_count = 0;
atomic_init(&cfm->extended, false);
atomic_init(&cfm->check_tnl_key, false);
- atomic_init(&cfm->ref_cnt, 1);
+ ovs_refcount_init(&cfm->ref_cnt);
ovs_mutex_lock(&mutex);
+ cfm_status_changed(cfm);
cfm_generate_maid(cfm);
hmap_insert(all_cfms, &cfm->hmap_node, hash_string(cfm->name, 0));
ovs_mutex_unlock(&mutex);
+
return cfm;
}
cfm_unref(struct cfm *cfm) OVS_EXCLUDED(mutex)
{
struct remote_mp *rmp, *rmp_next;
- int orig;
if (!cfm) {
return;
}
- atomic_sub(&cfm->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
- if (orig != 1) {
+ if (ovs_refcount_unref_relaxed(&cfm->ref_cnt) != 1) {
return;
}
ovs_mutex_lock(&mutex);
+ cfm_status_changed(cfm);
hmap_remove(all_cfms, &cfm->hmap_node);
ovs_mutex_unlock(&mutex);
hmap_destroy(&cfm->remote_mps);
netdev_close(cfm->netdev);
free(cfm->rmps_array);
+
free(cfm);
}
{
struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
if (cfm) {
- int orig;
- atomic_add(&cfm->ref_cnt, 1, &orig);
- ovs_assert(orig > 0);
+ ovs_refcount_ref(&cfm->ref_cnt);
}
return cfm;
}
if (timer_expired(&cfm->fault_timer)) {
long long int interval = cfm_fault_interval(cfm);
struct remote_mp *rmp, *rmp_next;
- bool old_cfm_fault = cfm->fault;
+ enum cfm_fault_reason old_cfm_fault = cfm->fault;
+ uint64_t old_flap_count = cfm->flap_count;
+ int old_health = cfm->health;
+ size_t old_rmps_array_len = cfm->rmps_array_len;
+ bool old_rmps_deleted = false;
+ bool old_rmp_opup = cfm->remote_opup;
bool demand_override;
bool rmp_set_opup = false;
bool rmp_set_opdown = false;
if (cfm->demand) {
uint64_t rx_packets = cfm_rx_packets(cfm);
demand_override = hmap_count(&cfm->remote_mps) == 1
- && rx_packets > cfm->rx_packets;
+ && rx_packets > cfm->rx_packets
+ && !timer_expired(&cfm->demand_rx_ccm_t);
cfm->rx_packets = rx_packets;
}
" %lldms", cfm->name, rmp->mpid,
time_msec() - rmp->last_rx);
if (!demand_override) {
+ old_rmps_deleted = true;
hmap_remove(&cfm->remote_mps, &rmp->node);
free(rmp);
}
cfm->fault |= CFM_FAULT_RECV;
}
- if (old_cfm_fault != cfm->fault && !VLOG_DROP_INFO(&rl)) {
- struct ds ds = DS_EMPTY_INITIALIZER;
+ if (old_cfm_fault != cfm->fault) {
+ if (!VLOG_DROP_INFO(&rl)) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ds_put_cstr(&ds, "from [");
+ ds_put_cfm_fault(&ds, old_cfm_fault);
+ ds_put_cstr(&ds, "] to [");
+ ds_put_cfm_fault(&ds, cfm->fault);
+ ds_put_char(&ds, ']');
+ VLOG_INFO("%s: CFM faults changed %s.", cfm->name, ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
- ds_put_cstr(&ds, "from [");
- ds_put_cfm_fault(&ds, old_cfm_fault);
- ds_put_cstr(&ds, "] to [");
- ds_put_cfm_fault(&ds, cfm->fault);
- ds_put_char(&ds, ']');
- VLOG_INFO("%s: CFM faults changed %s.", cfm->name, ds_cstr(&ds));
- ds_destroy(&ds);
+ /* If there is a flap, increments the counter. */
+ if (old_cfm_fault == 0 || cfm->fault == 0) {
+ cfm->flap_count++;
+ }
+ }
+
+ /* These variables represent the cfm session status, it is desirable
+ * to update them to database immediately after change. */
+ if (old_health != cfm->health
+ || old_rmp_opup != cfm->remote_opup
+ || (old_rmps_array_len != cfm->rmps_array_len || old_rmps_deleted)
+ || old_cfm_fault != cfm->fault
+ || old_flap_count != cfm->flap_count) {
+ cfm_status_changed(cfm);
}
cfm->booted = true;
/* Composes a CCM message into 'packet'. Messages generated with this function
* should be sent whenever cfm_should_send_ccm() indicates. */
void
-cfm_compose_ccm(struct cfm *cfm, struct ofpbuf *packet,
- uint8_t eth_src[ETH_ADDR_LEN]) OVS_EXCLUDED(mutex)
+cfm_compose_ccm(struct cfm *cfm, struct dp_packet *packet,
+ const uint8_t eth_src[ETH_ADDR_LEN]) OVS_EXCLUDED(mutex)
{
uint16_t ccm_vlan;
struct ccm *ccm;
if (ccm_vlan || cfm->ccm_pcp) {
uint16_t tci = ccm_vlan | (cfm->ccm_pcp << VLAN_PCP_SHIFT);
- eth_push_vlan(packet, htons(tci));
+ eth_push_vlan(packet, htons(ETH_TYPE_VLAN), htons(tci));
}
- ccm = packet->l3;
+ atomic_read_relaxed(&cfm->extended, &extended);
+
+ ccm = dp_packet_l3(packet);
ccm->mdlevel_version = 0;
ccm->opcode = CCM_OPCODE;
ccm->tlv_offset = 70;
memset(ccm->zero, 0, sizeof ccm->zero);
ccm->end_tlv = 0;
- atomic_read(&cfm->extended, &extended);
if (extended) {
ccm->mpid = htons(hash_mpid(cfm->mpid));
ccm->mpid64 = htonll(cfm->mpid);
if (cfm->last_tx) {
long long int delay = time_msec() - cfm->last_tx;
if (delay > (cfm->ccm_interval_ms * 3 / 2)) {
- VLOG_WARN("%s: long delay of %lldms (expected %dms) sending CCM"
+ VLOG_INFO("%s: long delay of %lldms (expected %dms) sending CCM"
" seq %"PRIu32, cfm->name, delay, cfm->ccm_interval_ms,
cfm->seq);
}
ovs_mutex_unlock(&mutex);
}
-void
+long long int
cfm_wait(struct cfm *cfm) OVS_EXCLUDED(mutex)
{
+    long long int wake_time = cfm_wake_time(cfm);
+    poll_timer_wait_until(wake_time);
+    return wake_time;
+}
+
+
+/* Returns the time at which 'cfm' next needs attention: the earlier of its
+ * CCM transmit timer and its fault-check timer, or LLONG_MAX (i.e. never)
+ * when 'cfm' is null.  Takes and releases the global 'mutex' to read the
+ * timers consistently. */
+long long int
+cfm_wake_time(struct cfm *cfm) OVS_EXCLUDED(mutex)
+{
+    long long int retval;
+
+    if (!cfm) {
+        return LLONG_MAX;
+    }
+
ovs_mutex_lock(&mutex);
-    timer_wait(&cfm->tx_timer);
-    timer_wait(&cfm->fault_timer);
+    retval = MIN(cfm->tx_timer.t, cfm->fault_timer.t);
ovs_mutex_unlock(&mutex);
+    return retval;
}
+
/* Configures 'cfm' with settings from 's'. */
bool
cfm_configure(struct cfm *cfm, const struct cfm_settings *s)
interval = ms_to_ccm_interval(s->interval);
interval_ms = ccm_interval_to_ms(interval);
- atomic_store(&cfm->check_tnl_key, s->check_tnl_key);
- atomic_store(&cfm->extended, s->extended);
+ atomic_store_relaxed(&cfm->check_tnl_key, s->check_tnl_key);
+ atomic_store_relaxed(&cfm->extended, s->extended);
cfm->ccm_vlan = s->ccm_vlan;
cfm->ccm_pcp = s->ccm_pcp & (VLAN_PCP_MASK >> VLAN_PCP_SHIFT);
}
if (s->extended && s->demand) {
- interval_ms = MAX(interval_ms, 500);
if (!cfm->demand) {
cfm->demand = true;
cfm->rx_packets = cfm_rx_packets(cfm);
/* Returns true if 'cfm' should process packets from 'flow'. Sets
* fields in 'wc' that were used to make the determination. */
bool
-cfm_should_process_flow(const struct cfm *cfm, const struct flow *flow,
+cfm_should_process_flow(const struct cfm *cfm_, const struct flow *flow,
struct flow_wildcards *wc)
{
+ struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
bool check_tnl_key;
- atomic_read(&cfm->check_tnl_key, &check_tnl_key);
+ /* Most packets are not CFM. */
+ if (OVS_LIKELY(flow->dl_type != htons(ETH_TYPE_CFM))) {
+ return false;
+ }
+
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+ if (OVS_UNLIKELY(!eth_addr_equals(flow->dl_dst, cfm_ccm_addr(cfm)))) {
+ return false;
+ }
+
+ atomic_read_relaxed(&cfm->check_tnl_key, &check_tnl_key);
+
if (check_tnl_key) {
memset(&wc->masks.tunnel.tun_id, 0xff, sizeof wc->masks.tunnel.tun_id);
+ return flow->tunnel.tun_id == htonll(0);
}
- return (ntohs(flow->dl_type) == ETH_TYPE_CFM
- && eth_addr_equals(flow->dl_dst, cfm_ccm_addr(cfm))
- && (!check_tnl_key || flow->tunnel.tun_id == htonll(0)));
+ return true;
}
/* Updates internal statistics relevant to packet 'p'. Should be called on
* every packet whose flow returned true when passed to
* cfm_should_process_flow. */
void
-cfm_process_heartbeat(struct cfm *cfm, const struct ofpbuf *p)
+cfm_process_heartbeat(struct cfm *cfm, const struct dp_packet *p)
OVS_EXCLUDED(mutex)
{
struct ccm *ccm;
struct eth_header *eth;
+ bool extended;
ovs_mutex_lock(&mutex);
- eth = p->l2;
- ccm = ofpbuf_at(p, (uint8_t *)p->l3 - (uint8_t *)p->data, CCM_ACCEPT_LEN);
+ atomic_read_relaxed(&cfm->extended, &extended);
+
+ eth = dp_packet_l2(p);
+ ccm = dp_packet_at(p, (uint8_t *)dp_packet_l3(p) - (uint8_t *)dp_packet_data(p),
+ CCM_ACCEPT_LEN);
if (!ccm) {
VLOG_INFO_RL(&rl, "%s: Received an unparseable 802.1ag CCM heartbeat.",
uint64_t ccm_mpid;
uint32_t ccm_seq;
bool ccm_opdown;
- bool extended;
enum cfm_fault_reason cfm_fault = 0;
- atomic_read(&cfm->extended, &extended);
if (extended) {
ccm_mpid = ntohll(ccm->mpid64);
ccm_opdown = ccm->opdown;
ccm_seq = ntohl(ccm->seq);
if (ccm_interval != cfm->ccm_interval) {
- cfm_fault |= CFM_FAULT_INTERVAL;
VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected interval"
" (%"PRIu8") from RMP %"PRIu64, cfm->name,
ccm_interval, ccm_mpid);
if (extended && ccm_interval == 0
&& ccm_interval_ms_x != cfm->ccm_interval_ms) {
- cfm_fault |= CFM_FAULT_INTERVAL;
VLOG_WARN_RL(&rl, "%s: received a CCM with an unexpected extended"
" interval (%"PRIu16"ms) from RMP %"PRIu64, cfm->name,
ccm_interval_ms_x, ccm_mpid);
rmp->mpid = ccm_mpid;
if (!cfm_fault) {
rmp->num_health_ccm++;
+ if (cfm->demand) {
+ timer_set_duration(&cfm->demand_rx_ccm_t,
+ 100 * cfm->ccm_interval_ms);
+ }
}
rmp->recv = true;
cfm->recv_fault |= cfm_fault;
ovs_mutex_unlock(&mutex);
}
+/* Returns true if 'cfm''s status has changed since this function was last
+ * called (i.e. since cfm_status_changed() last ran for it), false
+ * otherwise, and clears the flag so that the next call reports only new
+ * changes.  Takes the global 'mutex'. */
+bool
+cfm_check_status_change(struct cfm *cfm) OVS_EXCLUDED(mutex)
+{
+    bool ret;
+
+    ovs_mutex_lock(&mutex);
+    ret = cfm->status_changed;
+    cfm->status_changed = false;
+    ovs_mutex_unlock(&mutex);
+
+    return ret;
+}
+
static int
cfm_get_fault__(const struct cfm *cfm) OVS_REQUIRES(mutex)
{
return fault;
}
+/* Returns the number of fault "flaps" -- transitions into or out of the
+ * faulted state (the counter is bumped whenever the fault set changes to or
+ * from zero) -- recorded for 'cfm' since it was created.  Takes the global
+ * 'mutex' to read the 64-bit counter consistently. */
+uint64_t
+cfm_get_flap_count(const struct cfm *cfm) OVS_EXCLUDED(mutex)
+{
+    uint64_t flap_count;
+    ovs_mutex_lock(&mutex);
+    flap_count = cfm->flap_count;
+    ovs_mutex_unlock(&mutex);
+    return flap_count;
+}
+
/* Gets the health of 'cfm'. Returns an integer between 0 and 100 indicating
* the health of the link as a percentage of ccm frames received in
* CFM_HEALTH_INTERVAL * 'fault_interval' if there is only 1 remote_mpid,
return health;
}
+/* Helper for cfm_get_opup() and cfm_get_status(): returns 'cfm_''s remote
+ * operational state ('remote_opup') when extended mode is enabled, or -1
+ * when it is not.  Caller must hold the global 'mutex'.  The CONST_CAST is
+ * needed only because the atomic read takes a non-const pointer; nothing
+ * is modified. */
+static int
+cfm_get_opup__(const struct cfm *cfm_) OVS_REQUIRES(mutex)
+{
+    struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
+    bool extended;
+
+    atomic_read_relaxed(&cfm->extended, &extended);
+
+    return extended ? cfm->remote_opup : -1;
+}
+
/* Gets the operational state of 'cfm'. 'cfm' is considered operationally down
* if it has received a CCM with the operationally down bit set from any of its
* remote maintenance points. Returns 1 if 'cfm' is operationally up, 0 if
int
cfm_get_opup(const struct cfm *cfm) OVS_EXCLUDED(mutex)
{
- bool extended;
int opup;
ovs_mutex_lock(&mutex);
- atomic_read(&cfm->extended, &extended);
- opup = extended ? cfm->remote_opup : -1;
+ opup = cfm_get_opup__(cfm);
ovs_mutex_unlock(&mutex);
return opup;
}
+/* Copies 'cfm''s array of remote MPIDs into a freshly xmemdup()'d array
+ * stored in '*rmps' and its element count into '*n_rmps'.  Caller must
+ * hold the global 'mutex'.  NOTE(review): '*rmps' is heap-allocated here,
+ * so some caller must eventually free() it -- confirm this against the
+ * ownership comment on the public cfm_get_remote_mpids() wrapper. */
+static void
+cfm_get_remote_mpids__(const struct cfm *cfm, uint64_t **rmps, size_t *n_rmps)
+    OVS_REQUIRES(mutex)
+{
+    *rmps = xmemdup(cfm->rmps_array, cfm->rmps_array_len * sizeof **rmps);
+    *n_rmps = cfm->rmps_array_len;
+}
+
/* Populates 'rmps' with an array of remote maintenance points reachable by
* 'cfm'. The number of remote maintenance points is written to 'n_rmps'.
* 'cfm' retains ownership of the array written to 'rmps' */
OVS_EXCLUDED(mutex)
{
ovs_mutex_lock(&mutex);
- *rmps = xmemdup(cfm->rmps_array, cfm->rmps_array_len);
- *n_rmps = cfm->rmps_array_len;
+ cfm_get_remote_mpids__(cfm, rmps, n_rmps);
+ ovs_mutex_unlock(&mutex);
+}
+
+/* Snapshots the complete status of 'cfm' -- faults, remote operational
+ * state, flap count, health, and the list of remote MPIDs -- into 's'
+ * under a single acquisition of the global 'mutex', so that all fields of
+ * 's' are mutually consistent.  's->rmps' is a heap-allocated copy
+ * (see cfm_get_remote_mpids__()); presumably the caller frees it --
+ * TODO(review): confirm ownership convention. */
+void
+cfm_get_status(const struct cfm *cfm, struct cfm_status *s) OVS_EXCLUDED(mutex)
+{
+    ovs_mutex_lock(&mutex);
+    s->faults = cfm_get_fault__(cfm);
+    s->remote_opstate = cfm_get_opup__(cfm);
+    s->flap_count = cfm->flap_count;
+    s->health = cfm->health;
+    cfm_get_remote_mpids__(cfm, &s->rmps, &s->n_rmps);
+    ovs_mutex_unlock(&mutex);
+}
static struct cfm *
-cfm_find(const char *name) OVS_REQUIRES(&mutex)
+cfm_find(const char *name) OVS_REQUIRES(mutex)
{
struct cfm *cfm;
}
static void
-cfm_print_details(struct ds *ds, const struct cfm *cfm) OVS_REQUIRES(&mutex)
+cfm_print_details(struct ds *ds, struct cfm *cfm) OVS_REQUIRES(mutex)
{
struct remote_mp *rmp;
bool extended;
int fault;
- atomic_read(&cfm->extended, &extended);
+ atomic_read_relaxed(&cfm->extended, &extended);
ds_put_format(ds, "---- %s ----\n", cfm->name);
ds_put_format(ds, "MPID %"PRIu64":%s%s\n", cfm->mpid,
void *aux OVS_UNUSED) OVS_EXCLUDED(mutex)
{
struct ds ds = DS_EMPTY_INITIALIZER;
- const struct cfm *cfm;
+ struct cfm *cfm;
ovs_mutex_lock(&mutex);
if (argc > 1) {
goto out;
}
cfm->fault_override = fault_override;
+ cfm_status_changed(cfm);
} else {
HMAP_FOR_EACH (cfm, hmap_node, all_cfms) {
cfm->fault_override = fault_override;
+ cfm_status_changed(cfm);
}
}