static int set_stp_port(struct ofport *,
const struct ofproto_port_stp_settings *);
+static void rstp_run(struct ofproto_dpif *ofproto);
+static void set_rstp_port(struct ofport *,
+ const struct ofproto_port_rstp_settings *);
+
struct ofport_dpif {
struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
struct ofport up;
enum stp_state stp_state; /* Always STP_DISABLED if STP not in use. */
long long int stp_state_entered;
+ /* Rapid Spanning Tree. */
+ struct rstp_port *rstp_port; /* Rapid Spanning Tree Protocol, if any. */
+ enum rstp_state rstp_state; /* Always RSTP_DISABLED if RSTP not in use. */
+
/* Queue to DSCP mapping. */
struct ofproto_port_queue *qdscp;
size_t n_qdscp;
enum revalidate_reason {
REV_RECONFIGURE = 1, /* Switch configuration changed. */
REV_STP, /* Spanning tree protocol port status change. */
+ REV_RSTP, /* RSTP port status change. */
REV_BOND, /* Bonding changed. */
REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ...*/
REV_FLOW_TABLE, /* Flow table changed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
+COVERAGE_DEFINE(rev_rstp);
COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
* False if the datapath supports only 8-byte (or shorter) userdata. */
bool variable_length_userdata;
+ /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
+ * actions. */
+ bool masked_set_action;
+
/* Maximum number of MPLS label stack entries that the datapath supports
* in a match */
size_t max_mpls_depth;
struct stp *stp;
long long int stp_last_tick;
+ /* Rapid Spanning Tree. */
+ struct rstp *rstp;
+ long long int rstp_last_tick;
+
/* VLAN splinters. */
struct ovs_mutex vsp_mutex;
struct hmap realdev_vid_map OVS_GUARDED; /* (realdev,vid) -> vlandev. */
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
}
+ dpif_poll_threads_set(backer->dpif, n_dpdk_rxqs, pmd_cpu_mask);
+
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
switch (backer->need_revalidate) {
case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_RSTP: COVERAGE_INC(rev_rstp); break;
case REV_BOND: COVERAGE_INC(rev_bond); break;
case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
xlate_ofproto_set(ofproto, ofproto->up.name,
ofproto->backer->dpif, ofproto->miss_rule,
ofproto->no_packet_in_rule, ofproto->ml,
- ofproto->stp, ofproto->ms, ofproto->mbridge,
- ofproto->sflow, ofproto->ipfix,
+ ofproto->stp, ofproto->rstp, ofproto->ms,
+ ofproto->mbridge, ofproto->sflow, ofproto->ipfix,
ofproto->netflow, ofproto->up.frag_handling,
ofproto->up.forward_bpdu,
connmgr_has_in_band(ofproto->up.connmgr),
ofproto->backer->enable_recirc,
ofproto->backer->variable_length_userdata,
- ofproto->backer->max_mpls_depth);
+ ofproto->backer->max_mpls_depth,
+ ofproto->backer->masked_set_action);
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
xlate_bundle_set(ofproto, bundle, bundle->name,
ofport->up.ofp_port, ofport->odp_port,
ofport->up.netdev, ofport->cfm,
ofport->bfd, ofport->peer, stp_port,
- ofport->qdscp, ofport->n_qdscp,
- ofport->up.pp.config, ofport->up.pp.state,
- ofport->is_tunnel, ofport->may_enable);
+ ofport->rstp_port, ofport->qdscp,
+ ofport->n_qdscp, ofport->up.pp.config,
+ ofport->up.pp.state, ofport->is_tunnel,
+ ofport->may_enable);
}
xlate_txn_commit();
}
static bool check_variable_length_userdata(struct dpif_backer *backer);
static size_t check_max_mpls_depth(struct dpif_backer *backer);
static bool check_recirc(struct dpif_backer *backer);
+static bool check_masked_set_action(struct dpif_backer *backer);
static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
shash_add(&all_dpif_backers, type, backer);
+ backer->enable_recirc = check_recirc(backer);
+ backer->max_mpls_depth = check_max_mpls_depth(backer);
+ backer->masked_set_action = check_masked_set_action(backer);
+ backer->rid_pool = recirc_id_pool_create();
+
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
close_dpif_backer(backer);
return error;
}
- backer->enable_recirc = check_recirc(backer);
- backer->variable_length_userdata = check_variable_length_userdata(backer);
- backer->max_mpls_depth = check_max_mpls_depth(backer);
- backer->rid_pool = recirc_id_pool_create();
if (backer->recv_set_enable) {
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
}
+ /* This check fails if performed before udpif threads have been set,
+ * as the kernel module checks that the 'pid' in userspace action
+ * is non-zero. */
+ backer->variable_length_userdata = check_variable_length_userdata(backer);
+
return error;
}
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE,
ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL,
0, NULL);
if (error && error != EEXIST) {
switch (error) {
case 0:
- /* Variable-length userdata is supported.
- *
- * Purge received packets to avoid processing the nonsense packet we
- * sent to userspace, then report success. */
- dpif_recv_purge(backer->dpif);
return true;
case ERANGE:
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0, false);
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
- ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL, 0, NULL);
+ error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE,
+ ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0,
+ NULL, 0, NULL);
if (error && error != EEXIST) {
if (error != EINVAL) {
VLOG_WARN("%s: MPLS stack length feature probe failed (%s)",
break;
}
- error = dpif_flow_del(backer->dpif, ofpbuf_data(&key), ofpbuf_size(&key), NULL);
+ error = dpif_flow_del(backer->dpif, ofpbuf_data(&key),
+ ofpbuf_size(&key), NULL);
if (error) {
VLOG_WARN("%s: failed to delete MPLS feature probe flow",
dpif_name(backer->dpif));
return n;
}
+/* Tests whether 'backer''s datapath supports masked data in
+ * OVS_ACTION_ATTR_SET actions. We need to disable some features on older
+ * datapaths that don't support this feature. */
+static bool
+check_masked_set_action(struct dpif_backer *backer)
+{
+    struct eth_header *eth;
+    struct ofpbuf actions;
+    struct dpif_execute execute;
+    struct ofpbuf packet;
+    int error;
+    struct ovs_key_ethernet key, mask;
+
+    /* Compose a set action that will cause an EINVAL error on older
+     * datapaths that don't support masked set actions.
+     * Avoid using a full mask, as it could be translated to a non-masked
+     * set action instead.  (0x53 and 0x7f are arbitrary probe values; only
+     * the partial mask matters.) */
+    ofpbuf_init(&actions, 64);
+    memset(&key, 0x53, sizeof key);
+    memset(&mask, 0x7f, sizeof mask);
+    commit_masked_set_action(&actions, OVS_KEY_ATTR_ETHERNET, &key, &mask,
+                             sizeof key);
+
+    /* Compose a dummy ethernet packet.  0x1234 is an arbitrary ethertype;
+     * the actions only rewrite the packet, they never output it. */
+    ofpbuf_init(&packet, ETH_HEADER_LEN);
+    eth = ofpbuf_put_zeros(&packet, ETH_HEADER_LEN);
+    eth->eth_type = htons(0x1234);
+
+    /* Execute the actions. On older datapaths this fails with EINVAL, on
+     * newer datapaths it succeeds. */
+    execute.actions = ofpbuf_data(&actions);
+    execute.actions_len = ofpbuf_size(&actions);
+    execute.packet = &packet;
+    execute.md = PKT_METADATA_INITIALIZER(0);
+    execute.needs_help = false;
+
+    error = dpif_execute(backer->dpif, &execute);
+
+    ofpbuf_uninit(&packet);
+    ofpbuf_uninit(&actions);
+
+    if (error) {
+        /* Masked set action is not supported.  Expected on older datapaths,
+         * so log at INFO rather than WARN. */
+        VLOG_INFO("%s: datapath does not support masked set action feature.",
+                  dpif_name(backer->dpif));
+    }
+    return !error;
+}
+
static int
construct(struct ofproto *ofproto_)
{
ofproto->sflow = NULL;
ofproto->ipfix = NULL;
ofproto->stp = NULL;
+ ofproto->rstp = NULL;
ofproto->dump_seq = 0;
hmap_init(&ofproto->bundles);
ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
* (priority=2), recirc=0, actions=resubmit(, 0)
*/
resubmit = ofpact_put_RESUBMIT(&ofpacts);
- resubmit->ofpact.compat = 0;
resubmit->in_port = OFPP_IN_PORT;
resubmit->table_id = 0;
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct rule_dpif *rule, *next_rule;
struct ofproto_packet_in *pin, *next_pin;
+ struct rule_dpif *rule;
struct oftable *table;
struct list pins;
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- struct cls_cursor cursor;
-
- fat_rwlock_rdlock(&table->cls.rwlock);
- cls_cursor_init(&cursor, &table->cls, NULL);
- fat_rwlock_unlock(&table->cls.rwlock);
- CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
+ CLS_FOR_EACH_SAFE (rule, up.cr, &table->cls) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}
netflow_unref(ofproto->netflow);
dpif_sflow_unref(ofproto->sflow);
+ dpif_ipfix_unref(ofproto->ipfix);
hmap_destroy(&ofproto->bundles);
mac_learning_unref(ofproto->ml);
mcast_snooping_unref(ofproto->ms);
}
stp_run(ofproto);
+ rstp_run(ofproto);
ovs_rwlock_wrlock(&ofproto->ml->rwlock);
if (mac_learning_run(ofproto->ml)) {
ofproto->backer->need_revalidate = REV_MAC_LEARNING;
}
+/* Implements the ofproto provider ->query_tables() callback: names every
+ * table "classifier" and, when 'stats' is nonnull, fills in per-table
+ * lookup/match counters from the relaxed atomic counters kept on each
+ * oftable.  Replaces the old get_features()/get_tables() callbacks. */
static void
-get_features(struct ofproto *ofproto_ OVS_UNUSED,
-             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
-{
-    *arp_match_ip = true;
-    *actions = (OFPUTIL_A_OUTPUT |
-                OFPUTIL_A_SET_VLAN_VID |
-                OFPUTIL_A_SET_VLAN_PCP |
-                OFPUTIL_A_STRIP_VLAN |
-                OFPUTIL_A_SET_DL_SRC |
-                OFPUTIL_A_SET_DL_DST |
-                OFPUTIL_A_SET_NW_SRC |
-                OFPUTIL_A_SET_NW_DST |
-                OFPUTIL_A_SET_NW_TOS |
-                OFPUTIL_A_SET_TP_SRC |
-                OFPUTIL_A_SET_TP_DST |
-                OFPUTIL_A_ENQUEUE);
-}
-
-static void
-get_tables(struct ofproto *ofproto, struct ofp12_table_stats *ots)
+query_tables(struct ofproto *ofproto,
+             struct ofputil_table_features *features,
+             struct ofputil_table_stats *stats)
{
-    int i;
+    strcpy(features->name, "classifier");
+
+    if (stats) {
+        int i;
-    strcpy(ots->name, "classifier");
+        for (i = 0; i < ofproto->n_tables; i++) {
+            unsigned long missed, matched;
-    for (i = 0; i < ofproto->n_tables; i++) {
-        unsigned long missed, matched;
+            /* Relaxed loads: these counters are statistics only, so no
+             * ordering with other memory operations is required. */
+            atomic_read_relaxed(&ofproto->tables[i].n_matched, &matched);
+            atomic_read_relaxed(&ofproto->tables[i].n_missed, &missed);
-        atomic_read(&ofproto->tables[i].n_matched, &matched);
-        ots[i].matched_count = htonll(matched);
-        atomic_read(&ofproto->tables[i].n_missed, &missed);
-        ots[i].lookup_count = htonll(matched + missed);
+            /* New interface takes host byte order; no htonll() needed. */
+            stats[i].matched_count = matched;
+            stats[i].lookup_count = matched + missed;
+        }
    }
}
port->bundle = NULL;
port->cfm = NULL;
port->bfd = NULL;
- port->may_enable = true;
+ port->may_enable = false;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
+ port->rstp_port = NULL;
+ port->rstp_state = RSTP_DISABLED;
port->is_tunnel = false;
port->peer = NULL;
port->qdscp = NULL;
if (netdev_get_tunnel_config(netdev)) {
tnl_port_add(port, port->up.netdev, port->odp_port);
port->is_tunnel = true;
+ if (ofproto->ipfix) {
+ dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
+ }
} else {
/* Sanity-check that a mapping doesn't already exist. This
* shouldn't happen for non-tunnel ports. */
ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
}
+ if (port->is_tunnel && ofproto->ipfix) {
+ dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
+ }
+
tnl_port_del(port);
sset_find_and_delete(&ofproto->ports, devname);
sset_find_and_delete(&ofproto->ghost_ports, devname);
if (port->stp_port) {
stp_port_disable(port->stp_port);
}
+ set_rstp_port(port_, NULL);
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_ipfix *di = ofproto->ipfix;
bool has_options = bridge_exporter_options || flow_exporters_options;
+ bool new_di = false;
if (has_options && !di) {
di = ofproto->ipfix = dpif_ipfix_create();
+ new_di = true;
}
if (di) {
di, bridge_exporter_options, flow_exporters_options,
n_flow_exporters_options);
+ /* Add tunnel ports only when a new ipfix created */
+ if (new_di == true) {
+ struct ofport_dpif *ofport;
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ if (ofport->is_tunnel == true) {
+ dpif_ipfix_add_tunnel_port(di, &ofport->up, ofport->odp_port);
+ }
+ }
+ }
+
if (!has_options) {
dpif_ipfix_unref(di);
ofproto->ipfix = NULL;
\f
/* Spanning Tree. */
+/* Callback used by the rstp library (registered via rstp_create()) to
+ * transmit an RSTP BPDU out of 'ofport_'.  Fills in the source MAC from the
+ * port's netdev before sending; if the netdev has no usable address the BPDU
+ * is dropped with a rate-limited warning.  Takes ownership of 'pkt' and
+ * always frees it.  Called while rstp_mutex is held. */
+static void
+rstp_send_bpdu_cb(struct ofpbuf *pkt, void *ofport_, void *ofproto_)
+{
+    struct ofproto_dpif *ofproto = ofproto_;
+    struct ofport_dpif *ofport = ofport_;
+    struct eth_header *eth = ofpbuf_l2(pkt);
+
+    netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+    if (eth_addr_is_zero(eth->eth_src)) {
+        VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
+                     "does not have a configured source MAC address.",
+                     ofproto->up.name, ofp_to_u16(ofport->up.ofp_port));
+    } else {
+        ofproto_dpif_send_packet(ofport, pkt);
+    }
+    ofpbuf_delete(pkt);
+}
+
static void
send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
{
ofpbuf_delete(pkt);
}
+/* Configures RSTP on 'ofproto_' using the settings defined in 's', creating
+ * the bridge RSTP instance on first enable.  A null 's' disables RSTP:
+ * every port is detached and the bridge instance is released. */
+static void
+set_rstp(struct ofproto *ofproto_, const struct ofproto_rstp_settings *s)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+    /* Only revalidate flows if the configuration changed, i.e. RSTP is
+     * being switched on or off ('!s != !ofproto->rstp' is true exactly on
+     * an enable/disable transition). */
+    if (!s != !ofproto->rstp) {
+        ofproto->backer->need_revalidate = REV_RECONFIGURE;
+    }
+
+    if (s) {
+        if (!ofproto->rstp) {
+            /* First enable: create the instance and start the 1-second
+             * timer tick used by rstp_run(). */
+            ofproto->rstp = rstp_create(ofproto_->name, s->address,
+                                        rstp_send_bpdu_cb, ofproto);
+            ofproto->rstp_last_tick = time_msec();
+        }
+        /* (Re)apply all bridge-level parameters from 's'. */
+        rstp_set_bridge_address(ofproto->rstp, s->address);
+        rstp_set_bridge_priority(ofproto->rstp, s->priority);
+        rstp_set_bridge_ageing_time(ofproto->rstp, s->ageing_time);
+        rstp_set_bridge_force_protocol_version(ofproto->rstp,
+                                               s->force_protocol_version);
+        rstp_set_bridge_max_age(ofproto->rstp, s->bridge_max_age);
+        rstp_set_bridge_forward_delay(ofproto->rstp, s->bridge_forward_delay);
+        rstp_set_bridge_transmit_hold_count(ofproto->rstp,
+                                            s->transmit_hold_count);
+    } else {
+        /* Disable RSTP on every port before dropping the bridge's
+         * reference to the instance. */
+        struct ofport *ofport;
+        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
+            set_rstp_port(ofport, NULL);
+        }
+        rstp_unref(ofproto->rstp);
+        ofproto->rstp = NULL;
+    }
+}
+
+/* Retrieves bridge-level RSTP status for 'ofproto_' into 's'.  When RSTP is
+ * not configured, only s->enabled is set (to false) and the remaining
+ * fields are left untouched. */
+static void
+get_rstp_status(struct ofproto *ofproto_, struct ofproto_rstp_status *s)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+    if (ofproto->rstp) {
+        s->enabled = true;
+        s->root_id = rstp_get_root_id(ofproto->rstp);
+        s->bridge_id = rstp_get_bridge_id(ofproto->rstp);
+        s->designated_id = rstp_get_designated_id(ofproto->rstp);
+        s->root_path_cost = rstp_get_root_path_cost(ofproto->rstp);
+        s->designated_port_id = rstp_get_designated_port_id(ofproto->rstp);
+        s->bridge_port_id = rstp_get_bridge_port_id(ofproto->rstp);
+    } else {
+        s->enabled = false;
+    }
+}
+
+/* Synchronizes 'ofport''s cached RSTP state with the state currently
+ * reported by the rstp library: flushes MAC learning when the learning
+ * state changes, updates the port's bundle on a forwarding change, marks
+ * flows for revalidation, and reflects the new state in the OpenFlow port
+ * description.  No-op if the state is unchanged. */
+static void
+update_rstp_port_state(struct ofport_dpif *ofport)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+    enum rstp_state state;
+
+    /* Figure out new state; a port with no rstp_port is always disabled. */
+    state = ofport->rstp_port ? rstp_port_get_state(ofport->rstp_port)
+        : RSTP_DISABLED;
+
+    /* Update state. */
+    if (ofport->rstp_state != state) {
+        enum ofputil_port_state of_state;
+        bool fwd_change;
+
+        VLOG_DBG("port %s: RSTP state changed from %s to %s",
+                 netdev_get_name(ofport->up.netdev),
+                 rstp_state_name(ofport->rstp_state),
+                 rstp_state_name(state));
+        /* A change in learning state may invalidate learned MAC entries,
+         * so flush the whole table. */
+        if (rstp_learn_in_state(ofport->rstp_state)
+                != rstp_learn_in_state(state)) {
+            /* xxx Learning action flows should also be flushed. */
+            ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+            mac_learning_flush(ofproto->ml);
+            ovs_rwlock_unlock(&ofproto->ml->rwlock);
+        }
+        fwd_change = rstp_forward_in_state(ofport->rstp_state)
+            != rstp_forward_in_state(state);
+
+        ofproto->backer->need_revalidate = REV_RSTP;
+        ofport->rstp_state = state;
+
+        if (fwd_change && ofport->bundle) {
+            bundle_update(ofport->bundle);
+        }
+
+        /* Update the RSTP state bits in the OpenFlow port description.
+         * The STP bits are reused; DISCARDING maps onto the STP "listen"
+         * bit since OpenFlow has no RSTP-specific port states. */
+        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
+        of_state |= (state == RSTP_LEARNING ? OFPUTIL_PS_STP_LEARN
+                     : state == RSTP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
+                     : state == RSTP_DISCARDING ? OFPUTIL_PS_STP_LISTEN
+                     : 0);
+        ofproto_port_set_state(&ofport->up, of_state);
+    }
+}
+
+/* Runs periodic RSTP processing for 'ofproto': ticks the protocol timers
+ * about once per second, propagates port state changes reported by the
+ * rstp library, and flushes the MAC table when the library requests it.
+ * No-op when RSTP is not configured.  Called from the ofproto run loop. */
+static void
+rstp_run(struct ofproto_dpif *ofproto)
+{
+    if (ofproto->rstp) {
+        long long int now = time_msec();
+        long long int elapsed = now - ofproto->rstp_last_tick;
+        struct rstp_port *rp;
+        struct ofport_dpif *ofport;
+
+        /* Every second, decrease the values of the timers. */
+        if (elapsed >= 1000) {
+            rstp_tick_timers(ofproto->rstp);
+            ofproto->rstp_last_tick = now;
+        }
+        /* Visit every port whose state changed since the last call;
+         * 'rp' is the library's iteration cursor. */
+        rp = NULL;
+        while ((ofport = rstp_get_next_changed_port_aux(ofproto->rstp, &rp))) {
+            update_rstp_port_state(ofport);
+        }
+        /* FIXME: This check should be done on-event (i.e., when setting
+         * p->fdb_flush) and not periodically.
+         */
+        if (rstp_check_and_reset_fdb_flush(ofproto->rstp)) {
+            ovs_rwlock_wrlock(&ofproto->ml->rwlock);
+            /* FIXME: RSTP should be able to flush the entries pertaining to a
+             * single port, not the whole table.
+             */
+            mac_learning_flush(ofproto->ml);
+            ovs_rwlock_unlock(&ofproto->ml->rwlock);
+        }
+    }
+}
+
/* Configures STP on 'ofproto_' using the settings defined in 's'. */
static int
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
enum ofputil_port_state of_state;
bool fwd_change;
- VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
- netdev_get_name(ofport->up.netdev),
- stp_state_name(ofport->stp_state),
- stp_state_name(state));
+ VLOG_DBG("port %s: STP state changed from %s to %s",
+ netdev_get_name(ofport->up.netdev),
+ stp_state_name(ofport->stp_state),
+ stp_state_name(state));
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
}
return 0;
} else if (sp && stp_port_no(sp) != s->port_num
- && ofport == stp_port_get_aux(sp)) {
+ && ofport == stp_port_get_aux(sp)) {
/* The port-id changed, so disable the old one if it's not
* already in use by another port. */
stp_port_disable(sp);
}
sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
+
+ /* Set name before enabling the port so that debugging messages can print
+ * the name. */
+ stp_port_set_name(sp, netdev_get_name(ofport->up.netdev));
stp_port_enable(sp);
stp_port_set_aux(sp, ofport);
poll_timer_wait(1000);
}
}
+
+/* Configures RSTP on 'ofport_' using the settings defined in 's'.  The
+ * caller is responsible for assigning RSTP port numbers and ensuring
+ * there are no duplicates.  A null 's' or s->enable == false detaches the
+ * port from RSTP entirely. */
+static void
+set_rstp_port(struct ofport *ofport_,
+              const struct ofproto_port_rstp_settings *s)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+    struct rstp_port *rp = ofport->rstp_port;
+
+    if (!s || !s->enable) {
+        /* Disable: release our reference and propagate the resulting
+         * RSTP_DISABLED state. */
+        if (rp) {
+            rstp_port_unref(rp);
+            ofport->rstp_port = NULL;
+            update_rstp_port_state(ofport);
+        }
+        return;
+    }
+
+    /* Check if need to add a new port. */
+    if (!rp) {
+        rp = ofport->rstp_port = rstp_add_port(ofproto->rstp);
+    }
+
+    /* Apply per-port settings; 'ofport' is stored as the port's aux
+     * pointer so state changes can be mapped back to it. */
+    rstp_port_set(rp, s->port_num, s->priority, s->path_cost,
+                  s->admin_edge_port, s->auto_edge, s->mcheck, ofport);
+    update_rstp_port_state(ofport);
+    /* Synchronize operational status. */
+    rstp_port_set_mac_operational(rp, ofport->may_enable);
+}
+
+/* Retrieves the RSTP status of 'ofport_' into 's'.  Sets s->enabled to
+ * false (leaving the other fields untouched) when RSTP is not configured
+ * on the bridge or not enabled on this port. */
+static void
+get_rstp_port_status(struct ofport *ofport_,
+                     struct ofproto_port_rstp_status *s)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+    struct rstp_port *rp = ofport->rstp_port;
+
+    if (!ofproto->rstp || !rp) {
+        s->enabled = false;
+        return;
+    }
+
+    s->enabled = true;
+    rstp_port_get_status(rp, &s->port_id, &s->state, &s->role, &s->tx_count,
+                         &s->rx_count, &s->error_count, &s->uptime);
+}
+
\f
static int
set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp,
if (ofport->may_enable != enable) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+
ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
+
+ if (ofport->rstp_port) {
+ rstp_port_set_mac_operational(ofport->rstp_port, enable);
+ }
}
ofport->may_enable = enable;
rule_dpif_credit_stats(rule, &stats);
}
- xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
+ xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
+ stats.tcp_flags, packet);
xin.ofpacts = ofpacts;
xin.ofpacts_len = ofpacts_len;
xin.resubmit_stats = &stats;
xlate_actions(&xin, &xout);
- execute.actions = ofpbuf_data(&xout.odp_actions);
- execute.actions_len = ofpbuf_size(&xout.odp_actions);
+ execute.actions = ofpbuf_data(xout.odp_actions);
+ execute.actions_len = ofpbuf_size(xout.odp_actions);
execute.packet = packet;
execute.md = pkt_metadata_from_flow(flow);
execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
struct classifier *cls = &ofproto->up.tables[table_id].cls;
const struct cls_rule *cls_rule;
struct rule_dpif *rule;
+ struct flow ofpc_normal_flow;
- fat_rwlock_rdlock(&cls->rwlock);
if (ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
- if (wc) {
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
- if (is_ip_any(flow)) {
- wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
- }
- }
+ /* We always unwildcard dl_type and nw_frag (for IP), so they
+ * need not be unwildcarded here. */
if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
/* We must pretend that transport ports are unavailable. */
- struct flow ofpc_normal_flow = *flow;
+ ofpc_normal_flow = *flow;
ofpc_normal_flow.tp_src = htons(0);
ofpc_normal_flow.tp_dst = htons(0);
- cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
+ flow = &ofpc_normal_flow;
} else {
- /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM). */
+ /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+ * Use the drop_frags_rule (which cannot disappear). */
cls_rule = &ofproto->drop_frags_rule->up.cr;
+ rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
+ if (take_ref) {
+ rule_dpif_ref(rule);
+ }
+ return rule;
}
- } else {
- cls_rule = classifier_lookup(cls, flow, wc);
}
- } else {
- cls_rule = classifier_lookup(cls, flow, wc);
}
- rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
- if (take_ref) {
- rule_dpif_ref(rule);
- }
- fat_rwlock_unlock(&cls->rwlock);
+ do {
+ cls_rule = classifier_lookup(cls, flow, wc);
+
+ rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
+
+ /* Try again if the rule was released before we get the reference. */
+ } while (rule && take_ref && !rule_dpif_try_ref(rule));
return rule;
}
take_ref);
if (stats) {
struct oftable *tbl = &ofproto->up.tables[next_id];
- atomic_ulong *stat = *rule ? &tbl->n_matched : &tbl->n_missed;
unsigned long orig;
- atomic_add(stat, stats->n_packets, &orig);
+
+ atomic_add_relaxed(*rule ? &tbl->n_matched : &tbl->n_missed,
+ stats->n_packets, &orig);
}
if (*rule) {
return RULE_DPIF_LOOKUP_VERDICT_MATCH;
} else if (!honor_table_miss) {
return RULE_DPIF_LOOKUP_VERDICT_CONTROLLER;
} else {
- switch (ofproto_table_get_config(&ofproto->up, *table_id)) {
- case OFPROTO_TABLE_MISS_CONTINUE:
+ switch (ofproto_table_get_miss_config(&ofproto->up, *table_id)) {
+ case OFPUTIL_TABLE_MISS_CONTINUE:
break;
- case OFPROTO_TABLE_MISS_CONTROLLER:
+ case OFPUTIL_TABLE_MISS_CONTROLLER:
return RULE_DPIF_LOOKUP_VERDICT_CONTROLLER;
- case OFPROTO_TABLE_MISS_DROP:
+ case OFPUTIL_TABLE_MISS_DROP:
return RULE_DPIF_LOOKUP_VERDICT_DROP;
- case OFPROTO_TABLE_MISS_DEFAULT:
+ case OFPUTIL_TABLE_MISS_DEFAULT:
return RULE_DPIF_LOOKUP_VERDICT_DEFAULT;
}
}
trace_format_odp(struct ds *result, int level, const char *title,
struct trace_ctx *trace)
{
- struct ofpbuf *odp_actions = &trace->xout.odp_actions;
+ struct ofpbuf *odp_actions = trace->xout.odp_actions;
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
goto exit;
}
- if (xlate_receive(backer, NULL, ofpbuf_data(&odp_key),
- ofpbuf_size(&odp_key), flow,
- ofprotop, NULL, NULL, NULL, NULL)) {
+ if (odp_flow_key_to_flow(ofpbuf_data(&odp_key), ofpbuf_size(&odp_key),
+ flow) == ODP_FIT_ERROR) {
+ error = "Failed to parse flow key";
+ goto exit;
+ }
+
+ *ofprotop = xlate_lookup_ofproto(backer, flow,
+ &flow->in_port.ofp_port);
+ if (*ofprotop == NULL) {
error = "Invalid datapath flow";
goto exit;
}
+
+ vsp_adjust_flow(*ofprotop, flow, NULL);
+
} else {
char *err = parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL);
ofpbuf_init(&ofpacts, 0);
/* Parse actions. */
- error = parse_ofpacts(argv[--argc], &ofpacts, &usable_protocols);
+ error = ofpacts_parse_actions(argv[--argc], &ofpacts, &usable_protocols);
if (error) {
unixctl_command_reply_error(conn, error);
free(error);
trace.result = ds;
trace.key = flow; /* Original flow key, used for megaflow. */
trace.flow = *flow; /* May be modified by actions. */
- xlate_in_init(&trace.xin, ofproto, flow, rule, ntohs(flow->tcp_flags),
- packet);
+ xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, rule,
+ ntohs(flow->tcp_flags), packet);
if (ofpacts) {
trace.xin.ofpacts = ofpacts;
trace.xin.ofpacts_len = ofpacts_len;
trace_format_megaflow(ds, 0, "Megaflow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, ofpbuf_data(&trace.xout.odp_actions),
- ofpbuf_size(&trace.xout.odp_actions));
+ format_odp_actions(ds, ofpbuf_data(trace.xout.odp_actions),
+ ofpbuf_size(trace.xout.odp_actions));
if (trace.xout.slow) {
enum slow_path_reason slow;
ds_destroy(&ds);
}
-static bool
-ofproto_dpif_contains_flow(const struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len)
-{
- struct ofproto_dpif *ofp;
- struct flow flow;
-
- xlate_receive(ofproto->backer, NULL, key, key_len, &flow, &ofp,
- NULL, NULL, NULL, NULL);
- return ofp == ofproto;
-}
-
static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
int argc OVS_UNUSED, const char *argv[],
flow_dump = dpif_flow_dump_create(ofproto->backer->dpif);
flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
- if (!ofproto_dpif_contains_flow(ofproto, f.key, f.key_len)) {
+ struct flow flow;
+
+ if (odp_flow_key_to_flow(f.key, f.key_len, &flow) == ODP_FIT_ERROR
+ || xlate_lookup_ofproto(ofproto->backer, &flow, NULL) != ofproto) {
continue;
}
/* Given 'flow', a flow representing a packet received on 'ofproto', checks
* whether 'flow->in_port' represents a Linux VLAN device. If so, changes
* 'flow->in_port' to the "real" device backing the VLAN device, sets
- * 'flow->vlan_tci' to the VLAN VID, and returns true. Otherwise (which is
- * always the case unless VLAN splinters are enabled), returns false without
- * making any changes. */
+ * 'flow->vlan_tci' to the VLAN VID, and returns true. Optionally pushes the
+ * appropriate VLAN on 'packet' if provided. Otherwise (which is always the
+ * case unless VLAN splinters are enabled), returns false without making any
+ * changes. */
bool
-vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
+vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow,
+ struct ofpbuf *packet)
OVS_EXCLUDED(ofproto->vsp_mutex)
{
ofp_port_t realdev;
* the VLAN device's VLAN ID. */
flow->in_port.ofp_port = realdev;
flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
+
+ if (packet) {
+ /* Make the packet resemble the flow, so that it gets sent to an
+ * OpenFlow controller properly, so that it looks correct for sFlow,
+ * and so that flow_extract() will get the correct vlan_tci if it is
+ * called on 'packet'. */
+ eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
+ }
+
return true;
}
NULL, /* get_memory_usage. */
type_get_memory_usage,
flush,
- get_features,
- get_tables,
+ query_tables,
port_alloc,
port_construct,
port_destruct,
set_stp_port,
get_stp_port_status,
get_stp_port_stats,
+ set_rstp,
+ get_rstp_status,
+ set_rstp_port,
+ get_rstp_port_status,
set_queues,
bundle_set,
bundle_remove,