#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
-#include "ovs-router.h"
#include "poll-loop.h"
+#include "ovs-router.h"
#include "seq.h"
#include "simap.h"
#include "smap.h"
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
+static void bundle_flush_macs(struct ofbundle *, bool);
+static void bundle_move(struct ofbundle *, struct ofbundle *);
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
/* Version string of the datapath stored in OVSDB. */
char *dp_version_string;
+
+ /* True if the datapath supports tnl_push and pop actions. */
+ bool enable_tnl_push_pop;
+ struct atomic_count tnl_count;
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
-static void ofproto_dpif_unixctl_init(void);
+static bool ofproto_use_tnl_push_pop = true;
+static void ofproto_unixctl_init(void);
static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
return 0;
}
- dpif_run(backer->dpif);
+
+ if (dpif_run(backer->dpif)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+
udpif_run(backer->udpif);
/* If vswitchd started with other_config:flow_restore_wait set as "true",
iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
if (tnl_port_reconfigure(iter, iter->up.netdev,
- iter->odp_port)) {
+ iter->odp_port,
+ ovs_native_tunneling_is_on(ofproto), dp_port)) {
backer->need_revalidate = REV_RECONFIGURE;
}
}
backer->masked_set_action = check_masked_set_action(backer);
backer->rid_pool = recirc_id_pool_create();
+ backer->enable_tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
+ atomic_count_init(&backer->tnl_count, 0);
+
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
return error;
}
+/* Returns true if native tunnel push/pop may be used for 'ofproto': the
+ * "ofproto/tnl-push-pop" unixctl knob is enabled, the datapath backer
+ * advertises support for the tnl_push/pop actions, and at least one tunnel
+ * port currently exists on the backer (tnl_count > 0). */
+bool
+ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
+{
+    return ofproto_use_tnl_push_pop && ofproto->backer->enable_tnl_push_pop &&
+        atomic_count_get(&ofproto->backer->tnl_count);
+}
+
/* Tests whether 'backer''s datapath supports recirculation. Only newer
* datapaths support OVS_KEY_ATTR_RECIRC_ID in keys. We need to disable some
* features on older datapaths that don't support this feature.
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
+ ofproto_tunnel_init();
ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
guarded_list_init(&ofproto->pins);
- ofproto_dpif_unixctl_init();
- ovs_router_unixctl_register();
+ ofproto_unixctl_init();
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- CLS_FOR_EACH_SAFE (rule, up.cr, &table->cls) {
+ CLS_FOR_EACH (rule, up.cr, &table->cls) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}
}
}
}
-
return 0;
}
port->odp_port = dpif_port.port_no;
if (netdev_get_tunnel_config(netdev)) {
- tnl_port_add(port, port->up.netdev, port->odp_port);
+ atomic_count_inc(&ofproto->backer->tnl_count);
+ tnl_port_add(port, port->up.netdev, port->odp_port,
+ ovs_native_tunneling_is_on(ofproto), namebuf);
port->is_tunnel = true;
if (ofproto->ipfix) {
dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
}
+ if (port->is_tunnel) {
+ atomic_count_dec(&ofproto->backer->tnl_count);
+ }
+
if (port->is_tunnel && ofproto->ipfix) {
dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
}
port_modified(struct ofport *port_)
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
+ char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
+ struct netdev *netdev = port->up.netdev;
if (port->bundle && port->bundle->bond) {
- bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
+ bond_slave_set_netdev(port->bundle->bond, port, netdev);
}
if (port->cfm) {
- cfm_set_netdev(port->cfm, port->up.netdev);
+ cfm_set_netdev(port->cfm, netdev);
}
if (port->bfd) {
- bfd_set_netdev(port->bfd, port->up.netdev);
+ bfd_set_netdev(port->bfd, netdev);
}
ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
port->up.pp.hw_addr);
- if (port->is_tunnel && tnl_port_reconfigure(port, port->up.netdev,
- port->odp_port)) {
- ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate =
- REV_RECONFIGURE;
+ netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
+
+ if (port->is_tunnel) {
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+
+ if (tnl_port_reconfigure(port, netdev, port->odp_port,
+ ovs_native_tunneling_is_on(ofproto), namebuf)) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
}
ofport_update_peer(port);
netdev_get_name(ofport->up.netdev),
rstp_state_name(ofport->rstp_state),
rstp_state_name(state));
+
if (rstp_learn_in_state(ofport->rstp_state)
- != rstp_learn_in_state(state)) {
- /* xxx Learning action flows should also be flushed. */
- ovs_rwlock_wrlock(&ofproto->ml->rwlock);
- mac_learning_flush(ofproto->ml);
- ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ != rstp_learn_in_state(state)) {
+ /* XXX: Learning action flows should also be flushed. */
+ if (ofport->bundle) {
+ if (!rstp_shift_root_learned_address(ofproto->rstp)
+ || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
+ bundle_flush_macs(ofport->bundle, false);
+ }
+ }
}
fwd_change = rstp_forward_in_state(ofport->rstp_state)
!= rstp_forward_in_state(state);
while ((ofport = rstp_get_next_changed_port_aux(ofproto->rstp, &rp))) {
update_rstp_port_state(ofport);
}
+ rp = NULL;
+ ofport = NULL;
/* FIXME: This check should be done on-event (i.e., when setting
* p->fdb_flush) and not periodically.
*/
- if (rstp_check_and_reset_fdb_flush(ofproto->rstp)) {
- ovs_rwlock_wrlock(&ofproto->ml->rwlock);
- /* FIXME: RSTP should be able to flush the entries pertaining to a
- * single port, not the whole table.
- */
- mac_learning_flush(ofproto->ml);
- ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ while ((ofport = rstp_check_and_reset_fdb_flush(ofproto->rstp, &rp))) {
+ if (!rstp_shift_root_learned_address(ofproto->rstp)
+ || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
+ bundle_flush_macs(ofport->bundle, false);
+ }
+ }
+
+ if (rstp_shift_root_learned_address(ofproto->rstp)) {
+ bundle_move(((struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp))->bundle,
+ ((struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp))->bundle);
+ rstp_reset_root_changed(ofproto->rstp);
}
}
}
}
rstp_port_set(rp, s->port_num, s->priority, s->path_cost,
- s->admin_edge_port, s->auto_edge, s->mcheck, ofport);
+ s->admin_edge_port, s->auto_edge,
+ s->admin_p2p_mac_state, s->admin_port_state, s->mcheck,
+ ofport);
update_rstp_port_state(ofport);
/* Synchronize operational status. */
rstp_port_set_mac_operational(rp, ofport->may_enable);
}
s->enabled = true;
- rstp_port_get_status(rp, &s->port_id, &s->state, &s->role, &s->tx_count,
+ rstp_port_get_status(rp, &s->port_id, &s->state, &s->role,
+ &s->designated_bridge_id, &s->designated_port_id,
+ &s->designated_path_cost, &s->tx_count,
&s->rx_count, &s->error_count, &s->uptime);
}
ovs_rwlock_unlock(&ml->rwlock);
}
+/* Re-points every learned MAC entry on bundle 'old' to bundle 'new', which
+ * must belong to the same ofproto, and forces datapath flow revalidation.
+ * Called when the RSTP root port moves so that learned addresses follow the
+ * new root instead of being flushed and relearned. */
+static void
+bundle_move(struct ofbundle *old, struct ofbundle *new)
+{
+    struct ofproto_dpif *ofproto = old->ofproto;
+    struct mac_learning *ml = ofproto->ml;
+    struct mac_entry *mac, *next_mac;
+
+    ovs_assert(new->ofproto == old->ofproto);
+
+    /* Flows translated against the old bundle are now stale. */
+    ofproto->backer->need_revalidate = REV_RECONFIGURE;
+    ovs_rwlock_wrlock(&ml->rwlock);
+    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
+        if (mac->port.p == old) {
+            mac->port.p = new;
+        }
+    }
+    ovs_rwlock_unlock(&ml->rwlock);
+}
+
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
- || !stp_forward_in_state(port->stp_state)) {
+ || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
+ || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
break;
}
list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
- || !stp_forward_in_state(port->stp_state)) {
+ || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
+ || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
}
}
return error;
}
+/* ofproto provider hook: copies LACP slave statistics for 'ofport_' into
+ * '*stats'.  Returns 0 on success; -1 if the port is not in a LACP-enabled
+ * bundle or no statistics are available for it. */
+static int
+port_get_lacp_stats(const struct ofport *ofport_, struct lacp_slave_stats *stats)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+    if (ofport->bundle && ofport->bundle->lacp) {
+        if (lacp_get_slave_stats(ofport->bundle->lacp, ofport, stats)) {
+            return 0;
+        }
+    }
+    return -1;
+}
+
struct port_dump_state {
uint32_t bucket;
uint32_t offset;
execute.actions = ofpbuf_data(xout.odp_actions);
execute.actions_len = ofpbuf_size(xout.odp_actions);
+
execute.packet = packet;
execute.md = pkt_metadata_from_flow(flow);
execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
}
static void
-ofproto_dpif_unixctl_init(void)
+/* Marks every dpif backer for flow revalidation, e.g. after a configuration
+ * knob that affects translation (such as tnl-push-pop) is toggled. */
+ofproto_revalidate_all_backers(void)
+{
+    const struct shash_node **backers;
+    size_t n, i;
+
+    backers = shash_sort(&all_dpif_backers);
+    /* Hoist the count out of the loop: it is invariant while we iterate. */
+    n = shash_count(&all_dpif_backers);
+    for (i = 0; i < n; i++) {
+        struct dpif_backer *backer = backers[i]->data;
+        backer->need_revalidate = REV_RECONFIGURE;
+    }
+    free(backers);
+}
+
+/* unixctl handler for "ofproto/tnl-push-pop on|off": toggles use of native
+ * tunnel push/pop actions and revalidates all backers so existing flows are
+ * re-translated with the new setting.  Replies with an error on any other
+ * argument so the client is never left without a response. */
+static void
+disable_tnl_push_pop(struct unixctl_conn *conn, int argc OVS_UNUSED,
+                     const char *argv[], void *aux OVS_UNUSED)
+{
+    if (!strcasecmp(argv[1], "off")) {
+        ofproto_use_tnl_push_pop = false;
+        unixctl_command_reply(conn, "Tunnel push-pop off");
+        ofproto_revalidate_all_backers();
+    } else if (!strcasecmp(argv[1], "on")) {
+        ofproto_use_tnl_push_pop = true;
+        unixctl_command_reply(conn, "Tunnel push-pop on");
+        ofproto_revalidate_all_backers();
+    } else {
+        unixctl_command_reply_error(conn, "Invalid argument: use \"on\" or \"off\"");
+    }
+}
+
+static void
+ofproto_unixctl_init(void)
{
static bool registered;
if (registered) {
NULL);
unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
ofproto_unixctl_dpif_dump_flows, NULL);
+
+ unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
+ disable_tnl_push_pop, NULL);
}
/* Returns true if 'table' is the table used for internal rules,
port_poll,
port_poll_wait,
port_is_lacp_current,
+ port_get_lacp_stats,
NULL, /* rule_choose_table */
rule_alloc,
rule_construct,