#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
+#include "ovs-lldp.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
struct ovs_list bundle_node;/* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
+ struct lldp *lldp; /* lldp, if any. */
bool may_enable; /* May be enabled in bonds. */
bool is_tunnel; /* This port is a tunnel. */
bool is_layer3; /* This is a layer 3 port. */
static void port_run(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
+static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
static void ofport_update_peer(struct ofport_dpif *);
/* Reasons that we might need to revalidate every datapath flow, and
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);
-/* Stores mapping between 'recirc_id' and 'ofproto-dpif'. */
-struct dpif_backer_recirc_node {
- struct cmap_node cmap_node;
- struct ofproto_dpif *ofproto;
- uint32_t recirc_id;
-};
-
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
bool recv_set_enable; /* Enables or disables receiving packets. */
/* Recirculation. */
- struct recirc_id_pool *rid_pool; /* Recirculation ID pool. */
- struct cmap recirc_map; /* Map of 'recirc_id's to 'ofproto's. */
- struct ovs_mutex recirc_mutex; /* Protects 'recirc_map'. */
bool enable_recirc; /* True if the datapath supports recirculation */
/* True if the datapath supports unique flow identifiers */
return backer->enable_ufid;
}
-static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
- ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, struct flow *,
const struct dp_packet *packet,
const struct ofpact[], size_t ofpacts_len,
: -1;
xlate_ofport_set(ofproto, ofport->bundle, ofport,
ofport->up.ofp_port, ofport->odp_port,
- ofport->up.netdev, ofport->cfm,
- ofport->bfd, ofport->peer, stp_port,
+ ofport->up.netdev, ofport->cfm, ofport->bfd,
+ ofport->lldp, ofport->peer, stp_port,
ofport->rstp_port, ofport->qdscp,
ofport->n_qdscp, ofport->up.pp.config,
ofport->up.pp.state, ofport->is_tunnel,
free(ofproto);
}
-/* Called when 'ofproto' is destructed. Checks for and clears any
- * recirc_id leak. */
-static void
-dpif_backer_recirc_clear_ofproto(struct dpif_backer *backer,
- struct ofproto_dpif *ofproto)
-{
- struct dpif_backer_recirc_node *node;
-
- ovs_mutex_lock(&backer->recirc_mutex);
- CMAP_FOR_EACH (node, cmap_node, &backer->recirc_map) {
- if (node->ofproto == ofproto) {
- VLOG_ERR("recirc_id %"PRIu32", not freed when ofproto (%s) "
- "is destructed", node->recirc_id, ofproto->up.name);
- cmap_remove(&backer->recirc_map, &node->cmap_node,
- node->recirc_id);
- ovsrcu_postpone(free, node);
- }
- }
- ovs_mutex_unlock(&backer->recirc_mutex);
-}
-
static void
close_dpif_backer(struct dpif_backer *backer)
{
ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
hmap_destroy(&backer->odp_to_ofport_map);
shash_find_and_delete(&all_dpif_backers, backer->type);
- recirc_id_pool_destroy(backer->rid_pool);
- cmap_destroy(&backer->recirc_map);
- ovs_mutex_destroy(&backer->recirc_mutex);
free(backer->type);
free(backer->dp_version_string);
dpif_close(backer->dpif);
struct dpif_port port;
struct shash_node *node;
struct ovs_list garbage_list;
- struct odp_garbage *garbage, *next;
+ struct odp_garbage *garbage;
struct sset names;
char *backer_name;
const char *name;
int error;
+ recirc_init();
+
backer = shash_find_data(&all_dpif_backers, type);
if (backer) {
backer->refcount++;
}
dpif_port_dump_done(&port_dump);
- LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
+ LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
dpif_port_del(backer->dpif, garbage->odp_port);
- list_remove(&garbage->list_node);
free(garbage);
}
backer->max_mpls_depth = check_max_mpls_depth(backer);
backer->masked_set_action = check_masked_set_action(backer);
backer->enable_ufid = check_ufid(backer);
- backer->rid_pool = recirc_id_pool_create();
- ovs_mutex_init(&backer->recirc_mutex);
- cmap_init(&backer->recirc_map);
backer->enable_tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
atomic_count_init(&backer->tnl_count, 0);
uint64_t ofpacts_stub[128 / 8];
struct ofpbuf ofpacts;
struct rule *unused_rulep OVS_UNUSED;
- struct ofpact_resubmit *resubmit;
struct match match;
int error;
int id;
match_set_recirc_id(&match, 0);
error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, 0, &ofpacts,
&unused_rulep);
- if (error) {
- return error;
- }
-
- /* Continue rule lookups for not-matched recirc rules from table 0.
- *
- * (priority=1), actions=resubmit(, 0)
- */
- resubmit = ofpact_put_RESUBMIT(&ofpacts);
- resubmit->in_port = OFPP_IN_PORT;
- resubmit->table_id = 0;
-
- match_init_catchall(&match);
- error = ofproto_dpif_add_internal_flow(ofproto, &match, 1, 0, &ofpacts,
- &unused_rulep);
-
return error;
}
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofproto_packet_in *pin, *next_pin;
+ struct ofproto_packet_in *pin;
struct rule_dpif *rule;
struct oftable *table;
struct ovs_list pins;
}
guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
- list_remove(&pin->list_node);
+ LIST_FOR_EACH_POP (pin, list_node, &pins) {
free(CONST_CAST(void *, pin->up.packet));
free(pin);
}
guarded_list_destroy(&ofproto->pins);
- dpif_backer_recirc_clear_ofproto(ofproto->backer, ofproto);
+ recirc_free_ofproto(ofproto, ofproto->up.name);
mbridge_unref(ofproto->mbridge);
/* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
- struct ofproto_packet_in *pin, *next_pin;
+ struct ofproto_packet_in *pin;
struct ovs_list pins;
guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
+ LIST_FOR_EACH_POP (pin, list_node, &pins) {
connmgr_send_packet_in(ofproto->up.connmgr, pin);
- list_remove(&pin->list_node);
free(CONST_CAST(void *, pin->up.packet));
free(pin);
}
port->bundle = NULL;
port->cfm = NULL;
port->bfd = NULL;
+ port->lldp = NULL;
port->may_enable = false;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
bundle_remove(port_);
set_cfm(port_, NULL);
set_bfd(port_, NULL);
+ set_lldp(port_, NULL);
if (port->stp_port) {
stp_port_disable(port->stp_port);
}
}
ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
- port->up.pp.hw_addr);
+ port->lldp, port->up.pp.hw_addr);
netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ struct cfm *old = ofport->cfm;
int error = 0;
if (s) {
if (!ofport->cfm) {
- struct ofproto_dpif *ofproto;
-
- ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofport->cfm = cfm_create(ofport->up.netdev);
}
cfm_unref(ofport->cfm);
ofport->cfm = NULL;
out:
+ if (ofport->cfm != old) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
- ofport->up.pp.hw_addr);
+ ofport->lldp, ofport->up.pp.hw_addr);
return error;
}
ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
- ofport->up.pp.hw_addr);
+ ofport->lldp, ofport->up.pp.hw_addr);
return 0;
}
return ret;
}
+
+/* ofproto class hook: enables, reconfigures, or disables LLDP on 'ofport_'
+ * according to 'cfg'.  A NULL 'cfg' disables LLDP and releases the
+ * instance (the port destruct path calls set_lldp(port, NULL)).
+ * Returns 0 on success, EINVAL if 'cfg' could not be applied. */
+static int
+set_lldp(struct ofport *ofport_,
+         const struct smap *cfg)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+    int error = 0;
+
+    if (cfg) {
+        if (!ofport->lldp) {
+            struct ofproto_dpif *ofproto;
+
+            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+            ofproto->backer->need_revalidate = REV_RECONFIGURE;
+            ofport->lldp = lldp_create(ofport->up.netdev, ofport_->mtu, cfg);
+        }
+
+        if (!lldp_configure(ofport->lldp, cfg)) {
+            error = EINVAL;
+        }
+    } else if (ofport->lldp) {
+        /* LLDP is being disabled: release the instance, otherwise it
+         * leaks when the port is destructed with a NULL 'cfg'. */
+        struct ofproto_dpif *ofproto;
+
+        ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+        ofproto->backer->need_revalidate = REV_RECONFIGURE;
+        lldp_unref(ofport->lldp);
+        ofport->lldp = NULL;
+    }
+    if (error) {
+        /* Configuration failed: tear down the (possibly just-created)
+         * instance so the port is left with LLDP cleanly disabled. */
+        lldp_unref(ofport->lldp);
+        ofport->lldp = NULL;
+    }
+
+    ofproto_dpif_monitor_port_update(ofport,
+                                     ofport->bfd,
+                                     ofport->cfm,
+                                     ofport->lldp,
+                                     ofport->up.pp.hw_addr);
+    return error;
+}
+
+/* ofproto class hook: reports whether LLDP is currently enabled on
+ * 'ofport_'.  'status' is accepted for interface symmetry but not
+ * filled in. */
+static bool
+get_lldp_status(const struct ofport *ofport_,
+                struct lldp_status *status OVS_UNUSED)
+{
+    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+
+    return ofport->lldp != NULL;
+}
+
+/* ofproto class hook: applies the global Auto-Attach settings 's'.
+ * Thin pass-through to aa_configure(); 'ofproto' is unused, presumably
+ * because Auto-Attach state is kept globally in the lldp module —
+ * TODO(review): confirm against lib/ovs-lldp. */
+static int
+set_aa(struct ofproto *ofproto OVS_UNUSED,
+       const struct aa_settings *s)
+{
+    return aa_configure(s);
+}
+
+/* ofproto class hook: registers the Auto-Attach mapping 's' under the
+ * opaque caller-supplied handle 'aux' (used later by aa_mapping_unset()
+ * to identify the mapping). */
+static int
+aa_mapping_set(struct ofproto *ofproto_ OVS_UNUSED, void *aux,
+               const struct aa_mapping_settings *s)
+{
+    return aa_mapping_register(aux, s);
+}
+
+/* ofproto class hook: removes the Auto-Attach mapping previously
+ * registered with handle 'aux' via aa_mapping_set(). */
+static int
+aa_mapping_unset(struct ofproto *ofproto OVS_UNUSED, void *aux)
+{
+    return aa_mapping_unregister(aux);
+}
+
+/* ofproto class hook: transfers the queued Auto-Attach VLAN operations
+ * onto 'list' for the bridge layer to act on. */
+static int
+aa_vlan_get_queued(struct ofproto *ofproto OVS_UNUSED, struct ovs_list *list)
+{
+    return aa_get_vlan_queued(list);
+}
+
+/* ofproto class hook: returns how many Auto-Attach VLAN operations are
+ * currently queued (lets callers poll without draining the queue). */
+static unsigned int
+aa_vlan_get_queue_size(struct ofproto *ofproto OVS_UNUSED)
+{
+    return aa_get_vlan_queue_size();
+}
+
\f
/* Spanning Tree. */
{
struct ofport_dpif *port;
- port = get_ofp_port(bundle->ofproto, ofp_port);
+ port = ofp_port_to_ofport(bundle->ofproto, ofp_port);
if (!port) {
return false;
}
bundle_send_learning_packets(struct ofbundle *bundle)
{
struct ofproto_dpif *ofproto = bundle->ofproto;
- struct dp_packet *learning_packet;
int error, n_packets, n_errors;
struct mac_entry *e;
+ struct pkt_list {
+ struct ovs_list list_node;
+ struct ofport_dpif *port;
+ struct dp_packet *pkt;
+ } *pkt_node;
struct ovs_list packets;
list_init(&packets);
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (mac_entry_get_port(ofproto->ml, e) != bundle) {
- void *port_void;
-
- learning_packet = bond_compose_learning_packet(bundle->bond,
- e->mac, e->vlan,
- &port_void);
- /* Temporarily use 'frame' as a private pointer (see below). */
- ovs_assert(learning_packet->frame == dp_packet_data(learning_packet));
- learning_packet->frame = port_void;
- list_push_back(&packets, &learning_packet->list_node);
+ pkt_node = xmalloc(sizeof *pkt_node);
+ pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
+ e->mac, e->vlan,
+ (void **)&pkt_node->port);
+ list_push_back(&packets, &pkt_node->list_node);
}
}
ovs_rwlock_unlock(&ofproto->ml->rwlock);
error = n_packets = n_errors = 0;
- LIST_FOR_EACH (learning_packet, list_node, &packets) {
+ LIST_FOR_EACH_POP (pkt_node, list_node, &packets) {
int ret;
- void *port_void = learning_packet->frame;
- /* Restore 'frame'. */
- learning_packet->frame = dp_packet_data(learning_packet);
- ret = ofproto_dpif_send_packet(port_void, learning_packet);
+ ret = ofproto_dpif_send_packet(pkt_node->port, pkt_node->pkt);
+ dp_packet_delete(pkt_node->pkt);
+ free(pkt_node);
if (ret) {
error = ret;
n_errors++;
}
n_packets++;
}
- dp_packet_list_delete(&packets);
if (n_errors) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
\f
/* Ports. */
-static struct ofport_dpif *
-get_ofp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
+struct ofport_dpif *
+ofp_port_to_ofport(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
return ofport ? ofport_dpif_cast(ofport) : NULL;
port_del(struct ofproto *ofproto_, ofp_port_t ofp_port)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+ struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
int error = 0;
if (!ofport) {
rule_dpif_set_recirc_id(struct rule_dpif *rule, uint32_t id)
OVS_REQUIRES(rule->up.mutex)
{
- ovs_assert(!rule->recirc_id);
- rule->recirc_id = id;
-}
-
-/* Returns 'rule''s recirculation id. */
-uint32_t
-rule_dpif_get_recirc_id(struct rule_dpif *rule)
- OVS_REQUIRES(rule->up.mutex)
-{
- if (!rule->recirc_id) {
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- rule_dpif_set_recirc_id(rule, ofproto_dpif_alloc_recirc_id(ofproto));
+ ovs_assert(!rule->recirc_id || rule->recirc_id == id);
+ if (rule->recirc_id == id) {
+ /* Release the new reference to the same id. */
+ recirc_free_id(id);
+ } else {
+ rule->recirc_id = id;
}
- return rule->recirc_id;
}
/* Sets 'rule''s recirculation id. */
ovs_mutex_unlock(&rule->up.mutex);
}
-/* Lookup 'flow' in table 0 of 'ofproto''s classifier.
- * If 'wc' is non-null, sets the fields that were relevant as part of
- * the lookup. Returns the table id where a match or miss occurred via
- * 'table_id'. This will be zero unless there was a miss and
- * OFPTC11_TABLE_MISS_CONTINUE is in effect for the sequence of tables
- * where misses occur, or TBL_INTERNAL if the rule has a non-zero
- * recirculation ID, and a match was found in the internal table, or if
- * there was no match and one of the special rules (drop_frags_rule,
- * miss_rule, or no_packet_in_rule) was returned.
- *
- * The return value is the found rule, which is valid at least until the next
- * RCU quiescent period. If the rule needs to stay around longer,
- * a non-zero 'take_ref' must be passed in to cause a reference to be taken
- * on it before this returns. */
-struct rule_dpif *
-rule_dpif_lookup(struct ofproto_dpif *ofproto, struct flow *flow,
- struct flow_wildcards *wc, bool take_ref,
- const struct dpif_flow_stats *stats, uint8_t *table_id)
-{
- *table_id = 0;
-
- if (ofproto_dpif_get_enable_recirc(ofproto)) {
- /* Always exactly match recirc_id since datapath supports
- * recirculation. */
- if (wc) {
- wc->masks.recirc_id = UINT32_MAX;
- }
- *table_id = rule_dpif_lookup_get_init_table_id(flow);
- }
-
- return rule_dpif_lookup_from_table(ofproto, flow, wc, take_ref, stats,
- table_id, flow->in_port.ofp_port, true,
- true);
-}
-
/* The returned rule (if any) is valid at least until the next RCU quiescent
* period. If the rule needs to stay around longer, a non-zero 'take_ref'
* must be passed in to cause a reference to be taken on it.
|| miss_config == OFPUTIL_TABLE_MISS_CONTROLLER) {
struct ofport_dpif *port;
- port = get_ofp_port(ofproto, old_in_port);
+ port = ofp_port_to_ofport(ofproto, old_in_port);
if (!port) {
VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
old_in_port);
ovs_mutex_destroy(&rule->stats_mutex);
if (rule->recirc_id) {
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- ofproto_dpif_free_recirc_id(ofproto, rule->recirc_id);
+ recirc_free_id(rule->recirc_id);
}
}
{
return group->up.type;
}
+
+/* Returns the selection method string from 'group''s properties.
+ * The returned pointer is owned by 'group'; callers must not free it. */
+const char *
+group_dpif_get_selection_method(const struct group_dpif *group)
+{
+    return group->up.props.selection_method;
+}
\f
/* Sends 'packet' out 'ofport'.
* May modify 'packet'.
ovs_mutex_unlock(&ofproto->stats_mutex);
return error;
}
+
+/* Returns the parameter associated with 'group''s selection method
+ * (meaningful only when a selection method is configured — see
+ * group_dpif_get_selection_method()). */
+uint64_t
+group_dpif_get_selection_method_param(const struct group_dpif *group)
+{
+    return group->up.props.selection_method_param;
+}
+
+/* Returns the field array from 'group''s properties.  The pointer
+ * refers into 'group' itself and stays valid only as long as 'group'
+ * does. */
+const struct field_array *
+group_dpif_get_fields(const struct group_dpif *group)
+{
+    return &group->up.props.fields;
+}
\f
/* Return the version string of the datapath that backs up
* this 'ofproto'.
+/* Translates OpenFlow port 'ofp_port' within 'ofproto' to its datapath
+ * (ODP) port number, or ODPP_NONE if no such port exists. */
static odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
-    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+    const struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
    return ofport ? ofport->odp_port : ODPP_NONE;
}
}
}
-struct ofproto_dpif *
-ofproto_dpif_recirc_get_ofproto(const struct dpif_backer *backer,
- uint32_t recirc_id)
-{
- struct dpif_backer_recirc_node *node;
-
- node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
- struct dpif_backer_recirc_node, cmap_node);
-
- return node ? node->ofproto : NULL;
-}
-
-uint32_t
-ofproto_dpif_alloc_recirc_id(struct ofproto_dpif *ofproto)
-{
- struct dpif_backer *backer = ofproto->backer;
- uint32_t recirc_id = recirc_id_alloc(backer->rid_pool);
-
- if (recirc_id) {
- struct dpif_backer_recirc_node *node = xmalloc(sizeof *node);
-
- node->recirc_id = recirc_id;
- node->ofproto = ofproto;
-
- ovs_mutex_lock(&backer->recirc_mutex);
- cmap_insert(&backer->recirc_map, &node->cmap_node, node->recirc_id);
- ovs_mutex_unlock(&backer->recirc_mutex);
- }
-
- return recirc_id;
-}
-
-void
-ofproto_dpif_free_recirc_id(struct ofproto_dpif *ofproto, uint32_t recirc_id)
-{
- struct dpif_backer *backer = ofproto->backer;
- struct dpif_backer_recirc_node *node;
-
- node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
- struct dpif_backer_recirc_node, cmap_node);
- if (node) {
- ovs_mutex_lock(&backer->recirc_mutex);
- cmap_remove(&backer->recirc_map, &node->cmap_node, node->recirc_id);
- ovs_mutex_unlock(&backer->recirc_mutex);
- recirc_id_free(backer->rid_pool, node->recirc_id);
-
- /* 'recirc_id' should never be freed by non-owning 'ofproto'. */
- ovs_assert(node->ofproto == ofproto);
-
- /* RCU postpone the free, since other threads may be referring
- * to 'node' at same time. */
- ovsrcu_postpone(free, node);
- }
-}
-
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
const struct match *match, int priority,
set_cfm,
cfm_status_changed,
get_cfm_status,
+ set_lldp,
+ get_lldp_status,
+ set_aa,
+ aa_mapping_set,
+ aa_mapping_unset,
+ aa_vlan_get_queued,
+ aa_vlan_get_queue_size,
set_bfd,
bfd_status_changed,
get_bfd_status,