/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
+#include "ovs-rcu.h"
#include "ovs-router.h"
#include "seq.h"
#include "simap.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
char *name; /* Identifier for log messages. */
/* Configuration. */
- struct list ports; /* Contains "struct ofport"s. */
+ struct ovs_list ports; /* Contains "struct ofport"s. */
enum port_vlan_mode vlan_mode; /* VLAN mode */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
odp_port_t odp_port;
struct ofbundle *bundle; /* Bundle that contains this port, if any. */
- struct list bundle_node; /* In struct ofbundle's "ports" list. */
+ struct ovs_list bundle_node;/* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
bool may_enable; /* May be enabled in bonds. */
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);
+/* Stores mapping between 'recirc_id' and 'ofproto-dpif'. */
+struct dpif_backer_recirc_node {
+    /* Node in dpif_backer's 'recirc_map', hashed on 'recirc_id'.
+     * NOTE(review): lookups apply CONTAINER_OF() directly to cmap_find()'s
+     * result, which maps NULL back to NULL only while this member stays
+     * first in the struct (offset 0) — confirm before reordering fields. */
+    struct cmap_node cmap_node;
+    struct ofproto_dpif *ofproto;   /* Bridge that owns 'recirc_id'. */
+    uint32_t recirc_id;             /* Datapath recirculation id. */
+};
+
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
/* Recirculation. */
struct recirc_id_pool *rid_pool; /* Recirculation ID pool. */
+ struct cmap recirc_map; /* Map of 'recirc_id's to 'ofproto's. */
+ struct ovs_mutex recirc_mutex; /* Protects 'recirc_map'. */
bool enable_recirc; /* True if the datapath supports recirculation */
+ /* True if the datapath supports unique flow identifiers */
+ bool enable_ufid;
+
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
return ofproto->backer->enable_recirc;
}
+/* Returns true if 'backer''s datapath supports unique flow identifiers
+ * (UFID) for flow operations, as probed by check_ufid() when the backer
+ * was opened. */
+bool
+ofproto_dpif_get_enable_ufid(struct dpif_backer *backer)
+{
+    return backer->enable_ufid;
+}
+
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, struct flow *,
free(ofproto);
}
+/* Called when 'ofproto' is destructed.  Checks for and clears any
+ * recirc_id leak. */
+static void
+dpif_backer_recirc_clear_ofproto(struct dpif_backer *backer,
+                                 struct ofproto_dpif *ofproto)
+{
+    struct dpif_backer_recirc_node *node;
+
+    ovs_mutex_lock(&backer->recirc_mutex);
+    CMAP_FOR_EACH (node, cmap_node, &backer->recirc_map) {
+        if (node->ofproto == ofproto) {
+            /* Every id should have been released through
+             * ofproto_dpif_free_recirc_id() before destruction; log so
+             * the leak is visible.
+             * NOTE(review): the id itself is not returned to 'rid_pool'
+             * here, unlike in ofproto_dpif_free_recirc_id() — confirm
+             * this is intentional (e.g. pool torn down with the backer). */
+            VLOG_ERR("recirc_id %"PRIu32", not freed when ofproto (%s) "
+                     "is destructed", node->recirc_id, ofproto->up.name);
+            cmap_remove(&backer->recirc_map, &node->cmap_node,
+                        node->recirc_id);
+            /* Concurrent readers may still hold a pointer to 'node';
+             * defer the free to the next RCU grace period. */
+            ovsrcu_postpone(free, node);
+        }
+    }
+    ovs_mutex_unlock(&backer->recirc_mutex);
+}
+
static void
close_dpif_backer(struct dpif_backer *backer)
{
hmap_destroy(&backer->odp_to_ofport_map);
shash_find_and_delete(&all_dpif_backers, backer->type);
recirc_id_pool_destroy(backer->rid_pool);
+ cmap_destroy(&backer->recirc_map);
+ ovs_mutex_destroy(&backer->recirc_mutex);
free(backer->type);
free(backer->dp_version_string);
dpif_close(backer->dpif);
/* Datapath port slated for removal from datapath. */
struct odp_garbage {
- struct list list_node;
+ struct ovs_list list_node;
odp_port_t odp_port;
};
static bool check_variable_length_userdata(struct dpif_backer *backer);
static size_t check_max_mpls_depth(struct dpif_backer *backer);
static bool check_recirc(struct dpif_backer *backer);
+static bool check_ufid(struct dpif_backer *backer);
static bool check_masked_set_action(struct dpif_backer *backer);
static int
struct dpif_port_dump port_dump;
struct dpif_port port;
struct shash_node *node;
- struct list garbage_list;
+ struct ovs_list garbage_list;
struct odp_garbage *garbage, *next;
struct sset names;
backer->enable_recirc = check_recirc(backer);
backer->max_mpls_depth = check_max_mpls_depth(backer);
backer->masked_set_action = check_masked_set_action(backer);
+ backer->enable_ufid = check_ufid(backer);
backer->rid_pool = recirc_id_pool_create();
+ ovs_mutex_init(&backer->recirc_mutex);
+ cmap_init(&backer->recirc_map);
backer->enable_tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
atomic_count_init(&backer->tnl_count, 0);
return enable_recirc;
}
+/* Tests whether 'dpif' supports userspace flow ids. We can skip serializing
+ * some flow attributes for datapaths that support this feature.
+ *
+ * Returns true if 'dpif' supports UFID for flow operations.
+ * Returns false if 'dpif' does not support UFID. */
+static bool
+check_ufid(struct dpif_backer *backer)
+{
+    struct flow flow;
+    struct odputil_keybuf keybuf;
+    struct ofpbuf key;
+    ovs_u128 ufid;
+    bool enable_ufid;
+
+    /* Build a minimal probe flow; the ethertype value is arbitrary. */
+    memset(&flow, 0, sizeof flow);
+    flow.dl_type = htons(0x1234);
+
+    /* Serialize the flow to a datapath key and derive the UFID hash the
+     * probe will be issued with. */
+    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+    odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
+    dpif_flow_hash(backer->dpif, ofpbuf_data(&key), ofpbuf_size(&key), &ufid);
+
+    enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);
+
+    if (enable_ufid) {
+        VLOG_INFO("%s: Datapath supports userspace flow ids",
+                  dpif_name(backer->dpif));
+    } else {
+        VLOG_INFO("%s: Datapath does not support userspace flow ids",
+                  dpif_name(backer->dpif));
+    }
+    return enable_ufid;
+}
+
/* Tests whether 'backer''s datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions. We need
* to disable some features on older datapaths that don't support this
struct shash_node *node, *next;
int error;
+ /* Tunnel module can get used right after the udpif threads are running. */
+ ofproto_tunnel_init();
+
error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
if (error) {
return error;
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
- ofproto_tunnel_init();
ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
struct ofproto_packet_in *pin, *next_pin;
struct rule_dpif *rule;
struct oftable *table;
- struct list pins;
+ struct ovs_list pins;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
xlate_txn_start();
}
guarded_list_destroy(&ofproto->pins);
+ dpif_backer_recirc_clear_ofproto(ofproto->backer, ofproto);
+
mbridge_unref(ofproto->mbridge);
netflow_unref(ofproto->netflow);
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
struct ofproto_packet_in *pin, *next_pin;
- struct list pins;
+ struct ovs_list pins;
guarded_list_pop_all(&ofproto->pins, &pins);
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
struct dpif_sflow *ds = ofproto->sflow;
if (sflow_options) {
+ uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
if (!ds) {
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
+ if (dpif_sflow_get_probability(ds) != old_probability) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
} else {
if (ds) {
dpif_sflow_unref(ds);
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
- if (mac->port.p == bundle) {
+ if (mac_entry_get_port(ml, mac) == bundle) {
if (all_ofprotos) {
struct ofproto_dpif *o;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
- if (mac->port.p == old) {
- mac->port.p = new;
+ if (mac_entry_get_port(ml, mac) == old) {
+ mac_entry_set_port(ml, mac, new);
}
}
ovs_rwlock_unlock(&ml->rwlock);
}
ofproto = bundle->ofproto;
- mbridge_unregister_bundle(ofproto->mbridge, bundle->aux);
+ mbridge_unregister_bundle(ofproto->mbridge, bundle);
xlate_txn_start();
xlate_bundle_remove(bundle);
struct ofpbuf *learning_packet;
int error, n_packets, n_errors;
struct mac_entry *e;
- struct list packets;
+ struct ovs_list packets;
list_init(&packets);
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
- if (e->port.p != bundle) {
+ if (mac_entry_get_port(ofproto->ml, e) != bundle) {
void *port_void;
learning_packet = bond_compose_learning_packet(bundle->bond,
return 0;
}
-/* Configures multicast snooping port's flood setting on 'ofproto'. */
+/* Configures multicast snooping port's flood settings on 'ofproto'. */
static int
-set_mcast_snooping_port(struct ofproto *ofproto_, void *aux, bool flood)
+set_mcast_snooping_port(struct ofproto *ofproto_, void *aux,
+ const struct ofproto_mcast_snooping_port_settings *s)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofbundle *bundle = bundle_lookup(ofproto, aux);
- if (ofproto->ms) {
+ if (ofproto->ms && s) {
ovs_rwlock_wrlock(&ofproto->ms->rwlock);
- mcast_snooping_set_port_flood(ofproto->ms, bundle->vlan, bundle,
- flood);
+ mcast_snooping_set_port_flood(ofproto->ms, bundle, s->flood);
+ mcast_snooping_set_port_flood_reports(ofproto->ms, bundle,
+ s->flood_reports);
ovs_rwlock_unlock(&ofproto->ms->rwlock);
}
return 0;
/* The returned rule (if any) is valid at least until the next RCU quiescent
* period. If the rule needs to stay around longer, a non-zero 'take_ref'
- * must be passed in to cause a reference to be taken on it. */
+ * must be passed in to cause a reference to be taken on it.
+ *
+ * 'flow' is non-const to allow for temporary modifications during the lookup.
+ * Any changes are restored before returning. */
static struct rule_dpif *
rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
- const struct flow *flow, struct flow_wildcards *wc,
+ struct flow *flow, struct flow_wildcards *wc,
bool take_ref)
{
struct classifier *cls = &ofproto->up.tables[table_id].cls;
* on it before this returns.
*
* 'in_port' allows the lookup to take place as if the in port had the value
- * 'in_port'. This is needed for resubmit action support. */
+ * 'in_port'. This is needed for resubmit action support.
+ *
+ * 'flow' is non-const to allow for temporary modifications during the lookup.
+ * Any changes are restored before returning. */
struct rule_dpif *
rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
struct flow_wildcards *wc, bool take_ref,
OVS_REQUIRES(group->stats_mutex)
{
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
group->packet_count = 0;
group->byte_count = 0;
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
} else { /* Credit to all buckets */
- const struct list *buckets;
+ const struct ovs_list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
{
struct group_dpif *group = group_dpif_cast(group_);
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
struct bucket_counter *bucket_stats;
ovs_mutex_lock(&group->stats_mutex);
void
group_dpif_get_buckets(const struct group_dpif *group,
- const struct list **buckets)
+ const struct ovs_list **buckets)
{
*buckets = &group->up.buckets;
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
- struct ofbundle *bundle = e->port.p;
+ struct ofbundle *bundle = mac_entry_get_port(ofproto->ml, e);
char name[OFP_MAX_PORT_NAME_LEN];
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
}
}
+/* Returns the ofproto that allocated 'recirc_id' via
+ * ofproto_dpif_alloc_recirc_id(), or NULL if the id is not currently in
+ * use.  Presumably safe for concurrent readers without taking
+ * 'recirc_mutex' (cmap lookup under RCU; the mutex serializes writers) —
+ * confirm against cmap's concurrency contract. */
+struct ofproto_dpif *
+ofproto_dpif_recirc_get_ofproto(const struct dpif_backer *backer,
+                                uint32_t recirc_id)
+{
+    struct dpif_backer_recirc_node *node;
+
+    /* NOTE(review): CONTAINER_OF() of a null cmap_find() result yields a
+     * null 'node' only because 'cmap_node' is the struct's first member. */
+    node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
+                        struct dpif_backer_recirc_node, cmap_node);
+
+    return node ? node->ofproto : NULL;
+}
+
+/* Allocates a datapath recirculation id for 'ofproto' and records the
+ * id -> ofproto mapping in the backer's 'recirc_map' so that
+ * ofproto_dpif_recirc_get_ofproto() can resolve it.  Returns 0 if the
+ * id pool is exhausted. */
uint32_t
ofproto_dpif_alloc_recirc_id(struct ofproto_dpif *ofproto)
{
    struct dpif_backer *backer = ofproto->backer;
+    uint32_t recirc_id = recirc_id_alloc(backer->rid_pool);
-    return recirc_id_alloc(backer->rid_pool);
+    /* An id of 0 means the pool is exhausted; track only valid ids. */
+    if (recirc_id) {
+        struct dpif_backer_recirc_node *node = xmalloc(sizeof *node);
+
+        node->recirc_id = recirc_id;
+        node->ofproto = ofproto;
+
+        /* 'recirc_mutex' serializes map writers; readers go lock-free. */
+        ovs_mutex_lock(&backer->recirc_mutex);
+        cmap_insert(&backer->recirc_map, &node->cmap_node, node->recirc_id);
+        ovs_mutex_unlock(&backer->recirc_mutex);
+    }
+
+    return recirc_id;
}
+/* Releases 'recirc_id', previously obtained from
+ * ofproto_dpif_alloc_recirc_id() by this same 'ofproto', back to the
+ * backer's id pool and removes its entry from 'recirc_map'.  A no-op if
+ * the id has no entry. */
void
ofproto_dpif_free_recirc_id(struct ofproto_dpif *ofproto, uint32_t recirc_id)
{
    struct dpif_backer *backer = ofproto->backer;
+    struct dpif_backer_recirc_node *node;
-    recirc_id_free(backer->rid_pool, recirc_id);
+    /* NOTE(review): this lookup runs outside 'recirc_mutex', so two
+     * concurrent frees of the same id could both find 'node' — confirm
+     * callers serialize frees per id.  CONTAINER_OF() of a null
+     * cmap_find() result is null only because 'cmap_node' is the
+     * struct's first member. */
+    node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
+                        struct dpif_backer_recirc_node, cmap_node);
+    if (node) {
+        ovs_mutex_lock(&backer->recirc_mutex);
+        cmap_remove(&backer->recirc_map, &node->cmap_node, node->recirc_id);
+        ovs_mutex_unlock(&backer->recirc_mutex);
+        recirc_id_free(backer->rid_pool, node->recirc_id);
+
+        /* 'recirc_id' should never be freed by non-owning 'ofproto'. */
+        ovs_assert(node->ofproto == ofproto);
+
+        /* RCU postpone the free, since other threads may be referring
+         * to 'node' at same time. */
+        ovsrcu_postpone(free, node);
+    }
}
int