#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
+#include "ovs-rcu.h"
#include "poll-loop.h"
#include "seq.h"
#include "simap.h"
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
+/* Stores mapping between 'recirc_id' and 'ofproto-dpif'. */
+struct dpif_backer_recirc_node {
+ struct hmap_node hmap_node;    /* In dpif_backer's 'recirc_map'; hashed by
+                                 * 'recirc_id'.  Must remain the FIRST
+                                 * member: lookups CONTAINER_OF() a
+                                 * possibly-NULL hmap_node pointer back to
+                                 * this struct and rely on NULL mapping to
+                                 * NULL. */
+ struct ofproto_dpif *ofproto;  /* The ofproto that allocated 'recirc_id'. */
+ uint32_t recirc_id;            /* Datapath recirculation id. */
+};
+
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
/* Recirculation. */
struct recirc_id_pool *rid_pool; /* Recirculation ID pool. */
+ struct hmap recirc_map; /* Map of 'recirc_id's to 'ofproto's. */
+ struct ovs_mutex recirc_mutex; /* Protects 'recirc_map'. */
bool enable_recirc; /* True if the datapath supports recirculation */
/* True if the datapath supports variable-length
free(ofproto);
}
+/* Called when 'ofproto' is destructed. Checks for and clears any
+ * recirc_id leak, i.e. any 'recirc_map' entry still owned by 'ofproto'
+ * that should already have been released via
+ * ofproto_dpif_free_recirc_id(). */
+static void
+dpif_backer_recirc_clear_ofproto(struct dpif_backer *backer,
+ struct ofproto_dpif *ofproto)
+{
+ struct dpif_backer_recirc_node *node;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ HMAP_FOR_EACH (node, hmap_node, &backer->recirc_map) {
+ if (node->ofproto == ofproto) {
+ /* A mapping that survives to destruction time is a leak: report
+ * it, then drop it from the map. */
+ VLOG_ERR("recirc_id %"PRIu32", not freed when ofproto (%s) "
+ "is destructed", node->recirc_id, ofproto->up.name);
+ /* NOTE(review): removing inside plain HMAP_FOR_EACH relies on
+ * hmap_remove() leaving the removed node's iteration links
+ * intact; HMAP_FOR_EACH_SAFE would make that explicit -- verify
+ * against lib/hmap.h.  Also, the leaked id is NOT returned to
+ * 'rid_pool' here -- confirm that is intentional. */
+ hmap_remove(&backer->recirc_map, &node->hmap_node);
+ /* Defer the free: other threads may still hold 'node'. */
+ ovsrcu_postpone(free, node);
+ }
+ }
+ ovs_mutex_unlock(&backer->recirc_mutex);
+}
+
static void
close_dpif_backer(struct dpif_backer *backer)
{
hmap_destroy(&backer->odp_to_ofport_map);
shash_find_and_delete(&all_dpif_backers, backer->type);
recirc_id_pool_destroy(backer->rid_pool);
+ hmap_destroy(&backer->recirc_map);
+ ovs_mutex_destroy(&backer->recirc_mutex);
free(backer->type);
dpif_close(backer->dpif);
free(backer);
shash_add(&all_dpif_backers, type, backer);
+ backer->enable_recirc = check_recirc(backer);
+ backer->variable_length_userdata = check_variable_length_userdata(backer);
+ backer->max_mpls_depth = check_max_mpls_depth(backer);
+ backer->rid_pool = recirc_id_pool_create();
+ ovs_mutex_init(&backer->recirc_mutex);
+ hmap_init(&backer->recirc_map);
+
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
close_dpif_backer(backer);
return error;
}
- backer->enable_recirc = check_recirc(backer);
- backer->variable_length_userdata = check_variable_length_userdata(backer);
- backer->max_mpls_depth = check_max_mpls_depth(backer);
- backer->rid_pool = recirc_id_pool_create();
if (backer->recv_set_enable) {
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0);
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE,
ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL,
0, NULL);
if (error && error != EEXIST) {
switch (error) {
case 0:
- /* Variable-length userdata is supported.
- *
- * Purge received packets to avoid processing the nonsense packet we
- * sent to userspace, then report success. */
- dpif_recv_purge(backer->dpif);
return true;
case ERANGE:
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0);
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE,
ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL, 0, NULL);
if (error && error != EEXIST) {
if (error != EINVAL) {
return error;
}
- /* Continue non-recirculation rule lookups from table 0.
+ /* Drop any runaway non-recirc rule lookups. Recirc_id has to be
+ * zero when reaching this rule.
*
- * (priority=2), recirc=0, actions=resubmit(, 0)
+ * (priority=2), recirc_id=0, actions=drop
*/
- resubmit = ofpact_put_RESUBMIT(&ofpacts);
- resubmit->ofpact.compat = 0;
- resubmit->in_port = OFPP_IN_PORT;
- resubmit->table_id = 0;
-
+ ofpbuf_clear(&ofpacts);
match_init_catchall(&match);
match_set_recirc_id(&match, 0);
-
error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, &ofpacts,
&unused_rulep);
if (error) {
return error;
}
- /* Drop any run away recirc rule lookups. Recirc_id has to be
- * non-zero when reaching this rule.
+ /* Continue rule lookups for unmatched recirc rules from table 0.
*
- * (priority=1), *, actions=drop
+ * (priority=1), actions=resubmit(, 0)
*/
- ofpbuf_clear(&ofpacts);
+ resubmit = ofpact_put_RESUBMIT(&ofpacts);
+ resubmit->ofpact.compat = 0;
+ resubmit->in_port = OFPP_IN_PORT;
+ resubmit->table_id = 0;
+
match_init_catchall(&match);
+
error = ofproto_dpif_add_internal_flow(ofproto, &match, 1, &ofpacts,
&unused_rulep);
}
guarded_list_destroy(&ofproto->pins);
+ dpif_backer_recirc_clear_ofproto(ofproto->backer, ofproto);
+
mbridge_unref(ofproto->mbridge);
netflow_unref(ofproto->netflow);
struct dpif_sflow *ds = ofproto->sflow;
if (sflow_options) {
+ uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
if (!ds) {
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
+ if (dpif_sflow_get_probability(ds) != old_probability) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
} else {
if (ds) {
dpif_sflow_unref(ds);
}
ofproto = bundle->ofproto;
- mbridge_unregister_bundle(ofproto->mbridge, bundle->aux);
+ mbridge_unregister_bundle(ofproto->mbridge, bundle);
ovs_rwlock_wrlock(&xlate_rwlock);
xlate_bundle_remove(bundle);
xin.resubmit_stats = &stats;
xlate_actions(&xin, &xout);
+ execute.actions = ofpbuf_data(&xout.odp_actions);
+ execute.actions_len = ofpbuf_size(&xout.odp_actions);
+ execute.packet = packet;
+ execute.md = pkt_metadata_from_flow(flow);
+ execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
+
+ /* Fix up in_port. */
in_port = flow->in_port.ofp_port;
if (in_port == OFPP_NONE) {
in_port = OFPP_LOCAL;
}
- execute.actions = ofpbuf_data(&xout.odp_actions);
- execute.actions_len = ofpbuf_size(&xout.odp_actions);
- execute.packet = packet;
- execute.md.tunnel = flow->tunnel;
- execute.md.skb_priority = flow->skb_priority;
- execute.md.pkt_mark = flow->pkt_mark;
execute.md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
- execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
error = dpif_execute(ofproto->backer->dpif, &execute);
if (wc) {
wc->masks.recirc_id = UINT32_MAX;
}
-
- /* Start looking up from internal table for post recirculation flows
- * or packets. We can also simply send all, including normal flows
- * or packets to the internal table. They will not match any post
- * recirculation rules except the 'catch all' rule that resubmit
- * them to table 0.
- *
- * As an optimization, we send normal flows and packets to table 0
- * directly, saving one table lookup. */
- table_id = flow->recirc_id ? TBL_INTERNAL : 0;
+ table_id = rule_dpif_lookup_get_init_table_id(flow);
} else {
table_id = 0;
}
}
}
+/* Returns the ofproto that allocated 'recirc_id', or NULL if the id is not
+ * currently mapped.
+ *
+ * hmap_first_with_hash() returns NULL on a miss; CONTAINER_OF() then still
+ * yields NULL only because 'hmap_node' is the first member of
+ * struct dpif_backer_recirc_node (offset 0) -- keep it first.
+ *
+ * NOTE(review): the returned ofproto pointer is used after the mutex is
+ * released; presumably its lifetime is guaranteed by RCU and the
+ * dpif_backer_recirc_clear_ofproto() cleanup -- confirm with callers. */
+struct ofproto_dpif *
+ofproto_dpif_recirc_get_ofproto(const struct dpif_backer *backer,
+ uint32_t recirc_id)
+{
+ struct dpif_backer_recirc_node *node;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ /* 'recirc_id' itself is the hash value, matching hmap_insert() in
+ * ofproto_dpif_alloc_recirc_id(). */
+ node = CONTAINER_OF(hmap_first_with_hash(&backer->recirc_map, recirc_id),
+ struct dpif_backer_recirc_node, hmap_node);
+ ovs_mutex_unlock(&backer->recirc_mutex);
+
+ return node ? node->ofproto : NULL;
+}
+
+/* Allocates a datapath recirculation id for 'ofproto' and records the
+ * id -> ofproto mapping so the owner can later be recovered with
+ * ofproto_dpif_recirc_get_ofproto(). */
uint32_t
ofproto_dpif_alloc_recirc_id(struct ofproto_dpif *ofproto)
{
struct dpif_backer *backer = ofproto->backer;
+ uint32_t recirc_id = recirc_id_alloc(backer->rid_pool);
- return recirc_id_alloc(backer->rid_pool);
+ /* A zero id is not recorded -- presumably recirc_id_alloc() returns 0
+ * to mean "pool exhausted"; verify against recirc_id_alloc(). */
+ if (recirc_id) {
+ struct dpif_backer_recirc_node *node = xmalloc(sizeof *node);
+
+ node->recirc_id = recirc_id;
+ node->ofproto = ofproto;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ /* 'recirc_id' itself is used as the hash value; lookups depend on
+ * this (see ofproto_dpif_recirc_get_ofproto()). */
+ hmap_insert(&backer->recirc_map, &node->hmap_node, node->recirc_id);
+ ovs_mutex_unlock(&backer->recirc_mutex);
+ }
+
+ return recirc_id;
}
+/* Returns 'recirc_id' to the pool and removes its id -> ofproto mapping.
+ * Logs an error if 'recirc_id' is freed by an ofproto other than the one
+ * that allocated it.  The mapping node itself is freed via RCU, since
+ * other threads may still hold a pointer to it. */
void
ofproto_dpif_free_recirc_id(struct ofproto_dpif *ofproto, uint32_t recirc_id)
{
struct dpif_backer *backer = ofproto->backer;
+ struct dpif_backer_recirc_node *node;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ /* NULL on a miss: relies on 'hmap_node' being the first member of
+ * struct dpif_backer_recirc_node. */
+ node = CONTAINER_OF(hmap_first_with_hash(&backer->recirc_map, recirc_id),
+ struct dpif_backer_recirc_node, hmap_node);
+ if (node) {
+ hmap_remove(&backer->recirc_map, &node->hmap_node);
+ /* 'node' is unreachable from the map now; the pool free and the
+ * ownership check can run outside the mutex. */
+ ovs_mutex_unlock(&backer->recirc_mutex);
+ recirc_id_free(backer->rid_pool, node->recirc_id);
+
+ if (node->ofproto != ofproto) {
+ VLOG_ERR("recirc_id %"PRIu32", freed by incorrect ofproto (%s),"
+ " expect ofproto (%s)", node->recirc_id, ofproto->up.name,
+ node->ofproto->up.name);
+ }
- recirc_id_free(backer->rid_pool, recirc_id);
+ /* RCU postpone the free, since other threads may be referring
+ * to 'node' at same time. */
+ ovsrcu_postpone(free, node);
+ } else {
+ /* Unknown id: nothing to do.  NOTE(review): the id is not returned
+ * to 'rid_pool' in this branch -- confirm callers can never hold an
+ * id that is absent from the map. */
+ ovs_mutex_unlock(&backer->recirc_mutex);
+ }
}
int