/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
+#include "ovs-rcu.h"
#include "ovs-router.h"
#include "seq.h"
#include "simap.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
char *name; /* Identifier for log messages. */
/* Configuration. */
- struct list ports; /* Contains "struct ofport"s. */
+ struct ovs_list ports; /* Contains "struct ofport"s. */
enum port_vlan_mode vlan_mode; /* VLAN mode */
int vlan; /* -1=trunk port, else a 12-bit VLAN ID. */
unsigned long *trunks; /* Bitmap of trunked VLANs, if 'vlan' == -1.
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
+static void bundle_flush_macs(struct ofbundle *, bool);
+static void bundle_move(struct ofbundle *, struct ofbundle *);
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
odp_port_t odp_port;
struct ofbundle *bundle; /* Bundle that contains this port, if any. */
- struct list bundle_node; /* In struct ofbundle's "ports" list. */
+    struct ovs_list bundle_node;  /* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
struct bfd *bfd; /* BFD, if any. */
bool may_enable; /* May be enabled in bonds. */
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);
+/* Stores the mapping between a 'recirc_id' and the 'ofproto-dpif' that
+ * allocated it. */
+struct dpif_backer_recirc_node {
+ struct cmap_node cmap_node;
+ struct ofproto_dpif *ofproto;
+ uint32_t recirc_id;
+};
+
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
/* Recirculation. */
struct recirc_id_pool *rid_pool; /* Recirculation ID pool. */
+ struct cmap recirc_map; /* Map of 'recirc_id's to 'ofproto's. */
+ struct ovs_mutex recirc_mutex; /* Protects 'recirc_map'. */
bool enable_recirc; /* True if the datapath supports recirculation */
+ /* True if the datapath supports unique flow identifiers */
+ bool enable_ufid;
+
/* True if the datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
* False if the datapath supports only 8-byte (or shorter) userdata. */
return ofproto->backer->enable_recirc;
}
+bool
+ofproto_dpif_get_enable_ufid(struct dpif_backer *backer)
+{
+ return backer->enable_ufid;
+}
+
static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, struct flow *,
free(ofproto);
}
+/* Called when 'ofproto' is destructed.  Checks for and clears any
+ * leaked recirc_ids. */
+static void
+dpif_backer_recirc_clear_ofproto(struct dpif_backer *backer,
+ struct ofproto_dpif *ofproto)
+{
+ struct dpif_backer_recirc_node *node;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ CMAP_FOR_EACH (node, cmap_node, &backer->recirc_map) {
+ if (node->ofproto == ofproto) {
+            VLOG_ERR("recirc_id %"PRIu32" not freed when ofproto (%s) "
+ "is destructed", node->recirc_id, ofproto->up.name);
+ cmap_remove(&backer->recirc_map, &node->cmap_node,
+ node->recirc_id);
+ ovsrcu_postpone(free, node);
+ }
+ }
+ ovs_mutex_unlock(&backer->recirc_mutex);
+}
+
static void
close_dpif_backer(struct dpif_backer *backer)
{
hmap_destroy(&backer->odp_to_ofport_map);
shash_find_and_delete(&all_dpif_backers, backer->type);
recirc_id_pool_destroy(backer->rid_pool);
+ cmap_destroy(&backer->recirc_map);
+ ovs_mutex_destroy(&backer->recirc_mutex);
free(backer->type);
free(backer->dp_version_string);
dpif_close(backer->dpif);
/* Datapath port slated for removal from datapath. */
struct odp_garbage {
- struct list list_node;
+ struct ovs_list list_node;
odp_port_t odp_port;
};
static bool check_variable_length_userdata(struct dpif_backer *backer);
static size_t check_max_mpls_depth(struct dpif_backer *backer);
static bool check_recirc(struct dpif_backer *backer);
+static bool check_ufid(struct dpif_backer *backer);
static bool check_masked_set_action(struct dpif_backer *backer);
static int
struct dpif_port_dump port_dump;
struct dpif_port port;
struct shash_node *node;
- struct list garbage_list;
+ struct ovs_list garbage_list;
struct odp_garbage *garbage, *next;
struct sset names;
backer->enable_recirc = check_recirc(backer);
backer->max_mpls_depth = check_max_mpls_depth(backer);
backer->masked_set_action = check_masked_set_action(backer);
+ backer->enable_ufid = check_ufid(backer);
backer->rid_pool = recirc_id_pool_create();
+ ovs_mutex_init(&backer->recirc_mutex);
+ cmap_init(&backer->recirc_map);
backer->enable_tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
atomic_count_init(&backer->tnl_count, 0);
struct flow flow;
struct odputil_keybuf keybuf;
struct ofpbuf key;
- int error;
- bool enable_recirc = false;
+ bool enable_recirc;
memset(&flow, 0, sizeof flow);
flow.recirc_id = 1;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
+ enable_recirc = dpif_probe_feature(backer->dpif, "recirculation", &key,
+ NULL);
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_PROBE,
- ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0, NULL,
- 0, NULL);
- if (error && error != EEXIST) {
- if (error != EINVAL) {
- VLOG_WARN("%s: Reciculation flow probe failed (%s)",
- dpif_name(backer->dpif), ovs_strerror(error));
- }
- goto done;
- }
-
- error = dpif_flow_del(backer->dpif, ofpbuf_data(&key), ofpbuf_size(&key),
- NULL);
- if (error) {
- VLOG_WARN("%s: failed to delete recirculation feature probe flow",
- dpif_name(backer->dpif));
- }
-
- enable_recirc = true;
-
-done:
if (enable_recirc) {
VLOG_INFO("%s: Datapath supports recirculation",
dpif_name(backer->dpif));
return enable_recirc;
}
+/* Tests whether 'backer''s datapath supports userspace flow ids (UFIDs).
+ * We can skip serializing some flow attributes for datapaths that support
+ * this feature.
+ *
+ * Returns true if the datapath supports UFIDs for flow operations, false
+ * otherwise. */
+static bool
+check_ufid(struct dpif_backer *backer)
+{
+ struct flow flow;
+ struct odputil_keybuf keybuf;
+ struct ofpbuf key;
+ ovs_u128 ufid;
+ bool enable_ufid;
+
+ memset(&flow, 0, sizeof flow);
+ flow.dl_type = htons(0x1234);
+
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
+ dpif_flow_hash(backer->dpif, ofpbuf_data(&key), ofpbuf_size(&key), &ufid);
+
+ enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);
+
+ if (enable_ufid) {
+ VLOG_INFO("%s: Datapath supports userspace flow ids",
+ dpif_name(backer->dpif));
+ } else {
+ VLOG_INFO("%s: Datapath does not support userspace flow ids",
+ dpif_name(backer->dpif));
+ }
+ return enable_ufid;
+}
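/* Illustrative sketch, not part of the patch: a hypothetical caller could
 * consult the probed capability through ofproto_dpif_get_enable_ufid()
 * before deciding whether to hand a precomputed ovs_u128 flow id to the
 * datapath along with the serialized key.  'backer' is assumed to be in
 * scope for illustration only. */
#if 0
    if (ofproto_dpif_get_enable_ufid(backer)) {
        /* Later dpif flow operations may identify the flow by its ufid
         * instead of re-serializing the full flow key. */
    }
#endif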
+
/* Tests whether 'backer''s datapath supports variable-length
* OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions. We need
* to disable some features on older datapaths that don't support this
for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
struct odputil_keybuf keybuf;
struct ofpbuf key;
- int error;
memset(&flow, 0, sizeof flow);
flow.dl_type = htons(ETH_TYPE_MPLS);
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, &flow, NULL, 0, false);
-
- error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_PROBE,
- ofpbuf_data(&key), ofpbuf_size(&key), NULL, 0,
- NULL, 0, NULL);
- if (error && error != EEXIST) {
- if (error != EINVAL) {
- VLOG_WARN("%s: MPLS stack length feature probe failed (%s)",
- dpif_name(backer->dpif), ovs_strerror(error));
- }
+ if (!dpif_probe_feature(backer->dpif, "MPLS", &key, NULL)) {
break;
}
-
- error = dpif_flow_del(backer->dpif, ofpbuf_data(&key),
- ofpbuf_size(&key), NULL);
- if (error) {
- VLOG_WARN("%s: failed to delete MPLS feature probe flow",
- dpif_name(backer->dpif));
- }
}
VLOG_INFO("%s: MPLS label stack length probed as %d",
struct shash_node *node, *next;
int error;
+    /* The tunnel module can be used as soon as the udpif threads are
+     * running, so initialize it before opening the backer. */
+ ofproto_tunnel_init();
+
error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
if (error) {
return error;
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
- ofproto_tunnel_init();
ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
guarded_list_init(&ofproto->pins);
ofproto_unixctl_init();
- ovs_router_unixctl_register();
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
return error;
}
- /* Continue non-recirculation rule lookups from table 0.
+    /* Drop any runaway non-recirc rule lookups. Recirc_id has to be
+ * zero when reaching this rule.
*
- * (priority=2), recirc=0, actions=resubmit(, 0)
+ * (priority=2), recirc_id=0, actions=drop
*/
- resubmit = ofpact_put_RESUBMIT(&ofpacts);
- resubmit->in_port = OFPP_IN_PORT;
- resubmit->table_id = 0;
-
+ ofpbuf_clear(&ofpacts);
match_init_catchall(&match);
match_set_recirc_id(&match, 0);
-
error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, 0, &ofpacts,
&unused_rulep);
if (error) {
return error;
}
- /* Drop any run away recirc rule lookups. Recirc_id has to be
- * non-zero when reaching this rule.
+    /* Continue rule lookups from table 0 for recirculated packets that
+     * did not match above.
*
- * (priority=1), *, actions=drop
+ * (priority=1), actions=resubmit(, 0)
*/
- ofpbuf_clear(&ofpacts);
+ resubmit = ofpact_put_RESUBMIT(&ofpacts);
+ resubmit->in_port = OFPP_IN_PORT;
+ resubmit->table_id = 0;
+
match_init_catchall(&match);
error = ofproto_dpif_add_internal_flow(ofproto, &match, 1, 0, &ofpacts,
&unused_rulep);
struct ofproto_packet_in *pin, *next_pin;
struct rule_dpif *rule;
struct oftable *table;
- struct list pins;
+ struct ovs_list pins;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
xlate_txn_start();
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- CLS_FOR_EACH_SAFE (rule, up.cr, &table->cls) {
+ CLS_FOR_EACH (rule, up.cr, &table->cls) {
ofproto_rule_delete(&ofproto->up, &rule->up);
}
}
}
guarded_list_destroy(&ofproto->pins);
+ dpif_backer_recirc_clear_ofproto(ofproto->backer, ofproto);
+
mbridge_unref(ofproto->mbridge);
netflow_unref(ofproto->netflow);
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
struct ofproto_packet_in *pin, *next_pin;
- struct list pins;
+ struct ovs_list pins;
guarded_list_pop_all(&ofproto->pins, &pins);
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
struct dpif_sflow *ds = ofproto->sflow;
if (sflow_options) {
+ uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
if (!ds) {
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
+ if (dpif_sflow_get_probability(ds) != old_probability) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
} else {
if (ds) {
dpif_sflow_unref(ds);
netdev_get_name(ofport->up.netdev),
rstp_state_name(ofport->rstp_state),
rstp_state_name(state));
+
if (rstp_learn_in_state(ofport->rstp_state)
- != rstp_learn_in_state(state)) {
- /* xxx Learning action flows should also be flushed. */
- ovs_rwlock_wrlock(&ofproto->ml->rwlock);
- mac_learning_flush(ofproto->ml);
- ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ != rstp_learn_in_state(state)) {
+ /* XXX: Learning action flows should also be flushed. */
+ if (ofport->bundle) {
+ if (!rstp_shift_root_learned_address(ofproto->rstp)
+ || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
+ bundle_flush_macs(ofport->bundle, false);
+ }
+ }
}
fwd_change = rstp_forward_in_state(ofport->rstp_state)
!= rstp_forward_in_state(state);
while ((ofport = rstp_get_next_changed_port_aux(ofproto->rstp, &rp))) {
update_rstp_port_state(ofport);
}
+ rp = NULL;
+ ofport = NULL;
/* FIXME: This check should be done on-event (i.e., when setting
* p->fdb_flush) and not periodically.
*/
- if (rstp_check_and_reset_fdb_flush(ofproto->rstp)) {
- ovs_rwlock_wrlock(&ofproto->ml->rwlock);
- /* FIXME: RSTP should be able to flush the entries pertaining to a
- * single port, not the whole table.
- */
- mac_learning_flush(ofproto->ml);
- ovs_rwlock_unlock(&ofproto->ml->rwlock);
+ while ((ofport = rstp_check_and_reset_fdb_flush(ofproto->rstp, &rp))) {
+ if (!rstp_shift_root_learned_address(ofproto->rstp)
+ || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
+ bundle_flush_macs(ofport->bundle, false);
+ }
+ }
+
+ if (rstp_shift_root_learned_address(ofproto->rstp)) {
+ bundle_move(((struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp))->bundle,
+ ((struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp))->bundle);
+ rstp_reset_root_changed(ofproto->rstp);
}
}
}
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_wrlock(&ml->rwlock);
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
- if (mac->port.p == bundle) {
+ if (mac_entry_get_port(ml, mac) == bundle) {
if (all_ofprotos) {
struct ofproto_dpif *o;
ovs_rwlock_unlock(&ml->rwlock);
}
+static void
+bundle_move(struct ofbundle *old, struct ofbundle *new)
+{
+ struct ofproto_dpif *ofproto = old->ofproto;
+ struct mac_learning *ml = ofproto->ml;
+ struct mac_entry *mac, *next_mac;
+
+ ovs_assert(new->ofproto == old->ofproto);
+
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ovs_rwlock_wrlock(&ml->rwlock);
+ LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
+ if (mac_entry_get_port(ml, mac) == old) {
+ mac_entry_set_port(ml, mac, new);
+ }
+ }
+ ovs_rwlock_unlock(&ml->rwlock);
+}
+
static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
{
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
- || !stp_forward_in_state(port->stp_state)) {
+ || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
+ || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
break;
}
list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
- || !stp_forward_in_state(port->stp_state)) {
+ || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
+ || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
bundle->floodable = false;
}
}
}
ofproto = bundle->ofproto;
- mbridge_unregister_bundle(ofproto->mbridge, bundle->aux);
+ mbridge_unregister_bundle(ofproto->mbridge, bundle);
xlate_txn_start();
xlate_bundle_remove(bundle);
struct ofpbuf *learning_packet;
int error, n_packets, n_errors;
struct mac_entry *e;
- struct list packets;
+ struct ovs_list packets;
list_init(&packets);
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
- if (e->port.p != bundle) {
+ if (mac_entry_get_port(ofproto->ml, e) != bundle) {
void *port_void;
learning_packet = bond_compose_learning_packet(bundle->bond,
return 0;
}
-/* Configures multicast snooping port's flood setting on 'ofproto'. */
+/* Configures multicast snooping port's flood settings on 'ofproto'. */
static int
-set_mcast_snooping_port(struct ofproto *ofproto_, void *aux, bool flood)
+set_mcast_snooping_port(struct ofproto *ofproto_, void *aux,
+ const struct ofproto_mcast_snooping_port_settings *s)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofbundle *bundle = bundle_lookup(ofproto, aux);
- if (ofproto->ms) {
+ if (ofproto->ms && s) {
ovs_rwlock_wrlock(&ofproto->ms->rwlock);
- mcast_snooping_set_port_flood(ofproto->ms, bundle->vlan, bundle,
- flood);
+ mcast_snooping_set_port_flood(ofproto->ms, bundle, s->flood);
+ mcast_snooping_set_port_flood_reports(ofproto->ms, bundle,
+ s->flood_reports);
ovs_rwlock_unlock(&ofproto->ms->rwlock);
}
return 0;
if (wc) {
wc->masks.recirc_id = UINT32_MAX;
}
- if (flow->recirc_id) {
- /* Start looking up from internal table for post recirculation
- * flows or packets. */
- *table_id = TBL_INTERNAL;
- }
+ *table_id = rule_dpif_lookup_get_init_table_id(flow);
}
return rule_dpif_lookup_from_table(ofproto, flow, wc, take_ref, stats,
/* The returned rule (if any) is valid at least until the next RCU quiescent
* period. If the rule needs to stay around longer, a non-zero 'take_ref'
- * must be passed in to cause a reference to be taken on it. */
+ * must be passed in to cause a reference to be taken on it.
+ *
+ * 'flow' is non-const to allow for temporary modifications during the lookup.
+ * Any changes are restored before returning. */
static struct rule_dpif *
rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
- const struct flow *flow, struct flow_wildcards *wc,
+ struct flow *flow, struct flow_wildcards *wc,
bool take_ref)
{
struct classifier *cls = &ofproto->up.tables[table_id].cls;
* on it before this returns.
*
* 'in_port' allows the lookup to take place as if the in port had the value
- * 'in_port'. This is needed for resubmit action support. */
+ * 'in_port'. This is needed for resubmit action support.
+ *
+ * 'flow' is non-const to allow for temporary modifications during the lookup.
+ * Any changes are restored before returning. */
struct rule_dpif *
rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
struct flow_wildcards *wc, bool take_ref,
OVS_REQUIRES(group->stats_mutex)
{
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
group->packet_count = 0;
group->byte_count = 0;
bucket->stats.packet_count += stats->n_packets;
bucket->stats.byte_count += stats->n_bytes;
} else { /* Credit to all buckets */
- const struct list *buckets;
+ const struct ovs_list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
{
struct group_dpif *group = group_dpif_cast(group_);
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
struct bucket_counter *bucket_stats;
ovs_mutex_lock(&group->stats_mutex);
void
group_dpif_get_buckets(const struct group_dpif *group,
- const struct list **buckets)
+ const struct ovs_list **buckets)
{
*buckets = &group->up.buckets;
}
ds_put_cstr(&ds, " port VLAN MAC Age\n");
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
- struct ofbundle *bundle = e->port.p;
+ struct ofbundle *bundle = mac_entry_get_port(ofproto->ml, e);
char name[OFP_MAX_PORT_NAME_LEN];
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
*
* On success, initializes '*ofprotop' and 'flow' and returns NULL. On failure
* returns a nonnull malloced error message. */
-static char * WARN_UNUSED_RESULT
+static char * OVS_WARN_UNUSED_RESULT
parse_flow_and_packet(int argc, const char *argv[],
struct ofproto_dpif **ofprotop, struct flow *flow,
struct ofpbuf **packetp)
}
ds_init(&ds);
- flow_dump = dpif_flow_dump_create(ofproto->backer->dpif);
+ flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false);
flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
struct flow flow;
continue;
}
+ if (verbosity) {
+ odp_format_ufid(&f.ufid, &ds);
+ ds_put_cstr(&ds, " ");
+ }
odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
&portno_names, &ds, verbosity);
ds_put_cstr(&ds, ", ");
}
}
+struct ofproto_dpif *
+ofproto_dpif_recirc_get_ofproto(const struct dpif_backer *backer,
+ uint32_t recirc_id)
+{
+ struct dpif_backer_recirc_node *node;
+
+ node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
+ struct dpif_backer_recirc_node, cmap_node);
+
+ return node ? node->ofproto : NULL;
+}
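/* Illustrative sketch, not part of the patch: a hypothetical consumer, such
 * as an upcall handler translating a recirculated packet, could map the
 * packet's recirc_id back to the bridge that allocated it.  'backer' and
 * 'flow' are assumed to be in scope for illustration only. */
#if 0
    struct ofproto_dpif *recirc_ofproto
        = ofproto_dpif_recirc_get_ofproto(backer, flow->recirc_id);
    if (!recirc_ofproto) {
        /* The id was already freed or never allocated; the packet cannot
         * be attributed to a bridge and should be dropped. */
    }
#endif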
+
uint32_t
ofproto_dpif_alloc_recirc_id(struct ofproto_dpif *ofproto)
{
struct dpif_backer *backer = ofproto->backer;
+ uint32_t recirc_id = recirc_id_alloc(backer->rid_pool);
- return recirc_id_alloc(backer->rid_pool);
+ if (recirc_id) {
+ struct dpif_backer_recirc_node *node = xmalloc(sizeof *node);
+
+ node->recirc_id = recirc_id;
+ node->ofproto = ofproto;
+
+ ovs_mutex_lock(&backer->recirc_mutex);
+ cmap_insert(&backer->recirc_map, &node->cmap_node, node->recirc_id);
+ ovs_mutex_unlock(&backer->recirc_mutex);
+ }
+
+ return recirc_id;
}
void
ofproto_dpif_free_recirc_id(struct ofproto_dpif *ofproto, uint32_t recirc_id)
{
struct dpif_backer *backer = ofproto->backer;
+ struct dpif_backer_recirc_node *node;
- recirc_id_free(backer->rid_pool, recirc_id);
+ node = CONTAINER_OF(cmap_find(&backer->recirc_map, recirc_id),
+ struct dpif_backer_recirc_node, cmap_node);
+ if (node) {
+ ovs_mutex_lock(&backer->recirc_mutex);
+ cmap_remove(&backer->recirc_map, &node->cmap_node, node->recirc_id);
+ ovs_mutex_unlock(&backer->recirc_mutex);
+ recirc_id_free(backer->rid_pool, node->recirc_id);
+
+        /* A 'recirc_id' should never be freed by a non-owning 'ofproto'. */
+ ovs_assert(node->ofproto == ofproto);
+
+        /* Postpone the free with RCU, since other threads may still be
+         * referring to 'node'. */
+ ovsrcu_postpone(free, node);
+ }
}
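/* Illustrative sketch, not part of the patch: a recirc_id allocated above is
 * expected to be released by the same 'ofproto' (see the assertion in
 * ofproto_dpif_free_recirc_id()), so a hypothetical user would pair the
 * calls as below.  'id' is an assumed name for illustration only. */
#if 0
    uint32_t id = ofproto_dpif_alloc_recirc_id(ofproto);
    if (id) {
        /* ... emit datapath actions that recirculate with 'id' ... */
        ofproto_dpif_free_recirc_id(ofproto, id);
    }
#endif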
int