};
/* In the absence of a multiple-writer multiple-reader datastructure for
- * storing ukeys, we use a large number of cmaps, each with its own lock for
- * writing. */
+ * storing udpif_keys ("ukeys"), we use a large number of cmaps, each with its
+ * own lock for writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
struct ovs_mutex mutex; /* Take for writing to the following. */
};
/* A thread that processes datapath flows, updates OpenFlow statistics, and
- * updates or removes them if necessary. */
+ * updates or removes them if necessary.
+ *
+ * Revalidator threads operate in two phases: "dump" and "sweep". In between
+ * each phase, all revalidators sync up so that all revalidator threads are
+ * either in one phase or the other, but not a combination.
+ *
+ * During the dump phase, revalidators fetch flows from the datapath and
+ * attribute the statistics to OpenFlow rules. Each datapath flow has a
+ * corresponding ukey which caches the most recently seen statistics. If
+ * a flow needs to be deleted (for example, because it is unused over a
+ * period of time), revalidator threads may delete the flow during the
+ * dump phase. The datapath is not guaranteed to reliably dump all flows,
+ * and there is no mapping from datapath flows to revalidators, so a
+ * particular flow may be handled by zero or more revalidators during a
+ * single dump phase. To avoid duplicate attribution
+ * of statistics, ukeys are never deleted during this phase.
+ *
+ * During the sweep phase, each revalidator takes ownership of a different
+ * slice of umaps and sweeps through all ukeys in those umaps to figure out
+ * whether they need to be deleted. During this phase, revalidators may
+ * fetch individual flows which were not dumped during the dump phase to
+ * validate them and attribute statistics.
+ */
struct revalidator {
struct udpif *udpif; /* Parent udpif. */
pthread_t thread; /* Thread ID. */
static bool ukey_install_finish(struct udpif_key *ukey, int error);
static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
- const ovs_u128 *ufid);
+ const ovs_u128 *ufid,
+ const unsigned pmd_id);
static int ukey_acquire(struct udpif *, const struct dpif_flow *,
struct udpif_key **result, int *error);
static void ukey_delete__(struct udpif_key *);
if (upcall->type == DPIF_UC_MISS) {
xin.resubmit_stats = &stats;
- if (xin.recirc) {
+ if (xin.frozen_state) {
/* We may install a datapath flow only if we get a reference to the
* recirculation context (otherwise we could have recirculation
* upcalls using a recirculation ID for which no context can be
* found). We may still execute the flow's actions even if we
* don't install the flow. */
- upcall->recirc = xin.recirc;
- upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(xin.recirc);
+ upcall->recirc = recirc_id_node_from_state(xin.frozen_state);
+ upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(upcall->recirc);
}
} else {
/* For non-miss upcalls, we are either executing actions (one of which
xlate_actions(&xin, &upcall->xout);
upcall->xout_initialized = true;
- /* Special case for fail-open mode.
- *
- * If we are in fail-open mode, but we are connected to a controller too,
- * then we should send the packet up to the controller in the hope that it
- * will try to set up a flow and thereby allow us to exit fail-open.
- *
- * See the top-level comment in fail-open.c for more information.
- *
- * Copy packets before they are modified by execution. */
- if (upcall->xout.fail_open) {
- const struct dp_packet *packet = upcall->packet;
- struct ofproto_packet_in *pin;
-
- pin = xmalloc(sizeof *pin);
- pin->up.packet = xmemdup(dp_packet_data(packet), dp_packet_size(packet));
- pin->up.packet_len = dp_packet_size(packet);
- pin->up.reason = OFPR_NO_MATCH;
- pin->up.table_id = 0;
- pin->up.cookie = OVS_BE64_MAX;
- flow_get_metadata(upcall->flow, &pin->up.flow_metadata);
- pin->send_len = 0; /* Not used for flow table misses. */
- pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
- ofproto_dpif_send_packet_in(upcall->ofproto, pin);
- }
-
if (!upcall->xout.slow) {
ofpbuf_use_const(&upcall->put_actions,
odp_actions->data, odp_actions->size);
}
if (actions_len == 0) {
/* Lookup actions in userspace cache. */
- struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid);
+ struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid,
+ upcall->pmd_id);
if (ukey) {
ukey_get_actions(ukey, &actions, &actions_len);
dpif_sflow_read_actions(flow, actions, actions_len,
}
static uint32_t
-get_ufid_hash(const ovs_u128 *ufid)
+get_ukey_hash(const ovs_u128 *ufid, const unsigned pmd_id)
{
- return ufid->u32[0];
+ return hash_2words(ufid->u32[0], pmd_id);
}
static struct udpif_key *
-ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
+ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid, const unsigned pmd_id)
{
struct udpif_key *ukey;
- int idx = get_ufid_hash(ufid) % N_UMAPS;
+ int idx = get_ukey_hash(ufid, pmd_id) % N_UMAPS;
struct cmap *cmap = &udpif->ukeys[idx].cmap;
- CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
+ CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node,
+ get_ukey_hash(ufid, pmd_id), cmap) {
if (ovs_u128_equals(&ukey->ufid, ufid)) {
return ukey;
}
ukey->ufid_present = ufid_present;
ukey->ufid = *ufid;
ukey->pmd_id = pmd_id;
- ukey->hash = get_ufid_hash(&ukey->ufid);
+ ukey->hash = get_ukey_hash(&ukey->ufid, pmd_id);
ovsrcu_init(&ukey->actions, NULL);
ukey_set_actions(ukey, actions);
idx = new_ukey->hash % N_UMAPS;
umap = &udpif->ukeys[idx];
ovs_mutex_lock(&umap->mutex);
- old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
+ old_ukey = ukey_lookup(udpif, &new_ukey->ufid, new_ukey->pmd_id);
if (old_ukey) {
/* Uncommon case: A ukey is already installed with the same UFID. */
if (old_ukey->key_len == new_ukey->key_len
struct udpif_key *ukey;
int retval;
- ukey = ukey_lookup(udpif, &flow->ufid);
+ ukey = ukey_lookup(udpif, &flow->ufid, flow->pmd_id);
if (ukey) {
retval = ovs_mutex_trylock(&ukey->mutex);
} else {
&op->dop.u.flow_put.actions_len);
}
+/* Executes datapath operations 'ops' and attributes stats retrieved from the
+ * datapath as part of those operations. */
static void
-push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
+push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
size_t i;
}
}
+/* Executes datapath operations 'ops', attributes stats retrieved from the
+ * datapath, and deletes ukeys corresponding to deleted flows. */
static void
push_ukey_ops(struct udpif *udpif, struct umap *umap,
struct ukey_op *ops, size_t n_ops)
{
int i;
- push_ukey_ops__(udpif, ops, n_ops);
+ push_dp_ops(udpif, ops, n_ops);
ovs_mutex_lock(&umap->mutex);
for (i = 0; i < n_ops; i++) {
if (ops[i].dop.type == DPIF_OP_FLOW_DEL) {
}
if (n_ops) {
- push_ukey_ops__(udpif, ops, n_ops);
+ /* Push datapath ops but defer ukey deletion to 'sweep' phase. */
+ push_dp_ops(udpif, ops, n_ops);
}
ovsrcu_quiesce();
}
size_t n_ops = 0;
CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
- bool flow_exists, seq_mismatch;
- struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
- enum reval_result result;
+ bool flow_exists;
/* Handler threads could be holding a ukey lock while installing a
* new flow, so don't hang around waiting for access to it. */
continue;
}
flow_exists = ukey->flow_exists;
- seq_mismatch = (ukey->dump_seq != dump_seq
- && ukey->reval_seq != reval_seq);
-
- if (purge) {
- result = UKEY_DELETE;
- } else if (!seq_mismatch) {
- result = UKEY_KEEP;
- } else {
- struct dpif_flow_stats stats;
- COVERAGE_INC(revalidate_missed_dp_flow);
- memset(&stats, 0, sizeof stats);
- result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
- reval_seq, &recircs);
- }
- if (flow_exists && result != UKEY_KEEP) {
- /* Takes ownership of 'recircs'. */
- reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
- &odp_actions);
+ if (flow_exists) {
+ struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
+ bool seq_mismatch = (ukey->dump_seq != dump_seq
+ && ukey->reval_seq != reval_seq);
+ enum reval_result result;
+
+ if (purge) {
+ result = UKEY_DELETE;
+ } else if (!seq_mismatch) {
+ result = UKEY_KEEP;
+ } else {
+ struct dpif_flow_stats stats;
+ COVERAGE_INC(revalidate_missed_dp_flow);
+ memset(&stats, 0, sizeof stats);
+ result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+ reval_seq, &recircs);
+ }
+ if (result != UKEY_KEEP) {
+ /* Clears 'recircs' if filled by revalidate_ukey(). */
+ reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
+ &odp_actions);
+ }
}
ovs_mutex_unlock(&ukey->mutex);
- if (n_ops == REVALIDATE_MAX_BATCH) {
- push_ukey_ops(udpif, umap, ops, n_ops);
- n_ops = 0;
- }
-
if (!flow_exists) {
+ /* The common flow deletion case involves deletion of the flow
+ * during the dump phase and ukey deletion here. */
ovs_mutex_lock(&umap->mutex);
ukey_delete(umap, ukey);
ovs_mutex_unlock(&umap->mutex);
}
+
+ if (n_ops == REVALIDATE_MAX_BATCH) {
+ /* Update/delete missed flows and clean up corresponding ukeys
+ * if necessary. */
+ push_ukey_ops(udpif, umap, ops, n_ops);
+ n_ops = 0;
+ }
}
if (n_ops) {