#include "connmgr.h"
#include "coverage.h"
+#include "cmap.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "poll-loop.h"
#include "seq.h"
#include "unixctl.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
+COVERAGE_DEFINE(handler_duplicate_upcall);
+COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(revalidate_missed_dp_flow);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
uint32_t handler_id; /* Handler id. */
};
+/* In the absence of a multiple-writer multiple-reader data structure for
+ * storing ukeys, we use a large number of cmaps, each with its own lock for
+ * writing. */
+#define N_UMAPS 512 /* per udpif. */
+struct umap {
+ struct ovs_mutex mutex; /* Take for writing to the following. */
+ struct cmap cmap; /* Datapath flow keys. */
+};
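+
+/* A sketch of the intended usage (the helpers that implement it appear
+ * further below): a ukey with precomputed 'hash' lives in
+ * udpif->ukeys[hash % N_UMAPS].  Readers traverse the cmap locklessly, while
+ * writers take that umap's mutex only around cmap_insert()/cmap_remove(). */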
+
/* A thread that processes datapath flows, updates OpenFlow statistics, and
* updates or removes them if necessary. */
struct revalidator {
struct udpif *udpif; /* Parent udpif. */
pthread_t thread; /* Thread ID. */
unsigned int id; /* ovsthread_id_self(). */
- struct hmap *ukeys; /* Points into udpif->ukeys for this
- revalidator. Used for GC phase. */
};
/* An upcall handler for ofproto_dpif.
* them.
*/
struct udpif {
- struct list list_node; /* In all_udpifs list. */
+ struct ovs_list list_node; /* In all_udpifs list. */
struct dpif *dpif; /* Datapath handle. */
struct dpif_backer *backer; /* Opaque dpif_backer pointer. */
- uint32_t secret; /* Random seed for upcall hash. */
-
struct handler *handlers; /* Upcall handlers. */
size_t n_handlers;
/* Revalidation. */
struct seq *reval_seq; /* Incremented to force revalidation. */
- bool need_revalidate; /* As indicated by 'reval_seq'. */
bool reval_exit; /* Set by leader on 'exit_latch. */
struct ovs_barrier reval_barrier; /* Barrier used by revalidators. */
struct dpif_flow_dump *dump; /* DPIF flow dump state. */
long long int dump_duration; /* Duration of the last flow dump. */
struct seq *dump_seq; /* Increments each dump iteration. */
+ atomic_bool enable_ufid; /* If true, skip dumping flow attrs. */
- /* There are 'n_revalidators' ukey hmaps. Each revalidator retains a
- * reference to one of these for garbage collection.
+ /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
*
* During the flow dump phase, revalidators insert into these with a random
* distribution. During the garbage collection phase, each revalidator
- * takes care of garbage collecting one of these hmaps. */
- struct {
- struct ovs_mutex mutex; /* Guards the following. */
- struct hmap hmap OVS_GUARDED; /* Datapath flow keys. */
- } *ukeys;
+ * takes care of garbage collecting a slice of these maps. */
+ struct umap *ukeys;
/* Datapath flow statistics. */
unsigned int max_n_flows;
struct upcall {
struct ofproto_dpif *ofproto; /* Parent ofproto. */
+ const struct recirc_id_node *recirc; /* Recirculation context. */
+ bool have_recirc_ref; /* Reference held on recirc ctx? */
/* The flow and packet are only required to be constant when using
* dpif-netdev. If a modification is absolutely necessary, a const cast
* may be used with other datapaths. */
const struct flow *flow; /* Parsed representation of the packet. */
- const struct ofpbuf *packet; /* Packet associated with this upcall. */
+ const ovs_u128 *ufid; /* Unique identifier for 'flow'. */
+ unsigned pmd_id; /* Datapath poll mode driver id. */
+ const struct dp_packet *packet; /* Packet associated with this upcall. */
ofp_port_t in_port; /* OpenFlow in port, or OFPP_NONE. */
enum dpif_upcall_type type; /* Datapath type of the upcall. */
const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */
+ const struct nlattr *actions; /* Flow actions in DPIF_UC_ACTION Upcalls. */
bool xout_initialized; /* True if 'xout' must be uninitialized. */
struct xlate_out xout; /* Result of xlate_actions(). */
bool vsp_adjusted; /* 'packet' and 'flow' were adjusted for
VLAN splinters if true. */
+ struct udpif_key *ukey; /* Revalidator flow cache. */
+ bool ukey_persists; /* Set true to keep 'ukey' beyond the
+ lifetime of this upcall. */
+
+ uint64_t dump_seq; /* udpif->dump_seq at translation time. */
+ uint64_t reval_seq; /* udpif->reval_seq at translation time. */
+
/* Not used by the upcall callback interface. */
const struct nlattr *key; /* Datapath flow key. */
size_t key_len; /* Datapath flow key length. */
/* 'udpif_key's are responsible for tracking the little bit of state udpif
* needs to do flow expiration which can't be pulled directly from the
- * datapath. They may be created or maintained by any revalidator during
- * the dump phase, but are owned by a single revalidator, and are destroyed
- * by that revalidator during the garbage-collection phase.
+ * datapath. They may be created by any handler or revalidator thread at any
+ * time, and read by any revalidator during the dump phase. They are, however,
+ * each owned by a single revalidator, which takes care of destroying them
+ * during the garbage-collection phase.
*
- * While some elements of a udpif_key are protected by a mutex, the ukey itself
- * is not. Therefore it is not safe to destroy a udpif_key except when all
- * revalidators are in garbage collection phase, or they aren't running. */
+ * The mutex within the ukey protects some members of the ukey. The ukey
+ * itself is protected by RCU and is held within a umap in the parent udpif.
+ * Adding or removing a ukey from a umap is only safe when holding the
+ * corresponding umap lock. */
struct udpif_key {
- struct hmap_node hmap_node; /* In parent revalidator 'ukeys' map. */
+ struct cmap_node cmap_node; /* In parent revalidator 'ukeys' map. */
/* These elements are read only once created, and therefore aren't
* protected by a mutex. */
const struct nlattr *key; /* Datapath flow key. */
size_t key_len; /* Length of 'key'. */
+ const struct nlattr *mask; /* Datapath flow mask. */
+ size_t mask_len; /* Length of 'mask'. */
+ struct ofpbuf *actions; /* Datapath flow actions as nlattrs. */
+ ovs_u128 ufid; /* Unique flow identifier. */
+ bool ufid_present; /* True if 'ufid' is in datapath. */
+ uint32_t hash; /* Pre-computed hash for 'key'. */
+ unsigned pmd_id; /* Datapath poll mode driver id. */
struct ovs_mutex mutex; /* Guards the following. */
struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
long long int created OVS_GUARDED; /* Estimate of creation time. */
uint64_t dump_seq OVS_GUARDED; /* Tracks udpif->dump_seq. */
+ uint64_t reval_seq OVS_GUARDED; /* Tracks udpif->reval_seq. */
bool flow_exists OVS_GUARDED; /* Ensures flows are only deleted
once. */
* are affected by this ukey.
* Used for stats and learning.*/
union {
- struct odputil_keybuf key_buf; /* Memory for 'key'. */
- struct nlattr key_buf_nla;
- };
+ struct odputil_keybuf buf;
+ struct nlattr nla;
+ } keybuf, maskbuf;
+
+ /* Recirculation IDs with references held by the ukey. */
+ unsigned n_recircs;
+    uint32_t recircs[];   /* 'n_recircs' IDs for which references are held. */
+};
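+
+/* Because 'recircs' is a flexible array member, each ukey is allocated with
+ * room for its recirculation IDs in a single block, roughly:
+ *
+ *     ukey = xmalloc(sizeof *ukey + n_recircs * sizeof *ukey->recircs);
+ */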
+
+/* Datapath operation with optional ukey attached. */
+struct ukey_op {
+ struct udpif_key *ukey;
+ struct dpif_flow_stats stats; /* Stats for 'op'. */
+ struct dpif_op dop; /* Flow operation. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs);
+static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);
static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
const char *argv[], void *aux);
+static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
+ const char *argv[], void *aux);
+static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
+ const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
const char *argv[], void *aux);
-
-static struct udpif_key *ukey_create(const struct nlattr *key, size_t key_len,
- long long int used);
+static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
+ const char *argv[], void *aux);
+
+static struct udpif_key *ukey_create_from_upcall(struct upcall *);
+static int ukey_create_from_dpif_flow(const struct udpif *,
+ const struct dpif_flow *,
+ struct udpif_key **);
+static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
+static bool ukey_install_finish(struct udpif_key *ukey, int error);
+static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
- const struct nlattr *key, size_t key_len,
- uint32_t hash);
-static bool ukey_acquire(struct udpif *udpif, const struct nlattr *key,
- size_t key_len, long long int used,
- struct udpif_key **result);
-static void ukey_delete(struct revalidator *, struct udpif_key *);
+ const ovs_u128 *ufid);
+static int ukey_acquire(struct udpif *, const struct dpif_flow *,
+ struct udpif_key **result, int *error);
+static void ukey_delete__(struct udpif_key *);
+static void ukey_delete(struct umap *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
const struct nlattr *userdata);
static int upcall_receive(struct upcall *, const struct dpif_backer *,
- const struct ofpbuf *packet, enum dpif_upcall_type,
- const struct nlattr *userdata, const struct flow *);
+ const struct dp_packet *packet, enum dpif_upcall_type,
+ const struct nlattr *userdata, const struct flow *,
+ const ovs_u128 *ufid, const unsigned pmd_id);
static void upcall_uninit(struct upcall *);
static upcall_callback upcall_cb;
static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
+static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);
-struct udpif *
-udpif_create(struct dpif_backer *backer, struct dpif *dpif)
+void
+udpif_init(void)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- struct udpif *udpif = xzalloc(sizeof *udpif);
-
if (ovsthread_once_start(&once)) {
unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
NULL);
upcall_unixctl_disable_megaflows, NULL);
unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
upcall_unixctl_enable_megaflows, NULL);
+ unixctl_command_register("upcall/disable-ufid", "", 0, 0,
+ upcall_unixctl_disable_ufid, NULL);
+ unixctl_command_register("upcall/enable-ufid", "", 0, 0,
+ upcall_unixctl_enable_ufid, NULL);
unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
upcall_unixctl_set_flow_limit, NULL);
unixctl_command_register("revalidator/wait", "", 0, 0,
upcall_unixctl_dump_wait, NULL);
+ unixctl_command_register("revalidator/purge", "", 0, 0,
+ upcall_unixctl_purge, NULL);
ovsthread_once_done(&once);
}
+}
+
+struct udpif *
+udpif_create(struct dpif_backer *backer, struct dpif *dpif)
+{
+ struct udpif *udpif = xzalloc(sizeof *udpif);
udpif->dpif = dpif;
udpif->backer = backer;
atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
- udpif->secret = random_uint32();
udpif->reval_seq = seq_create();
udpif->dump_seq = seq_create();
latch_init(&udpif->exit_latch);
list_push_back(&all_udpifs, &udpif->list_node);
+ atomic_init(&udpif->enable_ufid, false);
atomic_init(&udpif->n_flows, 0);
atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
ovs_mutex_init(&udpif->n_flows_mutex);
+ udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
+ for (int i = 0; i < N_UMAPS; i++) {
+ cmap_init(&udpif->ukeys[i].cmap);
+ ovs_mutex_init(&udpif->ukeys[i].mutex);
+ }
dpif_register_upcall_cb(dpif, upcall_cb, udpif);
{
udpif_stop_threads(udpif);
+ for (int i = 0; i < N_UMAPS; i++) {
+ cmap_destroy(&udpif->ukeys[i].cmap);
+ ovs_mutex_destroy(&udpif->ukeys[i].mutex);
+ }
+ free(udpif->ukeys);
+ udpif->ukeys = NULL;
+
list_remove(&udpif->list_node);
latch_destroy(&udpif->exit_latch);
seq_destroy(udpif->reval_seq);
/* Delete ukeys, and delete all flows from the datapath to prevent
* double-counting stats. */
revalidator_purge(revalidator);
-
- hmap_destroy(&udpif->ukeys[i].hmap);
- ovs_mutex_destroy(&udpif->ukeys[i].mutex);
}
latch_poll(&udpif->exit_latch);
free(udpif->handlers);
udpif->handlers = NULL;
udpif->n_handlers = 0;
-
- free(udpif->ukeys);
- udpif->ukeys = NULL;
}
}
{
if (udpif && n_handlers && n_revalidators) {
size_t i;
+ bool enable_ufid;
udpif->n_handlers = n_handlers;
udpif->n_revalidators = n_revalidators;
"handler", udpif_upcall_handler, handler);
}
+ enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
+ atomic_init(&udpif->enable_ufid, enable_ufid);
dpif_enable_upcall(udpif->dpif);
ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
udpif->reval_exit = false;
udpif->revalidators = xzalloc(udpif->n_revalidators
* sizeof *udpif->revalidators);
- udpif->ukeys = xmalloc(sizeof *udpif->ukeys * n_revalidators);
for (i = 0; i < udpif->n_revalidators; i++) {
struct revalidator *revalidator = &udpif->revalidators[i];
revalidator->udpif = udpif;
- hmap_init(&udpif->ukeys[i].hmap);
- ovs_mutex_init(&udpif->ukeys[i].mutex);
- revalidator->ukeys = &udpif->ukeys[i].hmap;
revalidator->thread = ovs_thread_create(
"revalidator", udpif_revalidator, revalidator);
}
simap_increase(usage, "handlers", udpif->n_handlers);
simap_increase(usage, "revalidators", udpif->n_revalidators);
- for (i = 0; i < udpif->n_revalidators; i++) {
- ovs_mutex_lock(&udpif->ukeys[i].mutex);
- simap_increase(usage, "udpif keys", hmap_count(&udpif->ukeys[i].hmap));
- ovs_mutex_unlock(&udpif->ukeys[i].mutex);
+ for (i = 0; i < N_UMAPS; i++) {
+ simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
}
}
}
}
+static bool
+udpif_use_ufid(struct udpif *udpif)
+{
+ bool enable;
+
+ atomic_read_relaxed(&enable_ufid, &enable);
+ return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
+}
+
\f
static unsigned long
udpif_get_n_flows(struct udpif *udpif)
struct udpif *udpif = handler->udpif;
while (!latch_is_set(&handler->udpif->exit_latch)) {
- if (!recv_upcalls(handler)) {
+ if (recv_upcalls(handler)) {
+ poll_immediate_wake();
+ } else {
dpif_recv_wait(udpif->dpif, handler->handler_id);
latch_wait(&udpif->exit_latch);
- poll_block();
}
- coverage_clear();
+ poll_block();
}
return NULL;
struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
struct upcall upcalls[UPCALL_MAX_BATCH];
+ struct flow flows[UPCALL_MAX_BATCH];
size_t n_upcalls, i;
n_upcalls = 0;
struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
struct upcall *upcall = &upcalls[n_upcalls];
- struct pkt_metadata md;
- struct flow flow;
+ struct flow *flow = &flows[n_upcalls];
int error;
ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
break;
}
- if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, &flow)
+ if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
== ODP_FIT_ERROR) {
goto free_dupcall;
}
error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
- dupcall->type, dupcall->userdata, &flow);
+ dupcall->type, dupcall->userdata, flow,
+ &dupcall->ufid, PMD_ID_NULL);
if (error) {
if (error == ENODEV) {
/* Received packet on datapath port for which we couldn't
* while traffic is being received. Print a rate-limited
* message in case it happens frequently. */
dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
- dupcall->key_len, NULL, 0, NULL, 0, NULL);
+ dupcall->key_len, NULL, 0, NULL, 0,
+ &dupcall->ufid, PMD_ID_NULL, NULL);
VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
- "port %"PRIu32, flow.in_port.odp_port);
+ "port %"PRIu32, flow->in_port.odp_port);
}
goto free_dupcall;
}
upcall->key = dupcall->key;
upcall->key_len = dupcall->key_len;
+ upcall->ufid = &dupcall->ufid;
upcall->out_tun_key = dupcall->out_tun_key;
+ upcall->actions = dupcall->actions;
- if (vsp_adjust_flow(upcall->ofproto, &flow, &dupcall->packet)) {
+ if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
upcall->vsp_adjusted = true;
}
- md = pkt_metadata_from_flow(&flow);
- flow_extract(&dupcall->packet, &md, &flow);
+ pkt_metadata_from_flow(&dupcall->packet.md, flow);
+ flow_extract(&dupcall->packet, flow);
error = process_upcall(udpif, upcall, NULL);
if (error) {
cleanup:
upcall_uninit(upcall);
free_dupcall:
- ofpbuf_uninit(&dupcall->packet);
+ dp_packet_uninit(&dupcall->packet);
ofpbuf_uninit(recv_buf);
}
if (n_upcalls) {
handle_upcalls(handler->udpif, upcalls, n_upcalls);
for (i = 0; i < n_upcalls; i++) {
- ofpbuf_uninit(&dupcalls[i].packet);
+ dp_packet_uninit(&dupcalls[i].packet);
ofpbuf_uninit(&recv_bufs[i]);
upcall_uninit(&upcalls[i]);
}
if (leader) {
uint64_t reval_seq;
+ recirc_run(); /* Recirculation cleanup. */
+
reval_seq = seq_read(udpif->reval_seq);
- udpif->need_revalidate = last_reval_seq != reval_seq;
last_reval_seq = reval_seq;
n_flows = udpif_get_n_flows(udpif);
start_time = time_msec();
if (!udpif->reval_exit) {
- udpif->dump = dpif_flow_dump_create(udpif->dpif);
+ bool terse_dump;
+
+ terse_dump = udpif_use_ufid(udpif);
+ udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
}
}
? ODPP_NONE
: odp_in_port;
pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
- odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, ODPP_NONE,
- buf);
+ odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path,
+ ODPP_NONE, false, buf);
}
/* If there is no error, the upcall must be destroyed with upcall_uninit()
* since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
- const struct ofpbuf *packet, enum dpif_upcall_type type,
- const struct nlattr *userdata, const struct flow *flow)
+ const struct dp_packet *packet, enum dpif_upcall_type type,
+ const struct nlattr *userdata, const struct flow *flow,
+ const ovs_u128 *ufid, const unsigned pmd_id)
{
int error;
return error;
}
+ upcall->recirc = NULL;
+ upcall->have_recirc_ref = false;
upcall->flow = flow;
upcall->packet = packet;
+ upcall->ufid = ufid;
+ upcall->pmd_id = pmd_id;
upcall->type = type;
upcall->userdata = userdata;
ofpbuf_init(&upcall->put_actions, 0);
upcall->xout_initialized = false;
upcall->vsp_adjusted = false;
+ upcall->ukey_persists = false;
+ upcall->ukey = NULL;
upcall->key = NULL;
upcall->key_len = 0;
upcall->out_tun_key = NULL;
+ upcall->actions = NULL;
return 0;
}
struct xlate_in xin;
stats.n_packets = 1;
- stats.n_bytes = ofpbuf_size(upcall->packet);
+ stats.n_bytes = dp_packet_size(upcall->packet);
stats.used = time_msec();
stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
if (upcall->type == DPIF_UC_MISS) {
xin.resubmit_stats = &stats;
+
+ if (xin.recirc) {
+ /* We may install a datapath flow only if we get a reference to the
+ * recirculation context (otherwise we could have recirculation
+             * upcalls using a recirculation ID for which no context can be
+ * found). We may still execute the flow's actions even if we
+ * don't install the flow. */
+ upcall->recirc = xin.recirc;
+ upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(xin.recirc);
+ }
} else {
- /* For non-miss upcalls, there's a flow in the datapath which this
- * packet was accounted to. Presumably the revalidators will deal
+ /* For non-miss upcalls, we are either executing actions (one of which
+         * is a userspace action) for an upcall, in which case the stats have
+ * already been taken care of, or there's a flow in the datapath which
+ * this packet was accounted to. Presumably the revalidators will deal
* with pushing its stats eventually. */
}
+ upcall->dump_seq = seq_read(udpif->dump_seq);
+ upcall->reval_seq = seq_read(udpif->reval_seq);
xlate_actions(&xin, &upcall->xout);
upcall->xout_initialized = true;
*
* Copy packets before they are modified by execution. */
if (upcall->xout.fail_open) {
- const struct ofpbuf *packet = upcall->packet;
+ const struct dp_packet *packet = upcall->packet;
struct ofproto_packet_in *pin;
pin = xmalloc(sizeof *pin);
- pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet));
- pin->up.packet_len = ofpbuf_size(packet);
+ pin->up.packet = xmemdup(dp_packet_data(packet), dp_packet_size(packet));
+ pin->up.packet_len = dp_packet_size(packet);
pin->up.reason = OFPR_NO_MATCH;
pin->up.table_id = 0;
pin->up.cookie = OVS_BE64_MAX;
- flow_get_metadata(upcall->flow, &pin->up.fmd);
+ flow_get_metadata(upcall->flow, &pin->up.flow_metadata);
pin->send_len = 0; /* Not used for flow table misses. */
pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
ofproto_dpif_send_packet_in(upcall->ofproto, pin);
if (!upcall->xout.slow) {
ofpbuf_use_const(&upcall->put_actions,
- ofpbuf_data(upcall->xout.odp_actions),
- ofpbuf_size(upcall->xout.odp_actions));
+ upcall->xout.odp_actions->data,
+ upcall->xout.odp_actions->size);
} else {
ofpbuf_init(&upcall->put_actions, 0);
compose_slow_path(udpif, &upcall->xout, upcall->flow,
upcall->flow->in_port.odp_port,
&upcall->put_actions);
}
+
+ /* This function is also called for slow-pathed flows. As we are only
+ * going to create new datapath flows for actual datapath misses, there is
+ * no point in creating a ukey otherwise. */
+ if (upcall->type == DPIF_UC_MISS) {
+ upcall->ukey = ukey_create_from_upcall(upcall);
+ }
}
static void
xlate_out_uninit(&upcall->xout);
}
ofpbuf_uninit(&upcall->put_actions);
+ if (upcall->ukey) {
+ if (!upcall->ukey_persists) {
+ ukey_delete__(upcall->ukey);
+ }
+ } else if (upcall->have_recirc_ref) {
+ /* The reference was transferred to the ukey if one was created. */
+ recirc_id_node_unref(upcall->recirc);
+ }
}
}
static int
-upcall_cb(const struct ofpbuf *packet, const struct flow *flow,
- enum dpif_upcall_type type, const struct nlattr *userdata,
- struct ofpbuf *actions, struct flow_wildcards *wc,
- struct ofpbuf *put_actions, void *aux)
+upcall_cb(const struct dp_packet *packet, const struct flow *flow,
+          ovs_u128 *ufid, unsigned pmd_id, enum dpif_upcall_type type,
+          const struct nlattr *userdata, struct ofpbuf *actions,
+          struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
{
struct udpif *udpif = aux;
unsigned int flow_limit;
atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
- flow);
+ flow, ufid, pmd_id);
if (error) {
return error;
}
}
if (upcall.xout.slow && put_actions) {
- ofpbuf_put(put_actions, ofpbuf_data(&upcall.put_actions),
- ofpbuf_size(&upcall.put_actions));
+ ofpbuf_put(put_actions, upcall.put_actions.data,
+ upcall.put_actions.size);
}
if (OVS_LIKELY(wc)) {
/* XXX: This could be avoided with sufficient API changes. */
*wc = upcall.xout.wc;
} else {
- memset(wc, 0xff, sizeof *wc);
- flow_wildcards_clear_non_packet_fields(wc);
+ flow_wildcards_init_for_packet(wc, flow);
}
}
if (udpif_get_n_flows(udpif) >= flow_limit) {
error = ENOSPC;
+ goto out;
+ }
+
+    /* Prevent miss flow installation if the key has a recirculation ID but we
+ * were not able to get a reference on it. */
+ if (type == DPIF_UC_MISS && upcall.recirc && !upcall.have_recirc_ref) {
+ error = ENOSPC;
+ goto out;
}
+ if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
+ error = ENOSPC;
+ }
out:
+ if (!error) {
+ upcall.ukey_persists = true;
+ }
upcall_uninit(&upcall);
return error;
}
struct ofpbuf *odp_actions)
{
const struct nlattr *userdata = upcall->userdata;
- const struct ofpbuf *packet = upcall->packet;
+ const struct dp_packet *packet = upcall->packet;
const struct flow *flow = upcall->flow;
switch (classify_upcall(upcall->type, userdata)) {
case SFLOW_UPCALL:
if (upcall->sflow) {
union user_action_cookie cookie;
-
+ const struct nlattr *actions;
+            size_t actions_len = 0;
+            struct dpif_sflow_actions sflow_actions;
+
+            memset(&sflow_actions, 0, sizeof sflow_actions);
memset(&cookie, 0, sizeof cookie);
memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
+ if (upcall->actions) {
+ /* Actions were passed up from datapath. */
+ actions = nl_attr_get(upcall->actions);
+ actions_len = nl_attr_get_size(upcall->actions);
+ if (actions && actions_len) {
+ dpif_sflow_read_actions(flow, actions, actions_len,
+ &sflow_actions);
+ }
+ }
+ if (actions_len == 0) {
+ /* Lookup actions in userspace cache. */
+ struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid);
+ if (ukey) {
+ actions = ukey->actions->data;
+ actions_len = ukey->actions->size;
+ dpif_sflow_read_actions(flow, actions, actions_len,
+ &sflow_actions);
+ }
+ }
dpif_sflow_received(upcall->sflow, packet, flow,
- flow->in_port.odp_port, &cookie);
+ flow->in_port.odp_port, &cookie,
+ actions_len > 0 ? &sflow_actions : NULL);
}
break;
memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);
if (upcall->out_tun_key) {
- memset(&output_tunnel_key, 0, sizeof output_tunnel_key);
odp_tun_key_from_attr(upcall->out_tun_key,
&output_tunnel_key);
}
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
size_t n_upcalls)
{
- struct odputil_keybuf mask_bufs[UPCALL_MAX_BATCH];
struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
- struct dpif_op ops[UPCALL_MAX_BATCH * 2];
+ struct ukey_op ops[UPCALL_MAX_BATCH * 2];
unsigned int flow_limit;
- size_t n_ops, i;
+ size_t n_ops, n_opsp, i;
bool may_put;
bool megaflow;
n_ops = 0;
for (i = 0; i < n_upcalls; i++) {
struct upcall *upcall = &upcalls[i];
- const struct ofpbuf *packet = upcall->packet;
- struct dpif_op *op;
+ const struct dp_packet *packet = upcall->packet;
+ struct ukey_op *op;
if (upcall->vsp_adjusted) {
/* This packet was received on a VLAN splinter port. We added a
* actions were composed assuming that the packet contained no
* VLAN. So, we must remove the VLAN header from the packet before
* trying to execute the actions. */
- if (ofpbuf_size(upcall->xout.odp_actions)) {
- eth_pop_vlan(CONST_CAST(struct ofpbuf *, upcall->packet));
+ if (upcall->xout.odp_actions->size) {
+ eth_pop_vlan(CONST_CAST(struct dp_packet *, upcall->packet));
}
/* Remove the flow vlan tags inserted by vlan splinter logic
* - The datapath already has too many flows.
*
* - We received this packet via some flow installed in the kernel
- * already. */
- if (may_put && upcall->type == DPIF_UC_MISS) {
- struct ofpbuf mask;
-
- ofpbuf_use_stack(&mask, &mask_bufs[i], sizeof mask_bufs[i]);
-
- if (megaflow) {
- size_t max_mpls;
- bool recirc;
-
- recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
- max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
- odp_flow_key_from_mask(&mask, &upcall->xout.wc.masks,
- upcall->flow, UINT32_MAX, max_mpls,
- recirc);
- }
+ * already.
+ *
+ * - Upcall was a recirculation but we do not have a reference to
+         *   the recirculation ID. */
+ if (may_put && upcall->type == DPIF_UC_MISS &&
+ (!upcall->recirc || upcall->have_recirc_ref)) {
+ struct udpif_key *ukey = upcall->ukey;
+ upcall->ukey_persists = true;
op = &ops[n_ops++];
- op->type = DPIF_OP_FLOW_PUT;
- op->u.flow_put.flags = DPIF_FP_CREATE;
- op->u.flow_put.key = upcall->key;
- op->u.flow_put.key_len = upcall->key_len;
- op->u.flow_put.mask = ofpbuf_data(&mask);
- op->u.flow_put.mask_len = ofpbuf_size(&mask);
- op->u.flow_put.stats = NULL;
- op->u.flow_put.actions = ofpbuf_data(&upcall->put_actions);
- op->u.flow_put.actions_len = ofpbuf_size(&upcall->put_actions);
+
+ op->ukey = ukey;
+ op->dop.type = DPIF_OP_FLOW_PUT;
+ op->dop.u.flow_put.flags = DPIF_FP_CREATE;
+ op->dop.u.flow_put.key = ukey->key;
+ op->dop.u.flow_put.key_len = ukey->key_len;
+ op->dop.u.flow_put.mask = ukey->mask;
+ op->dop.u.flow_put.mask_len = ukey->mask_len;
+ op->dop.u.flow_put.ufid = upcall->ufid;
+ op->dop.u.flow_put.stats = NULL;
+ op->dop.u.flow_put.actions = ukey->actions->data;
+ op->dop.u.flow_put.actions_len = ukey->actions->size;
}
- if (ofpbuf_size(upcall->xout.odp_actions)) {
+ if (upcall->xout.odp_actions->size) {
op = &ops[n_ops++];
- op->type = DPIF_OP_EXECUTE;
- op->u.execute.packet = CONST_CAST(struct ofpbuf *, packet);
+ op->ukey = NULL;
+ op->dop.type = DPIF_OP_EXECUTE;
+ op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet);
odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
- &op->u.execute.md);
- op->u.execute.actions = ofpbuf_data(upcall->xout.odp_actions);
- op->u.execute.actions_len = ofpbuf_size(upcall->xout.odp_actions);
- op->u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
+ &op->dop.u.execute.packet->md);
+ op->dop.u.execute.actions = upcall->xout.odp_actions->data;
+ op->dop.u.execute.actions_len = upcall->xout.odp_actions->size;
+ op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
+ op->dop.u.execute.probe = false;
}
}
- /* Execute batch. */
+ /* Execute batch.
+ *
+ * We install ukeys before installing the flows, locking them for exclusive
+ * access by this thread for the period of installation. This ensures that
+ * other threads won't attempt to delete the flows as we are creating them.
+ */
+ n_opsp = 0;
for (i = 0; i < n_ops; i++) {
- opsp[i] = &ops[i];
+ struct udpif_key *ukey = ops[i].ukey;
+
+ if (ukey) {
+ /* If we can't install the ukey, don't install the flow. */
+ if (!ukey_install_start(udpif, ukey)) {
+ ukey_delete__(ukey);
+ ops[i].ukey = NULL;
+ continue;
+ }
+ }
+ opsp[n_opsp++] = &ops[i].dop;
}
- dpif_operate(udpif->dpif, opsp, n_ops);
+ dpif_operate(udpif->dpif, opsp, n_opsp);
+ for (i = 0; i < n_ops; i++) {
+ if (ops[i].ukey) {
+ ukey_install_finish(ops[i].ukey, ops[i].dop.error);
+ }
+ }
+}
+
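+/* The ufid is expected to be well-distributed (it is normally derived from a
+ * hash of the flow key or generated by the datapath), so its first 32 bits
+ * are used directly as the hash for the ukey maps. */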
+static uint32_t
+get_ufid_hash(const ovs_u128 *ufid)
+{
+ return ufid->u32[0];
}
-/* Must be called with udpif->ukeys[hash % udpif->n_revalidators].mutex. */
static struct udpif_key *
-ukey_lookup(struct udpif *udpif, const struct nlattr *key, size_t key_len,
- uint32_t hash)
- OVS_REQUIRES(udpif->ukeys->mutex)
+ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
{
struct udpif_key *ukey;
- struct hmap *hmap = &udpif->ukeys[hash % udpif->n_revalidators].hmap;
+ int idx = get_ufid_hash(ufid) % N_UMAPS;
+ struct cmap *cmap = &udpif->ukeys[idx].cmap;
- HMAP_FOR_EACH_WITH_HASH (ukey, hmap_node, hash, hmap) {
- if (ukey->key_len == key_len && !memcmp(ukey->key, key, key_len)) {
+ CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
+ if (ovs_u128_equals(&ukey->ufid, ufid)) {
return ukey;
}
}
return NULL;
}
-/* Creates a ukey for 'key' and 'key_len', returning it with ukey->mutex in
- * a locked state. */
static struct udpif_key *
-ukey_create(const struct nlattr *key, size_t key_len, long long int used)
+ukey_create__(const struct nlattr *key, size_t key_len,
+ const struct nlattr *mask, size_t mask_len,
+ bool ufid_present, const ovs_u128 *ufid,
+ const unsigned pmd_id, const struct ofpbuf *actions,
+ uint64_t dump_seq, uint64_t reval_seq, long long int used,
+ const struct recirc_id_node *key_recirc, struct xlate_out *xout)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
- struct udpif_key *ukey = xmalloc(sizeof *ukey);
+ unsigned n_recircs = (key_recirc ? 1 : 0) + (xout ? xout->n_recircs : 0);
+ struct udpif_key *ukey = xmalloc(sizeof *ukey +
+ n_recircs * sizeof *ukey->recircs);
- ovs_mutex_init(&ukey->mutex);
- ukey->key = &ukey->key_buf_nla;
- memcpy(&ukey->key_buf, key, key_len);
+ memcpy(&ukey->keybuf, key, key_len);
+ ukey->key = &ukey->keybuf.nla;
ukey->key_len = key_len;
+ memcpy(&ukey->maskbuf, mask, mask_len);
+ ukey->mask = &ukey->maskbuf.nla;
+ ukey->mask_len = mask_len;
+ ukey->ufid_present = ufid_present;
+ ukey->ufid = *ufid;
+ ukey->pmd_id = pmd_id;
+ ukey->hash = get_ufid_hash(&ukey->ufid);
+ ukey->actions = ofpbuf_clone(actions);
- ovs_mutex_lock(&ukey->mutex);
- ukey->dump_seq = 0;
- ukey->flow_exists = true;
- ukey->created = used ? used : time_msec();
+ ovs_mutex_init(&ukey->mutex);
+ ukey->dump_seq = dump_seq;
+ ukey->reval_seq = reval_seq;
+ ukey->flow_exists = false;
+ ukey->created = time_msec();
memset(&ukey->stats, 0, sizeof ukey->stats);
+ ukey->stats.used = used;
ukey->xcache = NULL;
+ ukey->n_recircs = n_recircs;
+ if (key_recirc) {
+ ukey->recircs[0] = key_recirc->id;
+ }
+ if (xout && xout->n_recircs) {
+ const uint32_t *act_recircs = xlate_out_get_recircs(xout);
+
+ memcpy(ukey->recircs + (key_recirc ? 1 : 0), act_recircs,
+ xout->n_recircs * sizeof *ukey->recircs);
+ xlate_out_take_recircs(xout);
+ }
return ukey;
}
-/* Searches for a ukey in 'udpif->ukeys' that matches 'key' and 'key_len' and
- * attempts to lock the ukey. If the ukey does not exist, create it.
+static struct udpif_key *
+ukey_create_from_upcall(struct upcall *upcall)
+{
+ struct odputil_keybuf keystub, maskstub;
+ struct ofpbuf keybuf, maskbuf;
+ bool megaflow;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = upcall->flow,
+ .mask = &upcall->xout.wc.masks,
+ };
+
+ odp_parms.support = ofproto_dpif_get_support(upcall->ofproto)->odp;
+ if (upcall->key_len) {
+ ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
+ } else {
+ /* dpif-netdev doesn't provide a netlink-formatted flow key in the
+ * upcall, so convert the upcall's flow here. */
+ ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
+ odp_parms.odp_in_port = upcall->flow->in_port.odp_port;
+ odp_flow_key_from_flow(&odp_parms, &keybuf);
+ }
+
+ atomic_read_relaxed(&enable_megaflows, &megaflow);
+ ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
+ if (megaflow) {
+ odp_parms.odp_in_port = ODPP_NONE;
+ odp_parms.key_buf = &keybuf;
+
+ odp_flow_key_from_mask(&odp_parms, &maskbuf);
+ }
+
+ return ukey_create__(keybuf.data, keybuf.size, maskbuf.data, maskbuf.size,
+ true, upcall->ufid, upcall->pmd_id,
+ &upcall->put_actions, upcall->dump_seq,
+ upcall->reval_seq, 0,
+ upcall->have_recirc_ref ? upcall->recirc : NULL,
+ &upcall->xout);
+}
+
+static int
+ukey_create_from_dpif_flow(const struct udpif *udpif,
+ const struct dpif_flow *flow,
+ struct udpif_key **ukey)
+{
+ struct dpif_flow full_flow;
+ struct ofpbuf actions;
+ uint64_t dump_seq, reval_seq;
+ uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
+ const struct nlattr *a;
+ unsigned int left;
+
+ if (!flow->key_len || !flow->actions_len) {
+ struct ofpbuf buf;
+ int err;
+
+ /* If the key or actions were not provided by the datapath, fetch the
+ * full flow. */
+ ofpbuf_use_stack(&buf, &stub, sizeof stub);
+ err = dpif_flow_get(udpif->dpif, NULL, 0, &flow->ufid,
+ flow->pmd_id, &buf, &full_flow);
+ if (err) {
+ return err;
+ }
+ flow = &full_flow;
+ }
+
+ /* Check the flow actions for recirculation action. As recirculation
+ * relies on OVS userspace internal state, we need to delete all old
+ * datapath flows with recirculation upon OVS restart. */
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, flow->actions, flow->actions_len) {
+ if (nl_attr_type(a) == OVS_ACTION_ATTR_RECIRC) {
+ return EINVAL;
+ }
+ }
+
+ dump_seq = seq_read(udpif->dump_seq);
+ reval_seq = seq_read(udpif->reval_seq);
+    ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
+ *ukey = ukey_create__(flow->key, flow->key_len,
+ flow->mask, flow->mask_len, flow->ufid_present,
+ &flow->ufid, flow->pmd_id, &actions, dump_seq,
+ reval_seq, flow->stats.used, NULL, NULL);
+
+ return 0;
+}
+
+/* Attempts to insert a ukey into the shared ukey maps.
*
- * Returns true on success, setting *result to the matching ukey and returning
- * it in a locked state. Otherwise, returns false and clears *result. */
+ * On success, returns true, installs the ukey and returns it in a locked
+ * state. Otherwise, returns false. */
static bool
-ukey_acquire(struct udpif *udpif, const struct nlattr *key, size_t key_len,
- long long int used, struct udpif_key **result)
- OVS_TRY_LOCK(true, (*result)->mutex)
+ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
+ OVS_TRY_LOCK(true, new_ukey->mutex)
{
- struct udpif_key *ukey;
- uint32_t hash, idx;
+ struct umap *umap;
+ struct udpif_key *old_ukey;
+ uint32_t idx;
bool locked = false;
- hash = hash_bytes(key, key_len, udpif->secret);
- idx = hash % udpif->n_revalidators;
-
- ovs_mutex_lock(&udpif->ukeys[idx].mutex);
- ukey = ukey_lookup(udpif, key, key_len, hash);
- if (!ukey) {
- ukey = ukey_create(key, key_len, used);
- hmap_insert(&udpif->ukeys[idx].hmap, &ukey->hmap_node, hash);
- locked = true;
- } else if (!ovs_mutex_trylock(&ukey->mutex)) {
+ idx = new_ukey->hash % N_UMAPS;
+ umap = &udpif->ukeys[idx];
+ ovs_mutex_lock(&umap->mutex);
+ old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
+ if (old_ukey) {
+ /* Uncommon case: A ukey is already installed with the same UFID. */
+ if (old_ukey->key_len == new_ukey->key_len
+ && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
+ COVERAGE_INC(handler_duplicate_upcall);
+ } else {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ odp_format_ufid(&old_ukey->ufid, &ds);
+ ds_put_cstr(&ds, " ");
+ odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
+ ds_put_cstr(&ds, "\n");
+ odp_format_ufid(&new_ukey->ufid, &ds);
+ ds_put_cstr(&ds, " ");
+ odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);
+
+ VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+ } else {
+ ovs_mutex_lock(&new_ukey->mutex);
+ cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
locked = true;
}
- ovs_mutex_unlock(&udpif->ukeys[idx].mutex);
+ ovs_mutex_unlock(&umap->mutex);
- if (locked) {
- *result = ukey;
+ return locked;
+}
+
+static void
+ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
+{
+ ukey->flow_exists = true;
+}
+
+static bool
+ukey_install_finish(struct udpif_key *ukey, int error)
+ OVS_RELEASES(ukey->mutex)
+{
+ if (!error) {
+ ukey_install_finish__(ukey);
+ }
+ ovs_mutex_unlock(&ukey->mutex);
+
+ return !error;
+}
+
+static bool
+ukey_install(struct udpif *udpif, struct udpif_key *ukey)
+{
+ /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
+ * to call ukey_install_start(), install the corresponding datapath flow,
+ * then call ukey_install_finish(). The netdev interface using upcall_cb()
+ * doesn't provide a function to separately finish the flow installation,
+ * so we perform the operations together here.
+ *
+ * This is fine currently, as revalidator threads will only delete this
+ * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
+ * It is unlikely for a revalidator thread to advance dump_seq and reach
+ * the next GC phase between ukey creation and flow installation. */
+ return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
+}
+
+/* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
+ * lock the ukey. If the ukey does not exist, create it.
+ *
+ * Returns 0 on success, setting *result to the matching ukey and returning it
+ * in a locked state. Otherwise, returns an errno and clears *result. EBUSY
+ * indicates that another thread is handling this flow. Other errors indicate
+ * an unexpected condition while creating a new ukey.
+ *
+ * *error is an output parameter provided to appease the threadsafety analyser,
+ * and its value matches the return value. */
+static int
+ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
+ struct udpif_key **result, int *error)
+ OVS_TRY_LOCK(0, (*result)->mutex)
+{
+ struct udpif_key *ukey;
+ int retval;
+
+ ukey = ukey_lookup(udpif, &flow->ufid);
+ if (ukey) {
+ retval = ovs_mutex_trylock(&ukey->mutex);
} else {
+ /* Usually we try to avoid installing flows from revalidator threads,
+ * because locking on a umap may cause handler threads to block.
+ * However there are certain cases, like when ovs-vswitchd is
+ * restarted, where it is desirable to handle flows that exist in the
+         * datapath gracefully (i.e., don't just clear the datapath). */
+ bool install;
+
+ retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
+ if (retval) {
+ goto done;
+ }
+ install = ukey_install_start(udpif, ukey);
+ if (install) {
+ ukey_install_finish__(ukey);
+ retval = 0;
+ } else {
+ ukey_delete__(ukey);
+ retval = EBUSY;
+ }
+ }
+
+done:
+ *error = retval;
+ if (retval) {
*result = NULL;
+ } else {
+ *result = ukey;
}
- return locked;
+ return retval;
}
static void
-ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey)
+ukey_delete__(struct udpif_key *ukey)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
- if (revalidator) {
- hmap_remove(revalidator->ukeys, &ukey->hmap_node);
+ if (ukey) {
+ for (int i = 0; i < ukey->n_recircs; i++) {
+ recirc_free_id(ukey->recircs[i]);
+ }
+ xlate_cache_delete(ukey->xcache);
+ ofpbuf_delete(ukey->actions);
+ ovs_mutex_destroy(&ukey->mutex);
+ free(ukey);
}
- xlate_cache_delete(ukey->xcache);
- ovs_mutex_destroy(&ukey->mutex);
- free(ukey);
+}
+
+static void
+ukey_delete(struct umap *umap, struct udpif_key *ukey)
+ OVS_REQUIRES(umap->mutex)
+{
+ cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
+ ovsrcu_postpone(ukey_delete__, ukey);
}
static bool
static bool
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
- const struct dpif_flow *f)
+ const struct dpif_flow_stats *stats, uint64_t reval_seq)
OVS_REQUIRES(ukey->mutex)
{
uint64_t slow_path_buf[128 / 8];
struct dpif_flow_stats push;
struct ofpbuf xout_actions;
struct flow flow, dp_mask;
- uint32_t *dp32, *xout32;
+ uint64_t *dp64, *xout64;
ofp_port_t ofp_in_port;
struct xlate_in xin;
long long int last_used;
int error;
size_t i;
bool ok;
+ bool need_revalidate;
ok = false;
xoutp = NULL;
netflow = NULL;
+ need_revalidate = (ukey->reval_seq != reval_seq);
last_used = ukey->stats.used;
- push.used = f->stats.used;
- push.tcp_flags = f->stats.tcp_flags;
- push.n_packets = (f->stats.n_packets > ukey->stats.n_packets
- ? f->stats.n_packets - ukey->stats.n_packets
+ push.used = stats->used;
+ push.tcp_flags = stats->tcp_flags;
+ push.n_packets = (stats->n_packets > ukey->stats.n_packets
+ ? stats->n_packets - ukey->stats.n_packets
: 0);
- push.n_bytes = (f->stats.n_bytes > ukey->stats.n_bytes
- ? f->stats.n_bytes - ukey->stats.n_bytes
+ push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
+ ? stats->n_bytes - ukey->stats.n_bytes
: 0);
- if (udpif->need_revalidate && last_used
+ if (need_revalidate && last_used
&& !should_revalidate(udpif, push.n_packets, last_used)) {
ok = false;
goto exit;
}
/* We will push the stats, so update the ukey stats cache. */
- ukey->stats = f->stats;
- if (!push.n_packets && !udpif->need_revalidate) {
+ ukey->stats = *stats;
+ if (!push.n_packets && !need_revalidate) {
ok = true;
goto exit;
}
- if (ukey->xcache && !udpif->need_revalidate) {
+ if (ukey->xcache && !need_revalidate) {
xlate_push_stats(ukey->xcache, &push);
ok = true;
goto exit;
goto exit;
}
- if (udpif->need_revalidate) {
+ if (need_revalidate) {
xlate_cache_clear(ukey->xcache);
}
if (!ukey->xcache) {
xin.may_learn = true;
}
xin.xcache = ukey->xcache;
- xin.skip_wildcards = !udpif->need_revalidate;
+ xin.skip_wildcards = !need_revalidate;
xlate_actions(&xin, &xout);
xoutp = &xout;
- if (!udpif->need_revalidate) {
+ if (!need_revalidate) {
ok = true;
goto exit;
}
if (!xout.slow) {
- ofpbuf_use_const(&xout_actions, ofpbuf_data(xout.odp_actions),
- ofpbuf_size(xout.odp_actions));
+ ofpbuf_use_const(&xout_actions, xout.odp_actions->data,
+ xout.odp_actions->size);
} else {
ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
&xout_actions);
}
- if (f->actions_len != ofpbuf_size(&xout_actions)
- || memcmp(ofpbuf_data(&xout_actions), f->actions, f->actions_len)) {
+ if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
goto exit;
}
- if (odp_flow_key_to_mask(f->mask, f->mask_len, &dp_mask, &flow)
- == ODP_FIT_ERROR) {
+ if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, ukey->key,
+ ukey->key_len, &dp_mask, &flow) == ODP_FIT_ERROR) {
goto exit;
}
* mask in the kernel is more specific i.e. less wildcarded, than what
* we've calculated here. This guarantees we don't catch any packets we
* shouldn't with the megaflow. */
- dp32 = (uint32_t *) &dp_mask;
- xout32 = (uint32_t *) &xout.wc.masks;
- for (i = 0; i < FLOW_U32S; i++) {
- if ((dp32[i] | xout32[i]) != dp32[i]) {
+ dp64 = (uint64_t *) &dp_mask;
+ xout64 = (uint64_t *) &xout.wc.masks;
+ for (i = 0; i < FLOW_U64S; i++) {
+ if ((dp64[i] | xout64[i]) != dp64[i]) {
goto exit;
}
}
+
ok = true;
exit:
+ if (ok) {
+ ukey->reval_seq = reval_seq;
+ }
if (netflow && !ok) {
netflow_flow_clear(netflow, &flow);
}
return ok;
}
-struct dump_op {
- struct udpif_key *ukey;
- struct dpif_flow_stats stats; /* Stats for 'op'. */
- struct dpif_op op; /* Flow del operation. */
-};
+static void
+delete_op_init__(struct udpif *udpif, struct ukey_op *op,
+ const struct dpif_flow *flow)
+{
+ op->ukey = NULL;
+ op->dop.type = DPIF_OP_FLOW_DEL;
+ op->dop.u.flow_del.key = flow->key;
+ op->dop.u.flow_del.key_len = flow->key_len;
+ op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
+ op->dop.u.flow_del.pmd_id = flow->pmd_id;
+ op->dop.u.flow_del.stats = &op->stats;
+ op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
+}
static void
-dump_op_init(struct dump_op *op, const struct nlattr *key, size_t key_len,
- struct udpif_key *ukey)
+delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
{
op->ukey = ukey;
- op->op.type = DPIF_OP_FLOW_DEL;
- op->op.u.flow_del.key = key;
- op->op.u.flow_del.key_len = key_len;
- op->op.u.flow_del.stats = &op->stats;
+ op->dop.type = DPIF_OP_FLOW_DEL;
+ op->dop.u.flow_del.key = ukey->key;
+ op->dop.u.flow_del.key_len = ukey->key_len;
+ op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
+ op->dop.u.flow_del.pmd_id = ukey->pmd_id;
+ op->dop.u.flow_del.stats = &op->stats;
+ op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
}
static void
-push_dump_ops__(struct udpif *udpif, struct dump_op *ops, size_t n_ops)
+push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
size_t i;
ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
for (i = 0; i < n_ops; i++) {
- opsp[i] = &ops[i].op;
+ opsp[i] = &ops[i].dop;
}
dpif_operate(udpif->dpif, opsp, n_ops);
for (i = 0; i < n_ops; i++) {
- struct dump_op *op = &ops[i];
+ struct ukey_op *op = &ops[i];
struct dpif_flow_stats *push, *stats, push_buf;
- stats = op->op.u.flow_del.stats;
+ stats = op->dop.u.flow_del.stats;
push = &push_buf;
- ovs_mutex_lock(&op->ukey->mutex);
- push->used = MAX(stats->used, op->ukey->stats.used);
- push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
- push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
- push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
- ovs_mutex_unlock(&op->ukey->mutex);
+ if (op->ukey) {
+ ovs_mutex_lock(&op->ukey->mutex);
+ push->used = MAX(stats->used, op->ukey->stats.used);
+ push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
+ push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
+ push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
+ ovs_mutex_unlock(&op->ukey->mutex);
+ } else {
+ push = stats;
+ }
if (push->n_packets || netflow_exists()) {
+ const struct nlattr *key = op->dop.u.flow_del.key;
+ size_t key_len = op->dop.u.flow_del.key_len;
struct ofproto_dpif *ofproto;
struct netflow *netflow;
ofp_port_t ofp_in_port;
struct flow flow;
int error;
- ovs_mutex_lock(&op->ukey->mutex);
- if (op->ukey->xcache) {
- xlate_push_stats(op->ukey->xcache, push);
+ if (op->ukey) {
+ ovs_mutex_lock(&op->ukey->mutex);
+ if (op->ukey->xcache) {
+ xlate_push_stats(op->ukey->xcache, push);
+ ovs_mutex_unlock(&op->ukey->mutex);
+ continue;
+ }
ovs_mutex_unlock(&op->ukey->mutex);
- continue;
+ key = op->ukey->key;
+ key_len = op->ukey->key_len;
}
- ovs_mutex_unlock(&op->ukey->mutex);
- if (odp_flow_key_to_flow(op->op.u.flow_del.key,
- op->op.u.flow_del.key_len, &flow)
+ if (odp_flow_key_to_flow(key, key_len, &flow)
== ODP_FIT_ERROR) {
continue;
}
- error = xlate_lookup(udpif->backer, &flow, &ofproto,
- NULL, NULL, &netflow, &ofp_in_port);
+ error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL,
+ &netflow, &ofp_in_port);
if (!error) {
struct xlate_in xin;
}
static void
-push_dump_ops(struct revalidator *revalidator,
- struct dump_op *ops, size_t n_ops)
+push_ukey_ops(struct udpif *udpif, struct umap *umap,
+ struct ukey_op *ops, size_t n_ops)
{
int i;
- push_dump_ops__(revalidator->udpif, ops, n_ops);
+ push_ukey_ops__(udpif, ops, n_ops);
+ ovs_mutex_lock(&umap->mutex);
for (i = 0; i < n_ops; i++) {
- ukey_delete(revalidator, ops[i].ukey);
+ ukey_delete(umap, ops[i].ukey);
}
+ ovs_mutex_unlock(&umap->mutex);
+}
+
+static void
+log_unexpected_flow(const struct dpif_flow *flow, int error)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
+ struct ds ds = DS_EMPTY_INITIALIZER;
+
+ ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
+ "unexpected flow (%s): ", ovs_strerror(error));
+ odp_format_ufid(&flow->ufid, &ds);
+ VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
}
static void
{
struct udpif *udpif = revalidator->udpif;
struct dpif_flow_dump_thread *dump_thread;
- uint64_t dump_seq;
+ uint64_t dump_seq, reval_seq;
unsigned int flow_limit;
dump_seq = seq_read(udpif->dump_seq);
+ reval_seq = seq_read(udpif->reval_seq);
atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
dump_thread = dpif_flow_dump_thread_create(udpif->dump);
for (;;) {
- struct dump_op ops[REVALIDATE_MAX_BATCH];
+ struct ukey_op ops[REVALIDATE_MAX_BATCH];
int n_ops = 0;
struct dpif_flow flows[REVALIDATE_MAX_BATCH];
long long int used = f->stats.used;
struct udpif_key *ukey;
bool already_dumped, keep;
+ int error;
- if (!ukey_acquire(udpif, f->key, f->key_len, used, &ukey)) {
- /* We couldn't acquire the ukey. This means that
- * another revalidator is processing this flow
- * concurrently, so don't bother processing it. */
- COVERAGE_INC(dumped_duplicate_flow);
+ if (ukey_acquire(udpif, f, &ukey, &error)) {
+ if (error == EBUSY) {
+ /* Another thread is processing this flow, so don't bother
+                 * processing it. */
+ COVERAGE_INC(upcall_ukey_contention);
+ } else {
+ log_unexpected_flow(f, error);
+ if (error != ENOENT) {
+ delete_op_init__(udpif, &ops[n_ops++], f);
+ }
+ }
continue;
}
if (kill_them_all || (used && used < now - max_idle)) {
keep = false;
} else {
- keep = revalidate_ukey(udpif, ukey, f);
+ keep = revalidate_ukey(udpif, ukey, &f->stats, reval_seq);
}
ukey->dump_seq = dump_seq;
ukey->flow_exists = keep;
if (!keep) {
- dump_op_init(&ops[n_ops++], f->key, f->key_len, ukey);
+ delete_op_init(udpif, &ops[n_ops++], ukey);
}
ovs_mutex_unlock(&ukey->mutex);
}
if (n_ops) {
- push_dump_ops__(udpif, ops, n_ops);
+ push_ukey_ops__(udpif, ops, n_ops);
}
+ ovsrcu_quiesce();
}
dpif_flow_dump_thread_destroy(dump_thread);
}
-/* Called with exclusive access to 'revalidator' and 'ukey'. */
static bool
-handle_missed_revalidation(struct revalidator *revalidator,
+handle_missed_revalidation(struct udpif *udpif, uint64_t reval_seq,
struct udpif_key *ukey)
- OVS_NO_THREAD_SAFETY_ANALYSIS
{
- struct udpif *udpif = revalidator->udpif;
- struct dpif_flow flow;
- struct ofpbuf buf;
- uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
- bool keep = false;
+ struct dpif_flow_stats stats;
+ bool keep;
COVERAGE_INC(revalidate_missed_dp_flow);
- ofpbuf_use_stub(&buf, &stub, sizeof stub);
- if (!dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &buf, &flow)) {
- keep = revalidate_ukey(udpif, ukey, &flow);
- }
- ofpbuf_uninit(&buf);
+ memset(&stats, 0, sizeof stats);
+ ovs_mutex_lock(&ukey->mutex);
+ keep = revalidate_ukey(udpif, ukey, &stats, reval_seq);
+ ovs_mutex_unlock(&ukey->mutex);
return keep;
}
static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
- OVS_NO_THREAD_SAFETY_ANALYSIS
{
- struct dump_op ops[REVALIDATE_MAX_BATCH];
- struct udpif_key *ukey, *next;
- size_t n_ops;
- uint64_t dump_seq;
+ struct udpif *udpif;
+ uint64_t dump_seq, reval_seq;
+ int slice;
- n_ops = 0;
- dump_seq = seq_read(revalidator->udpif->dump_seq);
-
- /* During garbage collection, this revalidator completely owns its ukeys
- * map, and therefore doesn't need to do any locking. */
- HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, revalidator->ukeys) {
- if (ukey->flow_exists
- && (purge
- || (ukey->dump_seq != dump_seq
- && revalidator->udpif->need_revalidate
- && !handle_missed_revalidation(revalidator, ukey)))) {
- struct dump_op *op = &ops[n_ops++];
-
- dump_op_init(op, ukey->key, ukey->key_len, ukey);
- if (n_ops == REVALIDATE_MAX_BATCH) {
- push_dump_ops(revalidator, ops, n_ops);
- n_ops = 0;
+ udpif = revalidator->udpif;
+ dump_seq = seq_read(udpif->dump_seq);
+ reval_seq = seq_read(udpif->reval_seq);
+ slice = revalidator - udpif->revalidators;
+ ovs_assert(slice < udpif->n_revalidators);
+
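+    /* Each revalidator sweeps every n_revalidators-th umap starting at its
+     * own slice, so no two revalidators iterate over the same umap. */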
+ for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
+ struct ukey_op ops[REVALIDATE_MAX_BATCH];
+ struct udpif_key *ukey;
+ struct umap *umap = &udpif->ukeys[i];
+ size_t n_ops = 0;
+
+ CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
+ bool flow_exists, seq_mismatch;
+
+            /* Handler threads could be holding a ukey lock while they install
+             * a new flow, so don't hang around waiting for access to it. */
+ if (ovs_mutex_trylock(&ukey->mutex)) {
+ continue;
+ }
+ flow_exists = ukey->flow_exists;
+ seq_mismatch = (ukey->dump_seq != dump_seq
+ && ukey->reval_seq != reval_seq);
+ ovs_mutex_unlock(&ukey->mutex);
+
+ if (flow_exists
+ && (purge
+ || (seq_mismatch
+ && !handle_missed_revalidation(udpif, reval_seq,
+ ukey)))) {
+ struct ukey_op *op = &ops[n_ops++];
+
+ delete_op_init(udpif, op, ukey);
+ if (n_ops == REVALIDATE_MAX_BATCH) {
+ push_ukey_ops(udpif, umap, ops, n_ops);
+ n_ops = 0;
+ }
+ } else if (!flow_exists) {
+ ovs_mutex_lock(&umap->mutex);
+ ukey_delete(umap, ukey);
+ ovs_mutex_unlock(&umap->mutex);
}
- } else if (!ukey->flow_exists) {
- ukey_delete(revalidator, ukey);
}
- }
- if (n_ops) {
- push_dump_ops(revalidator, ops, n_ops);
+ if (n_ops) {
+ push_ukey_ops(udpif, umap, ops, n_ops);
+ }
+ ovsrcu_quiesce();
}
}
LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
unsigned int flow_limit;
+ bool ufid_enabled;
size_t i;
atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
+ ufid_enabled = udpif_use_ufid(udpif);
ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
ds_put_format(&ds, "\tflows : (current %lu)"
" (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
-
+        ds_put_format(&ds, "\tufid enabled : %s\n",
+                      ufid_enabled ? "true" : "false");
ds_put_char(&ds, '\n');
+
for (i = 0; i < n_revalidators; i++) {
struct revalidator *revalidator = &udpif->revalidators[i];
+ int j, elements = 0;
- ovs_mutex_lock(&udpif->ukeys[i].mutex);
- ds_put_format(&ds, "\t%u: (keys %"PRIuSIZE")\n",
- revalidator->id, hmap_count(&udpif->ukeys[i].hmap));
- ovs_mutex_unlock(&udpif->ukeys[i].mutex);
+ for (j = i; j < N_UMAPS; j += n_revalidators) {
+ elements += cmap_count(&udpif->ukeys[j].cmap);
+ }
+ ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
}
}
unixctl_command_reply(conn, "megaflows enabled");
}
+/* Disable skipping flow attributes during flow dump.
+ *
+ * This command is only needed for advanced debugging, so it's not
+ * documented in the man page. */
+static void
+upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
+{
+ atomic_store_relaxed(&enable_ufid, false);
+ unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
+}
+
+/* Re-enable skipping flow attributes during flow dump.
+ *
+ * This command is only needed for advanced debugging, so it's not documented
+ * in the man page. */
+static void
+upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
+{
+ atomic_store_relaxed(&enable_ufid, true);
+ unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
+ "for supported datapaths");
+}
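+
+/* Both commands are reachable through the standard unixctl front end, e.g.:
+ *
+ *     ovs-appctl upcall/disable-ufid
+ *     ovs-appctl upcall/enable-ufid
+ */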
+
/* Set the flow limit.
*
* This command is only needed for advanced debugging, so it's not
unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
}
}
+
+static void
+upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
+{
+ struct udpif *udpif;
+
+ LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
+ int n;
+
+ for (n = 0; n < udpif->n_revalidators; n++) {
+ revalidator_purge(&udpif->revalidators[n]);
+ }
+ }
+ unixctl_command_reply(conn, "");
+}