VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
COVERAGE_DEFINE(upcall_duplicate_flow);
+COVERAGE_DEFINE(revalidate_missed_dp_flow);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
* and possibly sets up a kernel flow as a cache. */
struct seq *reval_seq; /* Incremented to force revalidation. */
bool need_revalidate; /* As indicated by 'reval_seq'. */
bool reval_exit; /* Set by leader on 'exit_latch'. */
- pthread_barrier_t reval_barrier; /* Barrier used by revalidators. */
+ struct ovs_barrier reval_barrier; /* Barrier used by revalidators. */
struct dpif_flow_dump dump; /* DPIF flow dump state. */
long long int dump_duration; /* Duration of the last flow dump. */
struct seq *dump_seq; /* Increments each dump iteration. */
atomic_uint flow_limit; /* Datapath flow hard limit. */
/* n_flows_mutex prevents multiple threads updating these concurrently. */
- atomic_uint64_t n_flows; /* Number of flows in the datapath. */
+ atomic_ulong n_flows; /* Number of flows in the datapath. */
atomic_llong n_flows_timestamp; /* Last time n_flows was updated. */
struct ovs_mutex n_flows_mutex;
};
size_t n_revalidators);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
-static uint64_t udpif_get_n_flows(struct udpif *);
+static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
latch_poll(&udpif->exit_latch);
- xpthread_barrier_destroy(&udpif->reval_barrier);
+ ovs_barrier_destroy(&udpif->reval_barrier);
free(udpif->revalidators);
udpif->revalidators = NULL;
"handler", udpif_upcall_handler, handler);
}
- xpthread_barrier_init(&udpif->reval_barrier, NULL,
- udpif->n_revalidators);
+ ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
udpif->reval_exit = false;
udpif->revalidators = xzalloc(udpif->n_revalidators
* sizeof *udpif->revalidators);
}
\f
-static uint64_t
+static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
long long int time, now;
- uint64_t flow_count;
+ unsigned long flow_count;
now = time_msec();
atomic_read(&udpif->n_flows_timestamp, &time);
}
/* Wait for the leader to start the flow dump. */
- xpthread_barrier_wait(&udpif->reval_barrier);
+ ovs_barrier_block(&udpif->reval_barrier);
if (udpif->reval_exit) {
break;
}
revalidate(revalidator);
/* Wait for all flows to have been dumped before we garbage collect. */
- xpthread_barrier_wait(&udpif->reval_barrier);
+ ovs_barrier_block(&udpif->reval_barrier);
revalidator_sweep(revalidator);
/* Wait for all revalidators to finish garbage collection. */
- xpthread_barrier_wait(&udpif->reval_barrier);
+ ovs_barrier_block(&udpif->reval_barrier);
if (leader) {
long long int duration;
* in the kernel. */
VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
"port %"PRIu32, odp_in_port);
- dpif_flow_put(udpif->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ dpif_flow_put(udpif->dpif, DPIF_FP_CREATE,
dupcall->key, dupcall->key_len, NULL, 0, NULL, 0,
NULL);
}
op = &ops[n_ops++];
op->type = DPIF_OP_FLOW_PUT;
- op->u.flow_put.flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
+ op->u.flow_put.flags = DPIF_FP_CREATE;
op->u.flow_put.key = miss->key;
op->u.flow_put.key_len = miss->key_len;
op->u.flow_put.mask = ofpbuf_data(&mask);
}
static bool
-should_revalidate(uint64_t packets, long long int used)
+should_revalidate(const struct udpif *udpif, uint64_t packets,
+ long long int used)
{
long long int metric, now, duration;
+ if (udpif->dump_duration < 200) {
+ /* We are likely to handle full revalidation for the flows. */
+ return true;
+ }
+
/* Calculate the mean time between seeing these packets. If this
* exceeds the threshold, then delete the flow rather than performing
* costly revalidation for flows that aren't being hit frequently.
duration = now - used;
metric = duration / packets;
- if (metric > 200) {
- return false;
+ if (metric < 200) {
+ /* The flow is receiving more than ~5pps, so keep it. */
+ return true;
}
- return true;
+ return false;
}
static bool
const struct nlattr *mask, size_t mask_len,
const struct nlattr *actions, size_t actions_len,
const struct dpif_flow_stats *stats)
+ OVS_REQUIRES(ukey->mutex)
{
uint64_t slow_path_buf[128 / 8];
struct xlate_out xout, *xoutp;
xoutp = NULL;
netflow = NULL;
- ovs_mutex_lock(&ukey->mutex);
last_used = ukey->stats.used;
push.used = stats->used;
push.tcp_flags = stats->tcp_flags;
? stats->n_bytes - ukey->stats.n_bytes
: 0;
- if (!ukey->flow_exists) {
- /* Don't bother revalidating if the flow was already deleted. */
- goto exit;
- }
-
if (udpif->need_revalidate && last_used
- && !should_revalidate(push.n_packets, last_used)) {
+ && !should_revalidate(udpif, push.n_packets, last_used)) {
ok = false;
goto exit;
}
ok = true;
exit:
- ovs_mutex_unlock(&ukey->mutex);
if (netflow) {
if (!ok) {
- netflow_expire(netflow, &flow);
netflow_flow_clear(netflow, &flow);
}
netflow_unref(netflow);
xlate_actions_for_side_effects(&xin);
if (netflow) {
- netflow_expire(netflow, &flow);
netflow_flow_clear(netflow, &flow);
netflow_unref(netflow);
}
ukey = ukey_lookup(udpif, key, key_len, hash);
used = stats->used;
- if (!used && ukey) {
- ovs_mutex_lock(&ukey->mutex);
-
- if (ukey->mark || !ukey->flow_exists) {
- /* The flow has already been dumped. This can occasionally
- * occur if the datapath is changed in the middle of a flow
- * dump. Rather than perform the same work twice, skip the
- * flow this time. */
- ovs_mutex_unlock(&ukey->mutex);
+ if (!ukey) {
+ ukey = ukey_create(key, key_len, used);
+ if (!udpif_insert_ukey(udpif, ukey, hash)) {
+ /* The same ukey has already been created. This means that
+ * another revalidator is processing this flow
+ * concurrently, so don't bother processing it. */
COVERAGE_INC(upcall_duplicate_flow);
- continue;
+ ukey_delete(NULL, ukey);
+ goto next;
}
+ }
- used = ukey->created;
+ if (ovs_mutex_trylock(&ukey->mutex)) {
+ /* The flow has been dumped, and is being handled by another
+ * revalidator concurrently. This can occasionally occur if the
+ * datapath is changed in the middle of a flow dump. Rather than
+ * perform the same work twice, skip the flow this time. */
+ COVERAGE_INC(upcall_duplicate_flow);
+ goto next;
+ }
+
+ if (ukey->mark || !ukey->flow_exists) {
+ /* The flow has already been dumped and handled by another
+ * revalidator during this flow dump operation. Skip it. */
+ COVERAGE_INC(upcall_duplicate_flow);
ovs_mutex_unlock(&ukey->mutex);
+ goto next;
}
+ if (!used) {
+ used = ukey->created;
+ }
n_flows = udpif_get_n_flows(udpif);
max_idle = ofproto_max_idle;
if (n_flows > flow_limit) {
if ((used && used < now - max_idle) || n_flows > flow_limit * 2) {
mark = false;
} else {
- if (!ukey) {
- ukey = ukey_create(key, key_len, used);
- if (!udpif_insert_ukey(udpif, ukey, hash)) {
- /* The same ukey has already been created. This means that
- * another revalidator is processing this flow
- * concurrently, so don't bother processing it. */
- ukey_delete(NULL, ukey);
- continue;
- }
- }
-
mark = revalidate_ukey(udpif, ukey, mask, mask_len, actions,
actions_len, stats);
}
-
- if (ukey) {
- ovs_mutex_lock(&ukey->mutex);
- ukey->mark = ukey->flow_exists = mark;
- ovs_mutex_unlock(&ukey->mutex);
- }
+ ukey->mark = ukey->flow_exists = mark;
if (!mark) {
dump_op_init(&ops[n_ops++], key, key_len, ukey);
}
+ ovs_mutex_unlock(&ukey->mutex);
+ next:
may_destroy = dpif_flow_dump_next_may_destroy_keys(&udpif->dump,
state);
dpif_flow_dump_state_uninit(udpif->dpif, state);
}
+/* Called with exclusive access to 'revalidator' and 'ukey'. */
+static bool
+handle_missed_revalidation(struct revalidator *revalidator,
+ struct udpif_key *ukey)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ struct udpif *udpif = revalidator->udpif;
+ struct nlattr *mask, *actions;
+ size_t mask_len, actions_len;
+ struct dpif_flow_stats stats;
+ struct ofpbuf *buf;
+ bool keep = false;
+
+ COVERAGE_INC(revalidate_missed_dp_flow);
+
+ if (!dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &buf,
+ &mask, &mask_len, &actions, &actions_len, &stats)) {
+ keep = revalidate_ukey(udpif, ukey, mask, mask_len, actions,
+ actions_len, &stats);
+ ofpbuf_delete(buf);
+ }
+
+ return keep;
+}
+
static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
OVS_NO_THREAD_SAFETY_ANALYSIS
/* During garbage collection, this revalidator completely owns its ukeys
* map, and therefore doesn't need to do any locking. */
HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, revalidator->ukeys) {
- if (!purge && ukey->mark) {
+ if (ukey->flow_exists) {
+ bool missed_flow = !ukey->mark;
+
ukey->mark = false;
- } else if (!ukey->flow_exists) {
- ukey_delete(revalidator, ukey);
- } else {
- struct dump_op *op = &ops[n_ops++];
-
- /* If we have previously seen a flow in the datapath, but didn't
- * see it during the most recent dump, delete it. This allows us
- * to clean up the ukey and keep the statistics consistent. */
- dump_op_init(op, ukey->key, ukey->key_len, ukey);
- if (n_ops == REVALIDATE_MAX_BATCH) {
- push_dump_ops(revalidator, ops, n_ops);
- n_ops = 0;
+ if (purge
+ || (missed_flow
+ && revalidator->udpif->need_revalidate
+ && !handle_missed_revalidation(revalidator, ukey))) {
+ struct dump_op *op = &ops[n_ops++];
+
+ dump_op_init(op, ukey->key, ukey->key_len, ukey);
+ if (n_ops == REVALIDATE_MAX_BATCH) {
+ push_dump_ops(revalidator, ops, n_ops);
+ n_ops = 0;
+ }
}
+ } else {
+ ukey_delete(revalidator, ukey);
}
}
atomic_read(&udpif->flow_limit, &flow_limit);
ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
- ds_put_format(&ds, "\tflows : (current %"PRIu64")"
+ ds_put_format(&ds, "\tflows : (current %lu)"
" (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);