* overhead. (A facet always has at least one subfacet and in the common
* case has exactly one subfacet.) */
struct subfacet one_subfacet;
+
+ long long int learn_rl; /* Rate limiter for facet_learn(). */
};
static struct facet *facet_create(struct rule_dpif *,
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
+static void dpif_stats_update_hit_count(struct ofproto_dpif *ofproto,
+ uint64_t delta);
+
struct ofproto_dpif {
struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
struct ofproto up;
struct hmap facets;
struct hmap subfacets;
struct governor *governor;
+ long long int consistency_rl; /* Time (msec) after which the next random
+ * facet consistency check may run; see the
+ * rate limiter in run(). */
/* Revalidation. */
struct table_dpif tables[N_TABLES];
struct sset ghost_ports; /* Ports with no datapath port. */
struct sset port_poll_set; /* Queued names for port_poll() reply. */
int port_poll_errno; /* Last errno for port_poll() reply. */
+
+ /* Per-ofproto dpif stats, reported by "ovs-appctl dpif/show". */
+ uint64_t n_hit; /* Datapath flow hits attributed to this bridge
+ * (bumped by dpif_stats_update_hit_count()). */
+ uint64_t n_missed; /* Flow-miss upcalls handled for this bridge. */
};
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
hmap_init(&ofproto->facets);
hmap_init(&ofproto->subfacets);
ofproto->governor = NULL;
+ ofproto->consistency_rl = LLONG_MIN;
for (i = 0; i < N_TABLES; i++) {
struct table_dpif *table = &ofproto->tables[i];
error = add_internal_flows(ofproto);
ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
+ ofproto->n_hit = 0;
+ ofproto->n_missed = 0;
+
return error;
}
mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
/* Check the consistency of a random facet, to aid debugging. */
- if (!hmap_is_empty(&ofproto->facets)
+ if (time_msec() >= ofproto->consistency_rl
+ && !hmap_is_empty(&ofproto->facets)
&& !ofproto->backer->need_revalidate) {
struct facet *facet;
+ ofproto->consistency_rl = time_msec() + 250;
+
facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
struct facet, hmap_node);
if (!tag_set_intersects(&ofproto->backer->revalidate_set,
if (error) {
continue;
}
+
+ ofproto->n_missed++;
flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
&flow.tunnel, flow.in_port, &miss->flow);
* avoided by calling update_stats() whenever rules are created or
* deleted. However, the performance impact of making so many calls to the
* datapath do not justify the benefit of having perfectly accurate statistics.
+ *
+ * In addition, this function maintains per-ofproto flow hit counts.  Patch
+ * ports are not treated specially; for example, a packet that ingresses
+ * on br0 and is patched into br1 increases the hit count of br0 by 1,
+ * but does not affect the hit or miss counts of br1.
*/
static void
update_stats(struct dpif_backer *backer)
subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
+ /* Update ofproto_dpif's hit count. */
+ if (stats->n_packets > subfacet->dp_packet_count) {
+ uint64_t delta = stats->n_packets - subfacet->dp_packet_count;
+ dpif_stats_update_hit_count(ofproto, delta);
+ }
+
update_subfacet_stats(subfacet, stats);
break;
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
+ facet->learn_rl = time_msec() + 500;
+
return facet;
}
struct subfacet, list_node);
struct action_xlate_ctx ctx;
+ if (time_msec() < facet->learn_rl) {
+ return;
+ }
+
+ facet->learn_rl = time_msec() + 500;
+
if (!facet->has_learn
&& !facet->has_normal
&& (!facet->has_fin_timeout
facet->packet_count += stats->n_packets;
facet->byte_count += stats->n_bytes;
facet->tcp_flags |= stats->tcp_flags;
- facet_push_stats(facet);
netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
}
}
static void
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule_->ofproto);
struct rule_dpif *rule = rule_dpif_cast(rule_);
struct facet *facet;
+ HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ facet_push_stats(facet);
+ }
+
/* Start from historical data for 'rule' itself that are no longer tracked
* in facets. This counts, for example, facets that have expired. */
*packets = rule->packet_count;
if (out_port != odp_port) {
ctx->flow.vlan_tci = htons(0);
}
+ ctx->flow.skb_mark &= ~IPSEC_MARK;
}
commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
return true;
}
+/* Checks the outer (tunnel) header's ECN mark against the encapsulated
+ * packet's ECN capability, following the decapsulation behavior described
+ * in RFC 6040 (NOTE(review): presumably — confirm against the spec).
+ *
+ * Returns false if the packet must be dropped: the tunnel header is marked
+ * ECN CE but the inner packet is not ECN capable.  Otherwise returns true,
+ * after propagating the CE mark into the inner packet's ToS byte when the
+ * tunnel header carries CE. */
+static bool
+tunnel_ecn_ok(struct action_xlate_ctx *ctx)
+{
+ if (is_ip_any(&ctx->base_flow)
+ && (ctx->base_flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
+ VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
+ " but is not ECN capable");
+ return false;
+ } else {
+ /* Set the ECN CE value in the tunneled packet. */
+ ctx->flow.nw_tos |= IP_ECN_CE;
+ }
+ }
+
+ return true;
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
add_sflow_action(ctx);
- if (!in_port || may_receive(in_port, ctx)) {
+ if (tunnel_ecn_ok(ctx) && (!in_port || may_receive(in_port, ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, ctx);
/* We've let OFPP_NORMAL and the learning action look at the
goto exit;
}
- /* XXX: Since we allow the user to specify an ofproto, it's
- * possible they will specify a different ofproto than the one the
- * port actually belongs too. Ideally we should simply remove the
- * ability to specify the ofproto. */
+ /* The user might have specified the wrong ofproto but within the
+ * same backer. That's OK, ofproto_receive() can find the right
+ * one for us. */
if (ofproto_receive(ofproto->backer, NULL, odp_key.data,
- odp_key.size, &flow, NULL, NULL, NULL,
+ odp_key.size, &flow, NULL, &ofproto, NULL,
&initial_vals)) {
unixctl_command_reply_error(conn, "Invalid flow");
goto exit;
}
+ ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
} else {
char *error_s;
static void
show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
{
- struct dpif_dp_stats s;
const struct shash_node **ports;
int i;
- dpif_get_dp_stats(ofproto->backer->dpif, &s);
-
ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
dpif_name(ofproto->backer->dpif));
- /* xxx It would be better to show bridge-specific stats instead
- * xxx of dp ones. */
ds_put_format(ds,
- "\tlookups: hit:%"PRIu64" missed:%"PRIu64" lost:%"PRIu64"\n",
- s.n_hit, s.n_missed, s.n_lost);
+ "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
+ ofproto->n_hit, ofproto->n_missed);
ds_put_format(ds, "\tflows: %zu\n",
hmap_count(&ofproto->subfacets));
}
ds_put_cstr(&ds, ", actions:");
- format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
+ if (subfacet->slow) {
+ uint64_t slow_path_stub[128 / 8];
+ const struct nlattr *actions;
+ size_t actions_len;
+
+ compose_slow_path(ofproto, &subfacet->facet->flow, subfacet->slow,
+ slow_path_stub, sizeof slow_path_stub,
+ &actions, &actions_len);
+ format_odp_actions(&ds, actions, actions_len);
+ } else {
+ format_odp_actions(&ds, subfacet->actions, subfacet->actions_len);
+ }
ds_put_char(&ds, '\n');
}
}
}
+/* Adds 'delta' to 'ofproto''s per-bridge datapath flow hit counter, which
+ * is reported by show_dp_format(). */
+static void
+dpif_stats_update_hit_count(struct ofproto_dpif *ofproto, uint64_t delta)
+{
+ ofproto->n_hit += delta;
+}
+
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,