}
static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
- const struct flow *);
+ const struct flow *,
+ struct flow_wildcards *wc);
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
const struct flow *,
+ struct flow_wildcards *wc,
uint8_t table);
static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
const struct flow *flow);
struct xlate_ctx;
-/* Initial values of fields of the packet that may be changed during
- * flow processing and needed later. */
-struct initial_vals {
- /* This is the value of vlan_tci in the packet as actually received from
- * dpif. This is the same as the facet's flow.vlan_tci unless the packet
- * was received via a VLAN splinter. In that case, this value is 0
- * (because the packet as actually received from the dpif had no 802.1Q
- * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
- * represents.
- *
- * This member should be removed when the VLAN splinters feature is no
- * longer needed. */
- ovs_be16 vlan_tci;
-};
-
struct xlate_out {
+ /* Wildcards relevant in translation. Any fields that were used to
+ * calculate the action must be set for caching and kernel
+ * wildcarding to work. For example, if the flow lookup involved
+ * performing the "normal" action on IPv4 and ARP packets, 'wc'
+ * would have the 'in_port' (always set), 'dl_type' (flow match),
+ * 'vlan_tci' (normal action), and 'dl_dst' (normal action) fields
+ * set. */
+ struct flow_wildcards wc;
+
tag_type tags; /* Tags associated with actions. */
enum slow_path_reason slow; /* 0 if fast path may be used. */
bool has_learn; /* Actions include NXAST_LEARN? */
* this flow when actions change header fields. */
struct flow flow;
- struct initial_vals initial_vals;
-
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
};
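/* Sketch (editorial, not part of this patch) of the pattern the 'wc' comment
 * above requires: before a translation step branches on a field, it
 * exact-matches that field in 'wc' so the installed megaflow cannot cover
 * packets the decision never examined.  'example_handle_ip' is a
 * hypothetical handler; the ctx->xin/ctx->xout usage mirrors the patch's own
 * action handlers (see OFPACT_SET_IPV4_SRC below). */
static void
example_handle_ip(struct xlate_ctx *ctx)
{
    /* About to branch on dl_type, so un-wildcard it first. */
    memset(&ctx->xout->wc.masks.dl_type, 0xff,
           sizeof ctx->xout->wc.masks.dl_type);
    if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
        /* ... IPv4-specific translation ... */
    }
}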
static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
- const struct flow *, const struct initial_vals *,
- struct rule_dpif *, uint8_t tcp_flags,
- const struct ofpbuf *);
+ const struct flow *, struct rule_dpif *,
+ uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_out_uninit(struct xlate_out *);
static void xlate_report(struct xlate_ctx *ctx, const char *s);
+static void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);
+
/* A subfacet (see "struct subfacet" below) has three possible installation
* states:
*
struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' list. */
struct list list_node; /* In struct facet's 'facets' list. */
struct facet *facet; /* Owning facet. */
+ struct dpif_backer *backer; /* Owning backer. */
enum odp_key_fitness key_fitness;
struct nlattr *key;
static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
long long int now);
-static struct subfacet *subfacet_find(struct ofproto_dpif *,
+static struct subfacet *subfacet_find(struct dpif_backer *,
const struct nlattr *key, size_t key_len,
uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
-static void subfacet_destroy_batch(struct ofproto_dpif *,
+static void subfacet_destroy_batch(struct dpif_backer *,
struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);
-/* An exact-match instantiation of an OpenFlow flow.
+/* A unique, non-overlapping instantiation of an OpenFlow flow.
*
* A facet associates a "struct flow", which represents the Open vSwitch
- * userspace idea of an exact-match flow, with one or more subfacets. Each
- * subfacet tracks the datapath's idea of the exact-match flow equivalent to
- * the facet. When the kernel module (or other dpif implementation) and Open
- * vSwitch userspace agree on the definition of a flow key, there is exactly
- * one subfacet per facet. If the dpif implementation supports more-specific
- * flow matching than userspace, however, a facet can have more than one
- * subfacet, each of which corresponds to some distinction in flow that
- * userspace simply doesn't understand.
+ * userspace idea of an exact-match flow, with one or more subfacets.
+ * While the facet is created from an exact-match flow, it is stored
+ * within the ofproto under the wildcards that the flow table and
+ * other configuration allow it to express. (See the 'wc'
+ * description in "struct xlate_out" for more details.)
+ *
+ * Each subfacet tracks the datapath's idea of the flow equivalent to
+ * the facet. When the kernel module (or other dpif implementation) and
+ * Open vSwitch userspace agree on the definition of a flow key, there
+ * is exactly one subfacet per facet. If the dpif implementation
+ * supports more-specific flow matching than userspace, however, a facet
+ * can have more than one subfacet. Examples include the dpif
+ * implementation not supporting the same wildcards as userspace or some
+ * distinction in flow that userspace simply doesn't understand.
*
- * Flow expiration works in terms of subfacets, so a facet must have at least
- * one subfacet or it will never expire, leaking memory. */
+ * Flow expiration works in terms of subfacets, so a facet must have at
+ * least one subfacet or it will never expire, leaking memory. */
struct facet {
/* Owners. */
struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
long long int used; /* Time last used; time created if not used. */
/* Key. */
- struct flow flow;
+ struct flow flow; /* Flow of the creating subfacet. */
+ struct cls_rule cr; /* In owning ofproto_dpif's 'facets' classifier. */
/* These statistics:
*
struct xlate_out xout;
- /* Initial values of the packet that may be needed later. */
- struct initial_vals initial_vals;
-
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
* case has exactly one subfacet. However, 'one_subfacet' may not
long long int learn_rl; /* Rate limiter for facet_learn(). */
};
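/* Example (editorial): if translation only consulted in_port and dl_type,
 * the facet's 'cr' might match "in_port=1,dl_type=0x0800" while 'flow'
 * keeps the exact headers of the creating packet.  A datapath that cannot
 * wildcard then installs one exact-match subfacet per distinct 5-tuple,
 * all owned by that single facet. */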
-static struct facet *facet_create(const struct flow_miss *, uint32_t hash);
+static struct facet *facet_create(const struct flow_miss *, struct rule_dpif *,
+ struct xlate_out *,
+ struct dpif_flow_stats *);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);
-static struct facet *facet_find(struct ofproto_dpif *,
- const struct flow *, uint32_t hash);
+static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
- const struct flow *, uint32_t hash);
+ const struct flow *);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);
size_t key_len;
};
+struct avg_subfacet_rates {
+ double add_rate; /* Moving average of new flows created per minute. */
+ double del_rate; /* Moving average of flows deleted per minute. */
+};
+
/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
char *type;
struct hmap drop_keys; /* Set of dropped odp keys. */
bool recv_set_enable; /* Enables or disables receiving packets. */
+
+ struct hmap subfacets;
+ struct governor *governor;
+
+ /* Subfacet statistics.
+ *
+ * These keep track of the total number of subfacets added and deleted and
+ * flow life span. They are useful for computing the flow rates stats
+ * exposed via "ovs-appctl dpif/show". The goal is to learn about
+ * traffic patterns in ways that we can use later to improve Open vSwitch
+ * performance in new situations. */
+ long long int created; /* Time when the backer was created. */
+ unsigned max_n_subfacet; /* Maximum number of subfacets. */
+ unsigned avg_n_subfacet; /* Average number of subfacets. */
+ long long int avg_subfacet_life; /* Average life span of subfacets. */
+
+ /* The average number of subfacets... */
+ struct avg_subfacet_rates hourly; /* ...over the last hour. */
+ struct avg_subfacet_rates daily; /* ...over the last day. */
+ struct avg_subfacet_rates lifetime; /* ...over the switch lifetime. */
+ long long int last_minute; /* Last time 'hourly' was updated. */
+
+ /* Number of subfacets added or deleted since 'last_minute'. */
+ unsigned subfacet_add_count;
+ unsigned subfacet_del_count;
+
+ /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
+ unsigned long long int total_subfacet_add_count;
+ unsigned long long int total_subfacet_del_count;
};
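/* Sketch (editorial) of roughly how update_moving_averages() maintains the
 * rates above, modeled on the pre-existing exp_mavg() helper whose forward
 * declaration this patch removes; names prefixed "example_" are
 * illustrative. */
static void
example_exp_mavg(double *avg, int base, double new)
{
    /* Exponentially weighted moving average over a 'base'-sample window. */
    *avg = (*avg * (base - 1) + new) / base;
}

static void
example_roll_minute(struct dpif_backer *backer)
{
    /* Fold this minute's add/del counts into the hourly averages
     * (60-minute window), then reset the per-minute counters. */
    example_exp_mavg(&backer->hourly.add_rate, 60, backer->subfacet_add_count);
    example_exp_mavg(&backer->hourly.del_rate, 60, backer->subfacet_del_count);
    backer->total_subfacet_add_count += backer->subfacet_add_count;
    backer->total_subfacet_del_count += backer->subfacet_del_count;
    backer->subfacet_add_count = 0;
    backer->subfacet_del_count = 0;
    backer->last_minute += 60 * 1000;
}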
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
-
-struct avg_subfacet_rates {
- double add_rate; /* Moving average of new flows created per minute. */
- double del_rate; /* Moving average of flows deleted per minute. */
-};
-static void show_dp_rates(struct ds *ds, const char *heading,
- const struct avg_subfacet_rates *rates);
-static void exp_mavg(double *avg, int base, double new);
+static void update_moving_averages(struct dpif_backer *backer);
struct ofproto_dpif {
struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
bool has_bonded_bundles;
/* Facets. */
- struct hmap facets;
- struct hmap subfacets;
- struct governor *governor;
+ struct classifier facets; /* Contains 'struct facet's. */
long long int consistency_rl;
/* Revalidation. */
/* Per ofproto's dpif stats. */
uint64_t n_hit;
uint64_t n_missed;
-
- /* Subfacet statistics.
- *
- * These keep track of the total number of subfacets added and deleted and
- * flow life span. They are useful for computing the flow rates stats
- * exposed via "ovs-appctl dpif/show". The goal is to learn about
- * traffic patterns in ways that we can use later to improve Open vSwitch
- * performance in new situations. */
- long long int created; /* Time when it is created. */
- unsigned int max_n_subfacet; /* Maximum number of flows */
-
- /* The average number of subfacets... */
- struct avg_subfacet_rates hourly; /* ...over the last hour. */
- struct avg_subfacet_rates daily; /* ...over the last day. */
- long long int last_minute; /* Last time 'hourly' was updated. */
-
- /* Number of subfacets added or deleted since 'last_minute'. */
- unsigned int subfacet_add_count;
- unsigned int subfacet_del_count;
-
- /* Number of subfacets added or deleted from 'created' to 'last_minute.' */
- unsigned long long int total_subfacet_add_count;
- unsigned long long int total_subfacet_del_count;
-
- /* Sum of the number of milliseconds that each subfacet existed,
- * over the subfacets that have been added and then later deleted. */
- unsigned long long int total_subfacet_life_span;
-
- /* Incremented by the number of currently existing subfacets, each
- * time we pull statistics from the kernel. */
- unsigned long long int total_subfacet_count;
-
- /* Number of times we pull statistics from the kernel. */
- unsigned long long int n_update_stats;
};
-static unsigned long long int avg_subfacet_life_span(
- const struct ofproto_dpif *);
-static double avg_subfacet_count(const struct ofproto_dpif *ofproto);
-static void update_moving_averages(struct ofproto_dpif *ofproto);
-static void update_max_subfacet_count(struct ofproto_dpif *ofproto);
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
* for debugging the asynchronous flow_mod implementation.) */
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
- const struct ofpbuf *,
- const struct initial_vals *, struct ds *);
+ const struct ofpbuf *, struct ds *);
/* Packet processing. */
-static void update_learning_table(struct ofproto_dpif *,
- const struct flow *, int vlan,
+static void update_learning_table(struct ofproto_dpif *, const struct flow *,
+ struct flow_wildcards *, int vlan,
struct ofbundle *);
/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
}
HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
const char *dp_port;
if (!iter->tnl_port) {
continue;
}
- dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
+ namebuf, sizeof namebuf);
node = simap_find(&tmp_backers, dp_port);
if (node) {
simap_put(&backer->tnl_backers, dp_port, node->data);
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
struct facet *facet, *next;
+ struct cls_cursor cursor;
if (ofproto->backer != backer) {
continue;
}
- HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
if (need_revalidate
|| tag_set_intersects(&revalidate_set, facet->xout.tags)) {
facet_revalidate(facet);
}
}
+ if (backer->governor) {
+ size_t n_subfacets;
+
+ governor_run(backer->governor);
+
+ /* If the governor has shrunk to its minimum size and the number of
+ * subfacets has dwindled, then drop the governor entirely.
+ *
+ * For hysteresis, the number of subfacets to drop the governor is
+ * smaller than the number needed to trigger its creation. */
+ n_subfacets = hmap_count(&backer->subfacets);
+ if (n_subfacets * 4 < flow_eviction_threshold
+ && governor_is_idle(backer->governor)) {
+ governor_destroy(backer->governor);
+ backer->governor = NULL;
+ }
+ }
+
return 0;
}
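/* Worked example (editorial): with a flow_eviction_threshold of 1000,
 * flow_miss_should_make_facet() below starts consulting a governor once
 * more than 500 subfacets exist (n * 2 > threshold), but the code above
 * only destroys it again once fewer than 250 remain (n * 4 < threshold),
 * so a load hovering near one boundary cannot flap the governor. */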
return;
}
+ if (backer->governor) {
+ governor_wait(backer->governor);
+ }
+
timer_wait(&backer->next_expiration);
}
\f
shash_delete(&all_dpif_backers, node);
dpif_close(backer->dpif);
+ ovs_assert(hmap_is_empty(&backer->subfacets));
+ hmap_destroy(&backer->subfacets);
+ governor_destroy(backer->governor);
+
free(backer);
}
}
backer->type = xstrdup(type);
+ backer->governor = NULL;
backer->refcount = 1;
hmap_init(&backer->odp_to_ofport_map);
hmap_init(&backer->drop_keys);
+ hmap_init(&backer->subfacets);
timer_set_duration(&backer->next_expiration, 1000);
backer->need_revalidate = 0;
simap_init(&backer->tnl_backers);
return error;
}
+ backer->max_n_subfacet = 0;
+ backer->created = time_msec();
+ backer->last_minute = backer->created;
+ memset(&backer->hourly, 0, sizeof backer->hourly);
+ memset(&backer->daily, 0, sizeof backer->daily);
+ memset(&backer->lifetime, 0, sizeof backer->lifetime);
+ backer->subfacet_add_count = 0;
+ backer->subfacet_del_count = 0;
+ backer->total_subfacet_add_count = 0;
+ backer->total_subfacet_del_count = 0;
+ backer->avg_n_subfacet = 0;
+ backer->avg_subfacet_life = 0;
+
return error;
}
}
ofproto->has_bonded_bundles = false;
- hmap_init(&ofproto->facets);
- hmap_init(&ofproto->subfacets);
- ofproto->governor = NULL;
+ classifier_init(&ofproto->facets);
ofproto->consistency_rl = LLONG_MIN;
for (i = 0; i < N_TABLES; i++) {
ofproto->n_hit = 0;
ofproto->n_missed = 0;
- ofproto->max_n_subfacet = 0;
- ofproto->created = time_msec();
- ofproto->last_minute = ofproto->created;
- memset(&ofproto->hourly, 0, sizeof ofproto->hourly);
- memset(&ofproto->daily, 0, sizeof ofproto->daily);
- ofproto->subfacet_add_count = 0;
- ofproto->subfacet_del_count = 0;
- ofproto->total_subfacet_add_count = 0;
- ofproto->total_subfacet_del_count = 0;
- ofproto->total_subfacet_life_span = 0;
- ofproto->total_subfacet_count = 0;
- ofproto->n_update_stats = 0;
-
return error;
}
return error;
}
- *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
+ *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, NULL, TBL_INTERNAL);
ovs_assert(*rulep != NULL);
return 0;
hmap_destroy(&ofproto->bundles);
mac_learning_destroy(ofproto->ml);
- hmap_destroy(&ofproto->facets);
- hmap_destroy(&ofproto->subfacets);
- governor_destroy(ofproto->governor);
+ classifier_destroy(&ofproto->facets);
hmap_destroy(&ofproto->vlandev_map);
hmap_destroy(&ofproto->realdev_vid_map);
/* Check the consistency of a random facet, to aid debugging. */
if (time_msec() >= ofproto->consistency_rl
- && !hmap_is_empty(&ofproto->facets)
+ && !classifier_is_empty(&ofproto->facets)
&& !ofproto->backer->need_revalidate) {
+ struct cls_table *table;
+ struct cls_rule *cr;
struct facet *facet;
ofproto->consistency_rl = time_msec() + 250;
- facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
- struct facet, hmap_node);
+ table = CONTAINER_OF(hmap_random_node(&ofproto->facets.tables),
+ struct cls_table, hmap_node);
+ cr = CONTAINER_OF(hmap_random_node(&table->rules), struct cls_rule,
+ hmap_node);
+ facet = CONTAINER_OF(cr, struct facet, cr);
+
if (!tag_set_intersects(&ofproto->backer->revalidate_set,
facet->xout.tags)) {
if (!facet_check_consistency(facet)) {
}
}
- if (ofproto->governor) {
- size_t n_subfacets;
-
- governor_run(ofproto->governor);
-
- /* If the governor has shrunk to its minimum size and the number of
- * subfacets has dwindled, then drop the governor entirely.
- *
- * For hysteresis, the number of subfacets to drop the governor is
- * smaller than the number needed to trigger its creation. */
- n_subfacets = hmap_count(&ofproto->subfacets);
- if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
- && governor_is_idle(ofproto->governor)) {
- governor_destroy(ofproto->governor);
- ofproto->governor = NULL;
- }
- }
-
return 0;
}
VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
poll_immediate_wake();
}
- if (ofproto->governor) {
- governor_wait(ofproto->governor);
- }
}
static void
get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
{
const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct cls_cursor cursor;
+ size_t n_subfacets = 0;
+ struct facet *facet;
+
+ simap_increase(usage, "facets", classifier_count(&ofproto->facets));
- simap_increase(usage, "facets", hmap_count(&ofproto->facets));
- simap_increase(usage, "subfacets", hmap_count(&ofproto->subfacets));
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
+ n_subfacets += list_size(&facet->subfacets);
+ }
+ simap_increase(usage, "subfacets", n_subfacets);
}
static void
n_batch = 0;
HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
- &ofproto->subfacets) {
+ &ofproto->backer->subfacets) {
+ if (ofproto_dpif_cast(subfacet->facet->rule->up.ofproto) != ofproto) {
+ continue;
+ }
+
if (subfacet->path != SF_NOT_INSTALLED) {
batch[n_batch++] = subfacet;
if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
- subfacet_destroy_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(ofproto->backer, batch, n_batch);
n_batch = 0;
}
} else {
}
if (n_batch > 0) {
- subfacet_destroy_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(ofproto->backer, batch, n_batch);
}
}
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
const struct netdev *netdev = port->up.netdev;
+ char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
struct dpif_port dpif_port;
int error;
}
error = dpif_port_query_by_name(ofproto->backer->dpif,
- netdev_vport_get_dpif_port(netdev),
+ netdev_vport_get_dpif_port(netdev, namebuf,
+ sizeof namebuf),
&dpif_port);
if (error) {
return error;
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
const char *devname = netdev_get_name(port->up.netdev);
+ char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
+ const char *dp_port_name;
+ dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
+ sizeof namebuf);
if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
/* The underlying device is still there, so delete it. This
* happens when the ofproto is being destroyed, since the caller
port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
const char *devname = netdev_get_name(netdev);
+ char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
+ const char *dp_port_name;
if (netdev_vport_is_patch(netdev)) {
sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
return 0;
}
+ dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
uint32_t port_no = UINT32_MAX;
int error;
enum odp_key_fitness key_fitness;
const struct nlattr *key;
size_t key_len;
- struct initial_vals initial_vals;
struct list packets;
enum dpif_upcall_type upcall_type;
};
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
struct flow_miss_op *op)
{
- if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
+ if (miss->flow.in_port
+ != vsp_realdev_to_vlandev(miss->ofproto, miss->flow.in_port,
+ miss->flow.vlan_tci)) {
/* This packet was received on a VLAN splinter port. We
* added a VLAN to the packet to make the packet resemble
* the flow, but the actions were composed assuming that
}
/* Figures out whether a flow that missed in 'ofproto', whose details are in
- * 'miss', is likely to be worth tracking in detail in userspace and (usually)
- * installing a datapath flow. The answer is usually "yes" (a return value of
- * true). However, for short flows the cost of bookkeeping is much higher than
- * the benefits, so when the datapath holds a large number of flows we impose
- * some heuristics to decide which flows are likely to be worth tracking. */
+ * 'miss' masked by 'wc', is likely to be worth tracking in detail in userspace
+ * and (usually) installing a datapath flow. The answer is usually "yes" (a
+ * return value of true). However, for short flows the cost of bookkeeping is
+ * much higher than the benefits, so when the datapath holds a large number of
+ * flows we impose some heuristics to decide which flows are likely to be worth
+ * tracking. */
static bool
-flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
- struct flow_miss *miss, uint32_t hash)
+flow_miss_should_make_facet(struct flow_miss *miss, struct flow_wildcards *wc)
{
- if (!ofproto->governor) {
+ struct dpif_backer *backer = miss->ofproto->backer;
+ uint32_t hash;
+
+ if (!backer->governor) {
size_t n_subfacets;
- n_subfacets = hmap_count(&ofproto->subfacets);
- if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
+ n_subfacets = hmap_count(&backer->subfacets);
+ if (n_subfacets * 2 <= flow_eviction_threshold) {
return true;
}
- ofproto->governor = governor_create(ofproto->up.name);
+ backer->governor = governor_create();
}
- return governor_should_install_flow(ofproto->governor, hash,
+ hash = flow_hash_in_wildcards(&miss->flow, wc, 0);
+ return governor_should_install_flow(backer->governor, hash,
list_size(&miss->packets));
}
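/* Sketch (editorial) of what flow_hash_in_wildcards() computes above: every
 * 32-bit word of the flow is masked before hashing, so packets differing
 * only in wildcarded fields share a governor bucket.  Assumes the usual OVS
 * mhash helpers; illustrative rather than the exact library code. */
static uint32_t
example_hash_in_wildcards(const struct flow *flow,
                          const struct flow_wildcards *wc, uint32_t basis)
{
    const uint32_t *mask_u32 = (const uint32_t *) &wc->masks;
    const uint32_t *flow_u32 = (const uint32_t *) flow;
    uint32_t hash = basis;
    size_t i;

    for (i = 0; i < FLOW_U32S; i++) {
        hash = mhash_add(hash, flow_u32[i] & mask_u32[i]);
    }
    return mhash_finish(hash, 4 * FLOW_U32S);
}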
-/* Handles 'miss', which matches 'rule', without creating a facet or subfacet
- * or creating any datapath flow. May add an "execute" operation to 'ops' and
- * increment '*n_ops'. */
+/* Handles 'miss' without creating a facet or subfacet or creating any datapath
+ * flow. 'miss->flow' must have matched 'rule' and been xlated into 'xout'.
+ * May add an "execute" operation to 'ops' and increment '*n_ops'. */
static void
-handle_flow_miss_without_facet(struct flow_miss *miss,
+handle_flow_miss_without_facet(struct rule_dpif *rule, struct xlate_out *xout,
+ struct flow_miss *miss,
struct flow_miss_op *ops, size_t *n_ops)
{
- struct rule_dpif *rule = rule_dpif_lookup(miss->ofproto, &miss->flow);
- long long int now = time_msec();
struct ofpbuf *packet;
- struct xlate_in xin;
LIST_FOR_EACH (packet, list_node, &miss->packets) {
- struct flow_miss_op *op = &ops[*n_ops];
- struct dpif_flow_stats stats;
COVERAGE_INC(facet_suppress);
handle_flow_miss_common(rule, packet, &miss->flow);
- dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
- rule_credit_stats(rule, &stats);
+ if (xout->slow) {
+ struct xlate_in xin;
- xlate_in_init(&xin, miss->ofproto, &miss->flow, &miss->initial_vals,
- rule, stats.tcp_flags, packet);
- xin.resubmit_stats = &stats;
- xlate_actions(&xin, &op->xout);
+ xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
+ xlate_actions_for_side_effects(&xin);
+ }
- if (op->xout.odp_actions.size) {
+ if (xout->odp_actions.size) {
+ struct flow_miss_op *op = &ops[*n_ops];
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
+ xlate_out_copy(&op->xout, xout);
execute->actions = op->xout.odp_actions.data;
execute->actions_len = op->xout.odp_actions.size;
op->xout_garbage = true;
(*n_ops)++;
- } else {
- xlate_out_uninit(&op->xout);
}
}
}
* here, then the new subfacet or its packets could look (occasionally) as
* though it was used some time after the facet was used. That can make a
* one-packet flow look like it has a nonzero duration, which looks odd in
- * e.g. NetFlow statistics. */
+ * e.g. NetFlow statistics.
+ *
+ * If non-null, 'stats' will be folded into 'facet'. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
- long long int now,
+ long long int now, struct dpif_flow_stats *stats,
struct flow_miss_op *ops, size_t *n_ops)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct ofpbuf *packet;
subfacet = subfacet_create(facet, miss, now);
- want_path = subfacet->facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ if (stats) {
+ subfacet_update_stats(subfacet, stats);
+ }
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
- struct dpif_flow_stats stats;
handle_flow_miss_common(facet->rule, packet, &miss->flow);
if (want_path != SF_FAST_PATH) {
struct xlate_in xin;
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, 0, packet);
+ xlate_in_init(&xin, ofproto, &miss->flow, facet->rule, 0, packet);
xlate_actions_for_side_effects(&xin);
}
- dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
- subfacet_update_stats(subfacet, &stats);
-
if (facet->xout.odp_actions.size) {
struct dpif_execute *execute = &op->dpif_op.u.execute;
put->actions = facet->xout.odp_actions.data;
put->actions_len = facet->xout.odp_actions.size;
} else {
- compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ compose_slow_path(ofproto, &miss->flow, facet->xout.slow,
op->slow_stub, sizeof op->slow_stub,
&put->actions, &put->actions_len);
}
size_t *n_ops)
{
struct ofproto_dpif *ofproto = miss->ofproto;
+ struct dpif_flow_stats stats__;
+ struct dpif_flow_stats *stats = &stats__;
+ struct ofpbuf *packet;
struct facet *facet;
long long int now;
- uint32_t hash;
- /* The caller must ensure that miss->hmap_node.hash contains
- * flow_hash(miss->flow, 0). */
- hash = miss->hmap_node.hash;
+ now = time_msec();
+ memset(stats, 0, sizeof *stats);
+ stats->used = now;
+ LIST_FOR_EACH (packet, list_node, &miss->packets) {
+ stats->tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
+ stats->n_bytes += packet->size;
+ stats->n_packets++;
+ }
- facet = facet_lookup_valid(ofproto, &miss->flow, hash);
+ facet = facet_lookup_valid(ofproto, &miss->flow);
if (!facet) {
+ struct flow_wildcards wc;
+ struct rule_dpif *rule;
+ struct xlate_out xout;
+ struct xlate_in xin;
+
+ flow_wildcards_init_catchall(&wc);
+ rule = rule_dpif_lookup(ofproto, &miss->flow, &wc);
+ rule_credit_stats(rule, stats);
+
+ xlate_in_init(&xin, ofproto, &miss->flow, rule, stats->tcp_flags,
+ NULL);
+ xin.resubmit_stats = stats;
+ xin.may_learn = true;
+ xlate_actions(&xin, &xout);
+ flow_wildcards_or(&xout.wc, &xout.wc, &wc);
+
/* There does not exist a bijection between 'struct flow' and datapath
* flow keys with fitness ODP_FIT_TO_LITTLE. This breaks a fundamental
* assumption used throughout the facet and subfacet handling code.
* Since we have to handle these misses in userspace anyway, we simply
* skip facet creation, avoiding the problem altogether. */
if (miss->key_fitness == ODP_FIT_TOO_LITTLE
- || !flow_miss_should_make_facet(ofproto, miss, hash)) {
- handle_flow_miss_without_facet(miss, ops, n_ops);
+ || !flow_miss_should_make_facet(miss, &xout.wc)) {
+ handle_flow_miss_without_facet(rule, &xout, miss, ops, n_ops);
return;
}
- facet = facet_create(miss, hash);
- now = facet->used;
- } else {
- now = time_msec();
+ facet = facet_create(miss, rule, &xout, stats);
+ stats = NULL;
}
- handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
+ handle_flow_miss_with_facet(miss, facet, now, stats, ops, n_ops);
}
static struct drop_key *
* flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
* a VLAN header onto 'packet' (if it is nonnull).
*
- * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
- * to the VLAN TCI with which the packet was really received, that is, the
- * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from
- * the value returned in flow->vlan_tci only for packets received on
- * VLAN splinters.)
- *
* Similarly, this function also includes some logic to help with tunnels. It
* may modify 'flow' as necessary to make the tunneling implementation
* transparent to the upcall processing logic.
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
const struct nlattr *key, size_t key_len,
struct flow *flow, enum odp_key_fitness *fitnessp,
- struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
- struct initial_vals *initial_vals)
+ struct ofproto_dpif **ofproto, uint32_t *odp_in_port)
{
const struct ofport_dpif *port;
enum odp_key_fitness fitness;
goto exit;
}
- if (initial_vals) {
- initial_vals->vlan_tci = flow->vlan_tci;
- }
-
if (odp_in_port) {
*odp_in_port = flow->in_port;
}
error = ofproto_receive(backer, upcall->packet, upcall->key,
upcall->key_len, &flow, &miss->key_fitness,
- &ofproto, &odp_in_port, &miss->initial_vals);
+ &ofproto, &odp_in_port);
if (error == ENODEV) {
struct drop_key *drop_key;
uint32_t odp_in_port;
if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
- &flow, NULL, &ofproto, &odp_in_port, NULL)
+ &flow, NULL, &ofproto, &odp_in_port)
|| !ofproto->sflow) {
return;
}
struct flow flow;
if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
- &flow, NULL, &ofproto, NULL, NULL)
+ &flow, NULL, &ofproto, NULL)
|| !ofproto->ipfix) {
return;
}
struct flow flow;
if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
- &flow, NULL, &ofproto, NULL, NULL)
+ &flow, NULL, &ofproto, NULL)
|| !ofproto->ipfix) {
return;
}
\f
/* Flow expiration. */
-static int subfacet_max_idle(const struct ofproto_dpif *);
+static int subfacet_max_idle(const struct dpif_backer *);
static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
-static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
+static void expire_subfacets(struct dpif_backer *, int dp_max_idle);
/* This function is called periodically by run(). Its job is to collect
* updates for the flows that have been installed into the datapath, most
expire(struct dpif_backer *backer)
{
struct ofproto_dpif *ofproto;
- int max_idle = INT32_MAX;
+ size_t n_subfacets;
+ int max_idle;
/* Periodically clear out the drop keys in an effort to keep them
* relatively few. */
/* Update stats for each flow in the backer. */
update_stats(backer);
+ n_subfacets = hmap_count(&backer->subfacets);
+ if (n_subfacets) {
+ struct subfacet *subfacet;
+ long long int total, now;
+
+ total = 0;
+ now = time_msec();
+ HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
+ total += now - subfacet->created;
+ }
+ backer->avg_subfacet_life += total / n_subfacets;
+ }
+ backer->avg_subfacet_life /= 2;
+
+ backer->avg_n_subfacet += n_subfacets;
+ backer->avg_n_subfacet /= 2;
+
+ backer->max_n_subfacet = MAX(backer->max_n_subfacet, n_subfacets);
+
+ max_idle = subfacet_max_idle(backer);
+ expire_subfacets(backer, max_idle);
+
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
struct rule *rule, *next_rule;
- int dp_max_idle;
if (ofproto->backer != backer) {
continue;
}
- /* Keep track of the max number of flows per ofproto_dpif. */
- update_max_subfacet_count(ofproto);
-
- /* Expire subfacets that have been idle too long. */
- dp_max_idle = subfacet_max_idle(ofproto);
- expire_subfacets(ofproto, dp_max_idle);
-
- max_idle = MIN(max_idle, dp_max_idle);
-
/* Expire OpenFlow flows whose idle_timeout or hard_timeout
* has passed. */
LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
* about, or a flow that shouldn't be installed but was anyway. Delete it. */
static void
-delete_unexpected_flow(struct ofproto_dpif *ofproto,
+delete_unexpected_flow(struct dpif_backer *backer,
const struct nlattr *key, size_t key_len)
{
if (!VLOG_DROP_WARN(&rl)) {
ds_init(&s);
odp_flow_key_format(key, key_len, &s);
- VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
+ VLOG_WARN("unexpected flow: %s", ds_cstr(&s));
ds_destroy(&s);
}
COVERAGE_INC(facet_unexpected);
- dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
+ dpif_flow_del(backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
const struct dpif_flow_stats *stats;
struct dpif_flow_dump dump;
const struct nlattr *key;
- struct ofproto_dpif *ofproto;
size_t key_len;
dpif_flow_dump_start(&dump, backer->dpif);
while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
- struct flow flow;
struct subfacet *subfacet;
uint32_t key_hash;
- if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
- NULL, NULL)) {
- continue;
- }
-
- ofproto->total_subfacet_count += hmap_count(&ofproto->subfacets);
- ofproto->n_update_stats++;
-
key_hash = odp_flow_key_hash(key, key_len);
- subfacet = subfacet_find(ofproto, key, key_len, key_hash);
+ subfacet = subfacet_find(backer, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
update_subfacet_stats(subfacet, stats);
case SF_NOT_INSTALLED:
default:
- delete_unexpected_flow(ofproto, key, key_len);
+ delete_unexpected_flow(backer, key, key_len);
break;
}
run_fast_rl();
}
dpif_flow_dump_done(&dump);
- HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- update_moving_averages(ofproto);
- }
-
+ update_moving_averages(backer);
}
/* Calculates and returns the number of milliseconds of idle time after which
* its statistics into its facet, and when a facet's last subfacet expires, we
* fold its statistic into its rule. */
static int
-subfacet_max_idle(const struct ofproto_dpif *ofproto)
+subfacet_max_idle(const struct dpif_backer *backer)
{
/*
* Idle time histogram.
* that is installed in the kernel gets dropped in the appropriate bucket.
* After the histogram has been built, we compute the cutoff so that only
* the most-recently-used 1% of subfacets (but at least
- * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
+ * flow_eviction_threshold flows) are kept cached. At least
* the most-recently-used bucket of subfacets is kept, so actually an
* arbitrary number of subfacets can be kept in any given expiration run
* (though the next run will delete most of those unless they receive
long long int now;
int i;
- total = hmap_count(&ofproto->subfacets);
- if (total <= ofproto->up.flow_eviction_threshold) {
+ total = hmap_count(&backer->subfacets);
+ if (total <= flow_eviction_threshold) {
return N_BUCKETS * BUCKET_WIDTH;
}
/* Build histogram. */
now = time_msec();
- HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
+ HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
long long int idle = now - subfacet->used;
int bucket = (idle <= 0 ? 0
: idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
do {
subtotal += buckets[bucket++];
} while (bucket < N_BUCKETS &&
- subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
+ subtotal < MAX(flow_eviction_threshold, total / 100));
if (VLOG_IS_DBG_ENABLED()) {
struct ds s;
ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
}
}
- VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
+ VLOG_INFO("%s (msec:count)", ds_cstr(&s));
ds_destroy(&s);
}
}
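/* Worked example (editorial, numbers illustrative): with 100-ms buckets, a
 * threshold of 1000, and 5000 installed subfacets, the loop above sums
 * buckets from the most-recently-used end until the subtotal reaches
 * MAX(1000, 5000 / 100) = 1000; if that happens in the 300-400 ms bucket,
 * subfacets idle longer than that are expired this run. */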
static void
-expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
+expire_subfacets(struct dpif_backer *backer, int dp_max_idle)
{
/* Cutoff time for most flows. */
long long int normal_cutoff = time_msec() - dp_max_idle;
n_batch = 0;
HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
- &ofproto->subfacets) {
+ &backer->subfacets) {
long long int cutoff;
cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
if (subfacet->path != SF_NOT_INSTALLED) {
batch[n_batch++] = subfacet;
if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
- subfacet_destroy_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(backer, batch, n_batch);
n_batch = 0;
}
} else {
}
if (n_batch > 0) {
- subfacet_destroy_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(backer, batch, n_batch);
}
}
* The caller must already have determined that no facet with an identical
* 'miss->flow' exists in 'miss->ofproto'.
*
- * 'hash' must be the return value of flow_hash(miss->flow, 0).
+ * 'rule' and 'xout' must have been created based on 'miss'.
+ *
+ * 'facet''s statistics are initialized based on 'stats'.
*
* The facet will initially have no subfacets. The caller should create (at
* least) one subfacet with subfacet_create(). */
static struct facet *
-facet_create(const struct flow_miss *miss, uint32_t hash)
+facet_create(const struct flow_miss *miss, struct rule_dpif *rule,
+ struct xlate_out *xout, struct dpif_flow_stats *stats)
{
struct ofproto_dpif *ofproto = miss->ofproto;
- struct xlate_in xin;
struct facet *facet;
+ struct match match;
facet = xzalloc(sizeof *facet);
- facet->used = time_msec();
+ facet->packet_count = facet->prev_packet_count = stats->n_packets;
+ facet->byte_count = facet->prev_byte_count = stats->n_bytes;
+ facet->tcp_flags = stats->tcp_flags;
+ facet->used = stats->used;
facet->flow = miss->flow;
- facet->initial_vals = miss->initial_vals;
- facet->rule = rule_dpif_lookup(ofproto, &facet->flow);
facet->learn_rl = time_msec() + 500;
+ facet->rule = rule;
- hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
list_push_back(&facet->rule->facets, &facet->list_node);
list_init(&facet->subfacets);
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, 0, NULL);
- xin.may_learn = true;
- xlate_actions(&xin, &facet->xout);
+ xlate_out_copy(&facet->xout, xout);
+
+ match_init(&match, &facet->flow, &facet->xout.wc);
+ cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY);
+ classifier_insert(&ofproto->facets, &facet->cr);
+
facet->nf_flow.output_iface = facet->xout.nf_output_iface;
return facet;
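/* Sketch (editorial) of roughly what match_init() does with the megaflow
 * above: wildcarded bits of the flow are zeroed so that two facets whose
 * creating packets differ only in wildcarded fields yield bit-for-bit
 * identical matches.  Illustrative, not the library implementation. */
static void
example_match_init(struct match *match, const struct flow *flow,
                   const struct flow_wildcards *wc)
{
    uint32_t *flow_u32 = (uint32_t *) &match->flow;
    const uint32_t *mask_u32 = (const uint32_t *) &wc->masks;
    size_t i;

    match->flow = *flow;
    match->wc = *wc;
    for (i = 0; i < FLOW_U32S; i++) {
        flow_u32[i] &= mask_u32[i];
    }
}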
return !error;
}
-/* Remove 'facet' from 'ofproto' and free up the associated memory:
+/* Remove 'facet' from its ofproto and free up the associated memory:
*
* - If 'facet' was installed in the datapath, uninstalls it and updates its
* rule's statistics, via subfacet_uninstall().
&facet->subfacets) {
subfacet_destroy__(subfacet);
}
- hmap_remove(&ofproto->facets, &facet->hmap_node);
+ classifier_remove(&ofproto->facets, &facet->cr);
+ cls_rule_destroy(&facet->cr);
list_remove(&facet->list_node);
facet_free(facet);
}
facet->tcp_flags = 0;
}
-/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
- * Returns it if found, otherwise a null pointer.
- *
- * 'hash' must be the return value of flow_hash(flow, 0).
+/* Searches 'ofproto''s table of facets for one which would be responsible for
+ * 'flow'. Returns it if found, otherwise a null pointer.
*
* The returned facet might need revalidation; use facet_lookup_valid()
* instead if that is important. */
static struct facet *
-facet_find(struct ofproto_dpif *ofproto,
- const struct flow *flow, uint32_t hash)
+facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
{
- struct facet *facet;
-
- HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
- if (flow_equal(flow, &facet->flow)) {
- return facet;
- }
- }
-
- return NULL;
+ struct cls_rule *cr = classifier_lookup(&ofproto->facets, flow, NULL);
+ return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL;
}
-/* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
- * Returns it if found, otherwise a null pointer.
- *
- * 'hash' must be the return value of flow_hash(flow, 0).
+/* Searches 'ofproto''s table of facets for one that covers
+ * 'flow'. Returns it if found, otherwise a null pointer.
*
* The returned facet is guaranteed to be valid. */
static struct facet *
-facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
- uint32_t hash)
+facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
{
struct facet *facet;
- facet = facet_find(ofproto, flow, hash);
+ facet = facet_find(ofproto, flow);
if (facet
&& (ofproto->backer->need_revalidate
|| tag_set_intersects(&ofproto->backer->revalidate_set,
bool ok;
/* Check the rule for consistency. */
- rule = rule_dpif_lookup(ofproto, &facet->flow);
+ rule = rule_dpif_lookup(ofproto, &facet->flow, NULL);
if (rule != facet->rule) {
if (!VLOG_DROP_WARN(&rl)) {
struct ds s = DS_EMPTY_INITIALIZER;
}
/* Check the datapath actions for consistency. */
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule,
- 0, NULL);
+ xlate_in_init(&xin, ofproto, &facet->flow, rule, 0, NULL);
xlate_actions(&xin, &xout);
ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct rule_dpif *new_rule;
struct subfacet *subfacet;
+ struct flow_wildcards wc;
struct xlate_out xout;
struct xlate_in xin;
error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
subfacet->key_len, &recv_flow, NULL,
- &recv_ofproto, NULL, NULL);
+ &recv_ofproto, NULL);
if (error
|| recv_ofproto != ofproto
- || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
+ || facet != facet_find(ofproto, &recv_flow)) {
facet_remove(facet);
return false;
}
}
- new_rule = rule_dpif_lookup(ofproto, &facet->flow);
+ flow_wildcards_init_catchall(&wc);
+ new_rule = rule_dpif_lookup(ofproto, &facet->flow, &wc);
/* Calculate new datapath actions.
*
* We do not modify any 'facet' state yet, because we might need to, e.g.,
* emit a NetFlow expiration and, if so, we need to have the old state
* around to properly compose it. */
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, new_rule,
- 0, NULL);
+ xlate_in_init(&xin, ofproto, &facet->flow, new_rule, 0, NULL);
xlate_actions(&xin, &xout);
+ flow_wildcards_or(&xout.wc, &xout.wc, &wc);
/* A facet's slow path reason should only change under dramatic
* circumstances. Rather than try to update everything, it's simpler to
- * remove the facet and start over. */
- if (facet->xout.slow != xout.slow) {
+ * remove the facet and start over.
+ *
+ * More importantly, if a facet's wildcards change, it will be relatively
+ * difficult to figure out if its subfacets still belong to it, and if not
+ * which facet they may belong to. Again, to avoid the complexity, we
+ * simply give up instead. */
+ if (facet->xout.slow != xout.slow
+ || memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) {
facet_remove(facet);
xlate_out_uninit(&xout);
return false;
update_mirror_stats(ofproto, facet->xout.mirrors, stats.n_packets,
stats.n_bytes);
- xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
- facet->rule, stats.tcp_flags, NULL);
+ xlate_in_init(&xin, ofproto, &facet->flow, facet->rule,
+ stats.tcp_flags, NULL);
xin.resubmit_stats = &stats;
xin.may_learn = may_learn;
xlate_actions_for_side_effects(&xin);
}
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct cls_cursor cursor;
struct facet *facet;
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
facet_push_stats(facet, false);
if (run_fast) {
run_fast_rl();
/* Subfacets. */
static struct subfacet *
-subfacet_find(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len, uint32_t key_hash)
+subfacet_find(struct dpif_backer *backer, const struct nlattr *key,
+ size_t key_len, uint32_t key_hash)
{
struct subfacet *subfacet;
HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
- &ofproto->subfacets) {
+ &backer->subfacets) {
if (subfacet->key_len == key_len
&& !memcmp(key, subfacet->key, key_len)) {
return subfacet;
subfacet_create(struct facet *facet, struct flow_miss *miss,
long long int now)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct dpif_backer *backer = miss->ofproto->backer;
enum odp_key_fitness key_fitness = miss->key_fitness;
const struct nlattr *key = miss->key;
size_t key_len = miss->key_len;
if (list_is_empty(&facet->subfacets)) {
subfacet = &facet->one_subfacet;
} else {
- subfacet = subfacet_find(ofproto, key, key_len, key_hash);
+ subfacet = subfacet_find(backer, key, key_len, key_hash);
if (subfacet) {
if (subfacet->facet == facet) {
return subfacet;
subfacet = xmalloc(sizeof *subfacet);
}
- hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
+ hmap_insert(&backer->subfacets, &subfacet->hmap_node, key_hash);
list_push_back(&facet->subfacets, &subfacet->list_node);
subfacet->facet = facet;
subfacet->key_fitness = key_fitness;
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
subfacet->path = SF_NOT_INSTALLED;
+ subfacet->backer = backer;
- ofproto->subfacet_add_count++;
+ backer->subfacet_add_count++;
return subfacet;
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
/* Update ofproto stats before uninstall the subfacet. */
- ofproto->subfacet_del_count++;
- ofproto->total_subfacet_life_span += (time_msec() - subfacet->created);
+ ofproto->backer->subfacet_del_count++;
subfacet_uninstall(subfacet);
- hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
+ hmap_remove(&subfacet->backer->subfacets, &subfacet->hmap_node);
list_remove(&subfacet->list_node);
free(subfacet->key);
if (subfacet != &facet->one_subfacet) {
}
static void
-subfacet_destroy_batch(struct ofproto_dpif *ofproto,
+subfacet_destroy_batch(struct dpif_backer *backer,
struct subfacet **subfacets, int n)
{
struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
opsp[i] = &ops[i];
}
- dpif_operate(ofproto->backer->dpif, opsp, n);
+ dpif_operate(backer->dpif, opsp, n);
for (i = 0; i < n; i++) {
subfacet_reset_dp_stats(subfacets[i], &stats[i]);
subfacets[i]->path = SF_NOT_INSTALLED;
&actions, &actions_len);
}
- ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
subfacet->key_len, actions, actions_len, stats);
if (stats) {
\f
/* Rules. */
+/* Looks up 'flow' in 'ofproto''s classifier. If 'wc' is non-null, sets
+ * the fields that were relevant as part of the lookup. */
static struct rule_dpif *
-rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
+rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
+ struct flow_wildcards *wc)
{
struct rule_dpif *rule;
- rule = rule_dpif_lookup__(ofproto, flow, 0);
+ rule = rule_dpif_lookup__(ofproto, flow, wc, 0);
if (rule) {
return rule;
}
static struct rule_dpif *
rule_dpif_lookup__(struct ofproto_dpif *ofproto, const struct flow *flow,
- uint8_t table_id)
+ struct flow_wildcards *wc, uint8_t table_id)
{
struct cls_rule *cls_rule;
struct classifier *cls;
struct flow ofpc_normal_flow = *flow;
ofpc_normal_flow.tp_src = htons(0);
ofpc_normal_flow.tp_dst = htons(0);
- cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
+ cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
} else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
cls_rule = &ofproto->drop_frags_rule->up.cr;
+ if (wc) {
+ flow_wildcards_init_exact(wc);
+ }
} else {
- cls_rule = classifier_lookup(cls, flow);
+ cls_rule = classifier_lookup(cls, flow, wc);
}
return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
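/* Editorial note: the OFPC_FRAG_DROP branch above picks 'drop_frags_rule'
 * without a classifier lookup, so nothing un-wildcards 'wc' on that path;
 * flow_wildcards_init_exact() is the conservative fallback, at the cost of
 * one exact-match facet per fragment flow. */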
struct ofpbuf *packet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct initial_vals initial_vals;
struct dpif_flow_stats stats;
struct xlate_out xout;
struct xlate_in xin;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
rule_credit_stats(rule, &stats);
- initial_vals.vlan_tci = flow->vlan_tci;
- xlate_in_init(&xin, ofproto, flow, &initial_vals, rule, stats.tcp_flags,
- packet);
+ xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
xin.resubmit_stats = &stats;
xlate_actions(&xin, &xout);
output.port = ofport->up.ofp_port;
output.max_len = 0;
- xlate_in_init(&xin, ofproto, &flow, NULL, NULL, 0, packet);
+ xlate_in_init(&xin, ofproto, &flow, NULL, 0, packet);
xin.ofpacts_len = sizeof output;
xin.ofpacts = &output.ofpact;
xin.resubmit_stats = &stats;
/* Look up a flow with 'in_port' as the input port. */
ctx->xin->flow.in_port = in_port;
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, table_id);
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow,
+ &ctx->xout->wc, table_id);
tag_the_flow(ctx, rule);
{
ovs_assert(eth_type_mpls(eth_type));
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+ memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
+ sizeof ctx->xout->wc.masks.mpls_lse);
+ memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
+ sizeof ctx->xout->wc.masks.mpls_depth);
+
if (ctx->base_flow.mpls_depth) {
ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
ctx->xin->flow.mpls_depth++;
ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
ovs_assert(!eth_type_mpls(eth_type));
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+ memset(&ctx->xout->wc.masks.mpls_lse, 0xff,
+ sizeof ctx->xout->wc.masks.mpls_lse);
+ memset(&ctx->xout->wc.masks.mpls_depth, 0xff,
+ sizeof ctx->xout->wc.masks.mpls_depth);
+
if (ctx->xin->flow.mpls_depth) {
ctx->xin->flow.mpls_depth--;
ctx->xin->flow.mpls_lse = htonl(0);
{
uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
if (port <= UINT16_MAX) {
+ union mf_subvalue value;
+
+ memset(&value, 0xff, sizeof value);
+ mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
xlate_output_action(ctx, port, or->max_len, false);
}
}
{
uint16_t port;
- port = bundle_execute(bundle, &ctx->xin->flow, slave_enabled_cb,
- ctx->ofproto);
+ port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
+ slave_enabled_cb, ctx->ofproto);
if (bundle->dst.field) {
nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
} else {
struct ofpbuf ofpacts;
int error;
+ ctx->xout->has_learn = true;
+
+ learn_mask(learn, &ctx->xout->wc);
+
+ if (!ctx->xin->may_learn) {
+ return;
+ }
+
ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
break;
case OFPACT_SET_IPV4_SRC:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
}
break;
case OFPACT_SET_IPV4_DST:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
}
case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
break;
case OFPACT_SET_L4_SRC_PORT:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+ memset(&ctx->xout->wc.masks.nw_proto, 0xff,
+ sizeof ctx->xout->wc.masks.nw_proto);
if (is_ip_any(&ctx->xin->flow)) {
ctx->xin->flow.tp_src =
htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
break;
case OFPACT_SET_L4_DST_PORT:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+ memset(&ctx->xout->wc.masks.nw_proto, 0xff,
+ sizeof ctx->xout->wc.masks.nw_proto);
if (is_ip_any(&ctx->xin->flow)) {
ctx->xin->flow.tp_dst =
htons(ofpact_get_SET_L4_DST_PORT(a)->port);
break;
case OFPACT_POP_QUEUE:
+ memset(&ctx->xout->wc.masks.skb_priority, 0xff,
+ sizeof ctx->xout->wc.masks.skb_priority);
+
ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
break;
case OFPACT_REG_MOVE:
- nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow);
+ nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow,
+ &ctx->xout->wc);
break;
case OFPACT_REG_LOAD:
case OFPACT_STACK_PUSH:
nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
- &ctx->stack);
+ &ctx->xout->wc, &ctx->stack);
break;
case OFPACT_STACK_POP:
break;
case OFPACT_DEC_TTL:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
}
break;
case OFPACT_MULTIPATH:
- multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow);
+ multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow,
+ &ctx->xout->wc);
break;
case OFPACT_BUNDLE:
break;
case OFPACT_LEARN:
- ctx->xout->has_learn = true;
- if (ctx->xin->may_learn) {
- xlate_learn_action(ctx, ofpact_get_LEARN(a));
- }
+ xlate_learn_action(ctx, ofpact_get_LEARN(a));
break;
case OFPACT_EXIT:
break;
case OFPACT_FIN_TIMEOUT:
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+ memset(&ctx->xout->wc.masks.nw_proto, 0xff,
+ sizeof ctx->xout->wc.masks.nw_proto);
ctx->xout->has_fin_timeout = true;
xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
break;
ctx->table_id = ogt->table_id;
/* Look up a flow from the new table. */
- rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, ctx->table_id);
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow,
+ &ctx->xout->wc, ctx->table_id);
tag_the_flow(ctx, rule);
static void
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
- const struct flow *flow,
- const struct initial_vals *initial_vals,
- struct rule_dpif *rule, uint8_t tcp_flags,
- const struct ofpbuf *packet)
+ const struct flow *flow, struct rule_dpif *rule,
+ uint8_t tcp_flags, const struct ofpbuf *packet)
{
xin->ofproto = ofproto;
xin->flow = *flow;
xin->resubmit_hook = NULL;
xin->report_hook = NULL;
xin->resubmit_stats = NULL;
-
- if (initial_vals) {
- xin->initial_vals = *initial_vals;
- } else {
- xin->initial_vals.vlan_tci = xin->flow.vlan_tci;
- }
}
static void
ctx.rule = xin->rule;
ctx.base_flow = ctx.xin->flow;
- ctx.base_flow.vlan_tci = xin->initial_vals.vlan_tci;
memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;
+ flow_wildcards_init_catchall(&ctx.xout->wc);
+ memset(&ctx.xout->wc.masks.in_port, 0xff,
+ sizeof ctx.xout->wc.masks.in_port);
+
+ if (tnl_port_should_receive(&ctx.xin->flow)) {
+ memset(&ctx.xout->wc.masks.tunnel, 0xff,
+ sizeof ctx.xout->wc.masks.tunnel);
+ }
+
+ /* Disable most wildcarding for NetFlow. */
+ if (xin->ofproto->netflow) {
+ memset(&ctx.xout->wc.masks.dl_src, 0xff,
+ sizeof ctx.xout->wc.masks.dl_src);
+ memset(&ctx.xout->wc.masks.dl_dst, 0xff,
+ sizeof ctx.xout->wc.masks.dl_dst);
+ memset(&ctx.xout->wc.masks.dl_type, 0xff,
+ sizeof ctx.xout->wc.masks.dl_type);
+ memset(&ctx.xout->wc.masks.vlan_tci, 0xff,
+ sizeof ctx.xout->wc.masks.vlan_tci);
+ memset(&ctx.xout->wc.masks.nw_proto, 0xff,
+ sizeof ctx.xout->wc.masks.nw_proto);
+ memset(&ctx.xout->wc.masks.nw_src, 0xff,
+ sizeof ctx.xout->wc.masks.nw_src);
+ memset(&ctx.xout->wc.masks.nw_dst, 0xff,
+ sizeof ctx.xout->wc.masks.nw_dst);
+ memset(&ctx.xout->wc.masks.tp_src, 0xff,
+ sizeof ctx.xout->wc.masks.tp_src);
+ memset(&ctx.xout->wc.masks.tp_dst, 0xff,
+ sizeof ctx.xout->wc.masks.tp_dst);
+ }
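/* Editorial note: NetFlow emits per-connection records built from the
 * fields masked above (MACs, EtherType, VLAN, 5-tuple), so they must all
 * be exact-matched; a wider megaflow would aggregate many connections into
 * one datapath flow and the per-flow accounting would be wrong. */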
+
ctx.xout->tags = 0;
ctx.xout->slow = 0;
ctx.xout->has_learn = false;
ctx.xout->slow = special;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
- struct initial_vals initial_vals;
size_t sample_actions_len;
uint32_t local_odp_port;
- initial_vals.vlan_tci = ctx.base_flow.vlan_tci;
+ if (ctx.xin->flow.in_port
+ != vsp_realdev_to_vlandev(ctx.ofproto, ctx.xin->flow.in_port,
+ ctx.xin->flow.vlan_tci)) {
+ ctx.base_flow.vlan_tci = 0;
+ }
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet,
- &initial_vals, &ds);
+ ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
ds_destroy(&ds);
}
ofpbuf_uninit(&ctx.stack);
+
+ /* Clear the metadata and register wildcard masks, because we won't
+ * use non-header fields as part of the cache. */
+ memset(&ctx.xout->wc.masks.metadata, 0,
+ sizeof ctx.xout->wc.masks.metadata);
+ memset(&ctx.xout->wc.masks.regs, 0, sizeof ctx.xout->wc.masks.regs);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
ctx->xin->report_hook(ctx, s);
}
}
+
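+/* Copies 'src' into 'dst', giving 'dst' its own copy of the datapath
+ * actions. */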
+static void
+xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
+{
+ dst->wc = src->wc;
+ dst->tags = src->tags;
+ dst->slow = src->slow;
+ dst->has_learn = src->has_learn;
+ dst->has_normal = src->has_normal;
+ dst->has_fin_timeout = src->has_fin_timeout;
+ dst->nf_output_iface = src->nf_output_iface;
+ dst->mirrors = src->mirrors;
+
+ ofpbuf_use_stub(&dst->odp_actions, dst->odp_actions_stub,
+ sizeof dst->odp_actions_stub);
+ ofpbuf_put(&dst->odp_actions, src->odp_actions.data,
+ src->odp_actions.size);
+}
\f
/* OFPP_NORMAL implementation. */
port = ofbundle_get_a_port(out_bundle);
} else {
port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
- vid, &ctx->xout->tags);
+ &ctx->xout->wc, vid, &ctx->xout->tags);
if (!port) {
/* No slaves enabled, so drop packet. */
return;
m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
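+ /* The mirror's VLAN selection depends on the packet's VLAN, so the
+ * VLAN fields become part of the match. */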
+ if (m->vlans) {
+ ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
+ }
+
if (!vlan_is_mirrored(m, vlan)) {
mirrors = zero_rightmost_1bit(mirrors);
continue;
* migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
* indicate this; newer upstream kernels use gratuitous ARP requests. */
static bool
-is_gratuitous_arp(const struct flow *flow)
+is_gratuitous_arp(const struct flow *flow, struct flow_wildcards *wc)
{
- return (flow->dl_type == htons(ETH_TYPE_ARP)
- && eth_addr_is_broadcast(flow->dl_dst)
- && (flow->nw_proto == ARP_OP_REPLY
- || (flow->nw_proto == ARP_OP_REQUEST
- && flow->nw_src == flow->nw_dst)));
+ if (flow->dl_type != htons(ETH_TYPE_ARP)) {
+ return false;
+ }
+
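+ /* Un-wildcard each field just before examining it; the caller is
+ * expected to have un-wildcarded dl_type already. */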
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+ if (!eth_addr_is_broadcast(flow->dl_dst)) {
+ return false;
+ }
+
+ memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+ if (flow->nw_proto == ARP_OP_REPLY) {
+ return true;
+ } else if (flow->nw_proto == ARP_OP_REQUEST) {
+ memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+ memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+
+ return flow->nw_src == flow->nw_dst;
+ } else {
+ return false;
+ }
}
static void
update_learning_table(struct ofproto_dpif *ofproto,
- const struct flow *flow, int vlan,
- struct ofbundle *in_bundle)
+ const struct flow *flow, struct flow_wildcards *wc,
+ int vlan, struct ofbundle *in_bundle)
{
struct mac_entry *mac;
}
mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
- if (is_gratuitous_arp(flow)) {
+ if (is_gratuitous_arp(flow, wc)) {
/* We don't want to learn from gratuitous ARP packets that are
* reflected back over bond slaves so we lock the learning table. */
if (!in_bundle->bond) {
case BV_DROP_IF_MOVED:
mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
if (mac && mac->port.p != in_bundle &&
- (!is_gratuitous_arp(flow)
+ (!is_gratuitous_arp(flow, &ctx->xout->wc)
|| mac_entry_is_grat_arp_locked(mac))) {
xlate_report(ctx, "SLB bond thinks this packet looped back, "
"dropping");
ctx->xout->has_normal = true;
+ /* Un-wildcard dl_type, since we may check for gratuitous ARP. */
+ memset(&ctx->xout->wc.masks.dl_type, 0xff,
+ sizeof ctx->xout->wc.masks.dl_type);
+
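+ /* The normal action switches on the Ethernet addresses and the VLAN,
+ * so those fields must be matched exactly. */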
+ memset(&ctx->xout->wc.masks.dl_src, 0xff,
+ sizeof ctx->xout->wc.masks.dl_src);
+ memset(&ctx->xout->wc.masks.dl_dst, 0xff,
+ sizeof ctx->xout->wc.masks.dl_dst);
+ memset(&ctx->xout->wc.masks.vlan_tci, 0xff,
+ sizeof ctx->xout->wc.masks.vlan_tci);
+
in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
ctx->xin->packet != NULL, &in_port);
if (!in_bundle) {
/* Learn source MAC. */
if (ctx->xin->may_learn) {
- update_learning_table(ctx->ofproto, &ctx->xin->flow, vlan, in_bundle);
+ update_learning_table(ctx->ofproto, &ctx->xin->flow, &ctx->xout->wc,
+ vlan, in_bundle);
}
/* Determine output bundle. */
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct initial_vals initial_vals;
struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
struct xlate_out xout;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- initial_vals.vlan_tci = flow->vlan_tci;
- xlate_in_init(&xin, ofproto, flow, &initial_vals, NULL, stats.tcp_flags,
- packet);
+ xlate_in_init(&xin, ofproto, flow, NULL, stats.tcp_flags, packet);
xin.resubmit_stats = &stats;
xin.ofpacts_len = ofpacts_len;
xin.ofpacts = ofpacts;
if (subfacet->path == SF_FAST_PATH) {
struct dpif_flow_stats stats;
- subfacet_install(subfacet, &facet->xout.odp_actions, &stats);
+ subfacet_install(subfacet, &facet->xout.odp_actions,
+ &stats);
subfacet_update_stats(subfacet, &stats);
}
}
static void
send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
{
+ struct cls_cursor cursor;
struct facet *facet;
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
send_active_timeout(ofproto, facet);
}
}
struct ofproto_dpif *ofproto;
struct ofpbuf odp_key;
struct ofpbuf *packet;
- struct initial_vals initial_vals;
struct ds result;
struct flow flow;
char *s;
/* Extract the ofproto_dpif object from the ofproto_receive()
* function. */
if (ofproto_receive(backer, NULL, odp_key.data,
- odp_key.size, &flow, NULL, &ofproto, NULL,
- &initial_vals)) {
+ odp_key.size, &flow, NULL, &ofproto, NULL)) {
unixctl_command_reply_error(conn, "Invalid datapath flow");
goto exit;
}
unixctl_command_reply_error(conn, "Unknown bridge name");
goto exit;
}
- initial_vals.vlan_tci = flow.vlan_tci;
} else {
unixctl_command_reply_error(conn, "Bad flow syntax");
goto exit;
* to reconstruct the flow. */
flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL,
flow.in_port, &flow);
- initial_vals.vlan_tci = flow.vlan_tci;
}
}
- ofproto_trace(ofproto, &flow, packet, &initial_vals, &result);
+ ofproto_trace(ofproto, &flow, packet, &result);
unixctl_command_reply(conn, ds_cstr(&result));
exit:
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet,
- const struct initial_vals *initial_vals, struct ds *ds)
+ const struct ofpbuf *packet, struct ds *ds)
{
struct rule_dpif *rule;
flow_format(ds, flow);
ds_put_char(ds, '\n');
- rule = rule_dpif_lookup(ofproto, flow);
+ rule = rule_dpif_lookup(ofproto, flow, NULL);
trace_format_rule(ds, 0, 0, rule);
if (rule == ofproto->miss_rule) {
if (rule) {
uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf odp_actions;
-
struct trace_ctx trace;
+ struct match match;
uint8_t tcp_flags;
tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
trace.flow = *flow;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- xlate_in_init(&trace.xin, ofproto, flow, initial_vals, rule, tcp_flags,
- packet);
+ xlate_in_init(&trace.xin, ofproto, flow, rule, tcp_flags, packet);
trace.xin.resubmit_hook = trace_resubmit;
trace.xin.report_hook = trace_report;
+
xlate_actions(&trace.xin, &trace.xout);
ds_put_char(ds, '\n');
trace_format_flow(ds, 0, "Final flow", &trace);
+
+ match_init(&match, flow, &trace.xout.wc);
+ ds_put_cstr(ds, "Relevant fields: ");
+ match_format(&match, ds, OFP_DEFAULT_PRIORITY);
+ ds_put_char(ds, '\n');
+
ds_put_cstr(ds, "Datapath actions: ");
format_odp_actions(ds, trace.xout.odp_actions.data,
trace.xout.odp_actions.size);
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
+ struct cls_cursor cursor;
struct facet *facet;
int errors;
errors = 0;
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
if (!facet_check_consistency(facet)) {
errors++;
}
}
static void
-show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
+show_dp_rates(struct ds *ds, const char *heading,
+ const struct avg_subfacet_rates *rates)
{
- const struct shash_node **ports;
- int i;
- struct avg_subfacet_rates lifetime;
- unsigned long long int minutes;
- const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+ ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
+ heading, rates->add_rate, rates->del_rate);
+}
- minutes = (time_msec() - ofproto->created) / min_ms;
+static void
+dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
+{
+ const struct shash_node **ofprotos;
+ struct ofproto_dpif *ofproto;
+ struct shash ofproto_shash;
+ uint64_t n_hit, n_missed;
+ long long int minutes;
+ size_t i;
- if (minutes > 0) {
- lifetime.add_rate = (double)ofproto->total_subfacet_add_count
- / minutes;
- lifetime.del_rate = (double)ofproto->total_subfacet_del_count
- / minutes;
- }else {
- lifetime.add_rate = 0.0;
- lifetime.del_rate = 0.0;
- }
-
- ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
- dpif_name(ofproto->backer->dpif));
- ds_put_format(ds,
- "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
- ofproto->n_hit, ofproto->n_missed);
- ds_put_format(ds, "\tflows: cur: %zu, avg: %5.3f, max: %d,"
- " life span: %llu(ms)\n",
- hmap_count(&ofproto->subfacets),
- avg_subfacet_count(ofproto),
- ofproto->max_n_subfacet,
- avg_subfacet_life_span(ofproto));
+ n_hit = n_missed = 0;
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ n_missed += ofproto->n_missed;
+ n_hit += ofproto->n_hit;
+ }
+ }
+
+ ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
+ dpif_name(backer->dpif), n_hit, n_missed);
+ ds_put_format(ds, "\tflows: cur: %zu, avg: %u, max: %u,"
+ " life span: %lldms\n", hmap_count(&backer->subfacets),
+ backer->avg_n_subfacet, backer->max_n_subfacet,
+ backer->avg_subfacet_life);
+
+ minutes = (time_msec() - backer->created) / (1000 * 60);
if (minutes >= 60) {
- show_dp_rates(ds, "\t\thourly avg:", &ofproto->hourly);
+ show_dp_rates(ds, "\thourly avg:", &backer->hourly);
}
if (minutes >= 60 * 24) {
- show_dp_rates(ds, "\t\tdaily avg:", &ofproto->daily);
+ show_dp_rates(ds, "\tdaily avg:", &backer->daily);
}
- show_dp_rates(ds, "\t\toverall avg:", &lifetime);
-
- ports = shash_sort(&ofproto->up.port_by_name);
- for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
- const struct shash_node *node = ports[i];
- struct ofport *ofport = node->data;
- const char *name = netdev_get_name(ofport->netdev);
- const char *type = netdev_get_type(ofport->netdev);
- uint32_t odp_port;
+ show_dp_rates(ds, "\toverall avg:", &backer->lifetime);
- ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
+ shash_init(&ofproto_shash);
+ ofprotos = get_ofprotos(&ofproto_shash);
+ for (i = 0; i < shash_count(&ofproto_shash); i++) {
+ struct ofproto_dpif *ofproto = ofprotos[i]->data;
+ const struct shash_node **ports;
+ size_t j;
- odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
- if (odp_port != OVSP_NONE) {
- ds_put_format(ds, "%"PRIu32":", odp_port);
- } else {
- ds_put_cstr(ds, "none:");
+ if (ofproto->backer != backer) {
+ continue;
}
- if (strcmp(type, "system")) {
- struct netdev *netdev;
- int error;
+ ds_put_format(ds, "\t%s: hit:%"PRIu64" missed:%"PRIu64"\n",
+ ofproto->up.name, ofproto->n_hit, ofproto->n_missed);
+
+ ports = shash_sort(&ofproto->up.port_by_name);
+ for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
+ const struct shash_node *node = ports[j];
+ struct ofport *ofport = node->data;
+ struct smap config;
+ uint32_t odp_port;
- ds_put_format(ds, " (%s", type);
+ ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
+ ofport->ofp_port);
- error = netdev_open(name, type, &netdev);
- if (!error) {
- struct smap config;
+ odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
+ if (odp_port != OVSP_NONE) {
+ ds_put_format(ds, "%"PRIu32":", odp_port);
+ } else {
+ ds_put_cstr(ds, "none:");
+ }
- smap_init(&config);
- error = netdev_get_config(netdev, &config);
- if (!error) {
- const struct smap_node **nodes;
- size_t i;
+ ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));
- nodes = smap_sort(&config);
- for (i = 0; i < smap_count(&config); i++) {
- const struct smap_node *node = nodes[i];
- ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
- node->key, node->value);
- }
- free(nodes);
- }
- smap_destroy(&config);
+ smap_init(&config);
+ if (!netdev_get_config(ofport->netdev, &config)) {
+ const struct smap_node **nodes;
+ size_t i;
- netdev_close(netdev);
+ nodes = smap_sort(&config);
+ for (i = 0; i < smap_count(&config); i++) {
+ const struct smap_node *node = nodes[i];
+ ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
+ node->key, node->value);
+ }
+ free(nodes);
}
+ smap_destroy(&config);
+
ds_put_char(ds, ')');
+ ds_put_char(ds, '\n');
}
- ds_put_char(ds, '\n');
+ free(ports);
}
- free(ports);
+ shash_destroy(&ofproto_shash);
+ free(ofprotos);
}
static void
-ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
- const char *argv[], void *aux OVS_UNUSED)
+ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct shash_node **backers;
+ int i;
+
+ backers = shash_sort(&all_dpif_backers);
+ for (i = 0; i < shash_count(&all_dpif_backers); i++) {
+ dpif_show_backer(backers[i]->data, &ds);
+ }
+ free(backers);
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
+/* Dump the megaflow (facet) cache. This is useful to check the
+ * correctness of flow wildcarding, since the same mechanism is used for
+ * both xlate caching and kernel wildcarding.
+ *
+ * It's important to note that in the output the flow description uses
+ * OpenFlow (OFP) ports, but the actions use datapath (ODP) ports.
+ *
+ * This command is only needed for advanced debugging, so it's not
+ * documented in the man page. */
+static void
+ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[],
+ void *aux OVS_UNUSED)
{
struct ds ds = DS_EMPTY_INITIALIZER;
const struct ofproto_dpif *ofproto;
+ long long int now = time_msec();
+ struct cls_cursor cursor;
+ struct facet *facet;
- if (argc > 1) {
- int i;
- for (i = 1; i < argc; i++) {
- ofproto = ofproto_dpif_lookup(argv[i]);
- if (!ofproto) {
- ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
- "for help)", argv[i]);
- unixctl_command_reply_error(conn, ds_cstr(&ds));
- return;
- }
- show_dp_format(ofproto, &ds);
- }
- } else {
- struct shash ofproto_shash;
- const struct shash_node **sorted_ofprotos;
- int i;
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
- shash_init(&ofproto_shash);
- sorted_ofprotos = get_ofprotos(&ofproto_shash);
- for (i = 0; i < shash_count(&ofproto_shash); i++) {
- const struct shash_node *node = sorted_ofprotos[i];
- show_dp_format(node->data, &ds);
- }
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
+ cls_rule_format(&facet->cr, &ds);
+ ds_put_cstr(&ds, ", ");
+ ds_put_format(&ds, "n_subfacets:%zu, ", list_size(&facet->subfacets));
+ ds_put_format(&ds, "used:%.3fs, ", (now - facet->used) / 1000.0);
+ ds_put_cstr(&ds, "Datapath actions: ");
+ if (facet->xout.slow) {
+ uint64_t slow_path_stub[128 / 8];
+ const struct nlattr *actions;
+ size_t actions_len;
- shash_destroy(&ofproto_shash);
- free(sorted_ofprotos);
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ slow_path_stub, sizeof slow_path_stub,
+ &actions, &actions_len);
+ format_odp_actions(&ds, actions, actions_len);
+ } else {
+ format_odp_actions(&ds, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size);
+ }
+ ds_put_cstr(&ds, "\n");
}
+ ds_chomp(&ds, '\n');
unixctl_command_reply(conn, ds_cstr(&ds));
ds_destroy(&ds);
}
update_stats(ofproto->backer);
- HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
+ HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->backer->subfacets) {
struct facet *facet = subfacet->facet;
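+ /* The backer's subfacet table is shared among its bridges; only show
+ * the subfacets that belong to 'ofproto'. */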
+ if (ofproto_dpif_cast(facet->rule->up.ofproto) != ofproto) {
+ continue;
+ }
+
odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
ofproto_dpif_self_check, NULL);
unixctl_command_register("dpif/dump-dps", "", 0, 0,
ofproto_unixctl_dpif_dump_dps, NULL);
- unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
- ofproto_unixctl_dpif_show, NULL);
+ unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
+ NULL);
unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
ofproto_unixctl_dpif_dump_flows, NULL);
unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
ofproto_unixctl_dpif_del_flows, NULL);
+ unixctl_command_register("dpif/dump-megaflows", "bridge", 1, 1,
+ ofproto_unixctl_dpif_dump_megaflows, NULL);
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
return OFPP_NONE;
}
}
-static unsigned long long int
-avg_subfacet_life_span(const struct ofproto_dpif *ofproto)
-{
- unsigned long long int dc;
- unsigned long long int avg;
-
- dc = ofproto->total_subfacet_del_count + ofproto->subfacet_del_count;
- avg = dc ? ofproto->total_subfacet_life_span / dc : 0;
-
- return avg;
-}
-
-static double
-avg_subfacet_count(const struct ofproto_dpif *ofproto)
-{
- double avg_c = 0.0;
-
- if (ofproto->n_update_stats) {
- avg_c = (double)ofproto->total_subfacet_count
- / ofproto->n_update_stats;
- }
-
- return avg_c;
-}
-
-static void
-show_dp_rates(struct ds *ds, const char *heading,
- const struct avg_subfacet_rates *rates)
-{
- ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
- heading, rates->add_rate, rates->del_rate);
-}
-
-static void
-update_max_subfacet_count(struct ofproto_dpif *ofproto)
-{
- ofproto->max_n_subfacet = MAX(ofproto->max_n_subfacet,
- hmap_count(&ofproto->subfacets));
-}
/* Compute exponentially weighted moving average, adding 'new' as the newest,
* most heavily weighted element. 'base' designates the rate of decay: after
}
static void
-update_moving_averages(struct ofproto_dpif *ofproto)
+update_moving_averages(struct dpif_backer *backer)
{
const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+ long long int minutes = (time_msec() - backer->created) / min_ms;
+
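+ /* Recompute the lifetime add/del rates (per minute) from the overall
+ * totals. */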
+ if (minutes > 0) {
+ backer->lifetime.add_rate = (double) backer->total_subfacet_add_count
+ / minutes;
+ backer->lifetime.del_rate = (double) backer->total_subfacet_del_count
+ / minutes;
+ } else {
+ backer->lifetime.add_rate = 0.0;
+ backer->lifetime.del_rate = 0.0;
+ }
/* Update hourly averages on the minute boundaries. */
- if (time_msec() - ofproto->last_minute >= min_ms) {
- exp_mavg(&ofproto->hourly.add_rate, 60, ofproto->subfacet_add_count);
- exp_mavg(&ofproto->hourly.del_rate, 60, ofproto->subfacet_del_count);
+ if (time_msec() - backer->last_minute >= min_ms) {
+ exp_mavg(&backer->hourly.add_rate, 60, backer->subfacet_add_count);
+ exp_mavg(&backer->hourly.del_rate, 60, backer->subfacet_del_count);
/* Update daily averages on the hour boundaries. */
- if ((ofproto->last_minute - ofproto->created) / min_ms % 60 == 59) {
- exp_mavg(&ofproto->daily.add_rate, 24, ofproto->hourly.add_rate);
- exp_mavg(&ofproto->daily.del_rate, 24, ofproto->hourly.del_rate);
+ if ((backer->last_minute - backer->created) / min_ms % 60 == 59) {
+ exp_mavg(&backer->daily.add_rate, 24, backer->hourly.add_rate);
+ exp_mavg(&backer->daily.del_rate, 24, backer->hourly.del_rate);
}
- ofproto->total_subfacet_add_count += ofproto->subfacet_add_count;
- ofproto->total_subfacet_del_count += ofproto->subfacet_del_count;
- ofproto->subfacet_add_count = 0;
- ofproto->subfacet_del_count = 0;
- ofproto->last_minute += min_ms;
+ backer->total_subfacet_add_count += backer->subfacet_add_count;
+ backer->total_subfacet_del_count += backer->subfacet_del_count;
+ backer->subfacet_add_count = 0;
+ backer->subfacet_del_count = 0;
+ backer->last_minute += min_ms;
}
}