struct ofport_dpif;
struct ofproto_dpif;
struct flow_miss;
+struct facet;
struct rule_dpif {
struct rule up;
static void rule_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
-static void flow_push_stats(struct rule_dpif *, const struct flow *,
- const struct dpif_flow_stats *);
+static void flow_push_stats(struct facet *, const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
* this flow when actions change header fields. */
struct flow flow;
+ /* stack for the push and pop actions.
+ * Each stack element is of the type "union mf_subvalue". */
+ struct ofpbuf stack;
+ union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
uint32_t sflow_odp_port; /* Output port for composing sFlow action. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
- struct flow orig_flow; /* Copy of original flow. */
};
static void action_xlate_ctx_init(struct action_xlate_ctx *,
struct list list_node; /* In struct facet's 'facets' list. */
struct facet *facet; /* Owning facet. */
- /* Key.
- *
- * To save memory in the common case, 'key' is NULL if 'key_fitness' is
- * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
- * regenerate the ODP flow key from ->facet->flow. */
enum odp_key_fitness key_fitness;
struct nlattr *key;
int key_len;
long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
const struct nlattr *key, size_t key_len,
- uint32_t key_hash,
- const struct flow *flow);
+ uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct ofproto_dpif *,
struct subfacet **, int n);
-static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
- struct ofpbuf *key);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
struct timer next_expiration;
struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
- struct sset tnl_backers; /* Set of dpif ports backing tunnels. */
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
/* Facet revalidation flags applying to facets which use this backer. */
enum revalidate_reason need_revalidate; /* Revalidate every facet. */
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
const struct ofpbuf *, ovs_be16 initial_tci,
struct ds *);
-static bool may_dpif_port_del(struct ofport_dpif *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
struct tag_set revalidate_set = backer->revalidate_set;
bool need_revalidate = backer->need_revalidate;
struct ofproto_dpif *ofproto;
+ struct simap_node *node;
+ struct simap tmp_backers;
+
+ /* Handle tunnel garbage collection. */
+ simap_init(&tmp_backers);
+ simap_swap(&backer->tnl_backers, &tmp_backers);
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport_dpif *iter;
+
+ if (backer != ofproto->backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ const char *dp_port;
+
+ if (!iter->tnl_port) {
+ continue;
+ }
+
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
+ node = simap_find(&tmp_backers, dp_port);
+ if (node) {
+ simap_put(&backer->tnl_backers, dp_port, node->data);
+ simap_delete(&tmp_backers, node);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ } else {
+ node = simap_find(&backer->tnl_backers, dp_port);
+ if (!node) {
+ uint32_t odp_port = UINT32_MAX;
+
+ if (!dpif_port_add(backer->dpif, iter->up.netdev,
+ &odp_port)) {
+ simap_put(&backer->tnl_backers, dp_port, odp_port);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ }
+ }
+ }
+
+ iter->odp_port = node ? node->data : OVSP_NONE;
+ if (tnl_port_reconfigure(&iter->up, iter->odp_port,
+ &iter->tnl_port)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ }
+
+ SIMAP_FOR_EACH (node, &tmp_backers) {
+ dpif_port_del(backer->dpif, node->data);
+ }
+ simap_destroy(&tmp_backers);
switch (backer->need_revalidate) {
case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
backer->need_revalidate = 0;
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct facet *facet;
+ struct facet *facet, *next;
if (ofproto->backer != backer) {
continue;
}
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
if (need_revalidate
|| tag_set_intersects(&revalidate_set, facet->tags)) {
facet_revalidate(facet);
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
&all_ofproto_dpifs) {
- if (sset_contains(&ofproto->backer->tnl_backers, devname)) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
goto next;
}
}
drop_key_clear(backer);
hmap_destroy(&backer->drop_keys);
- sset_destroy(&backer->tnl_backers);
+ simap_destroy(&backer->tnl_backers);
hmap_destroy(&backer->odp_to_ofport_map);
node = shash_find(&all_dpif_backers, backer->type);
free(backer->type);
hmap_init(&backer->drop_keys);
timer_set_duration(&backer->next_expiration, 1000);
backer->need_revalidate = 0;
- sset_init(&backer->tnl_backers);
+ simap_init(&backer->tnl_backers);
tag_set_init(&backer->revalidate_set);
*backerp = backer;
const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
const char *devname = netdev_get_name(port->up.netdev);
- if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)
- && may_dpif_port_del(port)) {
+ if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
/* The underlying device is still there, so delete it. This
* happens when the ofproto is being destroyed, since the caller
* assumes that removal of attached ports will happen as part of
* destruction. */
- dpif_port_del(ofproto->backer->dpif, port->odp_port);
- sset_find_and_delete(&ofproto->backer->tnl_backers, dp_port_name);
+ if (!port->tnl_port) {
+ dpif_port_del(ofproto->backer->dpif, port->odp_port);
+ }
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (port->odp_port != OVSP_NONE && !port->tnl_port) {
return error;
}
-static int
-get_cfm_fault(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_opup(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
-}
-
-static int
-get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
- size_t *n_rmps)
+/* ofproto-provider "get_cfm_status" hook.
+ *
+ * Fills '*status' with 'ofport_''s CFM state: fault flags, remote
+ * operational state, health, and the list of remote MPIDs.  Returns true if
+ * CFM is configured on the port (in which case '*status' has been filled
+ * in), false otherwise ('*status' is left untouched).
+ *
+ * Consolidates the four single-value getters (get_cfm_fault, get_cfm_opup,
+ * get_cfm_remote_mpids, get_cfm_health) that this patch removes. */
+static bool
+get_cfm_status(const struct ofport *ofport_,
+               struct ofproto_cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    if (ofport->cfm) {
-        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
-        return 0;
+        status->faults = cfm_get_fault(ofport->cfm);
+        status->remote_opstate = cfm_get_opup(ofport->cfm);
+        status->health = cfm_get_health(ofport->cfm);
+        cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
+        return true;
    } else {
-        return -1;
+        return false;
    }
}
-
-static int
-get_cfm_health(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
-}
\f
/* Spanning Tree. */
}
if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
- int error = dpif_port_add(ofproto->backer->dpif, netdev, NULL);
+ uint32_t port_no = UINT32_MAX;
+ int error;
+
+ error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
if (error) {
return error;
}
+ if (netdev_get_tunnel_config(netdev)) {
+ simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
+ }
}
if (netdev_get_tunnel_config(netdev)) {
sset_add(&ofproto->ghost_ports, devname);
- sset_add(&ofproto->backer->tnl_backers, dp_port_name);
} else {
sset_add(&ofproto->ports, devname);
}
return 0;
}
-/* Returns true if the odp_port backing 'ofport' may be deleted from the
- * datapath. In most cases, this function simply returns true. However, for
- * tunnels it's possible that multiple ofports use the same odp_port, in which
- * case we need to keep the odp_port backer around until the last ofport is
- * deleted. */
-static bool
-may_dpif_port_del(struct ofport_dpif *ofport)
-{
- struct dpif_backer *backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
- struct ofproto_dpif *ofproto_iter;
-
- if (!ofport->tnl_port) {
- return true;
- }
-
- HMAP_FOR_EACH (ofproto_iter, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct ofport_dpif *iter;
-
- if (backer != ofproto_iter->backer) {
- continue;
- }
-
- HMAP_FOR_EACH (iter, up.hmap_node, &ofproto_iter->up.ports) {
- if (ofport == iter) {
- continue;
- }
-
- if (!strcmp(netdev_vport_get_dpif_port(ofport->up.netdev),
- netdev_vport_get_dpif_port(iter->up.netdev))) {
- return false;
- }
- }
- }
-
- return true;
-}
-
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
sset_find_and_delete(&ofproto->ghost_ports,
netdev_get_name(ofport->up.netdev));
- if (may_dpif_port_del(ofport)) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ if (!ofport->tnl_port) {
error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
if (!error) {
- const char *dpif_port;
-
/* The caller is going to close ofport->up.netdev. If this is a
* bonded port, then the bond is using that netdev, so remove it
* from the bond. The client will need to reconfigure everything
* after deleting ports, so then the slave will get re-added. */
- dpif_port = netdev_vport_get_dpif_port(ofport->up.netdev);
- sset_find_and_delete(&ofproto->backer->tnl_backers, dpif_port);
bundle_remove(&ofport->up);
}
}
struct flow_miss_op {
struct dpif_op dpif_op;
- struct subfacet *subfacet; /* Subfacet */
void *garbage; /* Pointer to pass to free(), NULL if none. */
uint64_t stub[1024 / 8]; /* Temporary buffer. */
};
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet)
+ const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
- struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
-
if (!ofport) {
return 0;
- }
-
- if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
+ } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
if (packet) {
cfm_process_heartbeat(ofport->cfm, packet);
}
stp_process_packet(ofport, packet);
}
return SLOW_STP;
+ } else {
+ return 0;
}
- return 0;
}
static struct flow_miss *
-flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
+flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
+ const struct flow *flow, uint32_t hash)
{
struct flow_miss *miss;
HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
- if (flow_equal(&miss->flow, flow)) {
+ if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
return miss;
}
}
eth_pop_vlan(packet);
}
- op->subfacet = NULL;
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- op->subfacet = subfacet;
if (!subfacet->slow) {
execute->actions = subfacet->actions;
execute->actions_len = subfacet->actions_len;
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
- op->subfacet = subfacet;
+ subfacet->path = want_path;
+
op->garbage = NULL;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
/* Add other packets to a to-do list. */
hash = flow_hash(&miss->flow, 0);
- existing_miss = flow_miss_find(&todo, &miss->flow, hash);
+ existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
if (!existing_miss) {
hmap_insert(&todo, &miss->hmap_node, hash);
miss->ofproto = ofproto;
}
dpif_operate(backer->dpif, dpif_ops, n_ops);
- /* Free memory and update facets. */
+ /* Free memory. */
for (i = 0; i < n_ops; i++) {
- struct flow_miss_op *op = &flow_miss_ops[i];
-
- switch (op->dpif_op.type) {
- case DPIF_OP_EXECUTE:
- break;
-
- case DPIF_OP_FLOW_PUT:
- if (!op->dpif_op.error) {
- op->subfacet->path = subfacet_want_path(op->subfacet->slow);
- }
- break;
-
- case DPIF_OP_FLOW_DEL:
- NOT_REACHED();
- }
-
- free(op->garbage);
+ free(flow_miss_ops[i].garbage);
}
hmap_destroy(&todo);
}
}
/* "action" upcalls need a closer look. */
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
+ if (!upcall->userdata) {
+ VLOG_WARN_RL(&rl, "action upcall missing cookie");
+ return BAD_UPCALL;
+ }
+ if (nl_attr_get_size(upcall->userdata) != sizeof(cookie)) {
+ VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
+ nl_attr_get_size(upcall->userdata));
+ return BAD_UPCALL;
+ }
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
switch (cookie.type) {
case USER_ACTION_COOKIE_SFLOW:
return SFLOW_UPCALL;
case USER_ACTION_COOKIE_UNSPEC:
default:
- VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
+ VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64,
+ nl_attr_get_u64(upcall->userdata));
return BAD_UPCALL;
}
}
return;
}
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof(cookie));
dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
odp_in_port, &cookie);
}
}
key_hash = odp_flow_key_hash(key, key_len);
- subfacet = subfacet_find(ofproto, key, key_len, key_hash, &flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
update_subfacet_stats(subfacet, stats);
|| tag_set_intersects(&ofproto->backer->revalidate_set,
facet->tags))) {
facet_revalidate(facet);
+
+ /* facet_revalidate() may have destroyed 'facet'. */
+ facet = facet_find(ofproto, flow, hash);
}
return facet;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
enum subfacet_path want_path;
- struct odputil_keybuf keybuf;
struct action_xlate_ctx ctx;
- struct ofpbuf key;
struct ds s;
action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
}
ds_init(&s);
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &s);
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &s);
ds_put_cstr(&s, ": inconsistency in subfacet");
if (want_path != subfacet->path) {
* 'facet' to the new rule and recompiles its actions.
*
* - If the rule found is the same as 'facet''s current rule, leaves 'facet'
- * where it is and recompiles its actions anyway. */
+ * where it is and recompiles its actions anyway.
+ *
+ * - If any of 'facet''s subfacets correspond to a new flow according to
+ * ofproto_receive(), 'facet' is removed. */
static void
facet_revalidate(struct facet *facet)
{
COVERAGE_INC(facet_revalidate);
+ /* Check that child subfacets still correspond to this facet. Tunnel
+ * configuration changes could cause a subfacet's OpenFlow in_port to
+ * change. */
+ LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
+ struct ofproto_dpif *recv_ofproto;
+ struct flow recv_flow;
+ int error;
+
+ error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
+ subfacet->key_len, &recv_flow, NULL,
+ &recv_ofproto, NULL, NULL);
+ if (error
+ || recv_ofproto != ofproto
+ || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
+ facet_remove(facet);
+ return;
+ }
+ }
+
new_rule = rule_dpif_lookup(ofproto, &facet->flow);
/* Calculate new datapath actions.
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- flow_push_stats(facet->rule, &facet->flow, &stats);
+ flow_push_stats(facet, &stats);
update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
facet->mirrors, stats.n_packets, stats.n_bytes);
ofproto_rule_update_used(&rule->up, stats->used);
}
-/* Pushes flow statistics to the rules which 'flow' resubmits into given
- * 'rule''s actions and mirrors. */
+/* Pushes flow statistics to the rules which 'facet->flow' resubmits
+ * into given 'facet->rule''s actions and mirrors. */
static void
-flow_push_stats(struct rule_dpif *rule,
- const struct flow *flow, const struct dpif_flow_stats *stats)
+flow_push_stats(struct facet *facet, const struct dpif_flow_stats *stats)
{
+ struct rule_dpif *rule = facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct action_xlate_ctx ctx;
ofproto_rule_update_used(&rule->up, stats->used);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
- 0, NULL);
+ action_xlate_ctx_init(&ctx, ofproto, &facet->flow, facet->flow.vlan_tci,
+ rule, 0, NULL);
ctx.resubmit_stats = stats;
xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
rule->up.ofpacts_len);
static struct subfacet *
subfacet_find(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len, uint32_t key_hash,
- const struct flow *flow)
+ const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
struct subfacet *subfacet;
HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
&ofproto->subfacets) {
- if (subfacet->key
- ? (subfacet->key_len == key_len
- && !memcmp(key, subfacet->key, key_len))
- : flow_equal(flow, &subfacet->facet->flow)) {
+ if (subfacet->key_len == key_len
+ && !memcmp(key, subfacet->key, key_len)) {
return subfacet;
}
}
if (list_is_empty(&facet->subfacets)) {
subfacet = &facet->one_subfacet;
} else {
- subfacet = subfacet_find(ofproto, key, key_len, key_hash,
- &facet->flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
if (subfacet) {
if (subfacet->facet == facet) {
return subfacet;
list_push_back(&facet->subfacets, &subfacet->list_node);
subfacet->facet = facet;
subfacet->key_fitness = key_fitness;
- if (key_fitness != ODP_FIT_PERFECT) {
- subfacet->key = xmemdup(key, key_len);
- subfacet->key_len = key_len;
- } else {
- subfacet->key = NULL;
- subfacet->key_len = 0;
- }
+ subfacet->key = xmemdup(key, key_len);
+ subfacet->key_len = key_len;
subfacet->used = now;
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
subfacet_destroy_batch(struct ofproto_dpif *ofproto,
struct subfacet **subfacets, int n)
{
- struct odputil_keybuf keybufs[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
- struct ofpbuf keys[SUBFACET_DESTROY_MAX_BATCH];
struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
int i;
for (i = 0; i < n; i++) {
ops[i].type = DPIF_OP_FLOW_DEL;
- subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
- ops[i].u.flow_del.key = keys[i].data;
- ops[i].u.flow_del.key_len = keys[i].size;
+ ops[i].u.flow_del.key = subfacets[i]->key;
+ ops[i].u.flow_del.key_len = subfacets[i]->key_len;
ops[i].u.flow_del.stats = &stats[i];
opsp[i] = &ops[i];
}
}
}
-/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
- * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
- * for use as temporary storage. */
-static void
-subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
- struct ofpbuf *key)
-{
-
- if (!subfacet->key) {
- struct flow *flow = &subfacet->facet->flow;
-
- ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
- odp_flow_key_from_flow(key, flow, subfacet->odp_in_port);
- } else {
- ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
- }
-}
-
/* Composes the datapath actions for 'subfacet' based on its rule's actions.
* Translates the actions into 'odp_actions', which the caller must have
* initialized and is responsible for uninitializing. */
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
enum subfacet_path path = subfacet_want_path(slow);
uint64_t slow_path_stub[128 / 8];
- struct odputil_keybuf keybuf;
enum dpif_flow_put_flags flags;
- struct ofpbuf key;
int ret;
flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
&actions, &actions_len);
}
- subfacet_get_key(subfacet, &keybuf, &key);
- ret = dpif_flow_put(ofproto->backer->dpif, flags, key.data, key.size,
- actions, actions_len, stats);
+ ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
if (subfacet->path != SF_NOT_INSTALLED) {
struct rule_dpif *rule = subfacet->facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
- struct ofpbuf key;
int error;
- subfacet_get_key(subfacet, &keybuf, &key);
- error = dpif_flow_del(ofproto->backer->dpif,
- key.data, key.size, &stats);
+ error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
+ subfacet->key_len, &stats);
subfacet_reset_dp_stats(subfacet, &stats);
if (!error) {
subfacet_update_stats(subfacet, &stats);
dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
netdev_vport_inc_tx(ofport->up.netdev, &stats);
odp_put_tunnel_action(&flow.tunnel, &odp_actions);
+ odp_put_skb_mark_action(flow.skb_mark, &odp_actions);
} else {
odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
flow.vlan_tci);
\f
/* OpenFlow to datapath action translation. */
+static bool may_receive(const struct ofport_dpif *, struct action_xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct action_xlate_ctx *);
static void xlate_normal(struct action_xlate_ctx *);
ofpbuf_use_stack(&buf, stub, stub_size);
if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
- odp_put_userspace_action(pid, &cookie, &buf);
+ odp_put_userspace_action(pid, &cookie, sizeof cookie, &buf);
} else {
put_userspace_action(ofproto, &buf, flow, &cookie);
}
pid = dpif_port_get_pid(ofproto->backer->dpif,
ofp_port_to_odp_port(ofproto, flow->in_port));
- return odp_put_userspace_action(pid, cookie, odp_actions);
+ return odp_put_userspace_action(pid, cookie, sizeof *cookie, odp_actions);
}
static void
struct ofport_dpif *peer = ofport_get_peer(ofport);
struct flow old_flow = ctx->flow;
const struct ofproto_dpif *peer_ofproto;
+ enum slow_path_reason special;
+ struct ofport_dpif *in_port;
if (!peer) {
xlate_report(ctx, "Nonexistent patch port peer");
ctx->flow.metadata = htonll(0);
memset(&ctx->flow.tunnel, 0, sizeof ctx->flow.tunnel);
memset(ctx->flow.regs, 0, sizeof ctx->flow.regs);
- xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->flow, in_port,
+ ctx->packet);
+ if (special) {
+ ctx->slow |= special;
+ } else if (!in_port || may_receive(in_port, ctx)) {
+ if (!in_port || stp_forward_in_state(in_port->stp_state)) {
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ } else {
+ /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
+ * learning action look at the packet, then drop it. */
+ struct flow old_base_flow = ctx->base_flow;
+ size_t old_size = ctx->odp_actions->size;
+ xlate_table_action(ctx, ctx->flow.in_port, 0, true);
+ ctx->base_flow = old_base_flow;
+ ctx->odp_actions->size = old_size;
+ }
+ }
+
ctx->flow = old_flow;
ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
}
}
+/* Implements OFPACT_SET_MPLS_TTL: sets the TTL field of the flow's MPLS
+ * label stack entry to 'ttl'.
+ *
+ * Returns true if processing of the remaining actions should stop (here:
+ * the flow carries no MPLS label, so the action cannot apply), false to
+ * continue with the following actions. */
+static bool
+execute_set_mpls_ttl_action(struct action_xlate_ctx *ctx, uint8_t ttl)
+{
+    if (!eth_type_mpls(ctx->flow.dl_type)) {
+        return true;
+    }
+
+    set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+    return false;
+}
+
+/* Implements OFPACT_DEC_MPLS_TTL: decrements the TTL of the flow's MPLS
+ * label stack entry.  If the TTL is already 0, the packet is instead sent
+ * to the controller with reason OFPR_INVALID_TTL.
+ *
+ * Returns true if processing of the remaining actions should stop (the
+ * invalid-TTL case), false to continue. */
+static bool
+execute_dec_mpls_ttl_action(struct action_xlate_ctx *ctx)
+{
+    /* NOTE(review): 'ttl' is read before the MPLS check below; harmless,
+     * since it is only used once eth_type_mpls() has passed. */
+    uint8_t ttl = mpls_lse_to_ttl(ctx->flow.mpls_lse);
+
+    if (!eth_type_mpls(ctx->flow.dl_type)) {
+        return false;
+    }
+
+    if (ttl > 0) {
+        ttl--;
+        set_mpls_lse_ttl(&ctx->flow.mpls_lse, ttl);
+        return false;
+    } else {
+        execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
+
+        /* Stop processing for current table. */
+        return true;
+    }
+}
+
static void
xlate_output_action(struct action_xlate_ctx *ctx,
uint16_t port, uint16_t max_len, bool may_packet_in)
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct action_xlate_ctx *ctx)
{
- const struct ofport_dpif *port;
bool was_evictable = true;
const struct ofpact *a;
- port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- if (port && !may_receive(port, ctx)) {
- /* Drop this flow. */
- return;
- }
-
if (ctx->rule) {
/* Don't let the rule we're working on get evicted underneath us. */
was_evictable = ctx->rule->up.evictable;
nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
break;
+ case OFPACT_STACK_PUSH:
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_STACK_POP:
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->flow,
+ &ctx->stack);
+ break;
+
case OFPACT_PUSH_MPLS:
execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
break;
execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
break;
+ case OFPACT_SET_MPLS_TTL:
+ if (execute_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl)) {
+ goto out;
+ }
+ break;
+
+ case OFPACT_DEC_MPLS_TTL:
+ if (execute_dec_mpls_ttl_action(ctx)) {
+ goto out;
+ }
+ break;
+
case OFPACT_DEC_TTL:
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
}
out:
- /* We've let OFPP_NORMAL and the learning action look at the packet,
- * so drop it now if forwarding is disabled. */
- if (port && !stp_forward_in_state(port->stp_state)) {
- ofpbuf_clear(ctx->odp_actions);
- add_sflow_action(ctx);
- }
if (ctx->rule) {
ctx->rule->up.evictable = was_evictable;
}
static bool hit_resubmit_limit;
enum slow_path_reason special;
+ struct ofport_dpif *in_port;
+ struct flow orig_flow;
COVERAGE_INC(ofproto_dpif_xlate);
ctx->table_id = 0;
ctx->exit = false;
+ ofpbuf_use_stub(&ctx->stack, ctx->init_stack, sizeof ctx->init_stack);
+
if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
- * shows up in profiles.
- *
- * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
- * believe that I wasn't using it without initializing it if I kept it
- * in a local variable. */
- ctx->orig_flow = ctx->flow;
+ * shows up in profiles. */
+ orig_flow = ctx->flow;
}
if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
}
}
- special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
+ in_port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->flow, in_port, ctx->packet);
if (special) {
ctx->slow |= special;
} else {
uint32_t local_odp_port;
add_sflow_action(ctx);
- do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ if (!in_port || may_receive(in_port, ctx)) {
+ do_xlate_actions(ofpacts, ofpacts_len, ctx);
+
+ /* We've let OFPP_NORMAL and the learning action look at the
+ * packet, so drop it now if forwarding is disabled. */
+ if (in_port && !stp_forward_in_state(in_port->stp_state)) {
+ ofpbuf_clear(ctx->odp_actions);
+ add_sflow_action(ctx);
+ }
+ }
if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
if (!hit_resubmit_limit) {
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
+ ofproto_trace(ctx->ofproto, &orig_flow, ctx->packet,
initial_tci, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
}
}
if (ctx->ofproto->has_mirrors) {
- add_mirror_actions(ctx, &ctx->orig_flow);
+ add_mirror_actions(ctx, &orig_flow);
}
fix_sflow_action(ctx);
}
+
+ ofpbuf_uninit(&ctx->stack);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
update_stats(ofproto->backer);
HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
- struct odputil_keybuf keybuf;
- struct ofpbuf key;
-
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &ds);
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
subfacet->dp_packet_count, subfacet->dp_byte_count);
get_netflow_ids,
set_sflow,
set_cfm,
- get_cfm_fault,
- get_cfm_opup,
- get_cfm_remote_mpids,
- get_cfm_health,
+ get_cfm_status,
set_stp,
get_stp_status,
set_stp_port,