#define MAX_INTERNAL_RESUBMITS 1 /* Max resubmits allowed using rules in
internal table. */
-/* Timeout for internal rules created to handle recirculation */
-#define RECIRC_TIMEOUT 60
-
/* Maximum number of resubmit actions in a flow translation, whether they are
* recursive or not. */
#define MAX_RESUBMITS (MAX_RESUBMIT_RECURSION * MAX_RESUBMIT_RECURSION)
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
- /* True if the datapath supports recirculation. */
- bool enable_recirc;
-
- /* True if the datapath supports variable-length
- * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
- * False if the datapath supports only 8-byte (or shorter) userdata. */
- bool variable_length_userdata;
-
- /* Number of MPLS label stack entries that the datapath supports
- * in matches. */
- size_t max_mpls_depth;
-
- /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
- * actions. */
- bool masked_set_action;
+ /* Datapath feature support. */
+ struct dpif_backer_support support;
};
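For reference, a minimal sketch (not part of this patch) of what the new
struct presumably groups, with field names inferred from the members removed
above and from the 'support.*' usages later in this patch; the real
definition may carry additional fields:

    /* Sketch only; names inferred from 'support.recirc',
     * 'support.variable_length_userdata' and 'support.masked_set_action'
     * below. */
    struct dpif_backer_support {
        bool recirc;                   /* Datapath supports recirculation. */
        bool variable_length_userdata; /* Variable-length userdata in
                                        * OVS_ACTION_ATTR_USERSPACE actions. */
        size_t max_mpls_depth;         /* MPLS label stack depth supported
                                        * in matches. */
        bool masked_set_action;        /* Masked OVS_ACTION_ATTR_SET. */
    };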
struct xbundle {
const struct xbridge *xbridge;
+ /* Flow tables version at the beginning of the translation. */
+ cls_version_t tables_version;
+
/* Flow at the last commit. */
struct flow base_flow;
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
+ /* These are used for non-bond recirculation. The recirculation IDs are
+ * stored in xout and must be associated with a datapath flow (ukey),
+ * otherwise they will be freed when the xout is uninitialized.
+ *
+ * Steps in Recirculation Translation
+ * ==================================
+ *
+ * At some point during translation, the code recognizes the need for
+ * recirculation. For example, recirculation is necessary when, after
+ * popping the last MPLS label, an action or a match tries to examine or
+ * modify a field that has been newly revealed following the MPLS label.
+ *
+ * The simplest part of the work to be done is to commit existing changes to
+ * the packet, which produces datapath actions corresponding to the changes,
+ * and then to add an OVS_ACTION_ATTR_RECIRC datapath action.
+ *
+ * The main problem here is preserving state. When the datapath executes
+ * OVS_ACTION_ATTR_RECIRC, it will upcall to userspace to get a translation
+ * for the post-recirculation actions. At this point userspace has to
+ * resume the translation where it left off, which means that it has to
+ * execute the following:
+ *
+ * - The action that prompted recirculation, and any actions following
+ * it within the same flow.
+ *
+ * - If the action that prompted recirculation was invoked within a
+ * NXAST_RESUBMIT, then any actions following the resubmit. These
+ * "resubmit"s can be nested, so this has to go all the way up the
+ * control stack.
+ *
+ * - The OpenFlow 1.1+ action set.
+ *
+ * State that actions and flow table lookups can depend on, such as the
+ * following, must also be preserved:
+ *
+ * - Metadata fields (input port, registers, OF1.1+ metadata, ...).
+ *
+ * - Action set, stack
+ *
+ * - The table ID and cookie of the flow being translated at each level
+ * of the control stack (since OFPAT_CONTROLLER actions send these to
+ * the controller).
+ *
+ * Translation uses the members below to control this state preservation.
+ * When it identifies a need for recirculation, the translation process:
+ *
+ * 1. Sets 'recirc_action_offset' to the current size of 'action_set'. The
+ * action set is part of what needs to be preserved, so this allows the
+ * action set and the additional state to share the 'action_set' buffer.
+ * Later steps can tell that setup for recirculation is in progress from
+ * the nonnegative value of 'recirc_action_offset'.
+ *
+ * 2. Sets 'exit' to true to tell later steps that we're exiting from the
+ * translation process.
+ *
+ * 3. Adds an OFPACT_UNROLL_XLATE action to 'action_set'. This action
+ * holds the current table ID and cookie so that they can be restored
+ * during a post-recirculation upcall translation.
+ *
+ * 4. Adds the action that prompted recirculation and any actions following
+ * it within the same flow to 'action_set', so that they can be executed
+ * during a post-recirculation upcall translation.
+ *
+ * 5. Returns.
+ *
+ * 6. The action that prompted recirculation might be nested in a stack of
+ * "resubmit"s that have actions remaining. Each of these notices
+ * that we're exiting (from 'exit') and that recirculation setup is in
+ * progress (from 'recirc_action_offset') and responds by adding more
+ * OFPACT_UNROLL_XLATE actions to 'action_set', as necessary, and any
+ * actions that were not yet processed.
+ *
+ * The caller stores all the state produced by this process, associated with
+ * the recirculation ID. For the post-recirculation upcall translation, the
+ * caller passes it back in for the new translation to execute. Since the
+ * process yields a set of ofpacts that can be translated directly, the new
+ * translation is not much of a special case at that point.
+ */
+ int recirc_action_offset; /* Offset in 'action_set' to actions to be
+ * executed after recirculation, or -1. */
+ int last_unroll_offset; /* Offset in 'action_set' to the latest unroll
+ * action, or -1. */
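To make the buffer sharing concrete, a sketch (not from the patch) of the
'action_set' layout while recirculation setup is in progress:

    /* Layout of ctx->action_set when recirc_action_offset >= 0 (sketch):
     *
     *   0              recirc_action_offset            action_set.size
     *   +---------------------+----------------------------------+
     *   | OF1.1+ action set   | UNROLL_XLATE + leftover actions  |
     *   +---------------------+----------------------------------+
     *
     * compose_recirculate_action() passes the whole buffer and the split
     * point to the recirculation ID allocator, so a post-recirculation
     * translation can restore the action set and the leftover actions
     * separately. */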
+
/* True if a packet was but is no longer MPLS (due to an MPLS pop action).
* This is a trigger for recirculation in cases where translating an action
* or looking up a flow requires access to the fields of the packet after
- * the MPLS label stack that was originally present.
- *
- * XXX: output to a table and patch port do not currently recirculate even
- * if this is true. */
+ * the MPLS label stack that was originally present. */
bool was_mpls;
/* OpenFlow 1.1+ action set.
static void xlate_action_set(struct xlate_ctx *ctx);
+static void
+ctx_trigger_recirculation(struct xlate_ctx *ctx)
+{
+ ctx->exit = true;
+ ctx->recirc_action_offset = ctx->action_set.size;
+}
+
+static bool
+ctx_first_recirculation_action(const struct xlate_ctx *ctx)
+{
+ return ctx->recirc_action_offset == ctx->action_set.size;
+}
+
+static inline bool
+exit_recirculates(const struct xlate_ctx *ctx)
+{
+ /* When recirculating, 'recirc_action_offset' has a non-negative value. */
+ return ctx->recirc_action_offset >= 0;
+}
+
+static void compose_recirculate_action(struct xlate_ctx *ctx);
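These helpers combine into a recurring pattern; a simplified sketch condensed
from xlate_table_action() and xlate_actions() below, with a hypothetical step
name, not literal patch code:

    static void
    example_translation_step(struct xlate_ctx *ctx)
    {
        if (ctx->was_mpls) {
            /* Cannot look past the popped MPLS stack yet. */
            ctx_trigger_recirculation(ctx); /* Sets 'exit', records offset. */
            return;
        }

        /* ... normal translation work ... */

        if (exit_recirculates(ctx)) {
            /* Emits OVS_ACTION_ATTR_RECIRC and resets the offsets. */
            compose_recirculate_action(ctx);
        }
    }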
+
/* A controller may use OFPP_NONE as the ingress port to indicate that
* it did not arrive on a "real" port. 'ofpp_none_bundle' exists for
* when an input bundle is needed for validation (e.g., mirroring or
const struct dpif_ipfix *,
const struct netflow *,
bool forward_bpdu, bool has_in_band,
- bool enable_recirc,
- bool variable_length_userdata,
- size_t max_mpls_depth,
- bool masked_set_action);
+ const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
enum port_vlan_mode vlan_mode, int vlan,
unsigned long *trunks, bool use_priority_tags,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
bool forward_bpdu, bool has_in_band,
- bool enable_recirc,
- bool variable_length_userdata,
- size_t max_mpls_depth,
- bool masked_set_action)
+ const struct dpif_backer_support *support)
{
if (xbridge->ml != ml) {
mac_learning_unref(xbridge->ml);
xbridge->dpif = dpif;
xbridge->forward_bpdu = forward_bpdu;
xbridge->has_in_band = has_in_band;
- xbridge->enable_recirc = enable_recirc;
- xbridge->variable_length_userdata = variable_length_userdata;
- xbridge->max_mpls_depth = max_mpls_depth;
- xbridge->masked_set_action = masked_set_action;
+ xbridge->support = *support;
}
static void
xbridge->dpif, xbridge->ml, xbridge->stp,
xbridge->rstp, xbridge->ms, xbridge->mbridge,
xbridge->sflow, xbridge->ipfix, xbridge->netflow,
- xbridge->forward_bpdu,
- xbridge->has_in_band, xbridge->enable_recirc,
- xbridge->variable_length_userdata,
- xbridge->max_mpls_depth, xbridge->masked_set_action);
+ xbridge->forward_bpdu, xbridge->has_in_band,
+ &xbridge->support);
LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
xlate_xbundle_copy(new_xbridge, xbundle);
}
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
- bool forward_bpdu, bool has_in_band, bool enable_recirc,
- bool variable_length_userdata, size_t max_mpls_depth,
- bool masked_set_action)
+ bool forward_bpdu, bool has_in_band,
+ const struct dpif_backer_support *support)
{
struct xbridge *xbridge;
xbridge->name = xstrdup(name);
xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
- netflow, forward_bpdu, has_in_band, enable_recirc,
- variable_length_userdata, max_mpls_depth,
- masked_set_action);
+ netflow, forward_bpdu, has_in_band, support);
}
static void
static void
xlate_xbundle_remove(struct xlate_cfg *xcfg, struct xbundle *xbundle)
{
- struct xport *xport, *next;
+ struct xport *xport;
if (!xbundle) {
return;
}
- LIST_FOR_EACH_SAFE (xport, next, bundle_node, &xbundle->xports) {
- list_remove(&xport->bundle_node);
+ LIST_FOR_EACH_POP (xport, bundle_node, &xbundle->xports) {
xport->xbundle = NULL;
}
xlate_xport_remove(new_xcfg, xport);
}
-/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
- * returns the corresponding struct xport, or NULL if none is found. */
-static struct xport *
-xlate_lookup_xport(const struct dpif_backer *backer, const struct flow *flow)
-{
- struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
-
- return xport_lookup(xcfg, tnl_port_should_receive(flow)
- ? tnl_port_receive(flow)
- : odp_port_to_ofport(backer, flow->in_port.odp_port));
-}
-
static struct ofproto_dpif *
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
ofp_port_t *ofp_in_port, const struct xport **xportp)
{
- struct ofproto_dpif *recv_ofproto = NULL;
- struct ofproto_dpif *recirc_ofproto = NULL;
+ struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
const struct xport *xport;
- ofp_port_t in_port = OFPP_NONE;
-
- *xportp = xport = xlate_lookup_xport(backer, flow);
- if (xport) {
- recv_ofproto = xport->xbridge->ofproto;
- in_port = xport->ofp_port;
- }
-
- /* When recirc_id is set in 'flow', checks whether the ofproto_dpif that
- * corresponds to the recirc_id is same as the receiving bridge. If they
- * are the same, uses the 'recv_ofproto' and keeps the 'ofp_in_port' as
- * assigned. Otherwise, uses the 'recirc_ofproto' that owns recirc_id and
- * assigns OFPP_NONE to 'ofp_in_port'. Doing this is in that, the
- * recirculated flow must be processced by the ofproto which originates
- * the recirculation, and as bridges can only see their own ports, the
- * in_port of the 'recv_ofproto' should not be passed to the
- * 'recirc_ofproto'.
- *
- * Admittedly, setting the 'ofp_in_port' to OFPP_NONE limits the
- * 'recirc_ofproto' from meaningfully matching on in_port of recirculated
- * flow, and should be fixed in the near future.
- *
- * TODO: Restore the original patch port.
- */
- if (recv_ofproto && flow->recirc_id) {
- recirc_ofproto = ofproto_dpif_recirc_get_ofproto(backer,
- flow->recirc_id);
- if (recv_ofproto != recirc_ofproto) {
- *xportp = xport = NULL;
- in_port = OFPP_NONE;
- }
+ xport = xport_lookup(xcfg, tnl_port_should_receive(flow)
+ ? tnl_port_receive(flow)
+ : odp_port_to_ofport(backer, flow->in_port.odp_port));
+ if (OVS_UNLIKELY(!xport)) {
+ return NULL;
}
-
+ *xportp = xport;
if (ofp_in_port) {
- *ofp_in_port = in_port;
+ *ofp_in_port = xport->ofp_port;
}
-
- return xport ? recv_ofproto : recirc_ofproto;
+ return xport->xbridge->ofproto;
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
struct flow_wildcards *wc = &ctx->xout->wc;
struct ofport_dpif *ofport;
- if (ctx->xbridge->enable_recirc) {
+ if (ctx->xbridge->support.recirc) {
use_recirc = bond_may_recirc(
out_xbundle->bond, &xr.recirc_id, &xr.hash_basis);
: rstp_process_packet(xport, packet);
}
return SLOW_STP;
- } else if (xport->lldp && lldp_should_process_flow(flow)) {
+ } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
if (packet) {
lldp_process_packet(xport->lldp, packet);
}
const struct xport *peer = xport->peer;
struct flow old_flow = ctx->xin->flow;
bool old_was_mpls = ctx->was_mpls;
+ cls_version_t old_version = ctx->tables_version;
enum slow_path_reason special;
- uint8_t table_id = rule_dpif_lookup_get_init_table_id(&ctx->xin->flow);
struct ofpbuf old_stack = ctx->stack;
union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf old_action_set = ctx->action_set;
memset(flow->regs, 0, sizeof flow->regs);
flow->actset_output = OFPP_UNSET;
+ /* The bridge is now known, so obtain its table version. */
+ ctx->tables_version
+ = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
+
special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
if (special) {
ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
- xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
- true, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
if (ctx->action_set.size) {
- /* Translate action set only if not dropping the packet. */
- xlate_action_set(ctx);
+ /* Translate action set only if not dropping the packet and
+ * not recirculating. */
+ if (!exit_recirculates(ctx)) {
+ xlate_action_set(ctx);
+ }
+ }
+ /* Check if we need to recirculate. */
+ if (exit_recirculates(ctx)) {
+ compose_recirculate_action(ctx);
}
} else {
/* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
size_t old_size = ctx->xout->odp_actions->size;
mirror_mask_t old_mirrors = ctx->xout->mirrors;
- xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
- true, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
ctx->xout->odp_actions->size = old_size;
+
+ /* Undo changes that may have been done for recirculation. */
+ if (exit_recirculates(ctx)) {
+ ctx->action_set.size = ctx->recirc_action_offset;
+ ctx->recirc_action_offset = -1;
+ ctx->last_unroll_offset = -1;
+ }
}
}
ofpbuf_uninit(&ctx->stack);
ctx->stack = old_stack;
+ /* Restore calling bridge's lookup version. */
+ ctx->tables_version = old_version;
+
/* The peer bridge popping MPLS should have no effect on the original
* bridge. */
ctx->was_mpls = old_was_mpls;
}
if (out_port != ODPP_NONE) {
+ bool use_masked = ctx->xbridge->support.masked_set_action;
+
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
ctx->xout->odp_actions,
- wc,
- ctx->xbridge->masked_set_action);
+ wc, use_masked);
if (xr) {
struct ovs_action_hash *act_hash;
xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
bool may_packet_in, bool honor_table_miss)
{
+ /* Check if we need to recirculate before matching in a table. */
+ if (ctx->was_mpls) {
+ ctx_trigger_recirculation(ctx);
+ return;
+ }
if (xlate_resubmit_resource_check(ctx)) {
struct flow_wildcards *wc;
uint8_t old_table_id = ctx->table_id;
wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
+ ctx->tables_version,
&ctx->xin->flow, wc,
ctx->xin->xcache != NULL,
ctx->xin->resubmit_stats,
ofpbuf_uninit(&action_set);
ofpbuf_uninit(&action_list);
+ /* Check if we need to recirculate. */
+ if (exit_recirculates(ctx)) {
+ compose_recirculate_action(ctx);
+ }
+
/* Roll back flow to previous state.
* This is equivalent to cloning the packet for each bucket.
*
uint32_t basis;
basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
+ flow_mask_hash_fields(&ctx->xin->flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
- memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
- memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
- memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
- memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
- memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
- memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
-
xlate_group_bucket(ctx, bucket);
xlate_group_stats(ctx, group, bucket);
}
{
struct ofproto_packet_in *pin;
struct dp_packet *packet;
+ bool use_masked;
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
packet = dp_packet_clone(ctx->xin->packet);
+ use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ &ctx->xout->wc, use_masked);
odp_execute_actions(NULL, &packet, 1, false,
ctx->xout->odp_actions->data,
pin->up.table_id = ctx->table_id;
pin->up.cookie = ctx->rule_cookie;
- flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);
+ flow_get_metadata(&ctx->xin->flow, &pin->up.flow_metadata);
pin->controller_id = controller_id;
pin->send_len = len;
dp_packet_delete(packet);
}
+/* Called only when ctx->recirc_action_offset is set. */
static void
-compose_recirculate_action(struct xlate_ctx *ctx,
- const struct ofpact *ofpacts_base,
- const struct ofpact *ofpact_current,
- size_t ofpacts_base_len)
+compose_recirculate_action(struct xlate_ctx *ctx)
{
+ struct recirc_metadata md;
+ bool use_masked;
uint32_t id;
- int error;
- unsigned ofpacts_len;
- struct match match;
- struct rule *rule;
- struct ofpbuf ofpacts;
- ctx->exit = true;
+ use_masked = ctx->xbridge->support.masked_set_action;
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ ctx->xout->odp_actions,
+ &ctx->xout->wc, use_masked);
- ofpacts_len = ofpacts_base_len -
- ((uint8_t *)ofpact_current - (uint8_t *)ofpacts_base);
+ recirc_metadata_from_flow(&md, &ctx->xin->flow);
- if (ctx->rule) {
- id = rule_dpif_get_recirc_id(ctx->rule);
- } else {
- /* In the case where ctx has no rule then allocate a recirc id.
- * The life-cycle of this recirc id is managed by associating it
- * with the internal rule that is created to to handle
- * recirculation below.
- *
- * The known use-case of this is packet_out which
- * translates actions without a rule */
- id = ofproto_dpif_alloc_recirc_id(ctx->xbridge->ofproto);
- }
- if (!id) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
- return;
- }
+ ovs_assert(ctx->recirc_action_offset >= 0);
- match_init_catchall(&match);
- match_set_recirc_id(&match, id);
- ofpbuf_use_const(&ofpacts, ofpact_current, ofpacts_len);
- error = ofproto_dpif_add_internal_flow(ctx->xbridge->ofproto, &match,
- RECIRC_RULE_PRIORITY,
- RECIRC_TIMEOUT, &ofpacts, &rule);
- if (error) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_ERR_RL(&rl, "Failed to add post recirculation flow %s",
- match_to_string(&match, 0));
- if (!ctx->rule) {
- ofproto_dpif_free_recirc_id(ctx->xbridge->ofproto, id);
+ /* Only allocate a recirculation ID if we have a packet. */
+ if (ctx->xin->packet) {
+ /* Allocate a unique recirc id for the given metadata state in the
+ * flow. The life-cycle of this recirc id is managed by associating it
+ * with the udpif key ('ukey') created for each new datapath flow. */
+ id = recirc_alloc_id_ctx(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
+ ctx->recirc_action_offset,
+ ctx->action_set.size, ctx->action_set.data);
+ if (!id) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
+ return;
}
- return;
- }
- /* If ctx has no rule then associate the recirc id, which
- * was allocated above, with the internal rule. This allows
- * the recirc id to be released when the internal rule times out. */
- if (!ctx->rule) {
- rule_set_recirc_id(rule, id);
+ xlate_out_add_recirc(ctx->xout, id);
+ } else {
+ /* Look up an existing recirc id for the given metadata state in the
+ * flow. No new reference is taken, as the ID is RCU protected and is
+ * only required temporarily for verification. */
+ id = recirc_find_id(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
+ ctx->recirc_action_offset,
+ ctx->action_set.size, ctx->action_set.data);
+ /* We allow the zero 'id' to be used in the RECIRC action below; it will
+ * fail all revalidations, as zero is not a valid recirculation ID. */
}
- ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
+
+ /* Undo changes done by recirculation. */
+ ctx->action_set.size = ctx->recirc_action_offset;
+ ctx->recirc_action_offset = -1;
+ ctx->last_unroll_offset = -1;
}
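Design note: the two branches above match the two modes in which translation
runs. An actual flow setup (packet present) takes a new reference on the ID
and records it in 'xout', from where the caller associates it with the
datapath flow's ukey; a revalidation (no packet) only needs an ID to
regenerate and compare actions, so no new reference is taken and a zero ID
simply fails the revalidation.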
static void
n = flow_count_mpls_labels(flow, wc);
if (!n) {
+ bool use_masked = ctx->xbridge->support.masked_set_action;
+
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ &ctx->xout->wc, use_masked);
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
int n = flow_count_mpls_labels(flow, wc);
if (flow_pop_mpls(flow, n, eth_type, wc)) {
- if (ctx->xbridge->enable_recirc) {
+ if (ctx->xbridge->support.recirc) {
ctx->was_mpls = true;
}
} else if (n >= FLOW_MAX_MPLS_LABELS) {
/* Scale the probability from 16-bit to 32-bit while representing
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
+ bool use_masked;
- if (!ctx->xbridge->variable_length_userdata) {
+ if (!ctx->xbridge->support.variable_length_userdata) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
return;
}
+ use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ &ctx->xout->wc, use_masked);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
ofpbuf_uninit(&action_list);
}
-static bool
-ofpact_needs_recirculation_after_mpls(const struct ofpact *a, struct xlate_ctx *ctx)
+static void
+recirc_put_unroll_xlate(struct xlate_ctx *ctx)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
- struct flow *flow = &ctx->xin->flow;
+ struct ofpact_unroll_xlate *unroll;
- if (!ctx->was_mpls) {
- return false;
+ unroll = ctx->last_unroll_offset < 0
+ ? NULL
+ : ALIGNED_CAST(struct ofpact_unroll_xlate *,
+ (char *)ctx->action_set.data + ctx->last_unroll_offset);
+
+ /* Restore the table_id and rule cookie for a potential PACKET IN if
+ * needed. */
+ if (!unroll ||
+ (ctx->table_id != unroll->rule_table_id
+ || ctx->rule_cookie != unroll->rule_cookie)) {
+
+ ctx->last_unroll_offset = ctx->action_set.size;
+ unroll = ofpact_put_UNROLL_XLATE(&ctx->action_set);
+ unroll->rule_table_id = ctx->table_id;
+ unroll->rule_cookie = ctx->rule_cookie;
}
+}
- switch (a->type) {
- case OFPACT_OUTPUT:
- case OFPACT_GROUP:
- case OFPACT_CONTROLLER:
- case OFPACT_STRIP_VLAN:
- case OFPACT_SET_VLAN_PCP:
- case OFPACT_SET_VLAN_VID:
- case OFPACT_ENQUEUE:
- case OFPACT_PUSH_VLAN:
- case OFPACT_SET_ETH_SRC:
- case OFPACT_SET_ETH_DST:
- case OFPACT_SET_TUNNEL:
- case OFPACT_SET_QUEUE:
- case OFPACT_POP_QUEUE:
- case OFPACT_CONJUNCTION:
- case OFPACT_NOTE:
- case OFPACT_OUTPUT_REG:
- case OFPACT_EXIT:
- case OFPACT_METER:
- case OFPACT_WRITE_METADATA:
- case OFPACT_WRITE_ACTIONS:
- case OFPACT_CLEAR_ACTIONS:
- case OFPACT_SAMPLE:
- return false;
- case OFPACT_POP_MPLS:
- case OFPACT_DEC_MPLS_TTL:
- case OFPACT_SET_MPLS_TTL:
- case OFPACT_SET_MPLS_TC:
- case OFPACT_SET_MPLS_LABEL:
- case OFPACT_SET_IPV4_SRC:
- case OFPACT_SET_IPV4_DST:
- case OFPACT_SET_IP_DSCP:
- case OFPACT_SET_IP_ECN:
- case OFPACT_SET_IP_TTL:
- case OFPACT_SET_L4_SRC_PORT:
- case OFPACT_SET_L4_DST_PORT:
- case OFPACT_RESUBMIT:
- case OFPACT_STACK_PUSH:
- case OFPACT_STACK_POP:
- case OFPACT_DEC_TTL:
- case OFPACT_MULTIPATH:
- case OFPACT_BUNDLE:
- case OFPACT_LEARN:
- case OFPACT_FIN_TIMEOUT:
- case OFPACT_GOTO_TABLE:
- return true;
+/* Copies the remaining actions to the action_set, to be executed after
+ * recirculation. An UNROLL_XLATE action is inserted first, if not already
+ * present, before any action that may generate a PACKET_IN from the current
+ * table without matching another rule. */
+static void
+recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+ struct xlate_ctx *ctx)
+{
+ const struct ofpact *a;
- case OFPACT_REG_MOVE:
- return (mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->dst.field) ||
- mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->src.field));
+ OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
+ switch (a->type) {
+ /* May generate PACKET INs. */
+ case OFPACT_OUTPUT_REG:
+ case OFPACT_GROUP:
+ case OFPACT_OUTPUT:
+ case OFPACT_CONTROLLER:
+ case OFPACT_DEC_MPLS_TTL:
+ case OFPACT_DEC_TTL:
+ recirc_put_unroll_xlate(ctx);
+ break;
- case OFPACT_SET_FIELD:
- return mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field);
+ /* These do not generate PACKET INs. */
+ case OFPACT_SET_TUNNEL:
+ case OFPACT_REG_MOVE:
+ case OFPACT_SET_FIELD:
+ case OFPACT_STACK_PUSH:
+ case OFPACT_STACK_POP:
+ case OFPACT_LEARN:
+ case OFPACT_WRITE_METADATA:
+ case OFPACT_RESUBMIT: /* May indirectly generate PACKET INs, */
+ case OFPACT_GOTO_TABLE: /* but from a different table and rule. */
+ case OFPACT_ENQUEUE:
+ case OFPACT_SET_VLAN_VID:
+ case OFPACT_SET_VLAN_PCP:
+ case OFPACT_STRIP_VLAN:
+ case OFPACT_PUSH_VLAN:
+ case OFPACT_SET_ETH_SRC:
+ case OFPACT_SET_ETH_DST:
+ case OFPACT_SET_IPV4_SRC:
+ case OFPACT_SET_IPV4_DST:
+ case OFPACT_SET_IP_DSCP:
+ case OFPACT_SET_IP_ECN:
+ case OFPACT_SET_IP_TTL:
+ case OFPACT_SET_L4_SRC_PORT:
+ case OFPACT_SET_L4_DST_PORT:
+ case OFPACT_SET_QUEUE:
+ case OFPACT_POP_QUEUE:
+ case OFPACT_PUSH_MPLS:
+ case OFPACT_POP_MPLS:
+ case OFPACT_SET_MPLS_LABEL:
+ case OFPACT_SET_MPLS_TC:
+ case OFPACT_SET_MPLS_TTL:
+ case OFPACT_MULTIPATH:
+ case OFPACT_BUNDLE:
+ case OFPACT_EXIT:
+ case OFPACT_UNROLL_XLATE:
+ case OFPACT_FIN_TIMEOUT:
+ case OFPACT_CLEAR_ACTIONS:
+ case OFPACT_WRITE_ACTIONS:
+ case OFPACT_METER:
+ case OFPACT_SAMPLE:
+ break;
- case OFPACT_PUSH_MPLS:
- /* Recirculate if it is an IP packet with a zero ttl. This may
- * indicate that the packet was previously MPLS and an MPLS pop action
- * converted it to IP. In this case recirculating should reveal the IP
- * TTL which is used as the basis for a new MPLS LSE. */
- return (!flow_count_mpls_labels(flow, wc)
- && flow->nw_ttl == 0
- && is_ip_any(flow));
+ /* These need not be copied for restoration. */
+ case OFPACT_NOTE:
+ case OFPACT_CONJUNCTION:
+ continue;
+ }
+ /* Copy the action over. */
+ ofpbuf_put(&ctx->action_set, a, OFPACT_ALIGN(a->len));
}
-
- OVS_NOT_REACHED();
}
+#define CHECK_MPLS_RECIRCULATION() \
+ if (ctx->was_mpls) { \
+ ctx_trigger_recirculation(ctx); \
+ break; \
+ }
+#define CHECK_MPLS_RECIRCULATION_IF(COND) \
+ if (COND) { \
+ CHECK_MPLS_RECIRCULATION(); \
+ }
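+/* Note that both macros expand to a 'break' statement, so they are valid
+ * only directly inside the "switch (a->type)" in do_xlate_actions() below;
+ * a triggered recirculation is then handled by the
+ * ctx_first_recirculation_action() check after that switch. */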
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx)
const struct mf_field *mf;
if (ctx->exit) {
+ /* Check if we need to store the remaining actions for later
+ * execution. */
+ if (exit_recirculates(ctx)) {
+ recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
+ ((uint8_t *)a -
+ (uint8_t *)ofpacts)),
+ ctx);
+ }
break;
}
- if (ofpact_needs_recirculation_after_mpls(a, ctx)) {
- compose_recirculate_action(ctx, ofpacts, a, ofpacts_len);
- return;
- }
-
switch (a->type) {
case OFPACT_OUTPUT:
xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
case OFPACT_GROUP:
if (xlate_group_action(ctx, ofpact_get_GROUP(a)->group_id)) {
+ /* Group could not be found. */
return;
}
break;
break;
case OFPACT_SET_IPV4_SRC:
+ CHECK_MPLS_RECIRCULATION();
if (flow->dl_type == htons(ETH_TYPE_IP)) {
memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
flow->nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
break;
case OFPACT_SET_IPV4_DST:
+ CHECK_MPLS_RECIRCULATION();
if (flow->dl_type == htons(ETH_TYPE_IP)) {
memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
flow->nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
break;
case OFPACT_SET_IP_DSCP:
+ CHECK_MPLS_RECIRCULATION();
if (is_ip_any(flow)) {
wc->masks.nw_tos |= IP_DSCP_MASK;
flow->nw_tos &= ~IP_DSCP_MASK;
break;
case OFPACT_SET_IP_ECN:
+ CHECK_MPLS_RECIRCULATION();
if (is_ip_any(flow)) {
wc->masks.nw_tos |= IP_ECN_MASK;
flow->nw_tos &= ~IP_ECN_MASK;
break;
case OFPACT_SET_IP_TTL:
+ CHECK_MPLS_RECIRCULATION();
if (is_ip_any(flow)) {
wc->masks.nw_ttl = 0xff;
flow->nw_ttl = ofpact_get_SET_IP_TTL(a)->ttl;
break;
case OFPACT_SET_L4_SRC_PORT:
+ CHECK_MPLS_RECIRCULATION();
if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
break;
case OFPACT_SET_L4_DST_PORT:
+ CHECK_MPLS_RECIRCULATION();
if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
break;
case OFPACT_REG_MOVE:
+ CHECK_MPLS_RECIRCULATION_IF(
+ mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->dst.field) ||
+ mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->src.field));
nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
break;
case OFPACT_SET_FIELD:
+ CHECK_MPLS_RECIRCULATION_IF(
+ mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field));
set_field = ofpact_get_SET_FIELD(a);
mf = set_field->field;
break;
case OFPACT_STACK_PUSH:
+ CHECK_MPLS_RECIRCULATION_IF(
+ mf_is_l3_or_higher(ofpact_get_STACK_PUSH(a)->subfield.field));
nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), flow, wc,
&ctx->stack);
break;
case OFPACT_STACK_POP:
+ CHECK_MPLS_RECIRCULATION_IF(
+ mf_is_l3_or_higher(ofpact_get_STACK_POP(a)->subfield.field));
nxm_execute_stack_pop(ofpact_get_STACK_POP(a), flow, wc,
&ctx->stack);
break;
case OFPACT_PUSH_MPLS:
+ /* Recirculate if it is an IP packet with a zero ttl. This may
+ * indicate that the packet was previously MPLS and an MPLS pop
+ * action converted it to IP. In this case recirculating should
+ * reveal the IP TTL which is used as the basis for a new MPLS
+ * LSE. */
+ CHECK_MPLS_RECIRCULATION_IF(
+ !flow_count_mpls_labels(flow, wc)
+ && flow->nw_ttl == 0
+ && is_ip_any(flow));
compose_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a));
break;
case OFPACT_POP_MPLS:
+ CHECK_MPLS_RECIRCULATION();
compose_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
break;
case OFPACT_SET_MPLS_LABEL:
+ CHECK_MPLS_RECIRCULATION();
compose_set_mpls_label_action(
ctx, ofpact_get_SET_MPLS_LABEL(a)->label);
- break;
+ break;
case OFPACT_SET_MPLS_TC:
+ CHECK_MPLS_RECIRCULATION();
compose_set_mpls_tc_action(ctx, ofpact_get_SET_MPLS_TC(a)->tc);
break;
case OFPACT_SET_MPLS_TTL:
+ CHECK_MPLS_RECIRCULATION();
compose_set_mpls_ttl_action(ctx, ofpact_get_SET_MPLS_TTL(a)->ttl);
break;
case OFPACT_DEC_MPLS_TTL:
+ CHECK_MPLS_RECIRCULATION();
if (compose_dec_mpls_ttl_action(ctx)) {
return;
}
break;
case OFPACT_DEC_TTL:
+ CHECK_MPLS_RECIRCULATION();
wc->masks.nw_ttl = 0xff;
if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
return;
break;
case OFPACT_MULTIPATH:
+ CHECK_MPLS_RECIRCULATION();
multipath_execute(ofpact_get_MULTIPATH(a), flow, wc);
break;
case OFPACT_BUNDLE:
+ CHECK_MPLS_RECIRCULATION();
xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
break;
break;
case OFPACT_LEARN:
+ CHECK_MPLS_RECIRCULATION();
xlate_learn_action(ctx, ofpact_get_LEARN(a));
break;
ctx->exit = true;
break;
+ case OFPACT_UNROLL_XLATE: {
+ struct ofpact_unroll_xlate *unroll = ofpact_get_UNROLL_XLATE(a);
+
+ /* Restore translation context data that was stored earlier. */
+ ctx->table_id = unroll->rule_table_id;
+ ctx->rule_cookie = unroll->rule_cookie;
+ break;
+ }
case OFPACT_FIN_TIMEOUT:
+ CHECK_MPLS_RECIRCULATION();
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
ctx->xout->has_fin_timeout = true;
xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
break;
}
+
+ /* Check if we need to store this and the remaining actions for later
+ * execution. */
+ if (ctx->exit && ctx_first_recirculation_action(ctx)) {
+ recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
+ ((uint8_t *)a -
+ (uint8_t *)ofpacts)),
+ ctx);
+ break;
+ }
}
}
xin->resubmit_stats = NULL;
xin->skip_wildcards = false;
xin->odp_actions = NULL;
+
+ /* Do recirc lookup. */
+ xin->recirc = flow->recirc_id
+ ? recirc_id_node_find(flow->recirc_id)
+ : NULL;
}
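A hedged sketch (not part of this patch) of the state 'xin->recirc' points
to; field names and types are inferred from their usages in xlate_actions()
below, and the actual definition may differ:

    struct recirc_id_node {
        struct ofproto_dpif *ofproto;    /* Bridge that triggered recirc. */
        uint8_t table_id;                /* Table to resume processing in. */
        struct recirc_metadata metadata; /* Saved pipeline metadata. */
        struct ofpbuf *stack;            /* Saved NXAST stack, or NULL. */
        uint32_t action_set_len;         /* Bytes of action set in
                                          * 'ofpacts'. */
        uint32_t ofpacts_len;            /* Total bytes in 'ofpacts'. */
        struct ofpact *ofpacts;          /* Action set + remaining actions. */
    };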
void
xlate_out_uninit(struct xlate_out *xout)
{
- if (xout && xout->odp_actions == &xout->odp_actions_buf) {
- ofpbuf_uninit(xout->odp_actions);
+ if (xout) {
+ if (xout->odp_actions == &xout->odp_actions_buf) {
+ ofpbuf_uninit(xout->odp_actions);
+ }
+ xlate_out_free_recircs(xout);
}
}
#endif
}
-/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
- * into datapath actions in 'odp_actions', using 'ctx'.
- *
+/* Translates the flow, actions, or rule in 'xin' into datapath actions in
+ * 'xout'.
* The caller must take responsibility for eventually freeing 'xout', with
* xlate_out_uninit(). */
void
enum slow_path_reason special;
const struct ofpact *ofpacts;
+ struct xbridge *xbridge;
struct xport *in_port;
struct flow orig_flow;
struct xlate_ctx ctx;
ctx.xout->has_fin_timeout = false;
ctx.xout->nf_output_iface = NF_OUT_DROP;
ctx.xout->mirrors = 0;
+ ctx.xout->n_recircs = 0;
xout->odp_actions = xin->odp_actions;
if (!xout->odp_actions) {
}
ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);
- ctx.xbridge = xbridge_lookup(xcfg, xin->ofproto);
- if (!ctx.xbridge) {
+ xbridge = xbridge_lookup(xcfg, xin->ofproto);
+ if (!xbridge) {
return;
}
+ /* 'ctx.xbridge' may be changed by action processing, whereas 'xbridge'
+ * will remain set on the original input bridge. */
+ ctx.xbridge = xbridge;
ctx.rule = xin->rule;
ctx.base_flow = *flow;
if (is_ip_any(flow)) {
wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
}
- if (ctx.xbridge->enable_recirc) {
+ if (xbridge->support.recirc) {
/* Always exactly match recirc_id when datapath supports
* recirculation. */
wc->masks.recirc_id = UINT32_MAX;
}
- if (ctx.xbridge->netflow) {
+ if (xbridge->netflow) {
netflow_mask_wc(flow, wc);
}
}
ctx.rule_cookie = OVS_BE64_MAX;
ctx.exit = false;
ctx.was_mpls = false;
+ ctx.recirc_action_offset = -1;
+ ctx.last_unroll_offset = -1;
+
+ ctx.action_set_has_group = false;
+ ofpbuf_use_stub(&ctx.action_set,
+ ctx.action_set_stub, sizeof ctx.action_set_stub);
+
+ ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+
+ /* The in_port of the original packet before recirculation. */
+ in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
+
+ if (xin->recirc) {
+ const struct recirc_id_node *recirc = xin->recirc;
+
+ if (xin->ofpacts_len > 0 || ctx.rule) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_WARN_RL(&rl, "Recirculation conflict (%s)!",
+ xin->ofpacts_len > 0
+ ? "actions"
+ : "rule");
+ return;
+ }
+
+ /* Set the bridge for post-recirculation processing if needed. */
+ if (ctx.xbridge->ofproto != recirc->ofproto) {
+ struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
+ const struct xbridge *new_bridge = xbridge_lookup(xcfg,
+ recirc->ofproto);
+
+ if (OVS_UNLIKELY(!new_bridge)) {
+ /* Drop the packet if the bridge cannot be found. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
+ return;
+ }
+ ctx.xbridge = new_bridge;
+ }
+
+ /* Set the post-recirculation table id. Note: A table lookup is done
+ * only if there are no post-recirculation actions. */
+ ctx.table_id = recirc->table_id;
+
+ /* Restore pipeline metadata. May change flow's in_port and other
+ * metadata to the values that existed when recirculation was
+ * triggered. */
+ recirc_metadata_to_flow(&recirc->metadata, flow);
+
+ /* Restore stack, if any. */
+ if (recirc->stack) {
+ ofpbuf_put(&ctx.stack, recirc->stack->data, recirc->stack->size);
+ }
+
+ /* Restore action set, if any. */
+ if (recirc->action_set_len) {
+ const struct ofpact *a;
+
+ ofpbuf_put(&ctx.action_set, recirc->ofpacts,
+ recirc->action_set_len);
+
+ OFPACT_FOR_EACH (a, recirc->ofpacts, recirc->action_set_len) {
+ if (a->type == OFPACT_GROUP) {
+ ctx.action_set_has_group = true;
+ break;
+ }
+ }
+ }
+
+ /* Restore recirculation actions. If there are no actions, processing
+ * will start with a lookup in the table set above. */
+ if (recirc->ofpacts_len > recirc->action_set_len) {
+ xin->ofpacts_len = recirc->ofpacts_len - recirc->action_set_len;
+ xin->ofpacts = recirc->ofpacts +
+ recirc->action_set_len / sizeof *recirc->ofpacts;
+ }
+ } else if (OVS_UNLIKELY(flow->recirc_id)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
+ flow->recirc_id);
+ return;
+ }
+ /* The bridge is now known, so obtain its table version. */
+ ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
if (!xin->ofpacts && !ctx.rule) {
- rule = rule_dpif_lookup(ctx.xbridge->ofproto, flow, wc,
- ctx.xin->xcache != NULL,
- ctx.xin->resubmit_stats, &ctx.table_id);
+ rule = rule_dpif_lookup_from_table(ctx.xbridge->ofproto,
+ ctx.tables_version, flow, wc,
+ ctx.xin->xcache != NULL,
+ ctx.xin->resubmit_stats,
+ &ctx.table_id,
+ flow->in_port.ofp_port, true, true);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
OVS_NOT_REACHED();
}
- ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
-
- ctx.action_set_has_group = false;
- ofpbuf_use_stub(&ctx.action_set,
- ctx.action_set_stub, sizeof ctx.action_set_stub);
-
- if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+ if (mbridge_has_mirrors(xbridge->mbridge)) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
orig_flow = *flow;
}
- in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
- if (in_port && in_port->is_tunnel) {
+ /* Tunnel stats only for non-recirculated packets. */
+ if (!xin->recirc && in_port && in_port->is_tunnel) {
if (ctx.xin->resubmit_stats) {
netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
if (in_port->bfd) {
}
}
- special = process_special(&ctx, flow, in_port, ctx.xin->packet);
- if (special) {
+ /* Do not perform special processing on recirculated packets,
+ * as recirculated packets are not really received by the bridge. */
+ if (!xin->recirc &&
+ (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
ctx.xout->slow |= special;
} else {
size_t sample_actions_len;
if (flow->in_port.ofp_port
- != vsp_realdev_to_vlandev(ctx.xbridge->ofproto,
+ != vsp_realdev_to_vlandev(xbridge->ofproto,
flow->in_port.ofp_port,
flow->vlan_tci)) {
ctx.base_flow.vlan_tci = 0;
}
- add_sflow_action(&ctx);
- add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions->size;
+ /* Sampling is done only for packets really received by the bridge. */
+ if (!xin->recirc) {
+ add_sflow_action(&ctx);
+ add_ipfix_action(&ctx);
+ sample_actions_len = ctx.xout->odp_actions->size;
+ } else {
+ sample_actions_len = 0;
+ }
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
!xport_rstp_forward_state(in_port))) {
/* Drop all actions added by do_xlate_actions() above. */
ctx.xout->odp_actions->size = sample_actions_len;
+
+ /* Undo changes that may have been done for recirculation. */
+ if (exit_recirculates(&ctx)) {
+ ctx.action_set.size = ctx.recirc_action_offset;
+ ctx.recirc_action_offset = -1;
+ ctx.last_unroll_offset = -1;
+ }
} else if (ctx.action_set.size) {
- /* Translate action set only if not dropping the packet. */
- xlate_action_set(&ctx);
+ /* Translate action set only if not dropping the packet and
+ * not recirculating. */
+ if (!exit_recirculates(&ctx)) {
+ xlate_action_set(&ctx);
+ }
+ }
+ /* Check if we need to recirculate. */
+ if (exit_recirculates(&ctx)) {
+ compose_recirculate_action(&ctx);
}
}
- if (ctx.xbridge->has_in_band
+ /* Output only fully processed packets. */
+ if (!exit_recirculates(&ctx)
+ && xbridge->has_in_band
&& in_band_must_output_to_local_port(flow)
&& !actions_output_to_local_port(&ctx)) {
compose_output_action(&ctx, OFPP_LOCAL, NULL);
}
- fix_sflow_action(&ctx);
-
- if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+ if (!xin->recirc) {
+ fix_sflow_action(&ctx);
+ }
+ /* Only mirror fully processed packets. */
+ if (!exit_recirculates(&ctx)
+ && mbridge_has_mirrors(xbridge->mbridge)) {
add_mirror_actions(&ctx, &orig_flow);
}
}
ctx.xout->slow |= SLOW_ACTION;
}
- if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+ /* Update mirror stats only for packets really received by the bridge. */
+ if (!xin->recirc && mbridge_has_mirrors(xbridge->mbridge)) {
if (ctx.xin->resubmit_stats) {
- mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+ mirror_update_stats(xbridge->mbridge, xout->mirrors,
ctx.xin->resubmit_stats->n_packets,
ctx.xin->resubmit_stats->n_bytes);
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
- entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+ entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
entry->u.mirror.mirrors = xout->mirrors;
}
}
- if (ctx.xbridge->netflow) {
+ /* Do netflow only for packets really received by the bridge. */
+ if (!xin->recirc && xbridge->netflow) {
/* Only update netflow if we don't have controller flow. We don't
* report NetFlow expiration messages for such facets because they
* are just part of the control logic for the network, not real
|| ofpacts->type != OFPACT_CONTROLLER
|| ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
if (ctx.xin->resubmit_stats) {
- netflow_flow_update(ctx.xbridge->netflow, flow,
+ netflow_flow_update(xbridge->netflow, flow,
xout->nf_output_iface,
ctx.xin->resubmit_stats);
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
- entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+ entry->u.nf.netflow = netflow_ref(xbridge->netflow);
entry->u.nf.flow = xmemdup(flow, sizeof *flow);
entry->u.nf.iface = xout->nf_output_iface;
}
wc->masks.tp_src &= htons(UINT8_MAX);
wc->masks.tp_dst &= htons(UINT8_MAX);
}
+ /* VLAN_TCI CFI bit must be matched if any of the TCI bits are matched. */
+ if (wc->masks.vlan_tci) {
+ wc->masks.vlan_tci |= htons(VLAN_CFI);
+ }
}
}