return ctx->recirc_action_offset >= 0;
}
+/* Cancels a pending recirculation: discards the actions that were queued on
+ * 'ctx->action_set' for execution after recirculation (by truncating the
+ * action set back to 'ctx->recirc_action_offset') and resets the
+ * recirculation bookkeeping offsets.  No-op if recirculation is not
+ * pending. */
+static void
+ctx_cancel_recirculation(struct xlate_ctx *ctx)
+{
+ if (exit_recirculates(ctx)) {
+ ctx->action_set.size = ctx->recirc_action_offset;
+ ctx->recirc_action_offset = -1;
+ ctx->last_unroll_offset = -1;
+ }
+}
+
static void compose_recirculate_action(struct xlate_ctx *ctx);
/* A controller may use OFPP_NONE as the ingress port to indicate that
} u;
};
-#define XC_ENTRY_FOR_EACH(entry, entries, xcache) \
- entries = xcache->entries; \
- for (entry = ofpbuf_try_pull(&entries, sizeof *entry); \
- entry; \
- entry = ofpbuf_try_pull(&entries, sizeof *entry))
+/* Initializes 'ENTRIES' from 'XCACHE->entries' and then iterates 'ENTRY'
+ * over each fixed-size entry pulled from it, stopping when no full entry
+ * remains.  Macro parameters are upper-case to distinguish them from
+ * ordinary identifiers in the expansion. */
+#define XC_ENTRY_FOR_EACH(ENTRY, ENTRIES, XCACHE) \
+ ENTRIES = XCACHE->entries; \
+ for (ENTRY = ofpbuf_try_pull(&ENTRIES, sizeof *ENTRY); \
+ ENTRY; \
+ ENTRY = ofpbuf_try_pull(&ENTRIES, sizeof *ENTRY))
struct xlate_cache {
struct ofpbuf entries;
static struct xbridge *xbridge_lookup(struct xlate_cfg *,
const struct ofproto_dpif *);
+static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
+ const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
return NULL;
}
+/* Searches 'xcfg' for an xbridge whose underlying ofproto has UUID 'uuid'.
+ * Returns that xbridge, or NULL if none matches.  This is a linear scan over
+ * all bridges in 'xcfg' (unlike xbridge_lookup(), which presumably uses the
+ * hash — TODO confirm), so callers on hot paths should prefer looking up by
+ * ofproto pointer when one is available. */
+static struct xbridge *
+xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
+{
+ struct xbridge *xbridge;
+
+ HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
+ if (uuid_equals(ofproto_dpif_get_uuid(xbridge->ofproto), uuid)) {
+ return xbridge;
+ }
+ }
+ return NULL;
+}
+
static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
ctx->odp_actions->size = old_size;
/* Undo changes that may have been done for recirculation. */
- if (exit_recirculates(ctx)) {
- ctx->action_set.size = ctx->recirc_action_offset;
- ctx->recirc_action_offset = -1;
- ctx->last_unroll_offset = -1;
- }
+ ctx_cancel_recirculation(ctx);
}
}
struct recirc_state state = {
.table_id = table,
- .ofproto = ctx->xbridge->ofproto,
+ .ofproto_uuid = *ofproto_dpif_get_uuid(ctx->xbridge->ofproto),
.metadata = md,
.stack = ctx->stack.data,
.n_stack = ctx->stack.size / sizeof(union mf_subvalue),
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
/* Undo changes done by recirculation. */
- ctx->action_set.size = ctx->recirc_action_offset;
- ctx->recirc_action_offset = -1;
- ctx->last_unroll_offset = -1;
+ ctx_cancel_recirculation(ctx);
}
/* Called only when ctx->recirc_action_offset is set. */
}
-/* Copy remaining actions to the action_set to be executed after recirculation.
- * UNROLL_XLATE action is inserted, if not already done so, before actions that
- * may generate asynchronous messages from the current table and without
- * matching another rule. */
+/* Copy actions 'a' through 'end' to the action_set to be executed after
+ * recirculation. UNROLL_XLATE action is inserted, if not already done so,
+ * before actions that may depend on the current table ID or flow cookie. */
static void
-recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
+recirc_unroll_actions(const struct ofpact *a, const struct ofpact *end,
struct xlate_ctx *ctx)
{
- const struct ofpact *a;
-
- OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
+ for (; a < end; a = ofpact_next(a)) {
switch (a->type) {
- /* May generate asynchronous messages. */
case OFPACT_OUTPUT_REG:
case OFPACT_GROUP:
case OFPACT_OUTPUT:
case OFPACT_CONTROLLER:
case OFPACT_DEC_MPLS_TTL:
case OFPACT_DEC_TTL:
+ /* These actions may generate asynchronous messages, which include
+ * table ID and flow cookie information. */
recirc_put_unroll_xlate(ctx);
break;
- /* These may not generate PACKET INs. */
+ case OFPACT_RESUBMIT:
+ if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
+ /* This resubmit action is relative to the current table, so we
+ * need to track what table that is. */
+ recirc_put_unroll_xlate(ctx);
+ }
+ break;
+
case OFPACT_SET_TUNNEL:
case OFPACT_REG_MOVE:
case OFPACT_SET_FIELD:
case OFPACT_STACK_POP:
case OFPACT_LEARN:
case OFPACT_WRITE_METADATA:
- case OFPACT_RESUBMIT: /* May indirectly generate PACKET INs, */
- case OFPACT_GOTO_TABLE: /* but from a different table and rule. */
+ case OFPACT_GOTO_TABLE:
case OFPACT_ENQUEUE:
case OFPACT_SET_VLAN_VID:
case OFPACT_SET_VLAN_PCP:
case OFPACT_DEBUG_RECIRC:
case OFPACT_CT:
case OFPACT_NAT:
+ /* These may not generate PACKET INs. */
break;
- /* These need not be copied for restoration. */
case OFPACT_NOTE:
case OFPACT_CONJUNCTION:
+ /* These need not be copied for restoration. */
continue;
}
/* Copy the action over. */
/* Check if need to store the remaining actions for later
* execution. */
if (exit_recirculates(ctx)) {
- recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
- ((uint8_t *)a -
- (uint8_t *)ofpacts)),
+ recirc_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len),
ctx);
}
break;
break;
case OFPACT_RESUBMIT:
+ /* Recirculation complicates resubmit. There are two cases:
+ *
+ * - If mpls_pop has been executed, then the flow table lookup
+ * as part of resubmit might depend on fields that can only
+ * be obtained via recirculation, so the resubmit itself
+ * triggers recirculation and we need to make sure that the
+ * resubmit is executed again after recirculation.
+ * Therefore, in this case we trigger recirculation and let
+ * the code following this "switch" append the resubmit to
+ * the post-recirculation actions.
+ *
+ * - Otherwise, some action in the flow entry found by resubmit
+ * might trigger recirculation. If that happens, then we do
+ * not want to execute the resubmit again after
+ * recirculation, so we want to skip back to the head of the
+ * loop to avoid that, only adding any actions that follow
+ * the resubmit to the post-recirculation actions.
+ */
+ if (ctx->was_mpls) {
+ ctx_trigger_recirculation(ctx);
+ break;
+ }
xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
- break;
+ continue;
case OFPACT_SET_TUNNEL:
flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
case OFPACT_GOTO_TABLE: {
struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
- /* Allow ctx->table_id == TBL_INTERNAL, which will be greater
- * than ogt->table_id. This is to allow goto_table actions that
- * triggered recirculation: ctx->table_id will be TBL_INTERNAL
- * after recirculation. */
- ovs_assert(ctx->table_id == TBL_INTERNAL
- || ctx->table_id < ogt->table_id);
+ ovs_assert(ctx->table_id < ogt->table_id);
+
xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
ogt->table_id, true, true);
break;
/* Check if need to store this and the remaining actions for later
* execution. */
if (!ctx->error && ctx->exit && ctx_first_recirculation_action(ctx)) {
- recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
- ((uint8_t *)a -
- (uint8_t *)ofpacts)),
- ctx);
+ recirc_unroll_actions(a, ofpact_end(ofpacts, ofpacts_len), ctx);
break;
}
}
xin->odp_actions = odp_actions;
/* Do recirc lookup. */
- xin->recirc = flow->recirc_id
- ? recirc_id_node_find(flow->recirc_id)
- : NULL;
+ xin->recirc = NULL;
+ if (flow->recirc_id) {
+ const struct recirc_id_node *node
+ = recirc_id_node_find(flow->recirc_id);
+ if (node) {
+ xin->recirc = &node->state;
+ }
+ }
}
void
COVERAGE_INC(xlate_actions);
if (xin->recirc) {
- const struct recirc_state *state = &xin->recirc->state;
+ const struct recirc_state *state = xin->recirc;
xlate_report(&ctx, "Restoring state post-recirculation:");
}
/* Set the bridge for post-recirculation processing if needed. */
- if (ctx.xbridge->ofproto != state->ofproto) {
+ if (!uuid_equals(ofproto_dpif_get_uuid(ctx.xbridge->ofproto),
+ &state->ofproto_uuid)) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
const struct xbridge *new_bridge
- = xbridge_lookup(xcfg, state->ofproto);
+ = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
if (OVS_UNLIKELY(!new_bridge)) {
/* Drop the packet if the bridge cannot be found. */
ctx.odp_actions->size = sample_actions_len;
/* Undo changes that may have been done for recirculation. */
- if (exit_recirculates(&ctx)) {
- ctx.action_set.size = ctx.recirc_action_offset;
- ctx.recirc_action_offset = -1;
- ctx.last_unroll_offset = -1;
- }
+ ctx_cancel_recirculation(&ctx);
} else if (ctx.action_set.size) {
/* Translate action set only if not dropping the packet and
* not recirculating. */