static struct xbridge *xbridge_lookup(struct xlate_cfg *,
const struct ofproto_dpif *);
+static struct xbridge *xbridge_lookup_by_uuid(struct xlate_cfg *,
+ const struct uuid *);
static struct xbundle *xbundle_lookup(struct xlate_cfg *,
const struct ofbundle *);
static struct xport *xport_lookup(struct xlate_cfg *,
return NULL;
}
+/* Looks up the xbridge in 'xcfg' whose underlying ofproto has a UUID equal
+ * to 'uuid'.  Returns the matching xbridge, or NULL if none is found.
+ *
+ * Unlike xbridge_lookup(), which keys on the ofproto pointer, this performs
+ * a linear scan over all bridges comparing UUIDs.  This allows recirculation
+ * state to reference a bridge by UUID (see 'struct recirc_state'), which
+ * stays valid even if the ofproto is destroyed and its pointer reused. */
+static struct xbridge *
+xbridge_lookup_by_uuid(struct xlate_cfg *xcfg, const struct uuid *uuid)
+{
+ struct xbridge *xbridge;
+
+ HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
+ if (uuid_equals(ofproto_dpif_get_uuid(xbridge->ofproto), uuid)) {
+ return xbridge;
+ }
+ }
+ return NULL;
+}
+
static struct xbundle *
xbundle_lookup(struct xlate_cfg *xcfg, const struct ofbundle *ofbundle)
{
struct recirc_state state = {
.table_id = table,
- .ofproto = ctx->xbridge->ofproto,
+ .ofproto_uuid = *ofproto_dpif_get_uuid(ctx->xbridge->ofproto),
.metadata = md,
.stack = ctx->stack.data,
.n_stack = ctx->stack.size / sizeof(union mf_subvalue),
/* Copy remaining actions to the action_set to be executed after recirculation.
* UNROLL_XLATE action is inserted, if not already done so, before actions that
- * may generate asynchronous messages from the current table and without
- * matching another rule. */
+ * may depend on the current table ID or flow cookie. */
static void
recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx)
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
switch (a->type) {
- /* May generate asynchronous messages. */
case OFPACT_OUTPUT_REG:
case OFPACT_GROUP:
case OFPACT_OUTPUT:
case OFPACT_CONTROLLER:
case OFPACT_DEC_MPLS_TTL:
case OFPACT_DEC_TTL:
+ /* These actions may generate asynchronous messages, which include
+ * table ID and flow cookie information. */
recirc_put_unroll_xlate(ctx);
break;
- /* These may not generate PACKET INs. */
+ case OFPACT_RESUBMIT:
+ if (ofpact_get_RESUBMIT(a)->table_id == 0xff) {
+ /* This resubmit action is relative to the current table, so we
+ * need to track what table that is. */
+ recirc_put_unroll_xlate(ctx);
+ }
+ break;
+
case OFPACT_SET_TUNNEL:
case OFPACT_REG_MOVE:
case OFPACT_SET_FIELD:
case OFPACT_STACK_POP:
case OFPACT_LEARN:
case OFPACT_WRITE_METADATA:
- case OFPACT_RESUBMIT: /* May indirectly generate PACKET INs, */
- case OFPACT_GOTO_TABLE: /* but from a different table and rule. */
+ case OFPACT_GOTO_TABLE:
case OFPACT_ENQUEUE:
case OFPACT_SET_VLAN_VID:
case OFPACT_SET_VLAN_PCP:
case OFPACT_DEBUG_RECIRC:
case OFPACT_CT:
case OFPACT_NAT:
+ /* These may not generate PACKET INs. */
break;
- /* These need not be copied for restoration. */
case OFPACT_NOTE:
case OFPACT_CONJUNCTION:
+ /* These need not be copied for restoration. */
continue;
}
/* Copy the action over. */
break;
case OFPACT_RESUBMIT:
+ /* Recirculation complicates resubmit. There are two cases:
+ *
+ * - If mpls_pop has been executed, then the flow table lookup
+ * as part of resubmit might depend on fields that can only
+ * be obtained via recirculation, so the resubmit itself
+ * triggers recirculation and we need to make sure that the
+ * resubmit is executed again after recirculation.
+ * Therefore, in this case we trigger recirculation and let
+ * the code following this "switch" append the resubmit to
+ * the post-recirculation actions.
+ *
+ * - Otherwise, some action in the flow entry found by resubmit
+ * might trigger recirculation. If that happens, then we do
+ * not want to execute the resubmit again after
+ * recirculation, so we want to skip back to the head of the
+ * loop to avoid that, only adding any actions that follow
+ * the resubmit to the post-recirculation actions.
+ */
+ if (ctx->was_mpls) {
+ ctx_trigger_recirculation(ctx);
+ break;
+ }
xlate_ofpact_resubmit(ctx, ofpact_get_RESUBMIT(a));
- break;
+ continue;
case OFPACT_SET_TUNNEL:
flow->tunnel.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
case OFPACT_GOTO_TABLE: {
struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
- /* Allow ctx->table_id == TBL_INTERNAL, which will be greater
- * than ogt->table_id. This is to allow goto_table actions that
- * triggered recirculation: ctx->table_id will be TBL_INTERNAL
- * after recirculation. */
- ovs_assert(ctx->table_id == TBL_INTERNAL
- || ctx->table_id < ogt->table_id);
+ ovs_assert(ctx->table_id < ogt->table_id);
+
xlate_table_action(ctx, ctx->xin->flow.in_port.ofp_port,
ogt->table_id, true, true);
break;
xin->odp_actions = odp_actions;
/* Do recirc lookup. */
- xin->recirc = flow->recirc_id
- ? recirc_id_node_find(flow->recirc_id)
- : NULL;
+ xin->recirc = NULL;
+ if (flow->recirc_id) {
+ const struct recirc_id_node *node
+ = recirc_id_node_find(flow->recirc_id);
+ if (node) {
+ xin->recirc = &node->state;
+ }
+ }
}
void
COVERAGE_INC(xlate_actions);
if (xin->recirc) {
- const struct recirc_state *state = &xin->recirc->state;
+ const struct recirc_state *state = xin->recirc;
xlate_report(&ctx, "Restoring state post-recirculation:");
}
/* Set the bridge for post-recirculation processing if needed. */
- if (ctx.xbridge->ofproto != state->ofproto) {
+ if (!uuid_equals(ofproto_dpif_get_uuid(ctx.xbridge->ofproto),
+ &state->ofproto_uuid)) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
const struct xbridge *new_bridge
- = xbridge_lookup(xcfg, state->ofproto);
+ = xbridge_lookup_by_uuid(xcfg, &state->ofproto_uuid);
if (OVS_UNLIKELY(!new_bridge)) {
/* Drop the packet if the bridge cannot be found. */