* state from the datapath should be honored after recirculation. */
bool conntracked;
+ /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
+ struct ofpact_nat *ct_nat_action;
+
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
.ofpacts = ctx->action_set.data,
};
- /* Only allocate recirculation ID if we have a packet. */
- if (ctx->xin->packet) {
- /* Allocate a unique recirc id for the given metadata state in the
- * flow. The life-cycle of this recirc id is managed by associating it
- * with the udpif key ('ukey') created for each new datapath flow. */
- id = recirc_alloc_id_ctx(&state);
- if (!id) {
- XLATE_REPORT_ERROR(ctx, "Failed to allocate recirculation id");
- ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
- return;
- }
- xlate_out_add_recirc(ctx->xout, id);
- } else {
- /* Look up an existing recirc id for the given metadata state in the
- * flow. No new reference is taken, as the ID is RCU protected and is
- * only required temporarily for verification.
- * If flow tables have changed sufficiently this can fail and we will
- * delete the old datapath flow. */
- id = recirc_find_id(&state);
- if (!id) {
- ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
- return;
- }
+ /* Allocate a unique recirc id for the given metadata state in the
+ * flow. An existing id, with a new reference to the corresponding
+ * recirculation context, will be returned if possible.
+ * The life-cycle of this recirc id is managed by associating it
+ * with the udpif key ('ukey') created for each new datapath flow. */
+ id = recirc_alloc_id_ctx(&state);
+ if (!id) {
+ XLATE_REPORT_ERROR(ctx, "Failed to allocate recirculation id");
+ ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
+ return;
}
+ recirc_refs_add(&ctx->xout->recircs, id);
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
case OFPACT_SAMPLE:
case OFPACT_DEBUG_RECIRC:
case OFPACT_CT:
+ case OFPACT_NAT:
break;
/* These need not be copied for restoration. */
}
}
+/* Translates the embedded NAT action recorded in 'ctx->ct_nat_action'
+ * (set while translating the nested actions of a ct() action) into a
+ * nested OVS_CT_ATTR_NAT netlink attribute on 'ctx->odp_actions'.
+ * No-op when no NAT action was seen. */
+static void
+put_ct_nat(struct xlate_ctx *ctx)
+{
+    struct ofpact_nat *ofn = ctx->ct_nat_action;
+    size_t nat_offset;
+
+    if (!ofn) {
+        return;
+    }
+
+    nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
+    if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
+        nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
+                        ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
+        if (ofn->flags & NX_NAT_F_PERSISTENT) {
+            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
+        }
+        /* HASH and RANDOM are mutually exclusive; HASH wins if both set. */
+        if (ofn->flags & NX_NAT_F_PROTO_HASH) {
+            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
+        } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
+            nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
+        }
+        if (ofn->range_af == AF_INET) {
+            /* The IPv4 range bounds are ovs_be32 (network byte order), so
+             * emit them with the be32 accessor and compare them in host
+             * byte order: a raw be32 '>' on a little-endian host would,
+             * e.g., order 10.0.1.0 before 10.0.0.255 and wrongly drop the
+             * upper bound. */
+            nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
+                            ofn->range.addr.ipv4.min);
+            if (ofn->range.addr.ipv4.max &&
+                ntohl(ofn->range.addr.ipv4.max)
+                > ntohl(ofn->range.addr.ipv4.min)) {
+                nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
+                                ofn->range.addr.ipv4.max);
+            }
+        } else if (ofn->range_af == AF_INET6) {
+            /* IPv6 addresses are stored most-significant byte first, so
+             * memcmp() yields the correct numeric ordering. */
+            nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
+                              &ofn->range.addr.ipv6.min,
+                              sizeof ofn->range.addr.ipv6.min);
+            if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
+                memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
+                       sizeof ofn->range.addr.ipv6.max) > 0) {
+                nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
+                                  &ofn->range.addr.ipv6.max,
+                                  sizeof ofn->range.addr.ipv6.max);
+            }
+        }
+        /* The L4 port range bounds are plain uint16_t in host byte order,
+         * so direct comparison and nl_msg_put_u16() are correct here. */
+        if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
+            nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
+                           ofn->range.proto.min);
+            if (ofn->range.proto.max &&
+                ofn->range.proto.max > ofn->range.proto.min) {
+                nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
+                               ofn->range.proto.max);
+            }
+        }
+    }
+    nl_msg_end_nested(ctx->odp_actions, nat_offset);
+}
+
static void
compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc)
{
xlate_commit_actions(ctx);
/* Process nested actions first, to populate the key. */
+ ctx->ct_nat_action = NULL;
do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx);
if (ofc->zone_src.field) {
put_ct_mark(&ctx->xin->flow, &ctx->base_flow, ctx->odp_actions, ctx->wc);
put_ct_label(&ctx->xin->flow, &ctx->base_flow, ctx->odp_actions, ctx->wc);
put_ct_helper(ctx->odp_actions, ofc);
+ put_ct_nat(ctx);
+ ctx->ct_nat_action = NULL;
nl_msg_end_nested(ctx->odp_actions, ct_offset);
/* Restore the original ct fields in the key. These should only be exposed
compose_conntrack_action(ctx, ofpact_get_CT(a));
break;
+ case OFPACT_NAT:
+ /* This will be processed by compose_conntrack_action(). */
+ ctx->ct_nat_action = ofpact_get_NAT(a);
+ break;
+
case OFPACT_DEBUG_RECIRC:
ctx_trigger_recirculation(ctx);
a = ofpact_next(a);
xlate_out_uninit(struct xlate_out *xout)
{
if (xout) {
- xlate_out_free_recircs(xout);
+ recirc_refs_unref(&xout->recircs);
}
}
*xout = (struct xlate_out) {
.slow = 0,
.fail_open = false,
- .n_recircs = 0,
+ .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
};
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
.was_mpls = false,
.conntracked = false,
+ .ct_nat_action = NULL,
+
.action_set_has_group = false,
.action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
};