/* Translates the "learn" action 'learn' against the current flow, filling in
 * flow_mod 'fm' and the action buffer 'ofpacts', then, if learning is enabled
 * for this translation, submits the flow_mod to the bridge's ofproto.
 *
 * Note that learn_execute() runs unconditionally so that 'fm' and 'ofpacts'
 * are always populated: the xcache path in xlate_learn_action() stores them
 * in a cache entry even when 'may_learn' is false (presumably for later
 * replay when the cache is used -- confirm against the xcache code). */
static void
xlate_learn_action__(struct xlate_ctx *ctx, const struct ofpact_learn *learn,
                     struct ofputil_flow_mod *fm, struct ofpbuf *ofpacts)
{
    learn_execute(learn, &ctx->xin->flow, fm, ofpacts);
    if (ctx->xin->may_learn) {
        ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
    }
}
+
/* Translates OFPACT_LEARN 'learn' in context 'ctx'.
 *
 * Always unwildcards the fields the learn action reads (learn_mask()) so the
 * translated flow's wildcards stay correct.  When a translation cache is in
 * use, heap-allocates the flow_mod and ofpacts buffer and stores them in a
 * new XC_LEARN cache entry; ownership of both allocations passes to the
 * cache (presumably freed when the entry is destroyed -- verify in the
 * xlate_cache code).  Otherwise, if learning is enabled, translates through
 * stack-allocated temporaries that are released before returning. */
static void
xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
{
    learn_mask(learn, ctx->wc);

    if (ctx->xin->xcache) {
        struct xc_entry *entry;

        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
        entry->u.learn.ofproto = ctx->xbridge->ofproto;
        entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
        /* 64 bytes is only an initial size; the ofpbuf grows on demand. */
        entry->u.learn.ofpacts = ofpbuf_new(64);
        xlate_learn_action__(ctx, learn, entry->u.learn.fm,
                             entry->u.learn.ofpacts);
    } else if (ctx->xin->may_learn) {
        uint64_t ofpacts_stub[1024 / 8];
        struct ofputil_flow_mod fm;
        struct ofpbuf ofpacts;

        ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
        xlate_learn_action__(ctx, learn, &fm, &ofpacts);
        ofpbuf_uninit(&ofpacts);
    }
}
+
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+ uint16_t idle_timeout, uint16_t hard_timeout)
+{
+ if (tcp_flags & (TCP_FIN | TCP_RST)) {
+ rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+ }
+}
+
+static void
+xlate_fin_timeout(struct xlate_ctx *ctx,
+ const struct ofpact_fin_timeout *oft)
+{
+ if (ctx->rule) {
+ xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+ oft->fin_idle_timeout, oft->fin_hard_timeout);
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+ /* XC_RULE already holds a reference on the rule, none is taken
+ * here. */
+ entry->u.fin.rule = ctx->rule;
+ entry->u.fin.idle = oft->fin_idle_timeout;
+ entry->u.fin.hard = oft->fin_hard_timeout;
+ }
+ }
+}
+
+static void
+xlate_sample_action(struct xlate_ctx *ctx,
+ const struct ofpact_sample *os)
+{
+ /* Scale the probability from 16-bit to 32-bit while representing
+ * the same percentage. */
+ uint32_t probability = (os->probability << 16) | os->probability;
+
+ if (!ctx->xbridge->support.variable_length_userdata) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+
+ VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
+ "lacks support (needs Linux 3.10+ or kernel module from "
+ "OVS 1.11+)");
+ return;
+ }
+
+ xlate_commit_actions(ctx);
+
+ union user_action_cookie cookie = {
+ .flow_sample = {
+ .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
+ .probability = os->probability,
+ .collector_set_id = os->collector_set_id,
+ .obs_domain_id = os->obs_domain_id,
+ .obs_point_id = os->obs_point_id,
+ }
+ };
+ compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
+ ODPP_NONE, false);
+}
+
+static bool
+may_receive(const struct xport *xport, struct xlate_ctx *ctx)
+{
+ if (xport->config & (is_stp(&ctx->xin->flow)
+ ? OFPUTIL_PC_NO_RECV_STP
+ : OFPUTIL_PC_NO_RECV)) {
+ return false;
+ }
+
+ /* Only drop packets here if both forwarding and learning are
+ * disabled. If just learning is enabled, we need to have
+ * OFPP_NORMAL and the learning action have a look at the packet
+ * before we can drop it. */
+ if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
+ (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
+ return false;
+ }
+
+ return true;
+}
+
/* Translates OFPACT_WRITE_ACTIONS 'a': appends the nested actions to the
 * context's deferred action set, keeping the flow's 'actset_output' field
 * consistent with the set's contents. */
static void
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
{
    const struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
    size_t on_len = ofpact_nest_get_action_len(on);
    const struct ofpact *inner;

    /* Maintain actset_output depending on the contents of the action set:
     *
     *   - OFPP_UNSET, if there is no "output" action.
     *
     *   - The output port, if there is an "output" action and no "group"
     *     action.
     *
     *   - OFPP_UNSET, if there is a "group" action.
     */
    if (!ctx->action_set_has_group) {
        OFPACT_FOR_EACH (inner, on->actions, on_len) {
            if (inner->type == OFPACT_OUTPUT) {
                ctx->xin->flow.actset_output = ofpact_get_OUTPUT(inner)->port;
            } else if (inner->type == OFPACT_GROUP) {
                /* Once a group action is seen, actset_output stays UNSET
                 * for the rest of the translation (the flag is sticky), so
                 * there is no need to scan further. */
                ctx->xin->flow.actset_output = OFPP_UNSET;
                ctx->action_set_has_group = true;
                break;
            }
        }
    }

    ofpbuf_put(&ctx->action_set, on->actions, on_len);
    /* Keep the action set aligned for subsequent appends. */
    ofpact_pad(&ctx->action_set);
}
+
+static void
+xlate_action_set(struct xlate_ctx *ctx)
+{
+ uint64_t action_list_stub[1024 / 64];
+ struct ofpbuf action_list;
+
+ ctx->in_action_set = true;
+ ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
+ ofpacts_execute_action_set(&action_list, &ctx->action_set);
+ /* Clear the action set, as it is not needed any more. */
+ ofpbuf_clear(&ctx->action_set);
+ do_xlate_actions(action_list.data, action_list.size, ctx);
+ ctx->in_action_set = false;
+ ofpbuf_uninit(&action_list);
+}
+
/* Ensures that ctx->action_set contains an UNROLL_XLATE action recording the
 * current table id and rule cookie, appending a new one only if the most
 * recently emitted UNROLL_XLATE (located via ctx->last_unroll_offset) does
 * not already carry the same values. */
static void
recirc_put_unroll_xlate(struct xlate_ctx *ctx)
{
    struct ofpact_unroll_xlate *unroll;

    /* last_unroll_offset < 0 means no UNROLL_XLATE has been emitted yet;
     * otherwise it is the byte offset of the last one in the action set. */
    unroll = ctx->last_unroll_offset < 0
        ? NULL
        : ALIGNED_CAST(struct ofpact_unroll_xlate *,
                       (char *)ctx->action_set.data + ctx->last_unroll_offset);

    /* Restore the table_id and rule cookie for a potential PACKET
     * IN if needed. */
    if (!unroll ||
        (ctx->table_id != unroll->rule_table_id
         || ctx->rule_cookie != unroll->rule_cookie)) {

        ctx->last_unroll_offset = ctx->action_set.size;
        unroll = ofpact_put_UNROLL_XLATE(&ctx->action_set);
        unroll->rule_table_id = ctx->table_id;
        unroll->rule_cookie = ctx->rule_cookie;
    }
}
+
+
/* Copy remaining actions to the action_set to be executed after recirculation.
 * UNROLL_XLATE action is inserted, if not already done so, before actions that
 * may generate PACKET_INs from the current table and without matching another
 * rule. */
static void
recirc_unroll_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                      struct xlate_ctx *ctx)
{
    const struct ofpact *a;

    /* NOTE: the switch deliberately has no "default" case so that, when a
     * new OFPACT_* type is added, the compiler's exhaustiveness warning
     * forces a decision about its PACKET_IN behavior here. */
    OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
        switch (a->type) {
            /* May generate PACKET INs. */
        case OFPACT_OUTPUT_REG:
        case OFPACT_GROUP:
        case OFPACT_OUTPUT:
        case OFPACT_CONTROLLER:
        case OFPACT_DEC_MPLS_TTL:
        case OFPACT_DEC_TTL:
            recirc_put_unroll_xlate(ctx);
            break;

            /* These may not generate PACKET INs. */
        case OFPACT_SET_TUNNEL:
        case OFPACT_REG_MOVE:
        case OFPACT_SET_FIELD:
        case OFPACT_STACK_PUSH:
        case OFPACT_STACK_POP:
        case OFPACT_LEARN:
        case OFPACT_WRITE_METADATA:
        case OFPACT_RESUBMIT:        /* May indirectly generate PACKET INs, */
        case OFPACT_GOTO_TABLE:      /* but from a different table and rule. */
        case OFPACT_ENQUEUE:
        case OFPACT_SET_VLAN_VID:
        case OFPACT_SET_VLAN_PCP:
        case OFPACT_STRIP_VLAN:
        case OFPACT_PUSH_VLAN:
        case OFPACT_SET_ETH_SRC:
        case OFPACT_SET_ETH_DST:
        case OFPACT_SET_IPV4_SRC:
        case OFPACT_SET_IPV4_DST:
        case OFPACT_SET_IP_DSCP:
        case OFPACT_SET_IP_ECN:
        case OFPACT_SET_IP_TTL:
        case OFPACT_SET_L4_SRC_PORT:
        case OFPACT_SET_L4_DST_PORT:
        case OFPACT_SET_QUEUE:
        case OFPACT_POP_QUEUE:
        case OFPACT_PUSH_MPLS:
        case OFPACT_POP_MPLS:
        case OFPACT_SET_MPLS_LABEL:
        case OFPACT_SET_MPLS_TC:
        case OFPACT_SET_MPLS_TTL:
        case OFPACT_MULTIPATH:
        case OFPACT_BUNDLE:
        case OFPACT_EXIT:
        case OFPACT_UNROLL_XLATE:
        case OFPACT_FIN_TIMEOUT:
        case OFPACT_CLEAR_ACTIONS:
        case OFPACT_WRITE_ACTIONS:
        case OFPACT_METER:
        case OFPACT_SAMPLE:
        case OFPACT_DEBUG_RECIRC:
        case OFPACT_CT:
            break;

            /* These need not be copied for restoration. */
        case OFPACT_NOTE:
        case OFPACT_CONJUNCTION:
            continue;    /* Skip the ofpbuf_put() below entirely. */
        }
        /* Copy the action over. */
        ofpbuf_put(&ctx->action_set, a, OFPACT_ALIGN(a->len));
    }
}
+
/* If the flow has been through MPLS processing in this translation
 * (ctx->was_mpls), triggers recirculation and exits the enclosing switch
 * case.  Deliberately NOT wrapped in do { } while (0): the "break" must
 * apply to the caller's enclosing switch/loop, not to a wrapper. */
#define CHECK_MPLS_RECIRCULATION()      \
    if (ctx->was_mpls) {                \
        ctx_trigger_recirculation(ctx); \
        break;                          \
    }
+#define CHECK_MPLS_RECIRCULATION_IF(COND) \
+ if (COND) { \
+ CHECK_MPLS_RECIRCULATION(); \