ofproto-dpif-xlate: Eliminate 'is_icmp' from xlate_actions().
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index aa48662..3430a57 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -175,12 +175,25 @@ struct xlate_ctx {
 
     /* Stack for the push and pop actions.  Each stack element is of type
      * "union mf_subvalue". */
-    union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
     struct ofpbuf stack;
 
     /* The rule that we are currently translating, or NULL. */
     struct rule_dpif *rule;
 
+    /* Flow translation populates this with wildcards relevant in translation.
+     * When 'xin->wc' is nonnull, this is the same pointer.  When 'xin->wc' is
+     * null, this is a pointer to uninitialized scratch memory.  This allows
+     * code to blindly write to 'ctx->wc' without worrying about whether the
+     * caller really wants wildcards. */
+    struct flow_wildcards *wc;
+
+    /* Output buffer for datapath actions.  When 'xin->odp_actions' is nonnull,
+     * this is the same pointer.  When 'xin->odp_actions' is null, this points
+     * to a scratch ofpbuf.  This allows code to add actions to
+     * 'ctx->odp_actions' without worrying about whether the caller really
+     * wants actions. */
+    struct ofpbuf *odp_actions;
+
     /* Resubmit statistics, via xlate_table_action(). */
     int recurse;                /* Current resubmit nesting depth. */
     int resubmits;              /* Total number of resubmits. */
@@ -294,7 +307,6 @@ struct xlate_ctx {
      * datapath actions. */
     bool action_set_has_group;  /* Action set contains OFPACT_GROUP? */
     struct ofpbuf action_set;   /* Action set. */
-    uint64_t action_set_stub[1024 / 8];
 };
 
 static void xlate_action_set(struct xlate_ctx *ctx);
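
The two new members implement a blind-write pattern: translation code always
dereferences ctx->wc and ctx->odp_actions, and setup decides whether those
pointers alias the caller's storage or throwaway scratch space.  A condensed
sketch of the pattern, simplified from the xlate_actions() changes later in
this patch:

    struct flow_wildcards scratch_wc;
    uint64_t actions_stub[256 / 8];
    struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);

    ctx.wc = xin->wc ? xin->wc : &scratch_wc;
    ctx.odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions;

    /* Later code may now write unconditionally, whether or not the caller
     * asked for wildcards or datapath actions: */
    ctx.wc->masks.nw_ttl = 0xff;
    ofpbuf_clear(ctx.odp_actions);
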
@@ -1543,7 +1555,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
                          "%s, which is reserved exclusively for mirroring",
                          ctx->xbridge->name, in_xbundle->name);
         }
-        ofpbuf_clear(ctx->xout->odp_actions);
+        ofpbuf_clear(ctx->odp_actions);
         return;
     }
 
@@ -1564,7 +1576,7 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
     while (mirrors) {
         mirror_mask_t dup_mirrors;
         struct ofbundle *out;
-        unsigned long *vlans;
+        const unsigned long *vlans;
         bool vlan_mirrored;
         bool has_mirror;
         int out_vlan;
@@ -1574,10 +1586,9 @@ add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
         ovs_assert(has_mirror);
 
         if (vlans) {
-            ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
+            ctx->wc->masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
         }
         vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
-        free(vlans);
 
         if (!vlan_mirrored) {
             mirrors = zero_rightmost_1bit(mirrors);
@@ -1732,10 +1743,10 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
                              bundle_node);
     } else {
         struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
-        struct flow_wildcards *wc = &ctx->xout->wc;
+        struct flow_wildcards *wc = ctx->wc;
         struct ofport_dpif *ofport;
 
-        if (ctx->xbridge->support.recirc) {
+        if (ctx->xbridge->support.odp.recirc) {
             use_recirc = bond_may_recirc(
                 out_xbundle->bond, &xr.recirc_id, &xr.hash_basis);
 
@@ -1866,7 +1877,7 @@ is_admissible(struct xlate_ctx *ctx, struct xport *in_port,
             mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
             if (mac
                 && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
-                && (!is_gratuitous_arp(flow, &ctx->xout->wc)
+                && (!is_gratuitous_arp(flow, ctx->wc)
                     || mac_entry_is_grat_arp_locked(mac))) {
                 ovs_rwlock_unlock(&xbridge->ml->rwlock);
                 xlate_report(ctx, "SLB bond thinks this packet looped back, "
@@ -1998,16 +2009,16 @@ update_learning_table(const struct xbridge *xbridge,
 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
  * was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
 static void
-update_mcast_snooping_table__(const struct xbridge *xbridge,
-                              const struct flow *flow,
-                              struct mcast_snooping *ms,
-                              ovs_be32 ip4, int vlan,
-                              struct xbundle *in_xbundle,
-                              const struct dp_packet *packet)
+update_mcast_snooping_table4__(const struct xbridge *xbridge,
+                               const struct flow *flow,
+                               struct mcast_snooping *ms, int vlan,
+                               struct xbundle *in_xbundle,
+                               const struct dp_packet *packet)
     OVS_REQ_WRLOCK(ms->rwlock)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
     int count;
+    ovs_be32 ip4 = flow->igmp_group_ip4;
 
     switch (ntohs(flow->tp_src)) {
     case IGMP_HOST_MEMBERSHIP_REPORT:
@@ -2045,6 +2056,39 @@ update_mcast_snooping_table__(const struct xbridge *xbridge,
     }
 }
 
+static void
+update_mcast_snooping_table6__(const struct xbridge *xbridge,
+                               const struct flow *flow,
+                               struct mcast_snooping *ms, int vlan,
+                               struct xbundle *in_xbundle,
+                               const struct dp_packet *packet)
+    OVS_REQ_WRLOCK(ms->rwlock)
+{
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
+    int count;
+
+    switch (ntohs(flow->tp_src)) {
+    case MLD_QUERY:
+        if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
+            && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
+            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping query on port %s"
+                        " in VLAN %d",
+                        xbridge->name, in_xbundle->name, vlan);
+        }
+        break;
+    case MLD_REPORT:
+    case MLD_DONE:
+    case MLD2_REPORT:
+        count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
+        if (count) {
+            VLOG_DBG_RL(&rl, "bridge %s: multicast snooping processed %d "
+                        "addresses on port %s in VLAN %d",
+                        xbridge->name, count, in_xbundle->name, vlan);
+        }
+        break;
+    }
+}
+
 /* Updates multicast snooping table 'ms' given that a packet matching 'flow'
  * was received on 'in_xbundle' in 'vlan'. */
 static void
@@ -2075,8 +2119,13 @@ update_mcast_snooping_table(const struct xbridge *xbridge,
     }
 
     if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
-        update_mcast_snooping_table__(xbridge, flow, ms, flow->igmp_group_ip4,
-                                      vlan, in_xbundle, packet);
+        if (flow->dl_type == htons(ETH_TYPE_IP)) {
+            update_mcast_snooping_table4__(xbridge, flow, ms, vlan,
+                                           in_xbundle, packet);
+        } else {
+            update_mcast_snooping_table6__(xbridge, flow, ms, vlan,
+                                           in_xbundle, packet);
+        }
     }
     ovs_rwlock_unlock(&ms->rwlock);
 }
@@ -2202,7 +2251,7 @@ xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
 static void
 xlate_normal(struct xlate_ctx *ctx)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
+    struct flow_wildcards *wc = ctx->wc;
     struct flow *flow = &ctx->xin->flow;
     struct xbundle *in_xbundle;
     struct xport *in_port;
@@ -2280,11 +2329,11 @@ xlate_normal(struct xlate_ctx *ctx)
     if (mcast_snooping_enabled(ctx->xbridge->ms)
         && !eth_addr_is_broadcast(flow->dl_dst)
         && eth_addr_is_multicast(flow->dl_dst)
-        && flow->dl_type == htons(ETH_TYPE_IP)) {
+        && is_ip_any(flow)) {
         struct mcast_snooping *ms = ctx->xbridge->ms;
-        struct mcast_group *grp;
+        struct mcast_group *grp = NULL;
 
-        if (flow->nw_proto == IPPROTO_IGMP) {
+        if (is_igmp(flow)) {
             if (mcast_snooping_is_membership(flow->tp_src) ||
                 mcast_snooping_is_query(flow->tp_src)) {
                 if (ctx->xin->may_learn) {
@@ -2317,8 +2366,26 @@ xlate_normal(struct xlate_ctx *ctx)
                 xlate_normal_flood(ctx, in_xbundle, vlan);
             }
             return;
+        } else if (is_mld(flow)) {
+            ctx->xout->slow |= SLOW_ACTION;
+            if (ctx->xin->may_learn) {
+                update_mcast_snooping_table(ctx->xbridge, flow, vlan,
+                                            in_xbundle, ctx->xin->packet);
+            }
+            if (is_mld_report(flow)) {
+                ovs_rwlock_rdlock(&ms->rwlock);
+                xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
+                xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
+                ovs_rwlock_unlock(&ms->rwlock);
+            } else {
+                xlate_report(ctx, "MLD query, flooding");
+                xlate_normal_flood(ctx, in_xbundle, vlan);
+            }
         } else {
-            if (ip_is_local_multicast(flow->nw_dst)) {
+            if ((flow->dl_type == htons(ETH_TYPE_IP)
+                 && ip_is_local_multicast(flow->nw_dst))
+                || (flow->dl_type == htons(ETH_TYPE_IPV6)
+                    && ipv6_is_all_hosts(&flow->ipv6_dst))) {
                 /* RFC4541: section 2.1.2, item 2: Packets with a dst IP
                  * address in the 224.0.0.x range which are not IGMP must
                  * be forwarded on all ports */
@@ -2330,7 +2397,11 @@ xlate_normal(struct xlate_ctx *ctx)
 
         /* forwarding to group base ports */
         ovs_rwlock_rdlock(&ms->rwlock);
-        grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
+        if (flow->dl_type == htons(ETH_TYPE_IP)) {
+            grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
+        } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+            grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
+        }
         if (grp) {
             xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
             xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
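
The is_igmp(), is_mld(), and is_mld_report() predicates used above come from
lib/packets.h rather than this patch.  A plausible shape for the first two,
shown for context only (the real header may differ in detail):

    static inline bool
    is_igmp(const struct flow *flow)
    {
        return flow->dl_type == htons(ETH_TYPE_IP)
            && flow->nw_proto == IPPROTO_IGMP;
    }

    static inline bool
    is_mld(const struct flow *flow)
    {
        /* MLD rides on ICMPv6; the ICMPv6 type is stored in tp_src. */
        return is_icmpv6(flow)
            && (flow->tp_src == htons(MLD_QUERY)
                || flow->tp_src == htons(MLD_REPORT)
                || flow->tp_src == htons(MLD_DONE)
                || flow->tp_src == htons(MLD2_REPORT));
    }
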
@@ -2380,7 +2451,8 @@ compose_sample_action(const struct xbridge *xbridge,
                       const uint32_t probability,
                       const union user_action_cookie *cookie,
                       const size_t cookie_size,
-                      const odp_port_t tunnel_out_port)
+                      const odp_port_t tunnel_out_port,
+                      bool include_actions)
 {
     size_t sample_offset, actions_offset;
     odp_port_t odp_port;
@@ -2397,7 +2469,9 @@ compose_sample_action(const struct xbridge *xbridge,
     pid = dpif_port_get_pid(xbridge->dpif, odp_port,
                             flow_hash_5tuple(flow, 0));
     cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
-                                             tunnel_out_port, odp_actions);
+                                             tunnel_out_port,
+                                             include_actions,
+                                             odp_actions);
 
     nl_msg_end_nested(odp_actions, actions_offset);
     nl_msg_end_nested(odp_actions, sample_offset);
@@ -2455,7 +2529,8 @@ compose_sflow_action(const struct xbridge *xbridge,
                          odp_port == ODPP_NONE ? 0 : 1, &cookie);
 
     return compose_sample_action(xbridge, odp_actions, flow,  probability,
-                                 &cookie, sizeof cookie.sflow, ODPP_NONE);
+                                 &cookie, sizeof cookie.sflow, ODPP_NONE,
+                                 true);
 }
 
 static void
@@ -2518,7 +2593,8 @@ compose_ipfix_action(const struct xbridge *xbridge,
     compose_ipfix_cookie(&cookie, output_odp_port);
 
     compose_sample_action(xbridge, odp_actions, flow,  probability,
-                          &cookie, sizeof cookie.ipfix, tunnel_out_port);
+                          &cookie, sizeof cookie.ipfix, tunnel_out_port,
+                          false);
 }
 
 /* SAMPLE action for sFlow must be first action in any given list of
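
The new 'include_actions' argument is threaded through to
odp_put_userspace_action().  Presumably it controls whether the generated
userspace action carries the OVS_USERSPACE_ATTR_ACTIONS attribute; the patch
itself only shows the flag being passed down, so the following is a
hypothetical sketch of that final step, not the actual lib/odp-util.c code:

    size_t offset = nl_msg_start_nested(odp_actions,
                                        OVS_ACTION_ATTR_USERSPACE);
    nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
    /* ... cookie and optional tunnel-port attributes ... */
    if (include_actions) {
        /* Ask the datapath to attach the executed action list to the
         * upcall; the sFlow caller passes true, the IPFIX and flow-sample
         * callers pass false. */
        nl_msg_put_flag(odp_actions, OVS_USERSPACE_ATTR_ACTIONS);
    }
    nl_msg_end_nested(odp_actions, offset);
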
@@ -2528,7 +2604,7 @@ static void
 add_sflow_action(struct xlate_ctx *ctx)
 {
     ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
-                                                   ctx->xout->odp_actions,
+                                                   ctx->odp_actions,
                                                    &ctx->xin->flow, ODPP_NONE);
     ctx->sflow_odp_port = 0;
     ctx->sflow_n_outputs = 0;
@@ -2539,14 +2615,14 @@ add_sflow_action(struct xlate_ctx *ctx)
 static void
 add_ipfix_action(struct xlate_ctx *ctx)
 {
-    compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+    compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
                          &ctx->xin->flow, ODPP_NONE);
 }
 
 static void
 add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
 {
-    compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+    compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
                          &ctx->xin->flow, port);
 }
 
@@ -2563,7 +2639,7 @@ fix_sflow_action(struct xlate_ctx *ctx)
         return;
     }
 
-    cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset,
+    cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
                        sizeof cookie->sflow);
     ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
 
@@ -2571,20 +2647,22 @@ fix_sflow_action(struct xlate_ctx *ctx)
                          ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
 }
 
-static enum slow_path_reason
-process_special(struct xlate_ctx *ctx, const struct flow *flow,
-                const struct xport *xport, const struct dp_packet *packet)
+static bool
+process_special(struct xlate_ctx *ctx, const struct xport *xport)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
+    const struct flow *flow = &ctx->xin->flow;
+    struct flow_wildcards *wc = ctx->wc;
     const struct xbridge *xbridge = ctx->xbridge;
+    const struct dp_packet *packet = ctx->xin->packet;
+    enum slow_path_reason slow;
 
     if (!xport) {
-        return 0;
+        slow = 0;
     } else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
         if (packet) {
             cfm_process_heartbeat(xport->cfm, packet);
         }
-        return SLOW_CFM;
+        slow = SLOW_CFM;
     } else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
         if (packet) {
             bfd_process_packet(xport->bfd, flow, packet);
@@ -2593,13 +2671,13 @@ process_special(struct xlate_ctx *ctx, const struct flow *flow,
                 ofproto_dpif_monitor_port_send_soon(xport->ofport);
             }
         }
-        return SLOW_BFD;
+        slow = SLOW_BFD;
     } else if (xport->xbundle && xport->xbundle->lacp
                && flow->dl_type == htons(ETH_TYPE_LACP)) {
         if (packet) {
             lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
         }
-        return SLOW_LACP;
+        slow = SLOW_LACP;
     } else if ((xbridge->stp || xbridge->rstp) &&
                stp_should_process_flow(flow, wc)) {
         if (packet) {
@@ -2607,14 +2685,21 @@ process_special(struct xlate_ctx *ctx, const struct flow *flow,
                 ? stp_process_packet(xport, packet)
                 : rstp_process_packet(xport, packet);
         }
-        return SLOW_STP;
+        slow = SLOW_STP;
     } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
         if (packet) {
             lldp_process_packet(xport->lldp, packet);
         }
-        return SLOW_LLDP;
+        slow = SLOW_LLDP;
     } else {
-        return 0;
+        slow = 0;
+    }
+
+    if (slow) {
+        ctx->xout->slow |= slow;
+        return true;
+    } else {
+        return false;
     }
 }
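
Recording the slow-path reason in ctx->xout->slow inside process_special()
lets its callers collapse to a one-line guard.  Both call sites later in
this patch take the following shape:

    if (!process_special(ctx, xport) && may_receive(xport, ctx)) {
        /* Not a CFM/BFD/LACP/STP/LLDP packet, so translate it normally;
         * any slow-path reason was already ORed into ctx->xout->slow. */
        xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
    }
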
 
@@ -2748,7 +2833,7 @@ build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
     }
     tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
     tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
-    odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
+    odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
     return 0;
 }
 
@@ -2757,7 +2842,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                         const struct xlate_bond_recirc *xr, bool check_stp)
 {
     const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
-    struct flow_wildcards *wc = &ctx->xout->wc;
+    struct flow_wildcards *wc = ctx->wc;
     struct flow *flow = &ctx->xin->flow;
     struct flow_tnl flow_tnl;
     ovs_be16 flow_vlan_tci;
@@ -2769,7 +2854,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
 
     /* If 'struct flow' gets additional metadata, we'll need to zero it out
      * before traversing a patch port. */
-    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 32);
+    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
     memset(&flow_tnl, 0, sizeof flow_tnl);
 
     if (!xport) {
@@ -2814,7 +2899,6 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         struct flow old_flow = ctx->xin->flow;
         bool old_was_mpls = ctx->was_mpls;
         cls_version_t old_version = ctx->tables_version;
-        enum slow_path_reason special;
         struct ofpbuf old_stack = ctx->stack;
         union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
         struct ofpbuf old_action_set = ctx->action_set;
@@ -2833,11 +2917,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         ctx->tables_version
             = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
 
-        special = process_special(ctx, &ctx->xin->flow, peer,
-                                  ctx->xin->packet);
-        if (special) {
-            ctx->xout->slow |= special;
-        } else if (may_receive(peer, ctx)) {
+        if (!process_special(ctx, peer) && may_receive(peer, ctx)) {
             if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
                 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                 if (ctx->action_set.size) {
@@ -2855,13 +2935,13 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                 /* Forwarding is disabled by STP and RSTP.  Let OFPP_NORMAL and
                  * the learning action look at the packet, then drop it. */
                 struct flow old_base_flow = ctx->base_flow;
-                size_t old_size = ctx->xout->odp_actions->size;
+                size_t old_size = ctx->odp_actions->size;
                 mirror_mask_t old_mirrors = ctx->xout->mirrors;
 
                 xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
                 ctx->xout->mirrors = old_mirrors;
                 ctx->base_flow = old_base_flow;
-                ctx->xout->odp_actions->size = old_size;
+                ctx->odp_actions->size = old_size;
 
                 /* Undo changes that may have been done for recirculation. */
                 if (exit_recirculates(ctx)) {
@@ -2930,7 +3010,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
           * matches, while explicit set actions on tunnel metadata are.
           */
         flow_tnl = flow->tunnel;
-        odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
+        odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
         if (odp_port == ODPP_NONE) {
             xlate_report(ctx, "Tunneling decided against output");
             goto out; /* restore flow_nw_tos */
@@ -2954,8 +3034,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
             tnl_push_pop_send = true;
         } else {
             xlate_report(ctx, "output to kernel tunnel");
-            commit_odp_tunnel_action(flow, &ctx->base_flow,
-                                     ctx->xout->odp_actions);
+            commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
             flow->tunnel = flow_tnl; /* Restore tunnel metadata */
         }
     } else {
@@ -2978,21 +3057,21 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         bool use_masked = ctx->xbridge->support.masked_set_action;
 
         ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
-                                              ctx->xout->odp_actions,
+                                              ctx->odp_actions,
                                               wc, use_masked);
 
         if (xr) {
             struct ovs_action_hash *act_hash;
 
             /* Hash action. */
-            act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions,
+            act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
                                                 OVS_ACTION_ATTR_HASH,
                                                 sizeof *act_hash);
             act_hash->hash_alg = xr->hash_alg;
             act_hash->hash_basis = xr->hash_basis;
 
             /* Recirc action. */
-            nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+            nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
                            xr->recirc_id);
         } else {
 
@@ -3011,14 +3090,14 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                 }
 
                 if (odp_tnl_port != ODPP_NONE) {
-                    nl_msg_put_odp_port(ctx->xout->odp_actions,
+                    nl_msg_put_odp_port(ctx->odp_actions,
                                         OVS_ACTION_ATTR_TUNNEL_POP,
                                         odp_tnl_port);
                 } else {
                     /* Tunnel push-pop action is not compatible with
                      * IPFIX action. */
                     add_ipfix_output_action(ctx, out_port);
-                    nl_msg_put_odp_port(ctx->xout->odp_actions,
+                    nl_msg_put_odp_port(ctx->odp_actions,
                                         OVS_ACTION_ATTR_OUTPUT,
                                         out_port);
                }
@@ -3076,7 +3155,7 @@ xlate_resubmit_resource_check(struct xlate_ctx *ctx)
                     MAX_RESUBMIT_RECURSION);
     } else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
         VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
-    } else if (ctx->xout->odp_actions->size > UINT16_MAX) {
+    } else if (ctx->odp_actions->size > UINT16_MAX) {
         VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
     } else if (ctx->stack.size >= 65536) {
         VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
@@ -3097,16 +3176,14 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
         return;
     }
     if (xlate_resubmit_resource_check(ctx)) {
-        struct flow_wildcards *wc;
         uint8_t old_table_id = ctx->table_id;
         struct rule_dpif *rule;
 
         ctx->table_id = table_id;
-        wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
 
         rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
                                            ctx->tables_version,
-                                           &ctx->xin->flow, wc,
+                                           &ctx->xin->flow, ctx->xin->wc,
                                            ctx->xin->xcache != NULL,
                                            ctx->xin->resubmit_stats,
                                            &ctx->table_id, in_port,
@@ -3234,7 +3311,7 @@ xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
 static void
 xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
+    struct flow_wildcards *wc = ctx->wc;
     struct ofputil_bucket *bucket;
     uint32_t basis;
 
@@ -3251,7 +3328,6 @@ static void
 xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
 {
     struct mf_bitmap hash_fields = MF_BITMAP_INITIALIZER;
-    struct flow_wildcards *wc = &ctx->xout->wc;
     const struct field_array *fields;
     struct ofputil_bucket *bucket;
     uint32_t basis;
@@ -3301,7 +3377,7 @@ xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
             }
             basis = hash_bytes(&value, mf->n_bytes, basis);
 
-            mf_mask_field(mf, &wc->masks);
+            mf_mask_field(mf, &ctx->wc->masks);
         }
     }
 
@@ -3438,12 +3514,11 @@ execute_controller_action(struct xlate_ctx *ctx, int len,
 
     use_masked = ctx->xbridge->support.masked_set_action;
     ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                          ctx->xout->odp_actions,
-                                          &ctx->xout->wc, use_masked);
+                                          ctx->odp_actions,
+                                          ctx->wc, use_masked);
 
     odp_execute_actions(NULL, &packet, 1, false,
-                        ctx->xout->odp_actions->data,
-                        ctx->xout->odp_actions->size, NULL);
+                        ctx->odp_actions->data, ctx->odp_actions->size, NULL);
 
     pin = xmalloc(sizeof *pin);
     pin->up.packet_len = dp_packet_size(packet);
@@ -3486,8 +3561,8 @@ compose_recirculate_action(struct xlate_ctx *ctx)
 
     use_masked = ctx->xbridge->support.masked_set_action;
     ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                          ctx->xout->odp_actions,
-                                          &ctx->xout->wc, use_masked);
+                                          ctx->odp_actions,
+                                          ctx->wc, use_masked);
 
     recirc_metadata_from_flow(&md, &ctx->xin->flow);
 
@@ -3518,7 +3593,7 @@ compose_recirculate_action(struct xlate_ctx *ctx)
          * fail all revalidations as zero is not a valid recirculation ID. */
     }
 
-    nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
+    nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
 
     /* Undo changes done by recirculation. */
     ctx->action_set.size = ctx->recirc_action_offset;
@@ -3529,19 +3604,18 @@ compose_recirculate_action(struct xlate_ctx *ctx)
 static void
 compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
     int n;
 
     ovs_assert(eth_type_mpls(mpls->ethertype));
 
-    n = flow_count_mpls_labels(flow, wc);
+    n = flow_count_mpls_labels(flow, ctx->wc);
     if (!n) {
         bool use_masked = ctx->xbridge->support.masked_set_action;
 
         ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
-                                              ctx->xout->odp_actions,
-                                              &ctx->xout->wc, use_masked);
+                                              ctx->odp_actions,
+                                              ctx->wc, use_masked);
     } else if (n >= FLOW_MAX_MPLS_LABELS) {
         if (ctx->xin->packet != NULL) {
             static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
@@ -3554,18 +3628,17 @@ compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
         return;
     }
 
-    flow_push_mpls(flow, n, mpls->ethertype, wc);
+    flow_push_mpls(flow, n, mpls->ethertype, ctx->wc);
 }
 
 static void
 compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
     struct flow *flow = &ctx->xin->flow;
-    int n = flow_count_mpls_labels(flow, wc);
+    int n = flow_count_mpls_labels(flow, ctx->wc);
 
-    if (flow_pop_mpls(flow, n, eth_type, wc)) {
-        if (ctx->xbridge->support.recirc) {
+    if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
+        if (ctx->xbridge->support.odp.recirc) {
             ctx->was_mpls = true;
         }
     } else if (n >= FLOW_MAX_MPLS_LABELS) {
@@ -3577,7 +3650,7 @@ compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
                          ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
         }
         ctx->exit = true;
-        ofpbuf_clear(ctx->xout->odp_actions);
+        ofpbuf_clear(ctx->odp_actions);
     }
 }
 
@@ -3590,7 +3663,7 @@ compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
         return false;
     }
 
-    ctx->xout->wc.masks.nw_ttl = 0xff;
+    ctx->wc->masks.nw_ttl = 0xff;
     if (flow->nw_ttl > 1) {
         flow->nw_ttl--;
         return false;
@@ -3611,7 +3684,7 @@ static void
 compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
 {
     if (eth_type_mpls(ctx->xin->flow.dl_type)) {
-        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
+        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
         set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
     }
 }
@@ -3620,7 +3693,7 @@ static void
 compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
 {
     if (eth_type_mpls(ctx->xin->flow.dl_type)) {
-        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
+        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
         set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
     }
 }
@@ -3629,7 +3702,7 @@ static void
 compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
 {
     if (eth_type_mpls(ctx->xin->flow.dl_type)) {
-        ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
         set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
     }
 }
@@ -3638,12 +3711,11 @@ static bool
 compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
 {
     struct flow *flow = &ctx->xin->flow;
-    struct flow_wildcards *wc = &ctx->xout->wc;
 
     if (eth_type_mpls(flow->dl_type)) {
         uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
 
-        wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+        ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
         if (ttl > 1) {
             ttl--;
             set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
@@ -3720,7 +3792,7 @@ xlate_output_reg_action(struct xlate_ctx *ctx,
         union mf_subvalue value;
 
         memset(&value, 0xff, sizeof value);
-        mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
+        mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
         xlate_output_action(ctx, u16_to_ofp(port),
                             or->max_len, false);
     }
@@ -3805,12 +3877,10 @@ xlate_bundle_action(struct xlate_ctx *ctx,
 {
     ofp_port_t port;
 
-    port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
-                          slave_enabled_cb,
+    port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
                           CONST_CAST(struct xbridge *, ctx->xbridge));
     if (bundle->dst.field) {
-        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
-                     &ctx->xout->wc);
+        nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
     } else {
         xlate_output_action(ctx, port, 0, false);
     }
@@ -3830,7 +3900,7 @@ static void
 xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
 {
     ctx->xout->has_learn = true;
-    learn_mask(learn, &ctx->xout->wc);
+    learn_mask(learn, ctx->wc);
 
     if (ctx->xin->xcache) {
         struct xc_entry *entry;
@@ -3902,14 +3972,15 @@ xlate_sample_action(struct xlate_ctx *ctx,
 
     use_masked = ctx->xbridge->support.masked_set_action;
     ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
-                                          ctx->xout->odp_actions,
-                                          &ctx->xout->wc, use_masked);
+                                          ctx->odp_actions,
+                                          ctx->wc, use_masked);
 
     compose_flow_sample_cookie(os->probability, os->collector_set_id,
                                os->obs_domain_id, os->obs_point_id, &cookie);
-    compose_sample_action(ctx->xbridge, ctx->xout->odp_actions,
+    compose_sample_action(ctx->xbridge, ctx->odp_actions,
                           &ctx->xin->flow, probability, &cookie,
-                          sizeof cookie.flow_sample, ODPP_NONE);
+                          sizeof cookie.flow_sample, ODPP_NONE,
+                          false);
 }
 
 static bool
@@ -3956,6 +4027,7 @@ xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
             } else if (inner->type == OFPACT_GROUP) {
                 ctx->xin->flow.actset_output = OFPP_UNSET;
                 ctx->action_set_has_group = true;
+                break;
             }
         }
     }
@@ -4092,7 +4164,7 @@ static void
 do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
                  struct xlate_ctx *ctx)
 {
-    struct flow_wildcards *wc = &ctx->xout->wc;
+    struct flow_wildcards *wc = ctx->wc;
     struct flow *flow = &ctx->xin->flow;
     const struct ofpact *a;
 
@@ -4470,7 +4542,8 @@ void
 xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
               const struct flow *flow, ofp_port_t in_port,
               struct rule_dpif *rule, uint16_t tcp_flags,
-              const struct dp_packet *packet)
+              const struct dp_packet *packet, struct flow_wildcards *wc,
+              struct ofpbuf *odp_actions)
 {
     xin->ofproto = ofproto;
     xin->flow = *flow;
@@ -4486,8 +4559,8 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
     xin->resubmit_hook = NULL;
     xin->report_hook = NULL;
     xin->resubmit_stats = NULL;
-    xin->skip_wildcards = false;
-    xin->odp_actions = NULL;
+    xin->wc = wc;
+    xin->odp_actions = odp_actions;
 
     /* Do recirc lookup. */
     xin->recirc = flow->recirc_id
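
Under the new signature, a caller that wants wildcards and datapath actions
supplies its own storage, while a side-effects-only caller passes NULL for
both.  A minimal caller sketch ('ofproto', 'flow', and 'packet' are assumed
to be in scope):

    struct flow_wildcards wc;
    uint64_t stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
    struct xlate_in xin;
    struct xlate_out xout;

    xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, NULL, 0,
                  packet, &wc, &odp_actions);
    xlate_actions(&xin, &xout);
    /* ... consume odp_actions.data/.size and wc.masks ... */
    xlate_out_uninit(&xout);
    ofpbuf_uninit(&odp_actions);
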
@@ -4499,9 +4572,6 @@ void
 xlate_out_uninit(struct xlate_out *xout)
 {
     if (xout) {
-        if (xout->odp_actions == &xout->odp_actions_buf) {
-            ofpbuf_uninit(xout->odp_actions);
-        }
         xlate_out_free_recircs(xout);
     }
 }
@@ -4516,23 +4586,6 @@ xlate_actions_for_side_effects(struct xlate_in *xin)
     xlate_actions(xin, &xout);
     xlate_out_uninit(&xout);
 }
-
-void
-xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
-{
-    dst->wc = src->wc;
-    dst->slow = src->slow;
-    dst->has_learn = src->has_learn;
-    dst->has_normal = src->has_normal;
-    dst->has_fin_timeout = src->has_fin_timeout;
-    dst->nf_output_iface = src->nf_output_iface;
-    dst->mirrors = src->mirrors;
-
-    dst->odp_actions = &dst->odp_actions_buf;
-    ofpbuf_use_stub(dst->odp_actions, dst->odp_actions_stub,
-                    sizeof dst->odp_actions_stub);
-    ofpbuf_put(dst->odp_actions, src->odp_actions->data, src->odp_actions->size);
-}
 \f
 static struct skb_priority_to_dscp *
 get_skb_priority(const struct xport *xport, uint32_t skb_priority)
@@ -4582,8 +4635,8 @@ actions_output_to_local_port(const struct xlate_ctx *ctx)
     const struct nlattr *a;
     unsigned int left;
 
-    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions->data,
-                             ctx->xout->odp_actions->size) {
+    NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
+                             ctx->odp_actions->size) {
         if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
             && nl_attr_get_odp_port(a) == local_odp_port) {
             return true;
@@ -4671,20 +4724,68 @@ too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
 void
 xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 {
+    *xout = (struct xlate_out) {
+        .slow = 0,
+        .fail_open = false,
+        .has_learn = false,
+        .has_normal = false,
+        .has_fin_timeout = false,
+        .nf_output_iface = NF_OUT_DROP,
+        .mirrors = 0,
+        .n_recircs = 0,
+    };
+
     struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
-    struct flow_wildcards *wc = NULL;
+    struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
+    if (!xbridge) {
+        return;
+    }
+
     struct flow *flow = &xin->flow;
-    struct rule_dpif *rule = NULL;
 
-    enum slow_path_reason special;
-    const struct ofpact *ofpacts;
-    struct xbridge *xbridge;
+    union mf_subvalue stack_stub[1024 / sizeof(union mf_subvalue)];
+    uint64_t action_set_stub[1024 / 8];
+    struct flow_wildcards scratch_wc;
+    uint64_t actions_stub[256 / 8];
+    struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
+    struct xlate_ctx ctx = {
+        .xin = xin,
+        .xout = xout,
+        .base_flow = *flow,
+        .orig_tunnel_ip_dst = flow->tunnel.ip_dst,
+        .xbridge = xbridge,
+        .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
+        .rule = xin->rule,
+        .wc = xin->wc ? xin->wc : &scratch_wc,
+        .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
+
+        .recurse = 0,
+        .resubmits = 0,
+        .in_group = false,
+        .in_action_set = false,
+
+        .table_id = 0,
+        .rule_cookie = OVS_BE64_MAX,
+        .orig_skb_priority = flow->skb_priority,
+        .sflow_n_outputs = 0,
+        .sflow_odp_port = 0,
+        .user_cookie_offset = 0,
+        .exit = false,
+
+        .recirc_action_offset = -1,
+        .last_unroll_offset = -1,
+
+        .was_mpls = false,
+
+        .action_set_has_group = false,
+        .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
+    };
+    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+    ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
+
     struct xport *in_port;
     struct flow orig_flow;
-    struct xlate_ctx ctx;
-    size_t ofpacts_len;
     bool tnl_may_send;
-    bool is_icmp;
 
     COVERAGE_INC(xlate_actions);
 
@@ -4709,75 +4810,24 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
      *   kernel does.  If we wish to maintain the original values an action
      *   needs to be generated. */
 
-    ctx.xin = xin;
-    ctx.xout = xout;
-    ctx.xout->slow = 0;
-    ctx.xout->has_learn = false;
-    ctx.xout->has_normal = false;
-    ctx.xout->has_fin_timeout = false;
-    ctx.xout->nf_output_iface = NF_OUT_DROP;
-    ctx.xout->mirrors = 0;
-    ctx.xout->n_recircs = 0;
-
-    xout->odp_actions = xin->odp_actions;
-    if (!xout->odp_actions) {
-        xout->odp_actions = &xout->odp_actions_buf;
-        ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
-                        sizeof xout->odp_actions_stub);
-    }
-    ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);
-
-    xbridge = xbridge_lookup(xcfg, xin->ofproto);
-    if (!xbridge) {
-        return;
-    }
-    /* 'ctx.xbridge' may be changed by action processing, whereas 'xbridge'
-     * will remain set on the original input bridge. */
-    ctx.xbridge = xbridge;
-    ctx.rule = xin->rule;
-
-    ctx.base_flow = *flow;
-    memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
-    ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
-
-    if (!xin->skip_wildcards) {
-        wc = &xout->wc;
-        flow_wildcards_init_catchall(wc);
-        memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
-        memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+    if (xin->wc) {
+        flow_wildcards_init_catchall(ctx.wc);
+        memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port);
+        memset(&ctx.wc->masks.dl_type, 0xff, sizeof ctx.wc->masks.dl_type);
         if (is_ip_any(flow)) {
-            wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+            ctx.wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
         }
-        if (xbridge->support.recirc) {
+        if (xbridge->support.odp.recirc) {
             /* Always exactly match recirc_id when datapath supports
              * recirculation.  */
-            wc->masks.recirc_id = UINT32_MAX;
+            ctx.wc->masks.recirc_id = UINT32_MAX;
         }
         if (xbridge->netflow) {
-            netflow_mask_wc(flow, wc);
+            netflow_mask_wc(flow, ctx.wc);
         }
     }
-    is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
-
-    tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
-
-    ctx.recurse = 0;
-    ctx.resubmits = 0;
-    ctx.in_group = false;
-    ctx.in_action_set = false;
-    ctx.orig_skb_priority = flow->skb_priority;
-    ctx.table_id = 0;
-    ctx.rule_cookie = OVS_BE64_MAX;
-    ctx.exit = false;
-    ctx.was_mpls = false;
-    ctx.recirc_action_offset = -1;
-    ctx.last_unroll_offset = -1;
 
-    ctx.action_set_has_group = false;
-    ofpbuf_use_stub(&ctx.action_set,
-                    ctx.action_set_stub, sizeof ctx.action_set_stub);
-
-    ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+    tnl_may_send = tnl_xlate_init(flow, xin->wc);
 
     /* The in_port of the original packet before recirculation. */
     in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
@@ -4792,7 +4842,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
                          xin->ofpacts_len > 0
                          ? "actions"
                          : "rule");
-            return;
+            goto exit;
         }
 
         /* Set the bridge for post-recirculation processing if needed. */
@@ -4805,7 +4855,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
                 /* Drop the packet if the bridge cannot be found. */
                 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
                 VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
-                return;
+                goto exit;
             }
             ctx.xbridge = new_bridge;
         }
@@ -4851,49 +4901,32 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
         VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
                      flow->recirc_id);
-        return;
+        goto exit;
     }
     /* The bridge is now known so obtain its table version. */
     ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
 
     if (!xin->ofpacts && !ctx.rule) {
-        rule = rule_dpif_lookup_from_table(ctx.xbridge->ofproto,
-                                           ctx.tables_version, flow, wc,
-                                           ctx.xin->xcache != NULL,
-                                           ctx.xin->resubmit_stats,
-                                           &ctx.table_id,
-                                           flow->in_port.ofp_port, true, true);
+        ctx.rule = rule_dpif_lookup_from_table(
+            ctx.xbridge->ofproto, ctx.tables_version, flow, xin->wc,
+            ctx.xin->xcache != NULL, ctx.xin->resubmit_stats, &ctx.table_id,
+            flow->in_port.ofp_port, true, true);
         if (ctx.xin->resubmit_stats) {
-            rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+            rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
         }
         if (ctx.xin->xcache) {
             struct xc_entry *entry;
 
             entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
-            entry->u.rule = rule;
+            entry->u.rule = ctx.rule;
         }
-        ctx.rule = rule;
 
         if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
-            ctx.xin->resubmit_hook(ctx.xin, rule, 0);
+            ctx.xin->resubmit_hook(ctx.xin, ctx.rule, 0);
         }
     }
     xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
 
-    if (xin->ofpacts) {
-        ofpacts = xin->ofpacts;
-        ofpacts_len = xin->ofpacts_len;
-    } else if (ctx.rule) {
-        const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);
-
-        ofpacts = actions->ofpacts;
-        ofpacts_len = actions->ofpacts_len;
-
-        ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
-    } else {
-        OVS_NOT_REACHED();
-    }
-
     if (mbridge_has_mirrors(xbridge->mbridge)) {
         /* Do this conditionally because the copy is expensive enough that it
          * shows up in profiles. */
@@ -4919,10 +4952,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
 
     /* Do not perform special processing on recirculated packets,
      * as recirculated packets are not really received by the bridge. */
-    if (!xin->recirc &&
-        (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
-        ctx.xout->slow |= special;
-    } else {
+    if (xin->recirc || !process_special(&ctx, in_port)) {
         size_t sample_actions_len;
 
         if (flow->in_port.ofp_port
@@ -4936,12 +4966,28 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         if (!xin->recirc) {
             add_sflow_action(&ctx);
             add_ipfix_action(&ctx);
-            sample_actions_len = ctx.xout->odp_actions->size;
+            sample_actions_len = ctx.odp_actions->size;
         } else {
             sample_actions_len = 0;
         }
 
         if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
+            const struct ofpact *ofpacts;
+            size_t ofpacts_len;
+
+            if (xin->ofpacts) {
+                ofpacts = xin->ofpacts;
+                ofpacts_len = xin->ofpacts_len;
+            } else if (ctx.rule) {
+                const struct rule_actions *actions
+                    = rule_dpif_get_actions(ctx.rule);
+                ofpacts = actions->ofpacts;
+                ofpacts_len = actions->ofpacts_len;
+                ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
+            } else {
+                OVS_NOT_REACHED();
+            }
+
             do_xlate_actions(ofpacts, ofpacts_len, &ctx);
 
             /* We've let OFPP_NORMAL and the learning action look at the
@@ -4949,7 +4995,7 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
             if (in_port && (!xport_stp_forward_state(in_port) ||
                             !xport_rstp_forward_state(in_port))) {
                 /* Drop all actions added by do_xlate_actions() above. */
-                ctx.xout->odp_actions->size = sample_actions_len;
+                ctx.odp_actions->size = sample_actions_len;
 
                 /* Undo changes that may have been done for recirculation. */
                 if (exit_recirculates(&ctx)) {
@@ -4988,14 +5034,14 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
-    if (nl_attr_oversized(ctx.xout->odp_actions->size)) {
+    if (nl_attr_oversized(ctx.odp_actions->size)) {
         /* These datapath actions are too big for a Netlink attribute, so we
          * can't hand them to the kernel directly.  dpif_execute() can execute
          * them one by one with help, so just mark the result as SLOW_ACTION to
          * prevent the flow from being installed. */
         COVERAGE_INC(xlate_actions_oversize);
         ctx.xout->slow |= SLOW_ACTION;
-    } else if (too_many_output_actions(ctx.xout->odp_actions)) {
+    } else if (too_many_output_actions(ctx.odp_actions)) {
         COVERAGE_INC(xlate_actions_too_many_output);
         ctx.xout->slow |= SLOW_ACTION;
     }
@@ -5016,38 +5062,29 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
         }
     }
 
-    /* Do netflow only for packets really received by the bridge. */
-    if (!xin->recirc && xbridge->netflow) {
-        /* Only update netflow if we don't have controller flow.  We don't
-         * report NetFlow expiration messages for such facets because they
-         * are just part of the control logic for the network, not real
-         * traffic. */
-        if (ofpacts_len == 0
-            || ofpacts->type != OFPACT_CONTROLLER
-            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
-            if (ctx.xin->resubmit_stats) {
-                netflow_flow_update(xbridge->netflow, flow,
-                                    xout->nf_output_iface,
-                                    ctx.xin->resubmit_stats);
-            }
-            if (ctx.xin->xcache) {
-                struct xc_entry *entry;
+    /* Do netflow only for packets really received by the bridge and not sent
+     * to the controller.  We consider packets sent to the controller to be
+     * part of the control plane rather than the data plane. */
+    if (!xin->recirc && xbridge->netflow && !(xout->slow & SLOW_CONTROLLER)) {
+        if (ctx.xin->resubmit_stats) {
+            netflow_flow_update(xbridge->netflow, flow,
+                                xout->nf_output_iface,
+                                ctx.xin->resubmit_stats);
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
 
-                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
-                entry->u.nf.netflow = netflow_ref(xbridge->netflow);
-                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
-                entry->u.nf.iface = xout->nf_output_iface;
-            }
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+            entry->u.nf.netflow = netflow_ref(xbridge->netflow);
+            entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+            entry->u.nf.iface = xout->nf_output_iface;
         }
     }
 
-    ofpbuf_uninit(&ctx.stack);
-    ofpbuf_uninit(&ctx.action_set);
-
-    if (wc) {
+    if (xin->wc) {
         /* Clear the metadata and register wildcard masks, because we won't
          * use non-header fields as part of the cache. */
-        flow_wildcards_clear_non_packet_fields(wc);
+        flow_wildcards_clear_non_packet_fields(ctx.wc);
 
         /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields.  struct flow
          * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
@@ -5059,15 +5096,20 @@ xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
          * Avoid the problem here by making sure that only the low 8 bits of
          * either field can be unwildcarded for ICMP.
          */
-        if (is_icmp) {
-            wc->masks.tp_src &= htons(UINT8_MAX);
-            wc->masks.tp_dst &= htons(UINT8_MAX);
+        if (is_icmpv4(flow) || is_icmpv6(flow)) {
+            ctx.wc->masks.tp_src &= htons(UINT8_MAX);
+            ctx.wc->masks.tp_dst &= htons(UINT8_MAX);
         }
         /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
-        if (wc->masks.vlan_tci) {
-            wc->masks.vlan_tci |= htons(VLAN_CFI);
+        if (ctx.wc->masks.vlan_tci) {
+            ctx.wc->masks.vlan_tci |= htons(VLAN_CFI);
         }
     }
+
+exit:
+    ofpbuf_uninit(&ctx.stack);
+    ofpbuf_uninit(&ctx.action_set);
+    ofpbuf_uninit(&scratch_actions);
 }
 
 /* Sends 'packet' out 'ofport'.
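
As a concrete instance of the ICMP clamping near the end of xlate_actions()
above: if translation unwildcarded the entire 16-bit tp_src of an ICMP flow,
only the low byte is meaningful, so the mask is narrowed:

    ctx.wc->masks.tp_src = OVS_BE16_MAX;         /* before: htons(0xffff) */
    ctx.wc->masks.tp_src &= htons(UINT8_MAX);    /* after:  htons(0x00ff) */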