ofproto-dpif-rid: Don't carry actset_output explicitly in metadata.
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 837ae0b..3281eaa 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -89,6 +89,15 @@ struct rule_dpif {
     struct ovs_mutex stats_mutex;
     struct dpif_flow_stats stats OVS_GUARDED;
 
+    /* If non-NULL, will point to a new rule (for which a reference is held) to
+     * which all the stats updates should be forwarded.  This exists only
+     * transitionally when flows are replaced.
+     *
+     * Protected by stats_mutex.  If both 'rule->stats_mutex' and
+     * 'rule->new_rule->stats_mutex' must be held together, acquire them in that
+     * order. */
+    struct rule_dpif *new_rule OVS_GUARDED;
+
     /* If non-zero then the recirculation id that has
      * been allocated for use with this rule.
      * The recirculation id and associated internal flow should
@@ -271,30 +280,11 @@ struct dpif_backer {
 
     bool recv_set_enable; /* Enables or disables receiving packets. */
 
-    /* Recirculation. */
-    bool enable_recirc;   /* True if the datapath supports recirculation */
-
-    /* True if the datapath supports unique flow identifiers */
-    bool enable_ufid;
-
-    /* True if the datapath supports variable-length
-     * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
-     * False if the datapath supports only 8-byte (or shorter) userdata. */
-    bool variable_length_userdata;
-
-    /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
-     * actions. */
-    bool masked_set_action;
-
-    /* Maximum number of MPLS label stack entries that the datapath supports
-     * in a match */
-    size_t max_mpls_depth;
-
     /* Version string of the datapath stored in OVSDB. */
     char *dp_version_string;
 
-    /* True if the datapath supports tnl_push and pop actions. */
-    bool enable_tnl_push_pop;
+    /* Datapath feature support. */
+    struct dpif_backer_support support;
     struct atomic_count tnl_count;
 };
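
For reviewers without the headers handy: the new "struct dpif_backer_support" gathers the feature flags this hunk removes from "struct dpif_backer", plus the flow-key features probed further down in check_support().  The sketch below is reconstructed purely from the uses in this patch; field order and comments are guesses, and the authoritative definitions live in the ofproto-dpif and odp-util headers.

    /* Sketch only -- reconstructed from this patch, not copied from the headers. */
    #include <stdbool.h>
    #include <stddef.h>

    struct odp_support {                /* Features visible in the flow key. */
        size_t max_mpls_depth;          /* Was backer->max_mpls_depth. */
        bool recirc;                    /* Was backer->enable_recirc. */
        bool ct_state;                  /* Connection-tracking state match. */
        bool ct_zone;
        bool ct_mark;
        bool ct_label;
        bool ct_state_nat;              /* NAT bits in ct_state. */
    };

    struct dpif_backer_support {
        struct odp_support odp;         /* Shared with odp_flow_key_from_flow(). */
        bool variable_length_userdata;  /* Was backer->variable_length_userdata. */
        bool masked_set_action;         /* Was backer->masked_set_action. */
        bool ufid;                      /* Was backer->enable_ufid. */
        bool tnl_push_pop;              /* Was backer->enable_tnl_push_pop. */
    };
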
 
@@ -306,12 +296,14 @@ struct ofproto_dpif {
     struct ofproto up;
     struct dpif_backer *backer;
 
+    ATOMIC(cls_version_t) tables_version;  /* For classifier lookups. */
+
     uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
 
     /* Special OpenFlow rules. */
     struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
     struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
-    struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
+    struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
 
     /* Bridging. */
     struct netflow *netflow;
@@ -349,9 +341,9 @@ struct ofproto_dpif {
     uint64_t change_seq;           /* Connectivity status changes. */
 
     /* Work queues. */
-    struct guarded_list pins;      /* Contains "struct ofputil_packet_in"s. */
-    struct seq *pins_seq;          /* For notifying 'pins' reception. */
-    uint64_t pins_seqno;
+    struct guarded_list ams;      /* Contains "struct ofproto_async_msg"s. */
+    struct seq *ams_seq;          /* For notifying 'ams' reception. */
+    uint64_t ams_seqno;
 };
 
 /* All existing ofproto_dpif instances, indexed by ->up.name. */
@@ -367,22 +359,16 @@ ofproto_dpif_cast(const struct ofproto *ofproto)
     return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
 }
 
-size_t
-ofproto_dpif_get_max_mpls_depth(const struct ofproto_dpif *ofproto)
-{
-    return ofproto->backer->max_mpls_depth;
-}
-
 bool
-ofproto_dpif_get_enable_recirc(const struct ofproto_dpif *ofproto)
+ofproto_dpif_get_enable_ufid(const struct dpif_backer *backer)
 {
-    return ofproto->backer->enable_recirc;
+    return backer->support.ufid;
 }
 
-bool
-ofproto_dpif_get_enable_ufid(struct dpif_backer *backer)
+struct dpif_backer_support *
+ofproto_dpif_get_support(const struct ofproto_dpif *ofproto)
 {
-    return backer->enable_ufid;
+    return &ofproto->backer->support;
 }
 
 static void ofproto_trace(struct ofproto_dpif *, struct flow *,
@@ -400,25 +386,33 @@ static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
  * it. */
 void
 ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
-                      struct ofputil_flow_mod *fm)
+                      const struct ofputil_flow_mod *fm)
 {
-    ofproto_flow_mod(&ofproto->up, fm);
+    struct ofproto_flow_mod ofm;
+
+    /* Multiple threads may do this for the same 'fm' at the same time.
+     * Allocate the ofproto_flow_mod with execution context on the stack.
+     *
+     * Note: This copy could be avoided by making ofproto_flow_mod more
+     * complex, but that may not be desirable, and a learn action is not that
+     * fast to begin with. */
+    ofm.fm = *fm;
+    ofproto_flow_mod(&ofproto->up, &ofm);
 }
 
-/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
- * Takes ownership of 'pin' and pin->packet. */
+/* Appends 'am' to the queue of asynchronous messages to be sent to the
+ * controller.  Takes ownership of 'am' and any data it points to. */
 void
-ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
-                            struct ofproto_packet_in *pin)
+ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
+                            struct ofproto_async_msg *am)
 {
-    if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
+    if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
         COVERAGE_INC(packet_in_overflow);
-        free(CONST_CAST(void *, pin->up.packet));
-        free(pin);
+        ofproto_async_msg_free(am);
     }
 
     /* Wakes up main thread for packet-in I/O. */
-    seq_change(ofproto->pins_seq);
+    seq_change(ofproto->ams_seq);
 }
 
 /* The default "table-miss" behaviour for OpenFlow1.3+ is to drop the
@@ -451,6 +445,9 @@ init(const struct shash *iface_hints)
 
         shash_add(&init_ofp_ports, node->name, new_hint);
     }
+
+    ofproto_unixctl_init();
+    udpif_init();
 }
 
 static void
@@ -517,6 +514,12 @@ lookup_ofproto_dpif_by_port_name(const char *name)
     return NULL;
 }
 
+bool
+ofproto_dpif_backer_enabled(struct dpif_backer *backer)
+{
+    return backer->recv_set_enable;
+}
+
 static int
 type_run(const char *type)
 {
@@ -646,10 +649,7 @@ type_run(const char *type)
                               ofproto->netflow,
                               ofproto->up.forward_bpdu,
                               connmgr_has_in_band(ofproto->up.connmgr),
-                              ofproto->backer->enable_recirc,
-                              ofproto->backer->variable_length_userdata,
-                              ofproto->backer->max_mpls_depth,
-                              ofproto->backer->masked_set_action);
+                              &ofproto->backer->support);
 
             HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                 xlate_bundle_set(ofproto, bundle, bundle->name,
@@ -834,7 +834,7 @@ static int add_internal_flows(struct ofproto_dpif *);
 static struct ofproto *
 alloc(void)
 {
-    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
+    struct ofproto_dpif *ofproto = xzalloc(sizeof *ofproto);
     return &ofproto->up;
 }
 
@@ -873,10 +873,7 @@ struct odp_garbage {
 };
 
 static bool check_variable_length_userdata(struct dpif_backer *backer);
-static size_t check_max_mpls_depth(struct dpif_backer *backer);
-static bool check_recirc(struct dpif_backer *backer);
-static bool check_ufid(struct dpif_backer *backer);
-static bool check_masked_set_action(struct dpif_backer *backer);
+static void check_support(struct dpif_backer *backer);
 
 static int
 open_dpif_backer(const char *type, struct dpif_backer **backerp)
@@ -886,7 +883,7 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
     struct dpif_port port;
     struct shash_node *node;
     struct ovs_list garbage_list;
-    struct odp_garbage *garbage, *next;
+    struct odp_garbage *garbage;
 
     struct sset names;
     char *backer_name;
@@ -964,20 +961,14 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
     }
     dpif_port_dump_done(&port_dump);
 
-    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
+    LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
         dpif_port_del(backer->dpif, garbage->odp_port);
-        list_remove(&garbage->list_node);
         free(garbage);
     }
 
     shash_add(&all_dpif_backers, type, backer);
 
-    backer->enable_recirc = check_recirc(backer);
-    backer->max_mpls_depth = check_max_mpls_depth(backer);
-    backer->masked_set_action = check_masked_set_action(backer);
-    backer->enable_ufid = check_ufid(backer);
-
-    backer->enable_tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
+    check_support(backer);
     atomic_count_init(&backer->tnl_count, 0);
 
     error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
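
Aside on the LIST_FOR_EACH_POP conversions in this patch (here, and in destruct() and run() below): the macro unlinks each node as it walks the list, so the explicit list_remove() from the old LIST_FOR_EACH_SAFE pattern is no longer needed.  Roughly, the new loop behaves like the hand-expanded sketch below, assuming the list_is_empty()/list_pop_front() helpers from OVS's list library.

    /* Hand-expanded equivalent of the LIST_FOR_EACH_POP loop above (sketch). */
    while (!list_is_empty(&garbage_list)) {
        struct odp_garbage *garbage
            = CONTAINER_OF(list_pop_front(&garbage_list),
                           struct odp_garbage, list_node);

        dpif_port_del(backer->dpif, garbage->odp_port);
        free(garbage);
    }
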
@@ -995,7 +986,8 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
     /* This check fails if performed before udpif threads have been set,
      * as the kernel module checks that the 'pid' in userspace action
      * is non-zero. */
-    backer->variable_length_userdata = check_variable_length_userdata(backer);
+    backer->support.variable_length_userdata
+        = check_variable_length_userdata(backer);
     backer->dp_version_string = dpif_get_dp_version(backer->dpif);
 
     return error;
@@ -1004,7 +996,7 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
 bool
 ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
 {
-    return ofproto_use_tnl_push_pop && ofproto->backer->enable_tnl_push_pop &&
+    return ofproto_use_tnl_push_pop && ofproto->backer->support.tnl_push_pop &&
            atomic_count_get(&ofproto->backer->tnl_count);
 }
 
@@ -1022,13 +1014,19 @@ check_recirc(struct dpif_backer *backer)
     struct odputil_keybuf keybuf;
     struct ofpbuf key;
     bool enable_recirc;
+    struct odp_flow_key_parms odp_parms = {
+        .flow = &flow,
+        .support = {
+            .recirc = true,
+        },
+    };
 
     memset(&flow, 0, sizeof flow);
     flow.recirc_id = 1;
     flow.dp_hash = 1;
 
     ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
-    odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
+    odp_flow_key_from_flow(&odp_parms, &key);
     enable_recirc = dpif_probe_feature(backer->dpif, "recirculation", &key,
                                        NULL);
 
@@ -1056,12 +1054,15 @@ check_ufid(struct dpif_backer *backer)
     struct ofpbuf key;
     ovs_u128 ufid;
     bool enable_ufid;
+    struct odp_flow_key_parms odp_parms = {
+        .flow = &flow,
+    };
 
     memset(&flow, 0, sizeof flow);
     flow.dl_type = htons(0x1234);
 
     ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
-    odp_flow_key_from_flow(&key, &flow, NULL, 0, true);
+    odp_flow_key_from_flow(&odp_parms, &key);
     dpif_flow_hash(backer->dpif, key.data, key.size, &ufid);
 
     enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);
@@ -1120,6 +1121,7 @@ check_variable_length_userdata(struct dpif_backer *backer)
     execute.packet = &packet;
     execute.needs_help = false;
     execute.probe = true;
+    execute.mtu = 0;
 
     error = dpif_execute(backer->dpif, &execute);
 
@@ -1163,13 +1165,16 @@ check_max_mpls_depth(struct dpif_backer *backer)
     for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
         struct odputil_keybuf keybuf;
         struct ofpbuf key;
+        struct odp_flow_key_parms odp_parms = {
+            .flow = &flow,
+        };
 
         memset(&flow, 0, sizeof flow);
         flow.dl_type = htons(ETH_TYPE_MPLS);
         flow_set_mpls_bos(&flow, n, 1);
 
         ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
-        odp_flow_key_from_flow(&key, &flow, NULL, 0, false);
+        odp_flow_key_from_flow(&odp_parms, &key);
         if (!dpif_probe_feature(backer->dpif, "MPLS", &key, NULL)) {
             break;
         }
@@ -1215,6 +1220,7 @@ check_masked_set_action(struct dpif_backer *backer)
     execute.packet = &packet;
     execute.needs_help = false;
     execute.probe = true;
+    execute.mtu = 0;
 
     error = dpif_execute(backer->dpif, &execute);
 
@@ -1229,6 +1235,68 @@ check_masked_set_action(struct dpif_backer *backer)
     return !error;
 }
 
+#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE)                        \
+static bool                                                                 \
+check_##NAME(struct dpif_backer *backer)                                    \
+{                                                                           \
+    struct flow flow;                                                       \
+    struct odputil_keybuf keybuf;                                           \
+    struct ofpbuf key;                                                      \
+    bool enable;                                                            \
+    struct odp_flow_key_parms odp_parms = {                                 \
+        .flow = &flow,                                                      \
+        .support = {                                                        \
+            .SUPPORT = true,                                                \
+        },                                                                  \
+    };                                                                      \
+                                                                            \
+    memset(&flow, 0, sizeof flow);                                          \
+    flow.FIELD = VALUE;                                                     \
+                                                                            \
+    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);                         \
+    odp_flow_key_from_flow(&odp_parms, &key);                               \
+    enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL);           \
+                                                                            \
+    if (enable) {                                                           \
+        VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif));  \
+    } else {                                                                \
+        VLOG_INFO("%s: Datapath does not support "#NAME,                    \
+                  dpif_name(backer->dpif));                                 \
+    }                                                                       \
+                                                                            \
+    return enable;                                                          \
+}
+#define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD, FIELD, 1)
+
+CHECK_FEATURE(ct_state)
+CHECK_FEATURE(ct_zone)
+CHECK_FEATURE(ct_mark)
+CHECK_FEATURE__(ct_label, ct_label, ct_label.u64.lo, 1)
+CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, CS_TRACKED|CS_SRC_NAT)
+
+#undef CHECK_FEATURE
+#undef CHECK_FEATURE__
+
+static void
+check_support(struct dpif_backer *backer)
+{
+    /* This feature needs to be tested after udpif threads are set. */
+    backer->support.variable_length_userdata = false;
+
+    backer->support.odp.recirc = check_recirc(backer);
+    backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
+    backer->support.masked_set_action = check_masked_set_action(backer);
+    backer->support.ufid = check_ufid(backer);
+    backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
+
+    backer->support.odp.ct_state = check_ct_state(backer);
+    backer->support.odp.ct_zone = check_ct_zone(backer);
+    backer->support.odp.ct_mark = check_ct_mark(backer);
+    backer->support.odp.ct_label = check_ct_label(backer);
+
+    backer->support.odp.ct_state_nat = check_ct_state_nat(backer);
+}
+
 static int
 construct(struct ofproto *ofproto_)
 {
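
To make the CHECK_FEATURE macros above easier to review: CHECK_FEATURE(ct_state) is CHECK_FEATURE__(ct_state, ct_state, ct_state, 1), which expands to roughly the following probe function (hand-expanded here, whitespace adjusted).

    static bool
    check_ct_state(struct dpif_backer *backer)
    {
        struct flow flow;
        struct odputil_keybuf keybuf;
        struct ofpbuf key;
        bool enable;
        struct odp_flow_key_parms odp_parms = {
            .flow = &flow,
            .support = {
                .ct_state = true,
            },
        };

        memset(&flow, 0, sizeof flow);
        flow.ct_state = 1;

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&odp_parms, &key);
        enable = dpif_probe_feature(backer->dpif, "ct_state", &key, NULL);

        if (enable) {
            VLOG_INFO("%s: Datapath supports ct_state", dpif_name(backer->dpif));
        } else {
            VLOG_INFO("%s: Datapath does not support ct_state",
                      dpif_name(backer->dpif));
        }

        return enable;
    }
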
@@ -1244,6 +1312,7 @@ construct(struct ofproto *ofproto_)
         return error;
     }
 
+    atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
     ofproto->netflow = NULL;
     ofproto->sflow = NULL;
     ofproto->ipfix = NULL;
@@ -1259,9 +1328,7 @@ construct(struct ofproto *ofproto_)
     ovs_mutex_init_adaptive(&ofproto->stats_mutex);
     ovs_mutex_init(&ofproto->vsp_mutex);
 
-    guarded_list_init(&ofproto->pins);
-
-    ofproto_unixctl_init();
+    guarded_list_init(&ofproto->ams);
 
     hmap_init(&ofproto->vlandev_map);
     hmap_init(&ofproto->realdev_vid_map);
@@ -1271,8 +1338,8 @@ construct(struct ofproto *ofproto_)
     sset_init(&ofproto->port_poll_set);
     ofproto->port_poll_errno = 0;
     ofproto->change_seq = 0;
-    ofproto->pins_seq = seq_create();
-    ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+    ofproto->ams_seq = seq_create();
+    ofproto->ams_seqno = seq_read(ofproto->ams_seq);
 
 
     SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
@@ -1338,8 +1405,7 @@ add_internal_flows(struct ofproto_dpif *ofproto)
     controller = ofpact_put_CONTROLLER(&ofpacts);
     controller->max_len = UINT16_MAX;
     controller->controller_id = 0;
-    controller->reason = OFPR_NO_MATCH;
-    ofpact_pad(&ofpacts);
+    controller->reason = OFPR_IMPLICIT_MISS;
 
     error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                    &ofproto->miss_rule);
@@ -1377,10 +1443,10 @@ static void
 destruct(struct ofproto *ofproto_)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    struct ofproto_packet_in *pin, *next_pin;
+    struct ofproto_async_msg *am;
     struct rule_dpif *rule;
     struct oftable *table;
-    struct ovs_list pins;
+    struct ovs_list ams;
 
     ofproto->backer->need_revalidate = REV_RECONFIGURE;
     xlate_txn_start();
@@ -1398,14 +1464,13 @@ destruct(struct ofproto *ofproto_)
             ofproto_rule_delete(&ofproto->up, &rule->up);
         }
     }
+    ofproto_group_delete_all(&ofproto->up);
 
-    guarded_list_pop_all(&ofproto->pins, &pins);
-    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
-        list_remove(&pin->list_node);
-        free(CONST_CAST(void *, pin->up.packet));
-        free(pin);
+    guarded_list_pop_all(&ofproto->ams, &ams);
+    LIST_FOR_EACH_POP (am, list_node, &ams) {
+        ofproto_async_msg_free(am);
     }
-    guarded_list_destroy(&ofproto->pins);
+    guarded_list_destroy(&ofproto->ams);
 
     recirc_free_ofproto(ofproto, ofproto->up.name);
 
@@ -1428,7 +1493,7 @@ destruct(struct ofproto *ofproto_)
     ovs_mutex_destroy(&ofproto->stats_mutex);
     ovs_mutex_destroy(&ofproto->vsp_mutex);
 
-    seq_destroy(ofproto->pins_seq);
+    seq_destroy(ofproto->ams_seq);
 
     close_dpif_backer(ofproto->backer);
 }
@@ -1447,24 +1512,22 @@ run(struct ofproto *ofproto_)
         mcast_snooping_mdb_flush(ofproto->ms);
     }
 
-    /* Always updates the ofproto->pins_seqno to avoid frequent wakeup during
+    /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
      * flow restore.  Even though nothing is processed during flow restore,
-     * all queued 'pins' will be handled immediately when flow restore
+     * all queued 'ams' will be handled immediately when flow restore
      * completes. */
-    ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+    ofproto->ams_seqno = seq_read(ofproto->ams_seq);
 
     /* Do not perform any periodic activity required by 'ofproto' while
      * waiting for flow restore to complete. */
     if (!ofproto_get_flow_restore_wait()) {
-        struct ofproto_packet_in *pin, *next_pin;
-        struct ovs_list pins;
+        struct ofproto_async_msg *am;
+        struct ovs_list ams;
 
-        guarded_list_pop_all(&ofproto->pins, &pins);
-        LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
-            connmgr_send_packet_in(ofproto->up.connmgr, pin);
-            list_remove(&pin->list_node);
-            free(CONST_CAST(void *, pin->up.packet));
-            free(pin);
+        guarded_list_pop_all(&ofproto->ams, &ams);
+        LIST_FOR_EACH_POP (am, list_node, &ams) {
+            connmgr_send_async_msg(ofproto->up.connmgr, am);
+            ofproto_async_msg_free(am);
         }
     }
 
@@ -1571,13 +1634,11 @@ wait(struct ofproto *ofproto_)
     mcast_snooping_wait(ofproto->ms);
     stp_wait(ofproto);
     if (ofproto->backer->need_revalidate) {
-        /* Shouldn't happen, but if it does just go around again. */
-        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
         poll_immediate_wake();
     }
 
     seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
-    seq_wait(ofproto->pins_seq, ofproto->pins_seqno);
+    seq_wait(ofproto->ams_seq, ofproto->ams_seqno);
 }
 
 static void
@@ -1624,10 +1685,19 @@ query_tables(struct ofproto *ofproto,
     }
 }
 
+static void
+set_tables_version(struct ofproto *ofproto_, cls_version_t version)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+    atomic_store_relaxed(&ofproto->tables_version, version);
+}
+
+
 static struct ofport *
 port_alloc(void)
 {
-    struct ofport_dpif *port = xmalloc(sizeof *port);
+    struct ofport_dpif *port = xzalloc(sizeof *port);
     return &port->up;
 }
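
The "tables_version" added to struct ofproto_dpif is published by set_tables_version() above with a relaxed atomic store and read back with a relaxed load in ofproto_dpif_get_tables_version() later in this patch.  Relaxed ordering looks sufficient because a rule found in any version stays valid until the next RCU quiescent period (see the lookup comments below).  A standalone model of the pattern, using C11 atomics in place of OVS's atomic wrappers (an assumption made purely for illustration):

    #include <inttypes.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t cls_version_t;         /* Stand-in for OVS's cls_version_t. */

    static _Atomic cls_version_t tables_version;

    /* Writer (main thread): publish the version new lookups should use. */
    static void
    publish_tables_version(cls_version_t version)
    {
        atomic_store_explicit(&tables_version, version, memory_order_relaxed);
    }

    /* Readers (handler/revalidator threads): fetch the current version. */
    static cls_version_t
    read_tables_version(void)
    {
        return atomic_load_explicit(&tables_version, memory_order_relaxed);
    }

    int
    main(void)
    {
        publish_tables_version(42);
        printf("lookups use version %" PRIu64 "\n", read_tables_version());
        return 0;
    }
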
 
@@ -1645,6 +1715,7 @@ port_construct(struct ofport *port_)
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
     const struct netdev *netdev = port->up.netdev;
     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
+    const char *dp_port_name;
     struct dpif_port dpif_port;
     int error;
 
@@ -1678,9 +1749,8 @@ port_construct(struct ofport *port_)
         return 0;
     }
 
-    error = dpif_port_query_by_name(ofproto->backer->dpif,
-                                    netdev_vport_get_dpif_port(netdev, namebuf,
-                                                               sizeof namebuf),
+    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
+    error = dpif_port_query_by_name(ofproto->backer->dpif, dp_port_name,
                                     &dpif_port);
     if (error) {
         return error;
@@ -1690,8 +1760,14 @@ port_construct(struct ofport *port_)
 
     if (netdev_get_tunnel_config(netdev)) {
         atomic_count_inc(&ofproto->backer->tnl_count);
-        tnl_port_add(port, port->up.netdev, port->odp_port,
-                     ovs_native_tunneling_is_on(ofproto), namebuf);
+        error = tnl_port_add(port, port->up.netdev, port->odp_port,
+                             ovs_native_tunneling_is_on(ofproto), dp_port_name);
+        if (error) {
+            atomic_count_dec(&ofproto->backer->tnl_count);
+            dpif_port_destroy(&dpif_port);
+            return error;
+        }
+
         port->is_tunnel = true;
         if (ofproto->ipfix) {
            dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
@@ -1788,6 +1864,7 @@ port_modified(struct ofport *port_)
 {
     struct ofport_dpif *port = ofport_dpif_cast(port_);
     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
+    const char *dp_port_name;
     struct netdev *netdev = port->up.netdev;
 
     if (port->bundle && port->bundle->bond) {
@@ -1803,15 +1880,16 @@ port_modified(struct ofport *port_)
     }
 
     ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
-                                     port->lldp, port->up.pp.hw_addr);
+                                     port->lldp, &port->up.pp.hw_addr);
 
-    netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
+    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
 
     if (port->is_tunnel) {
         struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
 
         if (tnl_port_reconfigure(port, netdev, port->odp_port,
-                                 ovs_native_tunneling_is_on(ofproto), namebuf)) {
+                                 ovs_native_tunneling_is_on(ofproto),
+                                 dp_port_name)) {
             ofproto->backer->need_revalidate = REV_RECONFIGURE;
         }
     }
@@ -1938,7 +2016,7 @@ out:
         ofproto->backer->need_revalidate = REV_RECONFIGURE;
     }
     ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
-                                     ofport->lldp, ofport->up.pp.hw_addr);
+                                     ofport->lldp, &ofport->up.pp.hw_addr);
     return error;
 }
 
@@ -1980,7 +2058,7 @@ set_bfd(struct ofport *ofport_, const struct smap *cfg)
         ofproto->backer->need_revalidate = REV_RECONFIGURE;
     }
     ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
-                                     ofport->lldp, ofport->up.pp.hw_addr);
+                                     ofport->lldp, &ofport->up.pp.hw_addr);
     return 0;
 }
 
@@ -2023,21 +2101,20 @@ set_lldp(struct ofport *ofport_,
             ofport->lldp = lldp_create(ofport->up.netdev, ofport_->mtu, cfg);
         }
 
-        if (lldp_configure(ofport->lldp)) {
-            error = 0;
-            goto out;
+        if (!lldp_configure(ofport->lldp, cfg)) {
+            error = EINVAL;
         }
-
-        error = EINVAL;
     }
-    lldp_unref(ofport->lldp);
-    ofport->lldp = NULL;
-out:
+    if (error) {
+        lldp_unref(ofport->lldp);
+        ofport->lldp = NULL;
+    }
+
     ofproto_dpif_monitor_port_update(ofport,
                                      ofport->bfd,
                                      ofport->cfm,
                                      ofport->lldp,
-                                     ofport->up.pp.hw_addr);
+                                     &ofport->up.pp.hw_addr);
     return error;
 }
 
@@ -2093,7 +2170,7 @@ rstp_send_bpdu_cb(struct dp_packet *pkt, void *ofport_, void *ofproto_)
     struct ofport_dpif *ofport = ofport_;
     struct eth_header *eth = dp_packet_l2(pkt);
 
-    netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+    netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
     if (eth_addr_is_zero(eth->eth_src)) {
         VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
                      "does not have a configured source MAC address.",
@@ -2118,7 +2195,7 @@ send_bpdu_cb(struct dp_packet *pkt, int port_num, void *ofproto_)
     } else {
         struct eth_header *eth = dp_packet_l2(pkt);
 
-        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+        netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
         if (eth_addr_is_zero(eth->eth_src)) {
             VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                          "with unknown MAC", ofproto->up.name, port_num);
@@ -2264,9 +2341,14 @@ rstp_run(struct ofproto_dpif *ofproto)
         }
 
         if (rstp_shift_root_learned_address(ofproto->rstp)) {
-            bundle_move(((struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp))->bundle,
-                        ((struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp))->bundle);
-            rstp_reset_root_changed(ofproto->rstp);
+            struct ofport_dpif *old_root_aux =
+                (struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp);
+            struct ofport_dpif *new_root_aux =
+                (struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp);
+            if (old_root_aux != NULL && new_root_aux != NULL) {
+                bundle_move(old_root_aux->bundle, new_root_aux->bundle);
+                rstp_reset_root_changed(ofproto->rstp);
+            }
         }
     }
 }
@@ -2506,8 +2588,11 @@ set_rstp_port(struct ofport *ofport_,
 
     if (!s || !s->enable) {
         if (rp) {
-            rstp_port_unref(rp);
+            rstp_port_set_aux(rp, NULL);
+            rstp_port_set_state(rp, RSTP_DISABLED);
+            rstp_port_set_mac_operational(rp, false);
             ofport->rstp_port = NULL;
+            rstp_port_unref(rp);
             update_rstp_port_state(ofport);
         }
         return;
@@ -2952,10 +3037,10 @@ send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
 {
     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
     struct ofport_dpif *port = port_;
-    uint8_t ea[ETH_ADDR_LEN];
+    struct eth_addr ea;
     int error;
 
-    error = netdev_get_etheraddr(port->up.netdev, ea);
+    error = netdev_get_etheraddr(port->up.netdev, &ea);
     if (!error) {
         struct dp_packet packet;
         void *packet_pdu;
@@ -2978,43 +3063,41 @@ static void
 bundle_send_learning_packets(struct ofbundle *bundle)
 {
     struct ofproto_dpif *ofproto = bundle->ofproto;
-    struct dp_packet *learning_packet;
     int error, n_packets, n_errors;
     struct mac_entry *e;
+    struct pkt_list {
+        struct ovs_list list_node;
+        struct ofport_dpif *port;
+        struct dp_packet *pkt;
+    } *pkt_node;
     struct ovs_list packets;
 
     list_init(&packets);
     ovs_rwlock_rdlock(&ofproto->ml->rwlock);
     LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
         if (mac_entry_get_port(ofproto->ml, e) != bundle) {
-            void *port_void;
-
-            learning_packet = bond_compose_learning_packet(bundle->bond,
-                                                           e->mac, e->vlan,
-                                                           &port_void);
-            /* Temporarily use 'frame' as a private pointer (see below). */
-            ovs_assert(learning_packet->frame == dp_packet_data(learning_packet));
-            learning_packet->frame = port_void;
-            list_push_back(&packets, &learning_packet->list_node);
+            pkt_node = xmalloc(sizeof *pkt_node);
+            pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
+                                                         e->mac, e->vlan,
+                                                         (void **)&pkt_node->port);
+            list_push_back(&packets, &pkt_node->list_node);
         }
     }
     ovs_rwlock_unlock(&ofproto->ml->rwlock);
 
     error = n_packets = n_errors = 0;
-    LIST_FOR_EACH (learning_packet, list_node, &packets) {
+    LIST_FOR_EACH_POP (pkt_node, list_node, &packets) {
         int ret;
-        void *port_void = learning_packet->frame;
 
-        /* Restore 'frame'. */
-        learning_packet->frame = dp_packet_data(learning_packet);
-        ret = ofproto_dpif_send_packet(port_void, learning_packet);
+        ret = ofproto_dpif_send_packet(pkt_node->port, pkt_node->pkt);
+        dp_packet_delete(pkt_node->pkt);
+        free(pkt_node);
         if (ret) {
             error = ret;
             n_errors++;
         }
         n_packets++;
     }
-    dp_packet_list_delete(&packets);
 
     if (n_errors) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
@@ -3612,14 +3695,13 @@ rule_expire(struct rule_dpif *rule)
     }
 }
 
-/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
- * 'flow' must reflect the data in 'packet'. */
 int
-ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
-                             const struct flow *flow,
-                             struct rule_dpif *rule,
-                             const struct ofpact *ofpacts, size_t ofpacts_len,
-                             struct dp_packet *packet)
+ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
+                               const struct flow *flow,
+                               struct rule_dpif *rule,
+                               const struct ofpact *ofpacts, size_t ofpacts_len,
+                               int recurse, int resubmits,
+                               struct dp_packet *packet)
 {
     struct dpif_flow_stats stats;
     struct xlate_out xout;
@@ -3636,20 +3718,28 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
         rule_dpif_credit_stats(rule, &stats);
     }
 
+    uint64_t odp_actions_stub[1024 / 8];
+    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
     xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
-                  stats.tcp_flags, packet);
+                  stats.tcp_flags, packet, NULL, &odp_actions);
     xin.ofpacts = ofpacts;
     xin.ofpacts_len = ofpacts_len;
     xin.resubmit_stats = &stats;
-    xlate_actions(&xin, &xout);
+    xin.recurse = recurse;
+    xin.resubmits = resubmits;
+    if (xlate_actions(&xin, &xout) != XLATE_OK) {
+        error = EINVAL;
+        goto out;
+    }
 
-    execute.actions = xout.odp_actions->data;
-    execute.actions_len = xout.odp_actions->size;
+    execute.actions = odp_actions.data;
+    execute.actions_len = odp_actions.size;
 
     pkt_metadata_from_flow(&packet->md, flow);
     execute.packet = packet;
     execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
     execute.probe = false;
+    execute.mtu = 0;
 
     /* Fix up in_port. */
     in_port = flow->in_port.ofp_port;
@@ -3659,20 +3749,38 @@ ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
     execute.packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
 
     error = dpif_execute(ofproto->backer->dpif, &execute);
-
+out:
     xlate_out_uninit(&xout);
+    ofpbuf_uninit(&odp_actions);
 
     return error;
 }
 
+/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
+ * 'flow' must reflect the data in 'packet'. */
+int
+ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
+                             const struct flow *flow,
+                             struct rule_dpif *rule,
+                             const struct ofpact *ofpacts, size_t ofpacts_len,
+                             struct dp_packet *packet)
+{
+    return ofproto_dpif_execute_actions__(ofproto, flow, rule, ofpacts,
+                                          ofpacts_len, 0, 0, packet);
+}
+
 void
 rule_dpif_credit_stats(struct rule_dpif *rule,
                        const struct dpif_flow_stats *stats)
 {
     ovs_mutex_lock(&rule->stats_mutex);
-    rule->stats.n_packets += stats->n_packets;
-    rule->stats.n_bytes += stats->n_bytes;
-    rule->stats.used = MAX(rule->stats.used, stats->used);
+    if (OVS_UNLIKELY(rule->new_rule)) {
+        rule_dpif_credit_stats(rule->new_rule, stats);
+    } else {
+        rule->stats.n_packets += stats->n_packets;
+        rule->stats.n_bytes += stats->n_bytes;
+        rule->stats.used = MAX(rule->stats.used, stats->used);
+    }
     ovs_mutex_unlock(&rule->stats_mutex);
 }
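
The stats forwarding used here is set up by rule_insert() and torn down by rule_destruct() later in this patch: while a flow is being replaced, the old rule's "new_rule" points at its replacement and all stats credits are redirected there.  A small standalone model of that behavior (simplified, no locking, names invented for illustration):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified model of rule_dpif stats forwarding; not OVS code. */
    struct model_rule {
        uint64_t n_packets;
        uint64_t n_bytes;
        struct model_rule *new_rule;    /* Non-NULL only while being replaced. */
    };

    static void
    credit_stats(struct model_rule *rule, uint64_t packets, uint64_t bytes)
    {
        if (rule->new_rule) {
            credit_stats(rule->new_rule, packets, bytes);   /* Forward. */
        } else {
            rule->n_packets += packets;
            rule->n_bytes += bytes;
        }
    }

    int
    main(void)
    {
        struct model_rule old_rule = { .n_packets = 10, .n_bytes = 1000 };
        struct model_rule new_rule = { 0, 0, NULL };

        /* rule_insert(new, old, forward_stats=true): copy stats, then forward. */
        new_rule.n_packets = old_rule.n_packets;
        new_rule.n_bytes = old_rule.n_bytes;
        old_rule.new_rule = &new_rule;

        /* Datapath stats still credited to the old rule land on the new one. */
        credit_stats(&old_rule, 5, 500);
        printf("new rule: %" PRIu64 " packets, %" PRIu64 " bytes\n",
               new_rule.n_packets, new_rule.n_bytes);       /* 15 and 1500. */
        return 0;
    }
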
 
@@ -3723,38 +3831,38 @@ rule_set_recirc_id(struct rule *rule_, uint32_t id)
     ovs_mutex_unlock(&rule->up.mutex);
 }
 
+cls_version_t
+ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto)
+{
+    cls_version_t version;
+
+    atomic_read_relaxed(&ofproto->tables_version, &version);
+
+    return version;
+}
+
 /* The returned rule (if any) is valid at least until the next RCU quiescent
- * period.  If the rule needs to stay around longer, a non-zero 'take_ref'
- * must be passed in to cause a reference to be taken on it.
+ * period.  If the rule needs to stay around longer, the caller should take
+ * a reference.
  *
  * 'flow' is non-const to allow for temporary modifications during the lookup.
  * Any changes are restored before returning. */
 static struct rule_dpif *
-rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
-                          struct flow *flow, struct flow_wildcards *wc,
-                          bool take_ref)
+rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, cls_version_t version,
+                          uint8_t table_id, struct flow *flow,
+                          struct flow_wildcards *wc)
 {
     struct classifier *cls = &ofproto->up.tables[table_id].cls;
-    const struct cls_rule *cls_rule;
-    struct rule_dpif *rule;
-
-    do {
-        cls_rule = classifier_lookup(cls, flow, wc);
-
-        rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
-
-        /* Try again if the rule was released before we get the reference. */
-    } while (rule && take_ref && !rule_dpif_try_ref(rule));
-
-    return rule;
+    return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls, version,
+                                                               flow, wc)));
 }
 
-/* Look up 'flow' in 'ofproto''s classifier starting from table '*table_id'.
- * Returns the rule that was found, which may be one of the special rules
- * according to packet miss hadling.  If 'may_packet_in' is false, returning of
- * the miss_rule (which issues packet ins for the controller) is avoided.
- * Updates 'wc', if nonnull, to reflect the fields that were used during the
- * lookup.
+/* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
+ * table '*table_id'.  Returns the rule that was found, which may be one of the
+ * special rules according to packet miss handling.  If 'may_packet_in' is
+ * false, returning the miss_rule (which issues packet ins for the
+ * controller) is avoided.  Updates 'wc', if nonnull, to reflect the fields
+ * that were used during the lookup.
  *
  * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
  * if none is found then the table miss configuration for that table is
@@ -3766,9 +3874,8 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
  * '*table_id'.
  *
  * The rule is returned in '*rule', which is valid at least until the next
- * RCU quiescent period.  If the '*rule' needs to stay around longer,
- * a non-zero 'take_ref' must be passed in to cause a reference to be taken
- * on it before this returns.
+ * RCU quiescent period.  If the '*rule' needs to stay around longer, the
+ * caller must take a reference.
  *
  * 'in_port' allows the lookup to take place as if the in port had the value
  * 'in_port'.  This is needed for resubmit action support.
@@ -3776,8 +3883,9 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
  * 'flow' is non-const to allow for temporary modifications during the lookup.
  * Any changes are restored before returning. */
 struct rule_dpif *
-rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
-                            struct flow_wildcards *wc, bool take_ref,
+rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto,
+                            cls_version_t version, struct flow *flow,
+                            struct flow_wildcards *wc,
                             const struct dpif_flow_stats *stats,
                             uint8_t *table_id, ofp_port_t in_port,
                             bool may_packet_in, bool honor_table_miss)
@@ -3791,18 +3899,15 @@ rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
     /* We always unwildcard nw_frag (for IP), so they
      * need not be unwildcarded here. */
     if (flow->nw_frag & FLOW_NW_FRAG_ANY
-        && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
-        if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+        && ofproto->up.frag_handling != OFPUTIL_FRAG_NX_MATCH) {
+        if (ofproto->up.frag_handling == OFPUTIL_FRAG_NORMAL) {
             /* We must pretend that transport ports are unavailable. */
             flow->tp_src = htons(0);
             flow->tp_dst = htons(0);
         } else {
-            /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+            /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
              * Use the drop_frags_rule (which cannot disappear). */
             rule = ofproto->drop_frags_rule;
-            if (take_ref) {
-                rule_dpif_ref(rule);
-            }
             if (stats) {
                 struct oftable *tbl = &ofproto->up.tables[*table_id];
                 unsigned long orig;
@@ -3829,7 +3934,7 @@ rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
          next_id++, next_id += (next_id == TBL_INTERNAL))
     {
         *table_id = next_id;
-        rule = rule_dpif_lookup_in_table(ofproto, next_id, flow, wc, take_ref);
+        rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc);
         if (stats) {
             struct oftable *tbl = &ofproto->up.tables[next_id];
             unsigned long orig;
@@ -3868,9 +3973,6 @@ rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
             rule = ofproto->miss_rule;
         }
     }
-    if (take_ref) {
-        rule_dpif_ref(rule);
-    }
 out:
     /* Restore port numbers, as they may have been modified above. */
     flow->tp_src = old_tp_src;
@@ -3898,7 +4000,7 @@ static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
 static struct rule *
 rule_alloc(void)
 {
-    struct rule_dpif *rule = xmalloc(sizeof *rule);
+    struct rule_dpif *rule = xzalloc(sizeof *rule);
     return &rule->up;
 }
 
@@ -3909,27 +4011,140 @@ rule_dealloc(struct rule *rule_)
     free(rule);
 }
 
+static enum ofperr
+check_mask(struct ofproto_dpif *ofproto, const struct miniflow *flow)
+{
+    const struct odp_support *support;
+    uint16_t ct_state, ct_zone;
+    ovs_u128 ct_label;
+    uint32_t ct_mark;
+
+    support = &ofproto_dpif_get_support(ofproto)->odp;
+    ct_state = MINIFLOW_GET_U16(flow, ct_state);
+    if (support->ct_state && support->ct_zone && support->ct_mark
+        && support->ct_label && support->ct_state_nat) {
+        return ct_state & CS_UNSUPPORTED_MASK ? OFPERR_OFPBMC_BAD_MASK : 0;
+    }
+
+    ct_zone = MINIFLOW_GET_U16(flow, ct_zone);
+    ct_mark = MINIFLOW_GET_U32(flow, ct_mark);
+    ct_label = MINIFLOW_GET_U128(flow, ct_label);
+
+    if ((ct_state && !support->ct_state)
+        || (ct_state & CS_UNSUPPORTED_MASK)
+        || ((ct_state & (CS_SRC_NAT | CS_DST_NAT)) && !support->ct_state_nat)
+        || (ct_zone && !support->ct_zone)
+        || (ct_mark && !support->ct_mark)
+        || (!ovs_u128_is_zero(&ct_label) && !support->ct_label)) {
+        return OFPERR_OFPBMC_BAD_MASK;
+    }
+
+    return 0;
+}
+
+static enum ofperr
+check_actions(const struct ofproto_dpif *ofproto,
+              const struct rule_actions *const actions)
+{
+    const struct ofpact *ofpact;
+
+    OFPACT_FOR_EACH (ofpact, actions->ofpacts, actions->ofpacts_len) {
+        const struct odp_support *support;
+        const struct ofpact_conntrack *ct;
+        const struct ofpact *a;
+
+        if (ofpact->type != OFPACT_CT) {
+            continue;
+        }
+
+        ct = CONTAINER_OF(ofpact, struct ofpact_conntrack, ofpact);
+        support = &ofproto_dpif_get_support(ofproto)->odp;
+
+        if (!support->ct_state) {
+            return OFPERR_OFPBAC_BAD_TYPE;
+        }
+        if ((ct->zone_imm || ct->zone_src.field) && !support->ct_zone) {
+            return OFPERR_OFPBAC_BAD_ARGUMENT;
+        }
+
+        OFPACT_FOR_EACH(a, ct->actions, ofpact_ct_get_action_len(ct)) {
+            const struct mf_field *dst = ofpact_get_mf_dst(a);
+
+            if (a->type == OFPACT_NAT && !support->ct_state_nat) {
+                /* The backer doesn't seem to support the NAT bits in
+                 * 'ct_state': assume that it doesn't support the NAT
+                 * action. */
+                return OFPERR_OFPBAC_BAD_TYPE;
+            }
+            if (dst && ((dst->id == MFF_CT_MARK && !support->ct_mark)
+                        || (dst->id == MFF_CT_LABEL && !support->ct_label))) {
+                return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
+            }
+        }
+    }
+
+    return 0;
+}
+
+static enum ofperr
+rule_check(struct rule *rule)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->ofproto);
+    enum ofperr err;
+
+    err = check_mask(ofproto, &rule->cr.match.mask->masks);
+    if (err) {
+        return err;
+    }
+    return check_actions(ofproto, rule->actions);
+}
+
 static enum ofperr
 rule_construct(struct rule *rule_)
     OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
+    int error;
+
+    error = rule_check(rule_);
+    if (error) {
+        return error;
+    }
+
     ovs_mutex_init_adaptive(&rule->stats_mutex);
     rule->stats.n_packets = 0;
     rule->stats.n_bytes = 0;
     rule->stats.used = rule->up.modified;
     rule->recirc_id = 0;
+    rule->new_rule = NULL;
 
     return 0;
 }
 
-static enum ofperr
-rule_insert(struct rule *rule_)
+static void
+rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_stats)
     OVS_REQUIRES(ofproto_mutex)
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
+
+    if (old_rule_ && forward_stats) {
+        struct rule_dpif *old_rule = rule_dpif_cast(old_rule_);
+
+        ovs_assert(!old_rule->new_rule);
+
+        /* Take a reference to the new rule, and refer all stats updates from
+         * the old rule to the new rule. */
+        rule_dpif_ref(rule);
+
+        ovs_mutex_lock(&old_rule->stats_mutex);
+        ovs_mutex_lock(&rule->stats_mutex);
+        old_rule->new_rule = rule;       /* Forward future stats. */
+        rule->stats = old_rule->stats;   /* Transfer stats to the new rule. */
+        ovs_mutex_unlock(&rule->stats_mutex);
+        ovs_mutex_unlock(&old_rule->stats_mutex);
+    }
+
     complete_operation(rule);
-    return 0;
 }
 
 static void
@@ -3942,10 +4157,15 @@ rule_delete(struct rule *rule_)
 
 static void
 rule_destruct(struct rule *rule_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
 
     ovs_mutex_destroy(&rule->stats_mutex);
+    /* Release reference to the new rule, if any. */
+    if (rule->new_rule) {
+        rule_dpif_unref(rule->new_rule);
+    }
     if (rule->recirc_id) {
         recirc_free_id(rule->recirc_id);
     }
@@ -3958,9 +4178,13 @@ rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
     struct rule_dpif *rule = rule_dpif_cast(rule_);
 
     ovs_mutex_lock(&rule->stats_mutex);
-    *packets = rule->stats.n_packets;
-    *bytes = rule->stats.n_bytes;
-    *used = rule->stats.used;
+    if (OVS_UNLIKELY(rule->new_rule)) {
+        rule_get_stats(&rule->new_rule->up, packets, bytes, used);
+    } else {
+        *packets = rule->stats.n_packets;
+        *bytes = rule->stats.n_bytes;
+        *used = rule->stats.used;
+    }
     ovs_mutex_unlock(&rule->stats_mutex);
 }
 
@@ -3982,22 +4206,6 @@ rule_execute(struct rule *rule, const struct flow *flow,
     return 0;
 }
 
-static void
-rule_modify_actions(struct rule *rule_, bool reset_counters)
-    OVS_REQUIRES(ofproto_mutex)
-{
-    struct rule_dpif *rule = rule_dpif_cast(rule_);
-
-    if (reset_counters) {
-        ovs_mutex_lock(&rule->stats_mutex);
-        rule->stats.n_packets = 0;
-        rule->stats.n_bytes = 0;
-        ovs_mutex_unlock(&rule->stats_mutex);
-    }
-
-    complete_operation(rule);
-}
-
 static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
 {
     return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
@@ -4061,19 +4269,6 @@ static enum ofperr
 group_construct(struct ofgroup *group_)
 {
     struct group_dpif *group = group_dpif_cast(group_);
-    const struct ofputil_bucket *bucket;
-
-    /* Prevent group chaining because our locking structure makes it hard to
-     * implement deadlock-free.  (See xlate_group_resource_check().) */
-    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
-        const struct ofpact *a;
-
-        OFPACT_FOR_EACH (a, bucket->ofpacts, bucket->ofpacts_len) {
-            if (a->type == OFPACT_GROUP) {
-                return OFPERR_OFPGMFC_CHAINING_UNSUPPORTED;
-            }
-        }
-    }
 
     ovs_mutex_init_adaptive(&group->stats_mutex);
     ovs_mutex_lock(&group->stats_mutex);
@@ -4202,10 +4397,10 @@ get_datapath_version(const struct ofproto *ofproto_)
 
 static bool
 set_frag_handling(struct ofproto *ofproto_,
-                  enum ofp_config_flags frag_handling)
+                  enum ofputil_frag_handling frag_handling)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-    if (frag_handling != OFPC_FRAG_REASM) {
+    if (frag_handling != OFPUTIL_FRAG_REASM) {
         ofproto->backer->need_revalidate = REV_RECONFIGURE;
         return true;
     } else {
@@ -4398,8 +4593,9 @@ ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
             bundle = b->port;
             ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                                    name, sizeof name);
-            ds_put_format(&ds, "%5s  %4d  "IP_FMT"         %3d\n",
-                          name, grp->vlan, IP_ARGS(grp->ip4),
+            ds_put_format(&ds, "%5s  %4d  ", name, grp->vlan);
+            ipv6_format_mapped(&grp->addr, &ds);
+            ds_put_format(&ds, "         %3d\n",
                           mcast_bundle_age(ofproto->ms, b));
         }
     }
@@ -4411,7 +4607,7 @@ ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
         bundle = mrouter->port;
         ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                                name, sizeof name);
-            ds_put_format(&ds, "%5s  %4d  querier             %3d\n",
+        ds_put_format(&ds, "%5s  %4d  querier             %3d\n",
                       name, mrouter->vlan,
                       mcast_mrouter_age(ofproto->ms, mrouter));
     }
@@ -4425,8 +4621,9 @@ struct trace_ctx {
     struct xlate_in xin;
     const struct flow *key;
     struct flow flow;
-    struct flow_wildcards wc;
     struct ds *result;
+    struct flow_wildcards wc;
+    struct ofpbuf odp_actions;
 };
 
 static void
@@ -4493,7 +4690,7 @@ static void
 trace_format_odp(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
 {
-    struct ofpbuf *odp_actions = trace->xout.odp_actions;
+    struct ofpbuf *odp_actions = &trace->odp_actions;
 
     ds_put_char_multiple(result, '\t', level);
     ds_put_format(result, "%s: ", title);
@@ -4509,13 +4706,17 @@ trace_format_megaflow(struct ds *result, int level, const char *title,
 
     ds_put_char_multiple(result, '\t', level);
     ds_put_format(result, "%s: ", title);
-    flow_wildcards_or(&trace->wc, &trace->xout.wc, &trace->wc);
     match_init(&match, trace->key, &trace->wc);
     match_format(&match, result, OFP_DEFAULT_PRIORITY);
     ds_put_char(result, '\n');
 }
 
-static void trace_report(struct xlate_in *xin, const char *s, int recurse);
+static void trace_report(struct xlate_in *, int recurse,
+                         const char *format, ...)
+    OVS_PRINTF_FORMAT(3, 4);
+static void trace_report_valist(struct xlate_in *, int recurse,
+                                const char *format, va_list args)
+    OVS_PRINTF_FORMAT(3, 0);
 
 static void
 trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
@@ -4525,15 +4726,15 @@ trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
 
     if (!recurse) {
         if (rule == xin->ofproto->miss_rule) {
-            trace_report(xin, "No match, flow generates \"packet in\"s.",
-                         recurse);
+            trace_report(xin, recurse,
+                         "No match, flow generates \"packet in\"s.");
         } else if (rule == xin->ofproto->no_packet_in_rule) {
-            trace_report(xin, "No match, packets dropped because "
-                         "OFPPC_NO_PACKET_IN is set on in_port.", recurse);
+            trace_report(xin, recurse, "No match, packets dropped because "
+                         "OFPPC_NO_PACKET_IN is set on in_port.");
         } else if (rule == xin->ofproto->drop_frags_rule) {
-            trace_report(xin, "Packets dropped because they are IP "
+            trace_report(xin, recurse, "Packets dropped because they are IP "
                          "fragments and the fragment handling mode is "
-                         "\"drop\".", recurse);
+                         "\"drop\".");
         }
     }
 
@@ -4548,16 +4749,27 @@ trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
 }
 
 static void
-trace_report(struct xlate_in *xin, const char *s, int recurse)
+trace_report_valist(struct xlate_in *xin, int recurse,
+                    const char *format, va_list args)
 {
     struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
     struct ds *result = trace->result;
 
     ds_put_char_multiple(result, '\t', recurse);
-    ds_put_cstr(result, s);
+    ds_put_format_valist(result, format, args);
     ds_put_char(result, '\n');
 }
 
+static void
+trace_report(struct xlate_in *xin, int recurse, const char *format, ...)
+{
+    va_list args;
+
+    va_start(args, format);
+    trace_report_valist(xin, recurse, format, args);
+    va_end(args);
+}
+
 /* Parses the 'argc' elements of 'argv', ignoring argv[0].  The following
  * forms are supported:
  *
@@ -4640,7 +4852,7 @@ parse_flow_and_packet(int argc, const char *argv[],
         }
 
         if (odp_flow_key_to_flow(odp_key.data, odp_key.size, flow) == ODP_FIT_ERROR) {
-            error = "Failed to parse flow key";
+            error = "Failed to parse datapath flow key";
             goto exit;
         }
 
@@ -4657,7 +4869,7 @@ parse_flow_and_packet(int argc, const char *argv[],
         char *err = parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL);
 
         if (err) {
-            m_err = xasprintf("Bad flow syntax: %s", err);
+            m_err = xasprintf("Bad OpenFlow flow syntax: %s", err);
             free(err);
             goto exit;
         } else {
@@ -4788,13 +5000,18 @@ ofproto_unixctl_trace_actions(struct unixctl_conn *conn, int argc,
         goto exit;
     }
     if (enforce_consistency) {
-        retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size,
-                                           &flow, u16_to_ofp(ofproto->up.max_ports),
-                                           0, 0, usable_protocols);
+        retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
+                                           u16_to_ofp(ofproto->up.max_ports),
+                                           0, ofproto->up.n_tables,
+                                           usable_protocols);
     } else {
         retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
-                               u16_to_ofp(ofproto->up.max_ports), 0, 0,
-                               &usable_protocols);
+                               u16_to_ofp(ofproto->up.max_ports), 0,
+                               ofproto->up.n_tables, &usable_protocols);
+    }
+    if (!retval) {
+        retval = ofproto_check_ofpacts(&ofproto->up, ofpacts.data,
+                                       ofpacts.size);
     }
 
     if (retval) {
@@ -4830,35 +5047,38 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
               struct ds *ds)
 {
     struct trace_ctx trace;
+    enum xlate_error error;
 
     ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
     ds_put_cstr(ds, "Flow: ");
     flow_format(ds, flow);
     ds_put_char(ds, '\n');
 
-    flow_wildcards_init_catchall(&trace.wc);
+    ofpbuf_init(&trace.odp_actions, 0);
 
     trace.result = ds;
     trace.key = flow; /* Original flow key, used for megaflow. */
     trace.flow = *flow; /* May be modified by actions. */
     xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
-                  ntohs(flow->tcp_flags), packet);
+                  ntohs(flow->tcp_flags), packet, &trace.wc,
+                  &trace.odp_actions);
     trace.xin.ofpacts = ofpacts;
     trace.xin.ofpacts_len = ofpacts_len;
     trace.xin.resubmit_hook = trace_resubmit;
-    trace.xin.report_hook = trace_report;
-
-    xlate_actions(&trace.xin, &trace.xout);
+    trace.xin.report_hook = trace_report_valist;
 
+    error = xlate_actions(&trace.xin, &trace.xout);
     ds_put_char(ds, '\n');
     trace_format_flow(ds, 0, "Final flow", &trace);
     trace_format_megaflow(ds, 0, "Megaflow", &trace);
 
     ds_put_cstr(ds, "Datapath actions: ");
-    format_odp_actions(ds, trace.xout.odp_actions->data,
-                       trace.xout.odp_actions->size);
+    format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size);
 
-    if (trace.xout.slow) {
+    if (error != XLATE_OK) {
+        ds_put_format(ds, "\nTranslation failed (%s), packet is dropped.\n",
+                      xlate_strerror(error));
+    } else if (trace.xout.slow) {
         enum slow_path_reason slow;
 
         ds_put_cstr(ds, "\nThis flow is handled by the userspace "
@@ -4876,6 +5096,7 @@ ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
     }
 
     xlate_out_uninit(&trace.xout);
+    ofpbuf_uninit(&trace.odp_actions);
 }
 
 /* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
@@ -5103,6 +5324,8 @@ disable_tnl_push_pop(struct unixctl_conn *conn OVS_UNUSED, int argc OVS_UNUSED,
         ofproto_use_tnl_push_pop = true;
         unixctl_command_reply(conn, "Tunnel push-pop on");
         ofproto_revalidate_all_backers();
+    } else {
+        unixctl_command_reply_error(conn, "Invalid argument");
     }
 }
 
@@ -5429,28 +5652,23 @@ ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
                                const struct ofpbuf *ofpacts,
                                struct rule **rulep)
 {
-    struct ofputil_flow_mod fm;
+    struct ofproto_flow_mod ofm;
     struct rule_dpif *rule;
     int error;
 
-    fm.match = *match;
-    fm.priority = priority;
-    fm.new_cookie = htonll(0);
-    fm.cookie = htonll(0);
-    fm.cookie_mask = htonll(0);
-    fm.modify_cookie = false;
-    fm.table_id = TBL_INTERNAL;
-    fm.command = OFPFC_ADD;
-    fm.idle_timeout = idle_timeout;
-    fm.hard_timeout = 0;
-    fm.importance = 0;
-    fm.buffer_id = 0;
-    fm.out_port = 0;
-    fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
-    fm.ofpacts = ofpacts->data;
-    fm.ofpacts_len = ofpacts->size;
-
-    error = ofproto_flow_mod(&ofproto->up, &fm);
+    ofm.fm = (struct ofputil_flow_mod) {
+        .match = *match,
+        .priority = priority,
+        .table_id = TBL_INTERNAL,
+        .command = OFPFC_ADD,
+        .idle_timeout = idle_timeout,
+        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+        .ofpacts = ofpacts->data,
+        .ofpacts_len = ofpacts->size,
+        .delete_reason = OVS_OFPRR_NONE,
+    };
+
+    error = ofproto_flow_mod(&ofproto->up, &ofm);
     if (error) {
         VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
                     ofperr_to_string(error));
@@ -5458,8 +5676,10 @@ ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
         return error;
     }
 
-    rule = rule_dpif_lookup_in_table(ofproto, TBL_INTERNAL, &fm.match.flow,
-                                     &fm.match.wc, false);
+    rule = rule_dpif_lookup_in_table(ofproto,
+                                     ofproto_dpif_get_tables_version(ofproto),
+                                     TBL_INTERNAL, &ofm.fm.match.flow,
+                                     &ofm.fm.match.wc);
     if (rule) {
         *rulep = &rule->up;
     } else {
@@ -5472,20 +5692,18 @@ int
 ofproto_dpif_delete_internal_flow(struct ofproto_dpif *ofproto,
                                   struct match *match, int priority)
 {
-    struct ofputil_flow_mod fm;
+    struct ofproto_flow_mod ofm;
     int error;
 
-    fm.match = *match;
-    fm.priority = priority;
-    fm.new_cookie = htonll(0);
-    fm.cookie = htonll(0);
-    fm.cookie_mask = htonll(0);
-    fm.modify_cookie = false;
-    fm.table_id = TBL_INTERNAL;
-    fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
-    fm.command = OFPFC_DELETE_STRICT;
-
-    error = ofproto_flow_mod(&ofproto->up, &fm);
+    ofm.fm = (struct ofputil_flow_mod) {
+        .match = *match,
+        .priority = priority,
+        .table_id = TBL_INTERNAL,
+        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+        .command = OFPFC_DELETE_STRICT,
+    };
+
+    error = ofproto_flow_mod(&ofproto->up, &ofm);
     if (error) {
         VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
                     ofperr_to_string(error));
@@ -5513,6 +5731,7 @@ const struct ofproto_class ofproto_dpif_class = {
     type_get_memory_usage,
     flush,
     query_tables,
+    set_tables_version,
     port_alloc,
     port_construct,
     port_destruct,
@@ -5539,8 +5758,6 @@ const struct ofproto_class ofproto_dpif_class = {
     rule_dealloc,
     rule_get_stats,
     rule_execute,
-    NULL,                       /* rule_premodify_actions */
-    rule_modify_actions,
     set_frag_handling,
     packet_out,
     set_netflow,