classifier: Add support for conjunctive matches.
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index bad0e29..ba4263e 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2009-2015 Nicira, Inc.
  * Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
@@ -57,7 +57,7 @@
 #include "timeval.h"
 #include "unaligned.h"
 #include "unixctl.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
 #include "bundles.h"
 
 VLOG_DEFINE_THIS_MODULE(ofproto);
@@ -89,8 +89,6 @@ static void oftable_enable_eviction(struct oftable *,
                                     size_t n_fields);
 
 static void oftable_remove_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex);
-static void oftable_remove_rule__(struct ofproto *, struct rule *)
-    OVS_REQUIRES(ofproto_mutex);
 
 /* A set of rules within a single OpenFlow table (oftable) that have the same
  * values for the oftable's eviction_fields.  A rule to be evicted, when one is
@@ -153,20 +151,23 @@ struct rule_criteria {
 };
 
 static void rule_criteria_init(struct rule_criteria *, uint8_t table_id,
-                               const struct match *match,
-                               unsigned int priority,
+                               const struct match *match, int priority,
                                ovs_be64 cookie, ovs_be64 cookie_mask,
                                ofp_port_t out_port, uint32_t out_group);
 static void rule_criteria_require_rw(struct rule_criteria *,
                                      bool can_write_readonly);
 static void rule_criteria_destroy(struct rule_criteria *);
 
+static enum ofperr collect_rules_loose(struct ofproto *,
+                                       const struct rule_criteria *,
+                                       struct rule_collection *);
+
 /* A packet that needs to be passed to rule_execute().
  *
  * (We can't do this immediately from ofopgroup_complete() because that holds
  * ofproto_mutex, which rule_execute() needs released.) */
 struct rule_execute {
-    struct list list_node;      /* In struct ofproto's "rule_executes" list. */
+    struct ovs_list list_node;  /* In struct ofproto's "rule_executes" list. */
     struct rule *rule;          /* Owns a reference to the rule. */
     ofp_port_t in_port;
     struct ofpbuf *packet;      /* Owns the packet. */
@@ -175,6 +176,37 @@ struct rule_execute {
 static void run_rule_executes(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
 static void destroy_rule_executes(struct ofproto *);
 
+struct learned_cookie {
+    union {
+        /* In struct ofproto's 'learned_cookies' hmap. */
+        struct hmap_node hmap_node OVS_GUARDED_BY(ofproto_mutex);
+
+        /* In 'dead_cookies' list when removed from hmap. */
+        struct ovs_list list_node;
+    } u;
+
+    /* Key. */
+    ovs_be64 cookie OVS_GUARDED_BY(ofproto_mutex);
+    uint8_t table_id OVS_GUARDED_BY(ofproto_mutex);
+
+    /* Number of references from "learn" actions.
+     *
+     * When this drops to 0, all of the flows in 'table_id' with the specified
+     * 'cookie' are deleted. */
+    int n OVS_GUARDED_BY(ofproto_mutex);
+};
+
+static const struct ofpact_learn *next_learn_with_delete(
+    const struct rule_actions *, const struct ofpact_learn *start);
+
+static void learned_cookies_inc(struct ofproto *, const struct rule_actions *)
+    OVS_REQUIRES(ofproto_mutex);
+static void learned_cookies_dec(struct ofproto *, const struct rule_actions *,
+                                struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex);
+static void learned_cookies_flush(struct ofproto *,
+                                  struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex);
+
 /* ofport. */
 static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex);
 static void ofport_destroy(struct ofport *);
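
Taken together, the declarations above keep a per-(cookie, table_id) count of
the rules whose "learn" actions have NX_LEARN_F_DELETE_LEARNED set, so that
the flows learned under a cookie can be deleted once the last such rule goes
away.  A minimal sketch of the removal side, using a hypothetical caller
(delete_flows__() further down does this for real):

    /* Illustrative only; not part of the patch. */
    static void
    example_forget_learned_flows(struct ofproto *ofproto, struct rule *rule)
        OVS_REQUIRES(ofproto_mutex)
    {
        struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);

        /* Drop this rule's references; entries that reach zero are moved
         * onto 'dead_cookies'. */
        learned_cookies_dec(ofproto, rule_get_actions(rule), &dead_cookies);

        /* Delete every flow that was learned under a now-dead cookie. */
        learned_cookies_flush(ofproto, &dead_cookies);
    }
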
@@ -203,6 +235,8 @@ struct ofport_usage {
 /* rule. */
 static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
 static bool rule_is_readonly(const struct rule *);
+static void ofproto_rule_remove__(struct ofproto *, struct rule *)
+    OVS_REQUIRES(ofproto_mutex);
 
 /* The source of a flow_mod request, in the code that processes flow_mods.
  *
@@ -222,8 +256,9 @@ static enum ofperr add_flow(struct ofproto *, struct ofputil_flow_mod *,
 static enum ofperr modify_flows__(struct ofproto *, struct ofputil_flow_mod *,
                                   const struct rule_collection *,
                                   const struct flow_mod_requester *);
-static void delete_flow__(struct rule *, enum ofp_flow_removed_reason,
-                          const struct flow_mod_requester *)
+static void delete_flows__(const struct rule_collection *,
+                           enum ofp_flow_removed_reason,
+                           const struct flow_mod_requester *)
     OVS_REQUIRES(ofproto_mutex);
 
 static enum ofperr send_buffered_packet(struct ofconn *, uint32_t buffer_id,
@@ -268,6 +303,8 @@ unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
 unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT;
 
 size_t n_handlers, n_revalidators;
+size_t n_dpdk_rxqs;
+char *pmd_cpu_mask;
 
 /* Map from datapath name to struct ofproto, for use by unixctl commands. */
 static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
@@ -476,6 +513,7 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ofproto->tables = NULL;
     ofproto->n_tables = 0;
     hindex_init(&ofproto->cookies);
+    hmap_init(&ofproto->learned_cookies);
     list_init(&ofproto->expirable);
     ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
     guarded_list_init(&ofproto->rule_executes);
@@ -485,35 +523,19 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ovs_rwlock_init(&ofproto->groups_rwlock);
     hmap_init(&ofproto->groups);
     ovs_mutex_unlock(&ofproto_mutex);
+    ofproto->ogf.types = 0xf;
     ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS |
                                 OFPGFC_SELECT_WEIGHT;
-    ofproto->ogf.max_groups[OFPGT11_ALL] = OFPG_MAX;
-    ofproto->ogf.max_groups[OFPGT11_SELECT] = OFPG_MAX;
-    ofproto->ogf.max_groups[OFPGT11_INDIRECT] = OFPG_MAX;
-    ofproto->ogf.max_groups[OFPGT11_FF] = OFPG_MAX;
-    ofproto->ogf.actions[0] =
-        (1 << OFPAT11_OUTPUT) |
-        (1 << OFPAT11_COPY_TTL_OUT) |
-        (1 << OFPAT11_COPY_TTL_IN) |
-        (1 << OFPAT11_SET_MPLS_TTL) |
-        (1 << OFPAT11_DEC_MPLS_TTL) |
-        (1 << OFPAT11_PUSH_VLAN) |
-        (1 << OFPAT11_POP_VLAN) |
-        (1 << OFPAT11_PUSH_MPLS) |
-        (1 << OFPAT11_POP_MPLS) |
-        (1 << OFPAT11_SET_QUEUE) |
-        (1 << OFPAT11_GROUP) |
-        (1 << OFPAT11_SET_NW_TTL) |
-        (1 << OFPAT11_DEC_NW_TTL) |
-        (1 << OFPAT12_SET_FIELD);
-/* not supported:
- *      (1 << OFPAT13_PUSH_PBB) |
- *      (1 << OFPAT13_POP_PBB) */
+    for (i = 0; i < 4; i++) {
+        ofproto->ogf.max_groups[i] = OFPG_MAX;
+        ofproto->ogf.ofpacts[i] = (UINT64_C(1) << N_OFPACTS) - 1;
+    }
 
     error = ofproto->ofproto_class->construct(ofproto);
     if (error) {
         VLOG_ERR("failed to open datapath %s: %s",
                  datapath_name, ovs_strerror(error));
+        connmgr_destroy(ofproto->connmgr);
         ofproto_destroy__(ofproto);
         return error;
     }
@@ -684,6 +706,47 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
     }
 }
 
+/* Multicast snooping configuration. */
+
+/* Configures multicast snooping on 'ofproto' using the settings
+ * defined in 's'.  If 's' is NULL, disables multicast snooping.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_set_mcast_snooping(struct ofproto *ofproto,
+                           const struct ofproto_mcast_snooping_settings *s)
+{
+    return (ofproto->ofproto_class->set_mcast_snooping
+            ? ofproto->ofproto_class->set_mcast_snooping(ofproto, s)
+            : EOPNOTSUPP);
+}
+
+/* Configures the multicast snooping flood setting on the port identified by
+ * 'aux' within 'ofproto'.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_port_set_mcast_snooping(struct ofproto *ofproto, void *aux, bool flood)
+{
+    return (ofproto->ofproto_class->set_mcast_snooping_port
+            ? ofproto->ofproto_class->set_mcast_snooping_port(ofproto, aux,
+                                                              flood)
+            : EOPNOTSUPP);
+}
+
+void
+ofproto_set_n_dpdk_rxqs(int n_rxqs)
+{
+    n_dpdk_rxqs = MAX(n_rxqs, 0);
+}
+
+void
+ofproto_set_cpu_mask(const char *cmask)
+{
+    free(pmd_cpu_mask);
+
+    pmd_cpu_mask = cmask ? xstrdup(cmask) : NULL;
+}
+
 void
 ofproto_set_threads(int n_handlers_, int n_revalidators_)
 {
@@ -865,6 +928,87 @@ ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port,
             ? ofproto->ofproto_class->get_stp_port_stats(ofport, s)
             : EOPNOTSUPP);
 }
+
+/* Rapid Spanning Tree Protocol (RSTP) configuration. */
+
+/* Configures RSTP on 'ofproto' using the settings defined in 's'.  If
+ * 's' is NULL, disables RSTP.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_set_rstp(struct ofproto *ofproto,
+                 const struct ofproto_rstp_settings *s)
+{
+    if (!ofproto->ofproto_class->set_rstp) {
+        return EOPNOTSUPP;
+    }
+    ofproto->ofproto_class->set_rstp(ofproto, s);
+    return 0;
+}
+
+/* Retrieves RSTP status of 'ofproto' and stores it in 's'.  If the
+ * 'enabled' member of 's' is false, then the other members are not
+ * meaningful.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_get_rstp_status(struct ofproto *ofproto,
+                        struct ofproto_rstp_status *s)
+{
+    if (!ofproto->ofproto_class->get_rstp_status) {
+        return EOPNOTSUPP;
+    }
+    ofproto->ofproto_class->get_rstp_status(ofproto, s);
+    return 0;
+}
+
+/* Configures RSTP on 'ofp_port' of 'ofproto' using the settings defined
+ * in 's'.  The caller is responsible for assigning RSTP port numbers
+ * (using the 'port_num' member in the range of 1 through 255, inclusive)
+ * and ensuring there are no duplicates.  If 's' is NULL, then RSTP
+ * is disabled on the port.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_port_set_rstp(struct ofproto *ofproto, ofp_port_t ofp_port,
+                      const struct ofproto_port_rstp_settings *s)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    if (!ofport) {
+        VLOG_WARN("%s: cannot configure RSTP on nonexistent port %"PRIu16,
+                ofproto->name, ofp_port);
+        return ENODEV;
+    }
+
+    if (!ofproto->ofproto_class->set_rstp_port) {
+        return EOPNOTSUPP;
+    }
+    ofproto->ofproto_class->set_rstp_port(ofport, s);
+    return 0;
+}
+
+/* Retrieves RSTP port status of 'ofp_port' on 'ofproto' and stores it in
+ * 's'.  If the 'enabled' member in 's' is false, then the other members
+ * are not meaningful.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. */
+int
+ofproto_port_get_rstp_status(struct ofproto *ofproto, ofp_port_t ofp_port,
+                             struct ofproto_port_rstp_status *s)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    if (!ofport) {
+        VLOG_WARN_RL(&rl, "%s: cannot get RSTP status on nonexistent "
+                "port %"PRIu16, ofproto->name, ofp_port);
+        return ENODEV;
+    }
+
+    if (!ofproto->ofproto_class->get_rstp_port_status) {
+        return EOPNOTSUPP;
+    }
+    ofproto->ofproto_class->get_rstp_port_status(ofport, s);
+    return 0;
+}
 \f
 /* Queue DSCP configuration. */
 
@@ -964,10 +1108,21 @@ ofproto_port_set_bfd(struct ofproto *ofproto, ofp_port_t ofp_port,
     }
 }
 
+/* Checks whether the BFD status of 'ofp_port' within 'ofproto' has changed.
+ *
+ * Returns true if the status has changed, or if 'ofproto_class' does not
+ * support 'bfd_status_changed'. */
+bool
+ofproto_port_bfd_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    return (ofport && ofproto->ofproto_class->bfd_status_changed
+            ? ofproto->ofproto_class->bfd_status_changed(ofport)
+            : true);
+}
+
 /* Populates 'status' with the status of BFD on 'ofport'.  Returns 0 on
- * success.  Returns a negative number if there is no status change since
- * last update.  Returns a positive errno otherwise.  Has no effect if
- * 'ofp_port' is not an OpenFlow port in 'ofproto'.
+ * success.  Returns a positive errno otherwise.  Has no effect if 'ofp_port'
+ * is not an OpenFlow port in 'ofproto'.
  *
  * The caller must provide and own '*status'. */
 int
@@ -992,6 +1147,21 @@ ofproto_port_is_lacp_current(struct ofproto *ofproto, ofp_port_t ofp_port)
             ? ofproto->ofproto_class->port_is_lacp_current(ofport)
             : -1);
 }
+
+int
+ofproto_port_get_lacp_stats(const struct ofport *port,
+                            struct lacp_slave_stats *stats)
+{
+    struct ofproto *ofproto = port->ofproto;
+    int error;
+
+    if (ofproto->ofproto_class->port_get_lacp_stats) {
+        error = ofproto->ofproto_class->port_get_lacp_stats(port, stats);
+    } else {
+        error = EOPNOTSUPP;
+    }
+
+    return error;
+}
 \f
 /* Bundles. */
 
@@ -1139,10 +1309,11 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    fat_rwlock_wrlock(&table->cls.rwlock);
-    classifier_set_prefix_fields(&table->cls,
-                                 s->prefix_fields, s->n_prefix_fields);
-    fat_rwlock_unlock(&table->cls.rwlock);
+
+    if (classifier_set_prefix_fields(&table->cls,
+                                     s->prefix_fields, s->n_prefix_fields)) {
+        /* XXX: Trigger revalidation. */
+    }
 
     ovs_mutex_lock(&ofproto_mutex);
     evict_rules_from_table(table, 0);
@@ -1161,16 +1332,6 @@ ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops)
     connmgr_get_snoops(ofproto->connmgr, snoops);
 }
 
-static void
-ofproto_rule_delete__(struct rule *rule, uint8_t reason)
-    OVS_REQUIRES(ofproto_mutex)
-{
-    struct ofproto *ofproto = rule->ofproto;
-
-    delete_flow__(rule, reason, NULL);
-    ofmonitor_flush(ofproto->connmgr);
-}
-
 /* Deletes 'rule' from 'ofproto'.
  *
  * Within an ofproto implementation, this function allows an ofproto
@@ -1188,7 +1349,7 @@ ofproto_rule_delete(struct ofproto *ofproto, struct rule *rule)
      * switch is being deleted and any OpenFlow channels have been or soon will
      * be killed. */
     ovs_mutex_lock(&ofproto_mutex);
-    oftable_remove_rule__(ofproto, rule);
+    oftable_remove_rule(rule);
     ofproto->ofproto_class->rule_delete(rule);
     ovs_mutex_unlock(&ofproto_mutex);
 }
@@ -1199,26 +1360,37 @@ ofproto_flush__(struct ofproto *ofproto)
 {
     struct oftable *table;
 
+    /* This will flush all datapath flows. */
     if (ofproto->ofproto_class->flush) {
         ofproto->ofproto_class->flush(ofproto);
     }
 
+    /* XXX: There is a small race window here, where new datapath flows can be
+     * created by upcall handlers based on the existing flow table.  We cannot
+     * call ofproto class flush while holding 'ofproto_mutex' to prevent this,
+     * as then we could deadlock on syncing with the handler threads waiting on
+     * the same mutex. */
+
     ovs_mutex_lock(&ofproto_mutex);
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        struct rule *rule, *next_rule;
-        struct cls_cursor cursor;
+        struct rule_collection rules;
+        struct rule *rule;
 
         if (table->flags & OFTABLE_HIDDEN) {
             continue;
         }
 
-        fat_rwlock_rdlock(&table->cls.rwlock);
-        cls_cursor_init(&cursor, &table->cls, NULL);
-        fat_rwlock_unlock(&table->cls.rwlock);
-        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
-            ofproto_rule_delete__(rule, OFPRR_DELETE);
+        rule_collection_init(&rules);
+
+        CLS_FOR_EACH (rule, cr, &table->cls) {
+            rule_collection_add(&rules, rule);
         }
+        delete_flows__(&rules, OFPRR_DELETE, NULL);
+        rule_collection_destroy(&rules);
     }
+    /* XXX: Concurrent handler threads may insert new learned flows based on
+     * learn actions of the now deleted flows right after we release
+     * 'ofproto_mutex'. */
     ovs_mutex_unlock(&ofproto_mutex);
 }
 
@@ -1237,8 +1409,6 @@ ofproto_destroy__(struct ofproto *ofproto)
     ovs_rwlock_destroy(&ofproto->groups_rwlock);
     hmap_destroy(&ofproto->groups);
 
-    connmgr_destroy(ofproto->connmgr);
-
     hmap_remove(&all_ofprotos, &ofproto->hmap_node);
     free(ofproto->name);
     free(ofproto->type);
@@ -1260,6 +1430,9 @@ ofproto_destroy__(struct ofproto *ofproto)
     ovs_assert(hindex_is_empty(&ofproto->cookies));
     hindex_destroy(&ofproto->cookies);
 
+    ovs_assert(hmap_is_empty(&ofproto->learned_cookies));
+    hmap_destroy(&ofproto->learned_cookies);
+
     free(ofproto->vlan_bitmap);
 
     ofproto->ofproto_class->dealloc(ofproto);
@@ -1294,6 +1467,12 @@ ofproto_destroy(struct ofproto *p)
     }
 
     p->ofproto_class->destruct(p);
+
+    /* We should not postpone this because it involves deleting a listening
+     * socket which we may want to reopen soon. 'connmgr' should not be used
+     * by other threads. */
+    connmgr_destroy(p->connmgr);
+
     /* Destroying rules is deferred, must have 'ofproto' around for them. */
     ovsrcu_postpone(ofproto_destroy__, p);
 }
@@ -1376,17 +1555,22 @@ ofproto_run(struct ofproto *p)
         for (i = 0; i < p->n_tables; i++) {
             struct oftable *table = &p->tables[i];
             struct eviction_group *evg;
-            struct cls_cursor cursor;
             struct rule *rule;
 
             if (!table->eviction_fields) {
                 continue;
             }
 
+            if (classifier_count(&table->cls) > 100000) {
+                static struct vlog_rate_limit count_rl =
+                    VLOG_RATE_LIMIT_INIT(1, 1);
+                VLOG_WARN_RL(&count_rl, "Table %"PRIuSIZE" has an excessive"
+                             " number of rules: %d", i,
+                             classifier_count(&table->cls));
+            }
+
             ovs_mutex_lock(&ofproto_mutex);
-            fat_rwlock_rdlock(&table->cls.rwlock);
-            cls_cursor_init(&cursor, &table->cls, NULL);
-            CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+            CLS_FOR_EACH (rule, cr, &table->cls) {
                 if (rule->idle_timeout || rule->hard_timeout) {
                     if (!rule->eviction_group) {
                         eviction_group_add_rule(rule);
@@ -1396,7 +1580,6 @@ ofproto_run(struct ofproto *p)
                     }
                 }
             }
-            fat_rwlock_unlock(&table->cls.rwlock);
 
             HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
                 heap_rebuild(&evg->rules);
@@ -1477,9 +1660,7 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
 
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        fat_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
-        fat_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
 
@@ -1709,7 +1890,7 @@ ofproto_port_del(struct ofproto *ofproto, ofp_port_t ofp_port)
 
 static void
 flow_mod_init(struct ofputil_flow_mod *fm,
-              const struct match *match, unsigned int priority,
+              const struct match *match, int priority,
               const struct ofpact *ofpacts, size_t ofpacts_len,
               enum ofp_flow_mod_command command)
 {
@@ -1723,6 +1904,7 @@ flow_mod_init(struct ofputil_flow_mod *fm,
     fm->command = command;
     fm->idle_timeout = 0;
     fm->hard_timeout = 0;
+    fm->importance = 0;
     fm->buffer_id = UINT32_MAX;
     fm->out_port = OFPP_ANY;
     fm->out_group = OFPG_ANY;
@@ -1734,7 +1916,7 @@ flow_mod_init(struct ofputil_flow_mod *fm,
 
 static int
 simple_flow_mod(struct ofproto *ofproto,
-                const struct match *match, unsigned int priority,
+                const struct match *match, int priority,
                 const struct ofpact *ofpacts, size_t ofpacts_len,
                 enum ofp_flow_mod_command command)
 {
@@ -1758,7 +1940,7 @@ simple_flow_mod(struct ofproto *ofproto,
  * This is a helper function for in-band control and fail-open. */
 void
 ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
-                 unsigned int priority,
+                 int priority,
                  const struct ofpact *ofpacts, size_t ofpacts_len)
     OVS_EXCLUDED(ofproto_mutex)
 {
@@ -1767,7 +1949,6 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
 
     /* First do a cheap check whether the rule we're looking for already exists
      * with the actions that we want.  If it does, then we're done. */
-    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
     if (rule) {
@@ -1777,7 +1958,6 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     } else {
         must_add = true;
     }
-    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
 
     /* If there's no such rule or the rule doesn't have the actions we want,
      * fall back to executing a full flow mod.  We can't optimize this at
@@ -1808,7 +1988,6 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
         struct rule *rule;
         bool done = false;
 
-        fat_rwlock_rdlock(&table->cls.rwlock);
         rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
                                                                 &fm->match,
                                                                 fm->priority));
@@ -1822,6 +2001,7 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
             actions = rule_get_actions(rule);
             if (rule->idle_timeout == fm->idle_timeout
                 && rule->hard_timeout == fm->hard_timeout
+                && rule->importance == fm->importance
                 && rule->flags == (fm->flags & OFPUTIL_FF_STATE)
                 && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie))
                 && ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
@@ -1833,7 +2013,6 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
             }
             ovs_mutex_unlock(&rule->mutex);
         }
-        fat_rwlock_unlock(&table->cls.rwlock);
 
         if (done) {
             return 0;
@@ -1849,7 +2028,7 @@ ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
  * This is a helper function for in-band control and fail-open. */
 void
 ofproto_delete_flow(struct ofproto *ofproto,
-                    const struct match *target, unsigned int priority)
+                    const struct match *target, int priority)
     OVS_EXCLUDED(ofproto_mutex)
 {
     struct classifier *cls = &ofproto->tables[0].cls;
@@ -1857,10 +2036,8 @@ ofproto_delete_flow(struct ofproto *ofproto,
 
     /* First do a cheap check whether the rule we're looking for has already
      * been deleted.  If so, then we're done. */
-    fat_rwlock_rdlock(&cls->rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
                                                             priority));
-    fat_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         return;
     }
@@ -2144,6 +2321,9 @@ ofproto_port_unregister(struct ofproto *ofproto, ofp_port_t ofp_port)
         if (port->ofproto->ofproto_class->set_stp_port) {
             port->ofproto->ofproto_class->set_stp_port(port, NULL);
         }
+        if (port->ofproto->ofproto_class->set_rstp_port) {
+            port->ofproto->ofproto_class->set_rstp_port(port, NULL);
+        }
         if (port->ofproto->ofproto_class->set_cfm) {
             port->ofproto->ofproto_class->set_cfm(port, NULL);
         }
@@ -2456,6 +2636,15 @@ ofproto_rule_ref(struct rule *rule)
     }
 }
 
+bool
+ofproto_rule_try_ref(struct rule *rule)
+{
+    if (rule) {
+        return ovs_refcount_try_ref_rcu(&rule->ref_count);
+    }
+    return false;
+}
+
 /* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
  * ref_count reaches 0.
  *
@@ -2465,7 +2654,7 @@ ofproto_rule_ref(struct rule *rule)
 void
 ofproto_rule_unref(struct rule *rule)
 {
-    if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
+    if (rule && ovs_refcount_unref_relaxed(&rule->ref_count) == 1) {
         ovsrcu_postpone(rule_destroy_cb, rule);
     }
 }
@@ -2494,18 +2683,18 @@ static uint32_t get_provider_meter_id(const struct ofproto *,
 /* Creates and returns a new 'struct rule_actions', whose actions are a copy
  * of from the 'ofpacts_len' bytes of 'ofpacts'. */
 const struct rule_actions *
-rule_actions_create(const struct ofproto *ofproto,
-                    const struct ofpact *ofpacts, size_t ofpacts_len)
+rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len)
 {
     struct rule_actions *actions;
 
     actions = xmalloc(sizeof *actions + ofpacts_len);
     actions->ofpacts_len = ofpacts_len;
-    actions->provider_meter_id
-        = get_provider_meter_id(ofproto,
-                                ofpacts_get_meter(ofpacts, ofpacts_len));
+    actions->has_meter = ofpacts_get_meter(ofpacts, ofpacts_len) != 0;
     memcpy(actions->ofpacts, ofpacts, ofpacts_len);
 
+    actions->has_learn_with_delete = (next_learn_with_delete(actions, NULL)
+                                      != NULL);
+
     return actions;
 }
 
@@ -2520,7 +2709,7 @@ rule_actions_destroy(const struct rule_actions *actions)
 
 /* Returns true if 'rule' has an OpenFlow OFPAT_OUTPUT or OFPAT_ENQUEUE action
  * that outputs to 'port' (output to OFPP_FLOOD and OFPP_ALL doesn't count). */
-static bool
+bool
 ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
     OVS_REQUIRES(ofproto_mutex)
 {
@@ -2562,7 +2751,7 @@ run_rule_executes(struct ofproto *ofproto)
     OVS_EXCLUDED(ofproto_mutex)
 {
     struct rule_execute *e, *next;
-    struct list executes;
+    struct ovs_list executes;
 
     guarded_list_pop_all(&ofproto->rule_executes, &executes);
     LIST_FOR_EACH_SAFE (e, next, list_node, &executes) {
@@ -2582,7 +2771,7 @@ static void
 destroy_rule_executes(struct ofproto *ofproto)
 {
     struct rule_execute *e, *next;
-    struct list executes;
+    struct ovs_list executes;
 
     guarded_list_pop_all(&ofproto->rule_executes, &executes);
     LIST_FOR_EACH_SAFE (e, next, list_node, &executes) {
@@ -2598,6 +2787,122 @@ rule_is_readonly(const struct rule *rule)
     return (table->flags & OFTABLE_READONLY) != 0;
 }
 \f
+static uint32_t
+hash_learned_cookie(ovs_be64 cookie_, uint8_t table_id)
+{
+    uint64_t cookie = (OVS_FORCE uint64_t) cookie_;
+    return hash_3words(cookie, cookie >> 32, table_id);
+}
+
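+/* Adds 'delta' (which may be negative) to the reference count of the
+ * learned_cookie entry for 'learn''s (cookie, table_id) pair, creating the
+ * entry if none exists.  An entry whose count drops to zero is removed from
+ * the hmap and put on 'dead_cookies' for learned_cookies_flush(). */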
+static void
+learned_cookies_update_one__(struct ofproto *ofproto,
+                             const struct ofpact_learn *learn,
+                             int delta, struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    uint32_t hash = hash_learned_cookie(learn->cookie, learn->table_id);
+    struct learned_cookie *c;
+
+    HMAP_FOR_EACH_WITH_HASH (c, u.hmap_node, hash, &ofproto->learned_cookies) {
+        if (c->cookie == learn->cookie && c->table_id == learn->table_id) {
+            c->n += delta;
+            ovs_assert(c->n >= 0);
+
+            if (!c->n) {
+                hmap_remove(&ofproto->learned_cookies, &c->u.hmap_node);
+                list_push_back(dead_cookies, &c->u.list_node);
+            }
+
+            return;
+        }
+    }
+
+    ovs_assert(delta > 0);
+    c = xmalloc(sizeof *c);
+    hmap_insert(&ofproto->learned_cookies, &c->u.hmap_node, hash);
+    c->cookie = learn->cookie;
+    c->table_id = learn->table_id;
+    c->n = delta;
+}
+
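+/* Returns the first "learn" action in 'actions' after 'start' (or from the
+ * beginning, if 'start' is NULL) that has NX_LEARN_F_DELETE_LEARNED set, or
+ * NULL if there is no such action. */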
+static const struct ofpact_learn *
+next_learn_with_delete(const struct rule_actions *actions,
+                       const struct ofpact_learn *start)
+{
+    const struct ofpact *pos;
+
+    for (pos = start ? ofpact_next(&start->ofpact) : actions->ofpacts;
+         pos < ofpact_end(actions->ofpacts, actions->ofpacts_len);
+         pos = ofpact_next(pos)) {
+        if (pos->type == OFPACT_LEARN) {
+            const struct ofpact_learn *learn = ofpact_get_LEARN(pos);
+            if (learn->flags & NX_LEARN_F_DELETE_LEARNED) {
+                return learn;
+            }
+        }
+    }
+
+    return NULL;
+}
+
+static void
+learned_cookies_update__(struct ofproto *ofproto,
+                         const struct rule_actions *actions,
+                         int delta, struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    if (actions->has_learn_with_delete) {
+        const struct ofpact_learn *learn;
+
+        for (learn = next_learn_with_delete(actions, NULL); learn;
+             learn = next_learn_with_delete(actions, learn)) {
+            learned_cookies_update_one__(ofproto, learn, delta, dead_cookies);
+        }
+    }
+}
+
+static void
+learned_cookies_inc(struct ofproto *ofproto,
+                    const struct rule_actions *actions)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    learned_cookies_update__(ofproto, actions, +1, NULL);
+}
+
+static void
+learned_cookies_dec(struct ofproto *ofproto,
+                    const struct rule_actions *actions,
+                    struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    learned_cookies_update__(ofproto, actions, -1, dead_cookies);
+}
+
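+/* Deletes all of the flows learned with the (cookie, table_id) pairs on
+ * 'dead_cookies', then frees the list entries themselves. */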
+static void
+learned_cookies_flush(struct ofproto *ofproto, struct ovs_list *dead_cookies)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    struct learned_cookie *c, *next;
+
+    LIST_FOR_EACH_SAFE (c, next, u.list_node, dead_cookies) {
+        struct rule_criteria criteria;
+        struct rule_collection rules;
+        struct match match;
+
+        match_init_catchall(&match);
+        rule_criteria_init(&criteria, c->table_id, &match, 0,
+                           c->cookie, OVS_BE64_MAX, OFPP_ANY, OFPG_ANY);
+        rule_criteria_require_rw(&criteria, false);
+        collect_rules_loose(ofproto, &criteria, &rules);
+        delete_flows__(&rules, OFPRR_DELETE, NULL);
+        rule_criteria_destroy(&criteria);
+        rule_collection_destroy(&rules);
+
+        list_remove(&c->u.list_node);
+        free(c);
+    }
+}
+\f
 static enum ofperr
 handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
 {
@@ -2605,6 +2910,107 @@ handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
     return 0;
 }
 
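+/* Fills '*featuresp' with OpenFlow table features for each of 'ofproto''s
+ * tables and, if 'statsp' is nonnull, '*statsp' with per-table statistics.
+ * Both arrays are allocated here and must be freed by the caller. */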
+static void
+query_tables(struct ofproto *ofproto,
+             struct ofputil_table_features **featuresp,
+             struct ofputil_table_stats **statsp)
+{
+    struct mf_bitmap rw_fields = oxm_writable_fields();
+    struct mf_bitmap match = oxm_matchable_fields();
+    struct mf_bitmap mask = oxm_maskable_fields();
+
+    struct ofputil_table_features *features;
+    struct ofputil_table_stats *stats;
+    int i;
+
+    features = *featuresp = xcalloc(ofproto->n_tables, sizeof *features);
+    for (i = 0; i < ofproto->n_tables; i++) {
+        struct ofputil_table_features *f = &features[i];
+
+        f->table_id = i;
+        sprintf(f->name, "table%d", i);
+        f->metadata_match = OVS_BE64_MAX;
+        f->metadata_write = OVS_BE64_MAX;
+        atomic_read_relaxed(&ofproto->tables[i].miss_config, &f->miss_config);
+        f->max_entries = 1000000;
+
+        bool more_tables = false;
+        for (int j = i + 1; j < ofproto->n_tables; j++) {
+            if (!(ofproto->tables[j].flags & OFTABLE_HIDDEN)) {
+                bitmap_set1(f->nonmiss.next, j);
+                more_tables = true;
+            }
+        }
+        f->nonmiss.instructions = (1u << N_OVS_INSTRUCTIONS) - 1;
+        if (!more_tables) {
+            f->nonmiss.instructions &= ~(1u << OVSINST_OFPIT11_GOTO_TABLE);
+        }
+        f->nonmiss.write.ofpacts = (UINT64_C(1) << N_OFPACTS) - 1;
+        f->nonmiss.write.set_fields = rw_fields;
+        f->nonmiss.apply = f->nonmiss.write;
+        f->miss = f->nonmiss;
+
+        f->match = match;
+        f->mask = mask;
+        f->wildcard = match;
+    }
+
+    if (statsp) {
+        stats = *statsp = xcalloc(ofproto->n_tables, sizeof *stats);
+        for (i = 0; i < ofproto->n_tables; i++) {
+            struct ofputil_table_stats *s = &stats[i];
+            struct classifier *cls = &ofproto->tables[i].cls;
+
+            s->table_id = i;
+            s->active_count = classifier_count(cls);
+            if (i == 0) {
+                s->active_count -= connmgr_count_hidden_rules(
+                    ofproto->connmgr);
+            }
+        }
+    } else {
+        stats = NULL;
+    }
+
+    ofproto->ofproto_class->query_tables(ofproto, features, stats);
+
+    for (i = 0; i < ofproto->n_tables; i++) {
+        const struct oftable *table = &ofproto->tables[i];
+        struct ofputil_table_features *f = &features[i];
+
+        if (table->name) {
+            ovs_strzcpy(f->name, table->name, sizeof f->name);
+        }
+
+        if (table->max_flows < f->max_entries) {
+            f->max_entries = table->max_flows;
+        }
+    }
+}
+
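+/* Summarizes 'ofproto''s table features for a features reply: sets
+ * '*ofpacts' to the union of the actions supported by any table and
+ * '*arp_match_ip' to whether any table can match on the ARP SPA or TPA
+ * fields. */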
+static void
+query_switch_features(struct ofproto *ofproto,
+                      bool *arp_match_ip, uint64_t *ofpacts)
+{
+    struct ofputil_table_features *features, *f;
+
+    *arp_match_ip = false;
+    *ofpacts = 0;
+
+    query_tables(ofproto, &features, NULL);
+    for (f = features; f < &features[ofproto->n_tables]; f++) {
+        *ofpacts |= f->nonmiss.apply.ofpacts | f->miss.apply.ofpacts;
+        if (bitmap_is_set(f->match.bm, MFF_ARP_SPA) ||
+            bitmap_is_set(f->match.bm, MFF_ARP_TPA)) {
+            *arp_match_ip = true;
+        }
+    }
+    free(features);
+
+    /* Sanity check. */
+    ovs_assert(*ofpacts & (UINT64_C(1) << OFPACT_OUTPUT));
+}
+
 static enum ofperr
 handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
 {
@@ -2614,15 +3020,14 @@ handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
     bool arp_match_ip;
     struct ofpbuf *b;
 
-    ofproto->ofproto_class->get_features(ofproto, &arp_match_ip,
-                                         &features.actions);
-    ovs_assert(features.actions & OFPUTIL_A_OUTPUT); /* sanity check */
+    query_switch_features(ofproto, &arp_match_ip, &features.ofpacts);
 
     features.datapath_id = ofproto->datapath_id;
     features.n_buffers = pktbuf_capacity();
     features.n_tables = ofproto_get_n_visible_tables(ofproto);
     features.capabilities = (OFPUTIL_C_FLOW_STATS | OFPUTIL_C_TABLE_STATS |
-                             OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS);
+                             OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS |
+                             OFPUTIL_C_GROUP_STATS);
     if (arp_match_ip) {
         features.capabilities |= OFPUTIL_C_ARP_MATCH_IP;
     }
@@ -2891,69 +3296,61 @@ static enum ofperr
 handle_table_stats_request(struct ofconn *ofconn,
                            const struct ofp_header *request)
 {
-    struct ofproto *p = ofconn_get_ofproto(ofconn);
-    struct ofp12_table_stats *ots;
-    struct ofpbuf *msg;
-    int n_tables;
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct ofputil_table_features *features;
+    struct ofputil_table_stats *stats;
+    struct ofpbuf *reply;
     size_t i;
 
-    /* Set up default values.
-     *
-     * ofp12_table_stats is used as a generic structure as
-     * it is able to hold all the fields for ofp10_table_stats
-     * and ofp11_table_stats (and of course itself).
-     */
-    ots = xcalloc(p->n_tables, sizeof *ots);
-    for (i = 0; i < p->n_tables; i++) {
-        ots[i].table_id = i;
-        sprintf(ots[i].name, "table%"PRIuSIZE, i);
-        ots[i].match = htonll(OFPXMT13_MASK);
-        ots[i].wildcards = htonll(OFPXMT13_MASK);
-        ots[i].write_actions = htonl(OFPAT11_OUTPUT);
-        ots[i].apply_actions = htonl(OFPAT11_OUTPUT);
-        ots[i].write_setfields = htonll(OFPXMT13_MASK);
-        ots[i].apply_setfields = htonll(OFPXMT13_MASK);
-        ots[i].metadata_match = OVS_BE64_MAX;
-        ots[i].metadata_write = OVS_BE64_MAX;
-        ots[i].instructions = htonl(OFPIT11_ALL);
-        ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
-        ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
-        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
-        ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
-        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
-    }
-
-    p->ofproto_class->get_tables(p, ots);
-
-    /* Post-process the tables, dropping hidden tables. */
-    n_tables = p->n_tables;
-    for (i = 0; i < p->n_tables; i++) {
-        const struct oftable *table = &p->tables[i];
+    query_tables(ofproto, &features, &stats);
 
-        if (table->flags & OFTABLE_HIDDEN) {
-            n_tables = i;
-            break;
+    reply = ofputil_encode_table_stats_reply(request);
+    for (i = 0; i < ofproto->n_tables; i++) {
+        if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) {
+            ofputil_append_table_stats_reply(reply, &stats[i], &features[i]);
         }
+    }
+    ofconn_send_reply(ofconn, reply);
 
-        if (table->name) {
-            ovs_strzcpy(ots[i].name, table->name, sizeof ots[i].name);
-        }
+    free(features);
+    free(stats);
 
-        if (table->max_flows < ntohl(ots[i].max_entries)) {
-            ots[i].max_entries = htonl(table->max_flows);
-        }
+    return 0;
+}
+
+static enum ofperr
+handle_table_features_request(struct ofconn *ofconn,
+                              const struct ofp_header *request)
+{
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+    struct ofputil_table_features *features;
+    struct ovs_list replies;
+    struct ofpbuf msg;
+    size_t i;
+
+    ofpbuf_use_const(&msg, request, ntohs(request->length));
+    ofpraw_pull_assert(&msg);
+    if (ofpbuf_size(&msg) || ofpmp_more(request)) {
+        return OFPERR_OFPTFFC_EPERM;
     }
 
-    msg = ofputil_encode_table_stats_reply(ots, n_tables, request);
-    ofconn_send_reply(ofconn, msg);
+    query_tables(ofproto, &features, NULL);
 
-    free(ots);
+    ofpmp_init(&replies, request);
+    for (i = 0; i < ofproto->n_tables; i++) {
+        if (!(ofproto->tables[i].flags & OFTABLE_HIDDEN)) {
+            ofputil_append_table_features_reply(&features[i], &replies);
+        }
+    }
+    ofconn_send_replies(ofconn, &replies);
+
+    free(features);
 
     return 0;
 }
 
 static void
-append_port_stat(struct ofport *port, struct list *replies)
+append_port_stat(struct ofport *port, struct ovs_list *replies)
 {
     struct ofputil_port_stats ops = { .port_no = port->pp.port_no };
 
@@ -2971,11 +3368,11 @@ append_port_stat(struct ofport *port, struct list *replies)
 static void
 handle_port_request(struct ofconn *ofconn,
                     const struct ofp_header *request, ofp_port_t port_no,
-                    void (*cb)(struct ofport *, struct list *replies))
+                    void (*cb)(struct ofport *, struct ovs_list *replies))
 {
     struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
     struct ofport *port;
-    struct list replies;
+    struct ovs_list replies;
 
     ofpmp_init(&replies, request);
     if (port_no != OFPP_ANY) {
@@ -3007,7 +3404,7 @@ handle_port_stats_request(struct ofconn *ofconn,
 }
 
 static void
-append_port_desc(struct ofport *port, struct list *replies)
+append_port_desc(struct ofport *port, struct ovs_list *replies)
 {
     ofputil_append_port_desc_stats_reply(&port->pp, replies);
 }
@@ -3132,7 +3529,7 @@ next_matching_table(const struct ofproto *ofproto,
  * supplied as 0. */
 static void
 rule_criteria_init(struct rule_criteria *criteria, uint8_t table_id,
-                   const struct match *match, unsigned int priority,
+                   const struct match *match, int priority,
                    ovs_be64 cookie, ovs_be64 cookie_mask,
                    ofp_port_t out_port, uint32_t out_group)
 {
@@ -3232,6 +3629,9 @@ rule_collection_destroy(struct rule_collection *rules)
     if (rules->rules != rules->stub) {
         free(rules->rules);
     }
+
+    /* Make repeated destruction harmless. */
+    rule_collection_init(rules);
 }
 
 /* Checks whether 'rule' matches 'c' and, if so, adds it to 'rules'.  This
@@ -3296,15 +3696,11 @@ collect_rules_loose(struct ofproto *ofproto,
         }
     } else {
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
-            struct cls_cursor cursor;
             struct rule *rule;
 
-            fat_rwlock_rdlock(&table->cls.rwlock);
-            cls_cursor_init(&cursor, &table->cls, &criteria->cr);
-            CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+            CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &criteria->cr) {
                 collect_rule(rule, criteria, rules, &n_readonly);
             }
-            fat_rwlock_unlock(&table->cls.rwlock);
         }
     }
 
@@ -3357,10 +3753,8 @@ collect_rules_strict(struct ofproto *ofproto,
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
             struct rule *rule;
 
-            fat_rwlock_rdlock(&table->cls.rwlock);
             rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                           &table->cls, &criteria->cr));
-            fat_rwlock_unlock(&table->cls.rwlock);
             if (rule) {
                 collect_rule(rule, criteria, rules, &n_readonly);
             }
@@ -3398,7 +3792,7 @@ handle_flow_stats_request(struct ofconn *ofconn,
     struct ofputil_flow_stats_request fsr;
     struct rule_criteria criteria;
     struct rule_collection rules;
-    struct list replies;
+    struct ovs_list replies;
     enum ofperr error;
     size_t i;
 
@@ -3435,6 +3829,7 @@ handle_flow_stats_request(struct ofconn *ofconn,
         fs.cookie = rule->flow_cookie;
         fs.idle_timeout = rule->idle_timeout;
         fs.hard_timeout = rule->hard_timeout;
+        fs.importance = rule->importance;
         created = rule->created;
         modified = rule->modified;
         actions = rule_get_actions(rule);
@@ -3503,15 +3898,11 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
     struct oftable *table;
 
     OFPROTO_FOR_EACH_TABLE (table, p) {
-        struct cls_cursor cursor;
         struct rule *rule;
 
-        fat_rwlock_rdlock(&table->cls.rwlock);
-        cls_cursor_init(&cursor, &table->cls, NULL);
-        CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+        CLS_FOR_EACH (rule, cr, &table->cls) {
             flow_stats_ds(rule, results);
         }
-        fat_rwlock_unlock(&table->cls.rwlock);
     }
 }
 
@@ -3524,17 +3915,28 @@ ofproto_get_netflow_ids(const struct ofproto *ofproto,
     ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
 }
 
+/* Checks whether the CFM status of 'ofp_port' within 'ofproto' has changed.
+ *
+ * Returns true if the status has changed, or if 'ofproto_class' does not
+ * support 'cfm_status_changed'. */
+bool
+ofproto_port_cfm_status_changed(struct ofproto *ofproto, ofp_port_t ofp_port)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    return (ofport && ofproto->ofproto_class->cfm_status_changed
+            ? ofproto->ofproto_class->cfm_status_changed(ofport)
+            : true);
+}
+
 /* Checks the status of CFM configured on 'ofp_port' within 'ofproto'.
  * Returns 0 if the port's CFM status was successfully stored into
  * '*status'.  Returns positive errno if the port did not have CFM
- * configured.  Returns negative number if there is no status change
- * since last update.
+ * configured.
  *
  * The caller must provide and own '*status', and must free 'status->rmps'.
  * '*status' is indeterminate if the return value is non-zero. */
 int
 ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
-                            struct ofproto_cfm_status *status)
+                            struct cfm_status *status)
 {
     struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
     return (ofport && ofproto->ofproto_class->get_cfm_status
@@ -3621,7 +4023,7 @@ handle_aggregate_stats_request(struct ofconn *ofconn,
 
 struct queue_stats_cbdata {
     struct ofport *ofport;
-    struct list replies;
+    struct ovs_list replies;
     long long int now;
 };
 
@@ -3717,29 +4119,80 @@ handle_queue_stats_request(struct ofconn *ofconn,
     return error;
 }
 
-static bool
-should_evict_a_rule(struct oftable *table, unsigned int extra_space)
-    OVS_REQUIRES(ofproto_mutex)
-    OVS_NO_THREAD_SAFETY_ANALYSIS
-{
-    return classifier_count(&table->cls) + extra_space > table->max_flows;
-}
-
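+/* Makes room in 'table' for 'extra_space' new rules by evicting rules until
+ * the table, counting the rules about to be added, has at most
+ * 'table->max_flows' entries.  Returns OFPERR_OFPFMFC_TABLE_FULL if it runs
+ * out of evictable rules first, otherwise 0. */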
 static enum ofperr
 evict_rules_from_table(struct oftable *table, unsigned int extra_space)
     OVS_REQUIRES(ofproto_mutex)
 {
-    while (should_evict_a_rule(table, extra_space)) {
+    enum ofperr error = 0;
+    struct rule_collection rules;
+    unsigned int count = classifier_count(&table->cls) + extra_space;
+    unsigned int max_flows = table->max_flows;
+
+    rule_collection_init(&rules);
+
+    while (count-- > max_flows) {
         struct rule *rule;
 
         if (!choose_rule_to_evict(table, &rule)) {
-            return OFPERR_OFPFMFC_TABLE_FULL;
+            error = OFPERR_OFPFMFC_TABLE_FULL;
+            break;
         } else {
-            ofproto_rule_delete__(rule, OFPRR_EVICTION);
+            eviction_group_remove_rule(rule);
+            rule_collection_add(&rules, rule);
         }
     }
+    delete_flows__(&rules, OFPRR_EVICTION, NULL);
+    rule_collection_destroy(&rules);
 
-    return 0;
+    return error;
+}
+
+static bool
+is_conjunction(const struct ofpact *ofpacts, size_t ofpacts_len)
+{
+    return ofpacts_len > 0 && ofpacts->type == OFPACT_CONJUNCTION;
+}
+
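+/* If 'fm''s action list consists of "conjunction" actions, allocates an
+ * array with one struct cls_conjunction per action, stores it in '*conjsp'
+ * (which the caller must free) and its length in '*n_conjsp'.  Otherwise
+ * stores NULL and 0. */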
+static void
+get_conjunctions(const struct ofputil_flow_mod *fm,
+                 struct cls_conjunction **conjsp, size_t *n_conjsp)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    struct cls_conjunction *conjs = NULL;
+    int n_conjs = 0;
+
+    if (is_conjunction(fm->ofpacts, fm->ofpacts_len)) {
+        const struct ofpact *ofpact;
+        int i;
+
+        n_conjs = 0;
+        OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) {
+            n_conjs++;
+        }
+
+        conjs = xzalloc(n_conjs * sizeof *conjs);
+        i = 0;
+        OFPACT_FOR_EACH (ofpact, fm->ofpacts, fm->ofpacts_len) {
+            struct ofpact_conjunction *oc = ofpact_get_CONJUNCTION(ofpact);
+            conjs[i].clause = oc->clause;
+            conjs[i].n_clauses = oc->n_clauses;
+            conjs[i].id = oc->id;
+            i++;
+        }
+    }
+
+    *conjsp = conjs;
+    *n_conjsp = n_conjs;
+}
+
+static void
+set_conjunctions(struct rule *rule, const struct cls_conjunction *conjs,
+                 size_t n_conjs)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    struct cls_rule *cr = CONST_CAST(struct cls_rule *, &rule->cr);
+
+    cls_rule_set_conjunctions(cr, conjs, n_conjs);
 }
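+
+/* Illustration (the "conjunction" action and "conj_id" match field come from
+ * the companion ofp-actions and nx-match changes): the flows below match
+ * packets whose source is 10.0.0.1 or 10.0.0.2 and whose destination is
+ * 10.1.1.1 or 10.1.1.2, without enumerating all four combinations:
+ *
+ *     ip,nw_src=10.0.0.1 actions=conjunction(123,1/2)
+ *     ip,nw_src=10.0.0.2 actions=conjunction(123,1/2)
+ *     ip,nw_dst=10.1.1.1 actions=conjunction(123,2/2)
+ *     ip,nw_dst=10.1.1.2 actions=conjunction(123,2/2)
+ *     conj_id=123 actions=drop
+ *
+ * get_conjunctions() turns each conjunction(id, k/n) action into a
+ * struct cls_conjunction, and classifier_insert() (see add_flow() below) or
+ * cls_rule_set_conjunctions() attaches that set to the rule's cls_rule. */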
 
 /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT
@@ -3807,9 +4260,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
     cls_rule_init(&cr, &fm->match, fm->priority);
 
     /* Transform "add" into "modify" if there's an existing identical flow. */
-    fat_rwlock_rdlock(&table->cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
-    fat_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
         struct rule_collection rules;
 
@@ -3826,13 +4277,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
 
     /* Check for overlap, if requested. */
     if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
-        bool overlaps;
-
-        fat_rwlock_rdlock(&table->cls.rwlock);
-        overlaps = classifier_rule_overlaps(&table->cls, &cr);
-        fat_rwlock_unlock(&table->cls.rwlock);
-
-        if (overlaps) {
+        if (classifier_rule_overlaps(&table->cls, &cr)) {
             cls_rule_destroy(&cr);
             return OFPERR_OFPFMFC_OVERLAP;
         }
@@ -3865,11 +4310,12 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
     ovs_mutex_lock(&rule->mutex);
     rule->idle_timeout = fm->idle_timeout;
     rule->hard_timeout = fm->hard_timeout;
+    rule->importance = fm->importance;
     ovs_mutex_unlock(&rule->mutex);
 
     *CONST_CAST(uint8_t *, &rule->table_id) = table - ofproto->tables;
     rule->flags = fm->flags & OFPUTIL_FF_STATE;
-    actions = rule_actions_create(ofproto, fm->ofpacts, fm->ofpacts_len);
+    actions = rule_actions_create(fm->ofpacts, fm->ofpacts_len);
     ovsrcu_set(&rule->actions, actions);
     list_init(&rule->meter_list_node);
     rule->eviction_group = NULL;
@@ -3890,13 +4336,17 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
     }
     cookies_insert(ofproto, rule);
     eviction_group_add_rule(rule);
-    if (actions->provider_meter_id != UINT32_MAX) {
+    if (actions->has_meter) {
         meter_insert_rule(rule);
     }
 
-    fat_rwlock_wrlock(&table->cls.rwlock);
-    classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    fat_rwlock_unlock(&table->cls.rwlock);
+    classifier_defer(&table->cls);
+
+    struct cls_conjunction *conjs;
+    size_t n_conjs;
+    get_conjunctions(fm, &conjs, &n_conjs);
+    classifier_insert(&table->cls, &rule->cr, conjs, n_conjs);
+    free(conjs);
 
     error = ofproto->ofproto_class->rule_insert(rule);
     if (error) {
@@ -3904,6 +4354,9 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
         ofproto_rule_unref(rule);
         return error;
     }
+    classifier_publish(&table->cls);
+
+    learned_cookies_inc(ofproto, actions);
 
     if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
         if (ofproto->vlan_bitmap) {
@@ -3918,7 +4371,7 @@ add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
     }
 
     ofmonitor_report(ofproto->connmgr, rule, NXFME_ADDED, 0,
-                     req ? req->ofconn : NULL, req ? req->xid : 0);
+                     req ? req->ofconn : NULL, req ? req->xid : 0, NULL);
 
     return req ? send_buffered_packet(req->ofconn, fm->buffer_id, rule) : 0;
 }
@@ -3938,13 +4391,14 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
                const struct flow_mod_requester *req)
     OVS_REQUIRES(ofproto_mutex)
 {
+    struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);
     enum nx_flow_update_event event;
-    enum ofperr error;
     size_t i;
 
     if (ofproto->ofproto_class->rule_premodify_actions) {
         for (i = 0; i < rules->n; i++) {
             struct rule *rule = rules->rules[i];
+            enum ofperr error;
 
             error = ofproto->ofproto_class->rule_premodify_actions(
                 rule, fm->ofpacts, fm->ofpacts_len);
@@ -3972,8 +4426,6 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
 
         long long int now = time_msec();
 
-        /* FIXME: Implement OFPFUTIL_FF_RESET_COUNTS */
-
         if (change_cookie) {
             cookies_remove(ofproto, rule);
         }
@@ -3982,6 +4434,7 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
         if (fm->command == OFPFC_ADD) {
             rule->idle_timeout = fm->idle_timeout;
             rule->hard_timeout = fm->hard_timeout;
+            rule->importance = fm->importance;
             rule->flags = fm->flags & OFPUTIL_FF_STATE;
             rule->created = now;
         }
@@ -3995,7 +4448,7 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
             cookies_insert(ofproto, rule);
         }
         if (fm->command == OFPFC_ADD) {
-            if (fm->idle_timeout || fm->hard_timeout) {
+            if (fm->idle_timeout || fm->hard_timeout || fm->importance) {
                 if (!rule->eviction_group) {
                     eviction_group_add_rule(rule);
                 }
@@ -4005,10 +4458,43 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
         }
 
         if (change_actions) {
-            ovsrcu_set(&rule->actions, rule_actions_create(ofproto,
-                                                           fm->ofpacts,
+           /* We have to change the actions.  The rule's conjunctive match set
+            * is a function of its actions, so we need to update that too.  The
+            * conjunctive match set is used in the lookup process to figure
+            * which (if any) collection of conjunctive sets the packet matches
+            * with.  However, a rule with conjunction actions is never to be
+            * returned as a classifier lookup result.  To make sure a rule with
+            * conjunction actions is not returned as a lookup result, we update
+            * them in a carefully chosen order:
+            *
+            * - If we're adding a conjunctive match set where there wasn't one
+            *   before, we have to make the conjunctive match set available to
+            *   lookups before the rule's actions are changed, as otherwise
+            *   a rule with a conjunction action could be returned as a lookup
+            *   result.
+            *
+            * - To clear some nonempty conjunctive set, we set the rule's
+            *   actions first, so that a lookup can't return a rule with
+            *   conjunction actions.
+            *
+            * - Otherwise, order doesn't matter for changing one nonempty
+            *   conjunctive match set to some other nonempty set, since the
+            *   rule's actions are not seen by the classifier, and hence don't
+            *   matter either before or after the change. */
+            struct cls_conjunction *conjs;
+            size_t n_conjs;
+            get_conjunctions(fm, &conjs, &n_conjs);
+
+            if (n_conjs) {
+                set_conjunctions(rule, conjs, n_conjs);
+            }
+            ovsrcu_set(&rule->actions, rule_actions_create(fm->ofpacts,
                                                            fm->ofpacts_len));
-            rule_actions_destroy(actions);
+            if (!conjs) {
+                set_conjunctions(rule, conjs, n_conjs);
+            }
+
+            free(conjs);
         }
 
         if (change_actions || reset_counters) {
@@ -4017,16 +4503,24 @@ modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
 
         if (event != NXFME_MODIFIED || change_actions || change_cookie) {
             ofmonitor_report(ofproto->connmgr, rule, event, 0,
-                             req ? req->ofconn : NULL, req ? req->xid : 0);
+                             req ? req->ofconn : NULL, req ? req->xid : 0,
+                             change_actions ? actions : NULL);
+        }
+
+        if (change_actions) {
+            learned_cookies_inc(ofproto, rule_get_actions(rule));
+            learned_cookies_dec(ofproto, actions, &dead_cookies);
+            rule_actions_destroy(actions);
         }
     }
+    learned_cookies_flush(ofproto, &dead_cookies);
 
     if (fm->buffer_id != UINT32_MAX && req) {
-        error = send_buffered_packet(req->ofconn, fm->buffer_id,
-                                     rules->rules[0]);
+        return send_buffered_packet(req->ofconn, fm->buffer_id,
+                                    rules->rules[0]);
     }
 
-    return error;
+    return 0;
 }
 
 static enum ofperr
@@ -4105,39 +4599,48 @@ modify_flow_strict(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
 \f
 /* OFPFC_DELETE implementation. */
 
+/* Deletes the rules listed in 'rules'. */
 static void
-delete_flow__(struct rule *rule, enum ofp_flow_removed_reason reason,
-              const struct flow_mod_requester *req)
+delete_flows__(const struct rule_collection *rules,
+               enum ofp_flow_removed_reason reason,
+               const struct flow_mod_requester *req)
     OVS_REQUIRES(ofproto_mutex)
 {
-    struct ofproto *ofproto = rule->ofproto;
+    if (rules->n) {
+        struct ovs_list dead_cookies = OVS_LIST_INITIALIZER(&dead_cookies);
+        struct ofproto *ofproto = rules->rules[0]->ofproto;
+        struct rule *rule, *next;
+        size_t i;
 
-    ofproto_rule_send_removed(rule, reason);
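+        /* Iterate with one rule of lookahead: knowing whether the next rule
+         * is in the same table lets classifier changes be batched per table
+         * with classifier_defer() and classifier_publish(). */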
+        for (i = 0, next = rules->rules[0];
+             rule = next, next = (++i < rules->n) ? rules->rules[i] : NULL,
+                 rule; ) {
+            struct classifier *cls = &ofproto->tables[rule->table_id].cls;
+            uint8_t next_table = next ? next->table_id : UINT8_MAX;
 
-    ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason,
-                     req ? req->ofconn : NULL, req ? req->xid : 0);
-    oftable_remove_rule(rule);
-    ofproto->ofproto_class->rule_delete(rule);
-}
+            ofproto_rule_send_removed(rule, reason);
 
-/* Deletes the rules listed in 'rules'.
- *
- * Returns 0 on success, otherwise an OpenFlow error code. */
-static enum ofperr
-delete_flows__(struct ofproto *ofproto,
-               const struct rule_collection *rules,
-               enum ofp_flow_removed_reason reason,
-               const struct flow_mod_requester *req)
-    OVS_REQUIRES(ofproto_mutex)
-{
-    size_t i;
+            ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason,
+                             req ? req->ofconn : NULL, req ? req->xid : 0,
+                             NULL);
 
-    for (i = 0; i < rules->n; i++) {
-        delete_flow__(rules->rules[i], reason, req);
-    }
-    ofmonitor_flush(ofproto->connmgr);
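+            /* Batch classifier updates: stay in deferred mode while
+             * consecutive rules being deleted share a table, and publish
+             * the removals once the next rule (if any) is in a different
+             * table. */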
+            if (next_table == rule->table_id) {
+                classifier_defer(cls);
+            }
+            classifier_remove(cls, &rule->cr);
+            if (next_table != rule->table_id) {
+                classifier_publish(cls);
+            }
+            ofproto_rule_remove__(ofproto, rule);
 
-    return 0;
+            ofproto->ofproto_class->rule_delete(rule);
+
+            learned_cookies_dec(ofproto, rule_get_actions(rule),
+                                &dead_cookies);
+        }
+        learned_cookies_flush(ofproto, &dead_cookies);
+        ofmonitor_flush(ofproto->connmgr);
+    }
 }
 
 /* Implements OFPFC_DELETE. */
@@ -4159,8 +4662,8 @@ delete_flows_loose(struct ofproto *ofproto,
     error = collect_rules_loose(ofproto, &criteria, &rules);
     rule_criteria_destroy(&criteria);
 
-    if (!error && rules.n > 0) {
-        error = delete_flows__(ofproto, &rules, fm->delete_reason, req);
+    if (!error) {
+        delete_flows__(&rules, fm->delete_reason, req);
     }
     rule_collection_destroy(&rules);
 
@@ -4185,8 +4688,8 @@ delete_flow_strict(struct ofproto *ofproto, const struct ofputil_flow_mod *fm,
     error = collect_rules_strict(ofproto, &criteria, &rules);
     rule_criteria_destroy(&criteria);
 
-    if (!error && rules.n > 0) {
-        error = delete_flows__(ofproto, &rules, fm->delete_reason, req);
+    if (!error) {
+        delete_flows__(&rules, fm->delete_reason, req);
     }
     rule_collection_destroy(&rules);
 
@@ -4232,7 +4735,12 @@ void
 ofproto_rule_expire(struct rule *rule, uint8_t reason)
     OVS_REQUIRES(ofproto_mutex)
 {
-    ofproto_rule_delete__(rule, reason);
+    struct rule_collection rules;
+
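+    /* Wrap 'rule' in a single-element collection, backed by the collection's
+     * own stack stub, so that delete_flows__() can be reused. */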
+    rules.rules = rules.stub;
+    rules.n = 1;
+    rules.stub[0] = rule;
+    delete_flows__(&rules, reason, NULL);
 }
 
 /* Reduces '*timeout' to no more than 'max'.  A value of zero in either case
@@ -4516,7 +5024,7 @@ handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh)
 static void
 ofproto_compose_flow_refresh_update(const struct rule *rule,
                                     enum nx_flow_monitor_flags flags,
-                                    struct list *msgs)
+                                    struct ovs_list *msgs)
     OVS_REQUIRES(ofproto_mutex)
 {
     const struct rule_actions *actions;
@@ -4548,7 +5056,7 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
 
 void
 ofmonitor_compose_refresh_updates(struct rule_collection *rules,
-                                  struct list *msgs)
+                                  struct ovs_list *msgs)
     OVS_REQUIRES(ofproto_mutex)
 {
     size_t i;
@@ -4612,15 +5120,11 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
 
     cls_rule_init_from_minimatch(&target, &m->match, 0);
     FOR_EACH_MATCHING_TABLE (table, m->table_id, ofproto) {
-        struct cls_cursor cursor;
         struct rule *rule;
 
-        fat_rwlock_rdlock(&table->cls.rwlock);
-        cls_cursor_init(&cursor, &table->cls, &target);
-        CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+        CLS_FOR_EACH_TARGET (rule, cr, &table->cls, &target) {
             ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
         }
-        fat_rwlock_unlock(&table->cls.rwlock);
     }
     cls_rule_destroy(&target);
 }
@@ -4643,6 +5147,24 @@ ofmonitor_collect_resume_rules(struct ofmonitor *m,
     ofproto_collect_ofmonitor_refresh_rules(m, seqno, rules);
 }
 
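+/* Deletes the flow monitor with the given 'id' on 'ofconn', if there is one.
+ * Returns 0 if such a monitor existed, otherwise
+ * OFPERR_OFPMOFC_UNKNOWN_MONITOR. */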
+static enum ofperr
+flow_monitor_delete(struct ofconn *ofconn, uint32_t id)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    struct ofmonitor *m;
+    enum ofperr error;
+
+    m = ofmonitor_lookup(ofconn, id);
+    if (m) {
+        ofmonitor_destroy(m);
+        error = 0;
+    } else {
+        error = OFPERR_OFPMOFC_UNKNOWN_MONITOR;
+    }
+
+    return error;
+}
+
 static enum ofperr
 handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh)
     OVS_EXCLUDED(ofproto_mutex)
@@ -4651,12 +5173,11 @@ handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh)
     struct ofmonitor **monitors;
     size_t n_monitors, allocated_monitors;
     struct rule_collection rules;
-    struct list replies;
+    struct ovs_list replies;
     enum ofperr error;
     struct ofpbuf b;
     size_t i;
 
-    error = 0;
     ofpbuf_use_const(&b, oh, ntohs(oh->length));
     monitors = NULL;
     n_monitors = allocated_monitors = 0;
@@ -4723,20 +5244,13 @@ static enum ofperr
 handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh)
     OVS_EXCLUDED(ofproto_mutex)
 {
-    struct ofmonitor *m;
     enum ofperr error;
     uint32_t id;
 
     id = ofputil_decode_flow_monitor_cancel(oh);
 
     ovs_mutex_lock(&ofproto_mutex);
-    m = ofmonitor_lookup(ofconn, id);
-    if (m) {
-        ofmonitor_destroy(m);
-        error = 0;
-    } else {
-        error = OFPERR_NXBRC_FM_BAD_ID;
-    }
+    error = flow_monitor_delete(ofconn, id);
     ovs_mutex_unlock(&ofproto_mutex);
 
     return error;
@@ -4752,7 +5266,7 @@ handle_flow_monitor_cancel(struct ofconn *ofconn, const struct ofp_header *oh)
  */
 struct meter {
     long long int created;      /* Time created. */
-    struct list rules;          /* List of "struct rule_dpif"s. */
+    struct ovs_list rules;      /* List of "struct rule_dpif"s. */
     ofproto_meter_id provider_meter_id;
     uint16_t flags;             /* Meter flags. */
     uint16_t n_bands;           /* Number of meter bands. */
@@ -4908,9 +5422,7 @@ handle_delete_meter(struct ofconn *ofconn, struct ofputil_meter_mod *mm)
             }
         }
     }
-    if (rules.n > 0) {
-        delete_flows__(ofproto, &rules, OFPRR_METER_DELETE, NULL);
-    }
+    delete_flows__(&rules, OFPRR_METER_DELETE, NULL);
 
     /* Delete the meters. */
     meter_delete(ofproto, first, last);
@@ -5007,7 +5519,7 @@ handle_meter_request(struct ofconn *ofconn, const struct ofp_header *request,
                      enum ofptype type)
 {
     struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
-    struct list replies;
+    struct ovs_list replies;
     uint64_t bands_stub[256 / 8];
     struct ofpbuf bands;
     uint32_t meter_id, first, last;
@@ -5157,7 +5669,7 @@ group_get_ref_count(struct ofgroup *group)
 }
 
 static void
-append_group_stats(struct ofgroup *group, struct list *replies)
+append_group_stats(struct ofgroup *group, struct ovs_list *replies)
 {
     struct ofputil_group_stats ogs;
     const struct ofproto *ofproto = group->ofproto;
@@ -5191,11 +5703,11 @@ append_group_stats(struct ofgroup *group, struct list *replies)
 static void
 handle_group_request(struct ofconn *ofconn,
                      const struct ofp_header *request, uint32_t group_id,
-                     void (*cb)(struct ofgroup *, struct list *replies))
+                     void (*cb)(struct ofgroup *, struct ovs_list *replies))
 {
     struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
     struct ofgroup *group;
-    struct list replies;
+    struct ovs_list replies;
 
     ofpmp_init(&replies, request);
     if (group_id == OFPG_ALL) {
@@ -5230,7 +5742,7 @@ handle_group_stats_request(struct ofconn *ofconn,
 }
 
 static void
-append_group_desc(struct ofgroup *group, struct list *replies)
+append_group_desc(struct ofgroup *group, struct ovs_list *replies)
 {
     struct ofputil_group_desc gds;
 
@@ -5395,7 +5907,102 @@ add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
     return error;
 }
 
-/* Implements OFPGC11_MODIFY.  Returns 0 on success or an OpenFlow error code
+/* Adds all of the buckets from 'ofgroup' to 'new_ofgroup'.  The buckets
+ * already in 'new_ofgroup' will be placed just after the (copy of the) bucket
+ * in 'ofgroup' with bucket ID 'command_bucket_id'.  Special
+ * 'command_bucket_id' values OFPG15_BUCKET_FIRST and OFPG15_BUCKET_LAST are
+ * also honored. */
+static enum ofperr
+copy_buckets_for_insert_bucket(const struct ofgroup *ofgroup,
+                               struct ofgroup *new_ofgroup,
+                               uint32_t command_bucket_id)
+{
+    struct ofputil_bucket *last = NULL;
+
+    if (command_bucket_id <= OFPG15_BUCKET_MAX) {
+        /* Check here to ensure that a bucket corresponding to
+         * command_bucket_id exists in the old bucket list.
+         *
+         * The subsequent search of 'new_ofgroup', below, covers both the
+         * buckets in the old bucket list and the buckets added by the
+         * insert-buckets group mod message that this function processes. */
+        if (!ofputil_bucket_find(&ofgroup->buckets, command_bucket_id)) {
+            return OFPERR_OFPGMFC_UNKNOWN_BUCKET;
+        }
+
+        if (!list_is_empty(&new_ofgroup->buckets)) {
+            last = ofputil_bucket_list_back(&new_ofgroup->buckets);
+        }
+    }
+
+    ofputil_bucket_clone_list(&new_ofgroup->buckets, &ofgroup->buckets, NULL);
+
+    if (ofputil_bucket_check_duplicate_id(&new_ofgroup->buckets)) {
+        VLOG_WARN_RL(&rl, "Duplicate bucket id");
+        return OFPERR_OFPGMFC_BUCKET_EXISTS;
+    }
+
+    /* Rearrange the list according to 'command_bucket_id'. */
+    if (command_bucket_id == OFPG15_BUCKET_LAST) {
+        struct ofputil_bucket *new_first;
+        const struct ofputil_bucket *first;
+
+        first = ofputil_bucket_list_front(&ofgroup->buckets);
+        new_first = ofputil_bucket_find(&new_ofgroup->buckets,
+                                        first->bucket_id);
+
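+        /* Move the copies of the old buckets (starting at 'new_first') to
+         * the front, leaving the newly inserted buckets at the back of the
+         * list. */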
+        list_splice(new_ofgroup->buckets.next, &new_first->list_node,
+                    &new_ofgroup->buckets);
+    } else if (command_bucket_id <= OFPG15_BUCKET_MAX && last) {
+        struct ofputil_bucket *after;
+
+        /* The presence of the bucket is checked above, so 'after' should
+         * never be NULL. */
+        after = ofputil_bucket_find(&new_ofgroup->buckets, command_bucket_id);
+
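+        /* Move the newly inserted buckets, which precede the copies of the
+         * old buckets, to just after the bucket with 'command_bucket_id'. */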
+        list_splice(after->list_node.next, new_ofgroup->buckets.next,
+                    last->list_node.next);
+    }
+
+    return 0;
+}
+
+/* Appends a copy of each bucket in 'ofgroup' to 'new_ofgroup', except for
+ * the bucket whose bucket id is 'command_bucket_id'.
+ * Special 'command_bucket_id' values OFPG15_BUCKET_FIRST, OFPG15_BUCKET_LAST
+ * and OFPG15_BUCKET_ALL are also honored. */
+static enum ofperr
+copy_buckets_for_remove_bucket(const struct ofgroup *ofgroup,
+                               struct ofgroup *new_ofgroup,
+                               uint32_t command_bucket_id)
+{
+    const struct ofputil_bucket *skip = NULL;
+
+    if (command_bucket_id == OFPG15_BUCKET_ALL) {
+        return 0;
+    }
+
+    if (command_bucket_id == OFPG15_BUCKET_FIRST) {
+        if (!list_is_empty(&ofgroup->buckets)) {
+            skip = ofputil_bucket_list_front(&ofgroup->buckets);
+        }
+    } else if (command_bucket_id == OFPG15_BUCKET_LAST) {
+        if (!list_is_empty(&ofgroup->buckets)) {
+            skip = ofputil_bucket_list_back(&ofgroup->buckets);
+        }
+    } else {
+        skip = ofputil_bucket_find(&ofgroup->buckets, command_bucket_id);
+        if (!skip) {
+            return OFPERR_OFPGMFC_UNKNOWN_BUCKET;
+        }
+    }
+
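+    /* Copy every bucket except 'skip' (when 'skip' is nonnull). */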
+    ofputil_bucket_clone_list(&new_ofgroup->buckets, &ofgroup->buckets, skip);
+
+    return 0;
+}
+
+/* Implements OFPGC11_MODIFY, OFPGC15_INSERT_BUCKET and
+ * OFPGC15_REMOVE_BUCKET.  Returns 0 on success or an OpenFlow error code
  * on failure.
  *
  * Note that the group is re-created and then replaces the old group in
@@ -5427,6 +6034,18 @@ modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
         goto out;
     }
 
+    /* Manipulate the bucket list for bucket commands. */
+    if (gm->command == OFPGC15_INSERT_BUCKET) {
+        error = copy_buckets_for_insert_bucket(ofgroup, new_ofgroup,
+                                               gm->command_bucket_id);
+    } else if (gm->command == OFPGC15_REMOVE_BUCKET) {
+        error = copy_buckets_for_remove_bucket(ofgroup, new_ofgroup,
+                                               gm->command_bucket_id);
+    }
+    if (error) {
+        goto out;
+    }
+
     /* The group creation time does not change during modification. */
     *CONST_CAST(long long int *, &(new_ofgroup->created)) = ofgroup->created;
     *CONST_CAST(long long int *, &(new_ofgroup->modified)) = time_msec();
@@ -5532,6 +6151,12 @@ handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
         delete_group(ofproto, gm.group_id);
         return 0;
 
+    case OFPGC15_INSERT_BUCKET:
+    case OFPGC15_REMOVE_BUCKET:
+        return modify_group(ofproto, &gm);
+
     default:
         if (gm.command > OFPGC11_DELETE) {
             VLOG_WARN_RL(&rl, "%s: Invalid group_mod command type %d",
@@ -5541,35 +6166,32 @@ handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
     }
 }
 
-enum ofproto_table_config
-ofproto_table_get_config(const struct ofproto *ofproto, uint8_t table_id)
+enum ofputil_table_miss
+ofproto_table_get_miss_config(const struct ofproto *ofproto, uint8_t table_id)
 {
-    unsigned int value;
-    atomic_read(&ofproto->tables[table_id].config, &value);
-    return (enum ofproto_table_config)value;
+    enum ofputil_table_miss value;
+
+    atomic_read_relaxed(&ofproto->tables[table_id].miss_config, &value);
+    return value;
 }
 
 static enum ofperr
 table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
 {
-    /* Only accept currently supported configurations */
-    if (tm->config & ~OFPTC11_TABLE_MISS_MASK) {
-        return OFPERR_OFPTMFC_BAD_CONFIG;
-    }
-
-    if (tm->table_id == OFPTT_ALL) {
-        int i;
-        for (i = 0; i < ofproto->n_tables; i++) {
-            atomic_store(&ofproto->tables[i].config,
-                         (unsigned int)tm->config);
-        }
-    } else if (!check_table_id(ofproto, tm->table_id)) {
+    if (!check_table_id(ofproto, tm->table_id)) {
         return OFPERR_OFPTMFC_BAD_TABLE;
-    } else {
-        atomic_store(&ofproto->tables[tm->table_id].config,
-                     (unsigned int)tm->config);
+    } else if (tm->miss_config != OFPUTIL_TABLE_MISS_DEFAULT) {
+        if (tm->table_id == OFPTT_ALL) {
+            int i;
+            for (i = 0; i < ofproto->n_tables; i++) {
+                atomic_store_relaxed(&ofproto->tables[i].miss_config,
+                                     tm->miss_config);
+            }
+        } else {
+            atomic_store_relaxed(&ofproto->tables[tm->table_id].miss_config,
+                                 tm->miss_config);
+        }
     }
-
     return 0;
 }
 
@@ -5601,6 +6223,11 @@ handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh)
     struct ofpbuf *buf;
     struct ofputil_bundle_ctrl_msg reply;
 
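+    /* Bundles can carry state-modifying messages such as flow mods, so do
+     * not accept bundle requests from slave controllers. */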
+    error = reject_slave_controller(ofconn);
+    if (error) {
+        return error;
+    }
+
     error = ofputil_decode_bundle_ctrl(oh, &bctrl);
     if (error) {
         return error;
@@ -5648,6 +6275,11 @@ handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh)
     enum ofperr error;
     struct ofputil_bundle_add_msg badd;
 
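+    /* As with bundle control messages, slave controllers may not add
+     * messages to a bundle. */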
+    error = reject_slave_controller(ofconn);
+    if (error) {
+        return error;
+    }
+
     error = ofputil_decode_bundle_add(oh, &badd);
     if (error) {
         return error;
@@ -5757,6 +6389,9 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_TABLE_STATS_REQUEST:
         return handle_table_stats_request(ofconn, oh);
 
+    case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
+        return handle_table_features_request(ofconn, oh);
+
     case OFPTYPE_PORT_STATS_REQUEST:
         return handle_port_stats_request(ofconn, oh);
 
@@ -5821,7 +6456,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
     case OFPTYPE_METER_STATS_REPLY:
     case OFPTYPE_METER_CONFIG_STATS_REPLY:
     case OFPTYPE_METER_FEATURES_STATS_REPLY:
-    case OFPTYPE_TABLE_FEATURES_STATS_REQUEST:
     case OFPTYPE_TABLE_FEATURES_STATS_REPLY:
     case OFPTYPE_ROLE_STATUS:
     default:
@@ -6155,14 +6789,12 @@ static void
 oftable_init(struct oftable *table)
 {
     memset(table, 0, sizeof *table);
-    classifier_init(&table->cls, flow_segment_u32s);
+    classifier_init(&table->cls, flow_segment_u64s);
     table->max_flows = UINT_MAX;
-    atomic_init(&table->config, (unsigned int)OFPROTO_TABLE_MISS_DEFAULT);
+    atomic_init(&table->miss_config, OFPUTIL_TABLE_MISS_DEFAULT);
 
-    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_set_prefix_fields(&table->cls, default_prefix_fields,
                                  ARRAY_SIZE(default_prefix_fields));
-    fat_rwlock_unlock(&table->cls.rwlock);
 
     atomic_init(&table->n_matched, 0);
     atomic_init(&table->n_missed, 0);
@@ -6174,9 +6806,7 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
-    fat_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
-    fat_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
@@ -6237,7 +6867,6 @@ oftable_enable_eviction(struct oftable *table,
                         const struct mf_subfield *fields, size_t n_fields)
     OVS_REQUIRES(ofproto_mutex)
 {
-    struct cls_cursor cursor;
     struct rule *rule;
 
     if (table->eviction_fields
@@ -6258,25 +6887,17 @@ oftable_enable_eviction(struct oftable *table,
     hmap_init(&table->eviction_groups_by_id);
     heap_init(&table->eviction_groups_by_size);
 
-    fat_rwlock_rdlock(&table->cls.rwlock);
-    cls_cursor_init(&cursor, &table->cls, NULL);
-    CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+    CLS_FOR_EACH (rule, cr, &table->cls) {
         eviction_group_add_rule(rule);
     }
-    fat_rwlock_unlock(&table->cls.rwlock);
 }
 
-/* Removes 'rule' from the oftable that contains it. */
+/* Removes 'rule' from the ofproto data structures AFTER the caller has
+ * removed it from the classifier. */
 static void
-oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
+ofproto_rule_remove__(struct ofproto *ofproto, struct rule *rule)
     OVS_REQUIRES(ofproto_mutex)
 {
-    struct classifier *cls = &ofproto->tables[rule->table_id].cls;
-
-    fat_rwlock_wrlock(&cls->rwlock);
-    classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    fat_rwlock_unlock(&cls->rwlock);
-
     cookies_remove(ofproto, rule);
 
     eviction_group_remove_rule(rule);
@@ -6293,7 +6914,11 @@ static void
 oftable_remove_rule(struct rule *rule)
     OVS_REQUIRES(ofproto_mutex)
 {
-    oftable_remove_rule__(rule->ofproto, rule);
+    struct classifier *cls = &rule->ofproto->tables[rule->table_id].cls;
+
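+    /* Finish removing the rule from the ofproto data structures only if it
+     * was still present in the classifier. */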
+    if (classifier_remove(cls, &rule->cr)) {
+        ofproto_rule_remove__(rule->ofproto, rule);
+    }
 }
 \f
 /* unixctl commands. */
@@ -6365,12 +6990,9 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
     ofproto->vlans_changed = false;
 
     OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
-        struct cls_cursor cursor;
         struct rule *rule;
 
-        fat_rwlock_rdlock(&oftable->cls.rwlock);
-        cls_cursor_init(&cursor, &oftable->cls, &target);
-        CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+        CLS_FOR_EACH_TARGET (rule, cr, &oftable->cls, &target) {
             if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
                 uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
 
@@ -6378,7 +7000,6 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
                 bitmap_set1(ofproto->vlan_bitmap, vid);
             }
         }
-        fat_rwlock_unlock(&oftable->cls.rwlock);
     }
 
     cls_rule_destroy(&target);