datapath: net: make skb_gso_segment error handling more robust
diff --git a/datapath/datapath.c b/datapath/datapath.c
index 10706f5..31db569 100644
@@ -48,8 +48,6 @@
 #include <linux/openvswitch.h>
 #include <linux/rculist.h>
 #include <linux/dmi.h>
-#include <linux/genetlink.h>
-#include <net/genetlink.h>
 #include <net/genetlink.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
@@ -82,11 +80,12 @@ struct genl_multicast_group ovs_dp_vport_multicast_group = {
 
 /* Check if we need to build a reply message.
  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
-static bool ovs_must_notify(struct genl_info *info,
-                           const struct genl_multicast_group *grp)
+static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
+                           unsigned int group)
 {
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
-               netlink_has_listeners(genl_info_net(info)->genl_sock, GROUP_ID(grp));
+              genl_has_listeners(family, genl_info_net(info)->genl_sock,
+                                 group);
 }
 
 static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
@@ -136,23 +135,35 @@ int lockdep_ovsl_is_held(void)
 #endif
 
 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
+                            const struct sw_flow_key *,
                             const struct dp_upcall_info *);
 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
+                                 const struct sw_flow_key *key,
                                  const struct dp_upcall_info *);
 
-/* Must be called with rcu_read_lock or ovs_mutex. */
-static struct datapath *get_dp(struct net *net, int dp_ifindex)
+/* Must be called with rcu_read_lock. */
+static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
 {
-       struct datapath *dp = NULL;
-       struct net_device *dev;
+       struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);
 
-       rcu_read_lock();
-       dev = dev_get_by_index_rcu(net, dp_ifindex);
        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
-                       dp = vport->dp;
+                       return vport->dp;
        }
+
+       return NULL;
+}
+
+/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
+ * returned dp pointer valid. */
+static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
+{
+       struct datapath *dp;
+
+       WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
+       rcu_read_lock();
+       dp = get_dp_rcu(net, dp_ifindex);
        rcu_read_unlock();
 
        return dp;
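
A minimal usage sketch of the split helpers (illustrative only, not part of the patch): get_dp_rcu() is for callers that are already inside an RCU read-side section, while get_dp() keeps working for slow-path callers that hold ovs_mutex:

	/* Fast path / dump callbacks, already under RCU: */
	rcu_read_lock();
	dp = get_dp_rcu(net, dp_ifindex);
	if (dp) {
		/* ... use dp while the read-side section is held ... */
	}
	rcu_read_unlock();

	/* Slow-path commands that hold ovs_mutex instead: */
	ovs_lock();
	dp = get_dp(net, dp_ifindex);	/* the WARN_ON_ONCE() above stays quiet here */
	if (dp) {
		/* ... dp stays valid for as long as ovs_mutex is held ... */
	}
	ovs_unlock();
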
@@ -165,7 +176,7 @@ const char *ovs_dp_name(const struct datapath *dp)
        return vport->ops->get_name(vport);
 }
 
-static int get_dpifindex(struct datapath *dp)
+static int get_dpifindex(const struct datapath *dp)
 {
        struct vport *local;
        int ifindex;
@@ -240,12 +251,13 @@ void ovs_dp_detach_port(struct vport *p)
        ovs_vport_del(p);
 }
 
-void ovs_dp_process_packet_with_key(struct sk_buff *skb,
-               struct sw_flow_key *pkt_key)
+/* Must be called with rcu_read_lock. */
+void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
 {
        const struct vport *p = OVS_CB(skb)->input_vport;
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
+       struct sw_flow_actions *sf_acts;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        u32 n_mask_hit;
@@ -253,55 +265,43 @@ void ovs_dp_process_packet_with_key(struct sk_buff *skb,
        stats = this_cpu_ptr(dp->stats_percpu);
 
        /* Look up flow. */
-       flow = ovs_flow_tbl_lookup_stats(&dp->table, pkt_key, skb_get_rxhash(skb),
+       flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
                                         &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;
+               int error;
 
                upcall.cmd = OVS_PACKET_CMD_MISS;
-               upcall.key = pkt_key;
                upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
-               ovs_dp_upcall(dp, skb, &upcall);
-               consume_skb(skb);
+               upcall.egress_tun_info = NULL;
+
+               error = ovs_dp_upcall(dp, skb, key, &upcall);
+               if (unlikely(error))
+                       kfree_skb(skb);
+               else
+                       consume_skb(skb);
+
                stats_counter = &stats->n_missed;
                goto out;
        }
 
-       OVS_CB(skb)->pkt_key = pkt_key;
-       OVS_CB(skb)->flow = flow;
+       ovs_flow_stats_update(flow, key->tp.flags, skb);
 
-       ovs_flow_stats_update(OVS_CB(skb)->flow, pkt_key->tp.flags, skb);
-       ovs_execute_actions(dp, skb);
+       sf_acts = rcu_dereference(flow->sf_acts);
+       ovs_execute_actions(dp, skb, key, sf_acts);
        stats_counter = &stats->n_hit;
 
 out:
        /* Update datapath statistics. */
-       u64_stats_update_begin(&stats->sync);
+       u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
-       u64_stats_update_end(&stats->sync);
-}
-
-/* Must be called with rcu_read_lock. */
-void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
-{
-       int error;
-       struct sw_flow_key key;
-
-       OVS_CB(skb)->input_vport = p;
-
-       /* Extract flow from 'skb' into 'key'. */
-       error = ovs_flow_extract(skb, p->port_no, &key);
-       if (unlikely(error)) {
-               kfree_skb(skb);
-               return;
-       }
-
-       ovs_dp_process_packet_with_key(skb, &key);
+       u64_stats_update_end(&stats->syncp);
 }
 
 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
+                 const struct sw_flow_key *key,
                  const struct dp_upcall_info *upcall_info)
 {
        struct dp_stats_percpu *stats;
@@ -313,9 +313,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
        }
 
        if (!skb_is_gso(skb))
-               err = queue_userspace_packet(dp, skb, upcall_info);
+               err = queue_userspace_packet(dp, skb, key, upcall_info);
        else
-               err = queue_gso_packets(dp, skb, upcall_info);
+               err = queue_gso_packets(dp, skb, key, upcall_info);
        if (err)
                goto err;
 
@@ -324,45 +324,51 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
 err:
        stats = this_cpu_ptr(dp->stats_percpu);
 
-       u64_stats_update_begin(&stats->sync);
+       u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
-       u64_stats_update_end(&stats->sync);
+       u64_stats_update_end(&stats->syncp);
 
        return err;
 }
 
 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
+                            const struct sw_flow_key *key,
                             const struct dp_upcall_info *upcall_info)
 {
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
-       struct dp_upcall_info later_info;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
+       struct ovs_skb_cb ovs_cb;
        int err;
 
+       ovs_cb = *OVS_CB(skb);
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
+       *OVS_CB(skb) = ovs_cb;
        if (IS_ERR(segs))
                return PTR_ERR(segs);
+       if (segs == NULL)
+               return -EINVAL;
+
+       if (gso_type & SKB_GSO_UDP) {
+               /* The initial flow key extracted by ovs_flow_key_extract()
+                * in this case is for a first fragment, so we need to
+                * properly mark later fragments.
+                */
+               later_key = *key;
+               later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+       }
 
        /* Queue all of the segments. */
        skb = segs;
        do {
-               err = queue_userspace_packet(dp, skb, upcall_info);
+               *OVS_CB(skb) = ovs_cb;
+               if (gso_type & SKB_GSO_UDP && skb != segs)
+                       key = &later_key;
+
+               err = queue_userspace_packet(dp, skb, key, upcall_info);
                if (err)
                        break;
 
-               if (skb == segs && gso_type & SKB_GSO_UDP) {
-                       /* The initial flow key extracted by ovs_flow_extract()
-                        * in this case is for a first fragment, so we need to
-                        * properly mark later fragments.
-                        */
-                       later_key = *upcall_info->key;
-                       later_key.ip.frag = OVS_FRAG_TYPE_LATER;
-
-                       later_info = *upcall_info;
-                       later_info.key = &later_key;
-                       upcall_info = &later_info;
-               }
        } while ((skb = skb->next));
 
        /* Free all of the segments. */
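
For reference, a self-contained sketch of the defensive __skb_gso_segment() calling pattern introduced above: both the error-pointer and the NULL return are handled before the segment list is walked. handle_one_segment() is a hypothetical consumer, not an existing function in this file:

	static int segment_and_handle(struct sk_buff *skb)
	{
		struct sk_buff *segs, *next;
		int err = 0;

		segs = __skb_gso_segment(skb, NETIF_F_SG, false);
		if (IS_ERR(segs))	/* segmentation itself failed */
			return PTR_ERR(segs);
		if (!segs)		/* defensive: no segment list was produced */
			return -EINVAL;

		/* Walk the list; after a first error just free the remainder. */
		do {
			next = segs->next;
			segs->next = NULL;
			if (!err)
				err = handle_one_segment(segs);	/* hypothetical, consumes segs */
			else
				kfree_skb(segs);
		} while ((segs = next) != NULL);

		return err;
	}
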
@@ -377,52 +383,34 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
        return err;
 }
 
-static size_t key_attr_size(void)
-{
-       return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
-               + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
-                 + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
-                 + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
-                 + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
-                 + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
-                 + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
-                 + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
-                 + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
-               + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
-               + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
-               + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
-               + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
-               + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
-               + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
-               + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
-               + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
-               + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
-               + nla_total_size(28); /* OVS_KEY_ATTR_ND */
-}
-
-static size_t upcall_msg_size(const struct nlattr *userdata,
+static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
                              unsigned int hdrlen)
 {
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
-               + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
+               + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */
 
        /* OVS_PACKET_ATTR_USERDATA */
-       if (userdata)
-               size += NLA_ALIGN(userdata->nla_len);
+       if (upcall_info->userdata)
+               size += NLA_ALIGN(upcall_info->userdata->nla_len);
+
+       /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
+       if (upcall_info->egress_tun_info)
+               size += nla_total_size(ovs_tun_key_attr_size());
 
        return size;
 }
 
 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
+                                 const struct sw_flow_key *key,
                                  const struct dp_upcall_info *upcall_info)
 {
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
-       struct sk_buff *user_skb; /* to be queued to userspace */
+       struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+#ifdef HAVE_GENLMSG_NEW_UNICAST
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
 #endif
                .snd_portid = upcall_info->portid,
@@ -468,7 +456,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        else
                hlen = skb->len;
 
-       len = upcall_msg_size(upcall_info->userdata, hlen);
+       len = upcall_msg_size(upcall_info, hlen);
        user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
@@ -480,7 +468,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        upcall->dp_ifindex = dp_ifindex;
 
        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-       ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+       err = ovs_nla_put_flow(key, key, user_skb);
+       BUG_ON(err);
        nla_nest_end(user_skb, nla);
 
        if (upcall_info->userdata)
@@ -488,6 +477,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));
 
+       if (upcall_info->egress_tun_info) {
+               nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+               err = ovs_nla_put_egress_tunnel_key(user_skb,
+                                                   upcall_info->egress_tun_info);
+               BUG_ON(err);
+               nla_nest_end(user_skb, nla);
+       }
+
        /* Only reserve room for attribute header, packet data is added
         * in skb_zerocopy() */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
@@ -511,9 +508,12 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
 
        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
+       user_skb = NULL;
 out:
        if (err)
                skb_tx_error(skb);
+
+       kfree_skb(user_skb);
        kfree_skb(nskb);
        return err;
 }
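
The two small changes above follow a common kernel cleanup idiom: once genlmsg_unicast() has taken ownership of user_skb, the local pointer is cleared so that the shared exit path can call kfree_skb() unconditionally (kfree_skb(NULL) is a no-op). A sketch of the same shape, where struct some_ctx, build_reply(), fill_reply() and send_reply() are hypothetical stand-ins:

	static int reply_and_send(struct some_ctx *ctx)
	{
		struct sk_buff *reply;
		int err;

		reply = build_reply(ctx);
		if (!reply)
			return -ENOMEM;

		err = fill_reply(reply, ctx);
		if (err)
			goto out;		/* we still own 'reply' here */

		err = send_reply(reply);	/* consumes 'reply', even on error */
		reply = NULL;			/* so the exit path must not free it */
	out:
		kfree_skb(reply);		/* no-op when reply == NULL */
		return err;
	}
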
@@ -525,11 +525,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
+       struct sw_flow_actions *sf_acts;
        struct datapath *dp;
        struct ethhdr *eth;
        struct vport *input_vport;
        int len;
        int err;
+       bool log = !a[OVS_FLOW_ATTR_PROBE];
 
        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -562,31 +564,23 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(flow))
                goto err_kfree_skb;
 
-       err = ovs_flow_extract(packet, -1, &flow->key);
+       err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
+                                            &flow->key, log);
        if (err)
                goto err_flow_free;
 
-       err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
-       if (err)
-               goto err_flow_free;
-       acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
-       err = PTR_ERR(acts);
-       if (IS_ERR(acts))
-               goto err_flow_free;
-
        err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
-                                  &flow->key, 0, &acts);
-       rcu_assign_pointer(flow->sf_acts, acts);
+                                  &flow->key, &acts, log);
        if (err)
                goto err_flow_free;
 
-       OVS_CB(packet)->flow = flow;
-       OVS_CB(packet)->pkt_key = &flow->key;
+       rcu_assign_pointer(flow->sf_acts, acts);
+       OVS_CB(packet)->egress_tun_info = NULL;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;
 
        rcu_read_lock();
-       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;
@@ -599,9 +593,10 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock;
 
        OVS_CB(packet)->input_vport = input_vport;
+       sf_acts = rcu_dereference(flow->sf_acts);
 
        local_bh_disable();
-       err = ovs_execute_actions(dp, packet);
+       err = ovs_execute_actions(dp, packet, &flow->key, sf_acts);
        local_bh_enable();
        rcu_read_unlock();
 
@@ -644,7 +639,7 @@ static struct genl_family dp_packet_genl_family = {
        .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
 };
 
-static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
+static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
 {
        int i;
@@ -664,9 +659,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
 
                do {
-                       start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+                       start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
-               } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+               } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
 
                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
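
The reader above pairs with the per-packet writers earlier in this diff: both sides must use the same u64_stats_sync member (now named syncp), with u64_stats_update_begin()/_end() on the write side and the _irq fetch/retry variants (replacing the older _bh ones) on the read side. A compact pairing sketch on a hypothetical pkt_counter struct:

	struct pkt_counter {
		u64 packets;
		struct u64_stats_sync syncp;
	};

	static void count_one(struct pkt_counter *c)		/* writer, per-CPU hot path */
	{
		u64_stats_update_begin(&c->syncp);
		c->packets++;
		u64_stats_update_end(&c->syncp);
	}

	static u64 read_packets(const struct pkt_counter *c)	/* reader, control path */
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin_irq(&c->syncp);
			val = c->packets;
		} while (u64_stats_fetch_retry_irq(&c->syncp, start));

		return val;
	}
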
@@ -678,8 +673,8 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 {
        return NLMSG_ALIGN(sizeof(struct ovs_header))
-               + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
-               + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+               + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+               + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -687,58 +682,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
 }
 
 /* Called with ovs_mutex or RCU read lock. */
-static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
-                                 struct sk_buff *skb, u32 portid,
-                                 u32 seq, u32 flags, u8 cmd)
+static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
+                                  struct sk_buff *skb)
 {
-       const int skb_orig_len = skb->len;
-       struct nlattr *start;
-       struct ovs_flow_stats stats;
-       __be16 tcp_flags;
-       unsigned long used;
-       struct ovs_header *ovs_header;
        struct nlattr *nla;
        int err;
 
-       ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
-       if (!ovs_header)
-               return -EMSGSIZE;
-
-       ovs_header->dp_ifindex = dp_ifindex;
-
        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
-               goto nla_put_failure;
+               return -EMSGSIZE;
 
-       err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
+       err = ovs_nla_put_flow(&flow->unmasked_key,
+                              &flow->unmasked_key, skb);
        if (err)
-               goto error;
+               return err;
        nla_nest_end(skb, nla);
 
+       /* Fill flow mask. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
        if (!nla)
-               goto nla_put_failure;
+               return -EMSGSIZE;
 
        err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
        if (err)
-               goto error;
-
+               return err;
        nla_nest_end(skb, nla);
 
+       return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
+                                  struct sk_buff *skb)
+{
+       struct ovs_flow_stats stats;
+       __be16 tcp_flags;
+       unsigned long used;
+
        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
 
        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
-               goto nla_put_failure;
+               return -EMSGSIZE;
 
        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
-               goto nla_put_failure;
+               return -EMSGSIZE;
 
        if ((u8)ntohs(tcp_flags) &&
             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
-               goto nla_put_failure;
+               return -EMSGSIZE;
+
+       return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
+                                    struct sk_buff *skb, int skb_orig_len)
+{
+       struct nlattr *start;
+       int err;
 
        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
@@ -762,17 +766,45 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
-                               goto error;
+                               return err;
 
                        nla_nest_cancel(skb, start);
                }
-       } else if (skb_orig_len)
-               goto nla_put_failure;
+       } else if (skb_orig_len) {
+               return -EMSGSIZE;
+       }
+
+       return 0;
+}
+
+/* Called with ovs_mutex or RCU read lock. */
+static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
+                                 struct sk_buff *skb, u32 portid,
+                                 u32 seq, u32 flags, u8 cmd)
+{
+       const int skb_orig_len = skb->len;
+       struct ovs_header *ovs_header;
+       int err;
+
+       ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
+       if (!ovs_header)
+               return -EMSGSIZE;
+       ovs_header->dp_ifindex = dp_ifindex;
+
+       err = ovs_flow_cmd_fill_match(flow, skb);
+       if (err)
+               goto error;
+
+       err = ovs_flow_cmd_fill_stats(flow, skb);
+       if (err)
+               goto error;
+
+       err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
+       if (err)
+               goto error;
 
        return genlmsg_end(skb, ovs_header);
 
-nla_put_failure:
-       err = -EMSGSIZE;
 error:
        genlmsg_cancel(skb, ovs_header);
        return err;
@@ -785,7 +817,8 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 {
        struct sk_buff *skb;
 
-       if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
+       if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
+                                       GROUP_ID(&ovs_dp_flow_multicast_group)))
                return NULL;
 
        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
@@ -797,7 +830,8 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act
 }
 
 /* Called with ovs_mutex. */
-static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
+static struct sk_buff *ovs_flow_cmd_build_info(struct datapath *dp,
+                                              const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
                                               bool always)
@@ -807,7 +841,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
 
        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
                                      always);
-       if (!skb || IS_ERR(skb))
+       if (IS_ERR_OR_NULL(skb))
                return skb;
 
        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
@@ -828,13 +862,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
        int error;
+       bool log = !a[OVS_FLOW_ATTR_PROBE];
 
        /* Must have key and actions. */
        error = -EINVAL;
-       if (!a[OVS_FLOW_ATTR_KEY])
+       if (!a[OVS_FLOW_ATTR_KEY]) {
+               OVS_NLERR(log, "Flow key attribute not present in new flow.");
                goto error;
-       if (!a[OVS_FLOW_ATTR_ACTIONS])
+       }
+       if (!a[OVS_FLOW_ATTR_ACTIONS]) {
+               OVS_NLERR(log,
+                         "Flow actions attribute not present in new flow.");
                goto error;
+       }
 
        /* Most of the time we need to allocate a new flow, do it before
         * locking. */
@@ -846,24 +886,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
 
        /* Extract key. */
        ovs_match_init(&match, &new_flow->unmasked_key, &mask);
-       error = ovs_nla_get_match(&match,
-                                 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+       error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+                                 a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto err_kfree_flow;
 
        ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
 
        /* Validate actions. */
-       acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
-       error = PTR_ERR(acts);
-       if (IS_ERR(acts))
-               goto err_kfree_flow;
-
        error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
-                                    0, &acts);
+                                    &acts, log);
        if (error) {
-               OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-               goto err_kfree_acts;
+               OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
+               goto err_kfree_flow;
        }
 
        reply = ovs_flow_cmd_alloc_info(acts, info, false);
@@ -915,8 +953,12 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-                       error = -EEXIST;
-                       goto err_unlock_ovs;
+                       /* Look for any overlapping flow. */
+                       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+                       if (!flow) {
+                               error = -ENOENT;
+                               goto err_unlock_ovs;
+                       }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -951,11 +993,32 @@ error:
        return error;
 }
 
+/* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
+static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
+                                               const struct sw_flow_key *key,
+                                               const struct sw_flow_mask *mask,
+                                               bool log)
+{
+       struct sw_flow_actions *acts;
+       struct sw_flow_key masked_key;
+       int error;
+
+       ovs_flow_mask_key(&masked_key, key, mask);
+       error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
+       if (error) {
+               OVS_NLERR(log,
+                         "Actions may not be safe on all matching packets.");
+               return ERR_PTR(error);
+       }
+
+       return acts;
+}
+
 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
 {
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
-       struct sw_flow_key key, masked_key;
+       struct sw_flow_key key;
        struct sw_flow *flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply = NULL;
@@ -963,36 +1026,31 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
        int error;
+       bool log = !a[OVS_FLOW_ATTR_PROBE];
 
        /* Extract key. */
        error = -EINVAL;
-       if (!a[OVS_FLOW_ATTR_KEY])
+       if (!a[OVS_FLOW_ATTR_KEY]) {
+               OVS_NLERR(log, "Flow key attribute not present in set flow.");
                goto error;
+       }
 
        ovs_match_init(&match, &key, &mask);
-       error = ovs_nla_get_match(&match,
-                                 a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
+       error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
+                                 a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto error;
 
        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
-               acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
-               error = PTR_ERR(acts);
-               if (IS_ERR(acts))
+               acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
+                                       log);
+               if (IS_ERR(acts)) {
+                       error = PTR_ERR(acts);
                        goto error;
-
-               ovs_flow_mask_key(&masked_key, &key, &mask);
-               error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
-                                            &masked_key, 0, &acts);
-               if (error) {
-                       OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
-                       goto err_kfree_acts;
                }
-       }
 
-       /* Can allocate before locking if have acts. */
-       if (acts) {
+               /* Can allocate before locking if have acts. */
                reply = ovs_flow_cmd_alloc_info(acts, info, false);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
@@ -1007,16 +1065,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }
-       /* The unmasked key has to be the same for flow updates. */
-       if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-               error = -EEXIST;
-               goto err_unlock_ovs;
-       }
+
        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -1032,7 +1086,8 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                }
        } else {
                /* Could not alloc without acts before locking. */
-               reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
+               reply = ovs_flow_cmd_build_info(dp, flow,
+                                               ovs_header->dp_ifindex,
                                                info, OVS_FLOW_CMD_NEW, false);
                if (unlikely(IS_ERR(reply))) {
                        error = PTR_ERR(reply);
@@ -1070,14 +1125,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        struct sw_flow_match match;
        int err;
+       bool log = !a[OVS_FLOW_ATTR_PROBE];
 
        if (!a[OVS_FLOW_ATTR_KEY]) {
-               OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
+               OVS_NLERR(log,
+                         "Flow get message rejected, Key attribute missing.");
                return -EINVAL;
        }
 
        ovs_match_init(&match, &key, NULL);
-       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+       err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log);
        if (err)
                return err;
 
@@ -1088,13 +1145,13 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (!flow) {
                err = -ENOENT;
                goto unlock;
        }
 
-       reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
+       reply = ovs_flow_cmd_build_info(dp, flow, ovs_header->dp_ifindex, info,
                                        OVS_FLOW_CMD_NEW, true);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
@@ -1118,10 +1175,12 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
        struct datapath *dp;
        struct sw_flow_match match;
        int err;
+       bool log = !a[OVS_FLOW_ATTR_PROBE];
 
        if (likely(a[OVS_FLOW_ATTR_KEY])) {
                ovs_match_init(&match, &key, NULL);
-               err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
+               err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
+                                       log);
                if (unlikely(err))
                        return err;
        }
@@ -1136,8 +1195,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }
@@ -1180,7 +1239,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct datapath *dp;
 
        rcu_read_lock();
-       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
@@ -1212,8 +1271,10 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
 
 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+       [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+       [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
 };
 
 static struct genl_ops dp_flow_genl_ops[] = {
@@ -1314,7 +1375,7 @@ static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
 
 /* Called with rcu_read_lock or ovs_mutex. */
 static struct datapath *lookup_datapath(struct net *net,
-                                       struct ovs_header *ovs_header,
+                                       const struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
 {
        struct datapath *dp;
@@ -1342,7 +1403,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
        dp->user_features = 0;
 }
 
-static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
+static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
 {
        if (a[OVS_DP_ATTR_USER_FEATURES])
                dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
@@ -1378,18 +1439,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (err)
                goto err_free_dp;
 
-       dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+       dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }
 
-       for_each_possible_cpu(i) {
-               struct dp_stats_percpu *dpath_stats;
-               dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
-               u64_stats_init(&dpath_stats->sync);
-       }
-
        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
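
netdev_alloc_pcpu_stats() replaces the open-coded loop deleted above: it allocates the per-CPU area and seeds every CPU's u64_stats_sync, and it expects that member to be named syncp, which lines up with the sync to syncp rename elsewhere in this diff. Roughly, the call expands to a sketch like this (using the kernel's own helpers):

	struct dp_stats_percpu __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct dp_stats_percpu);
	if (stats) {
		for_each_possible_cpu(cpu)
			u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
	}
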
@@ -1709,7 +1764,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
 
 /* Called with ovs_mutex or RCU read lock. */
 static struct vport *lookup_vport(struct net *net,
-                                 struct ovs_header *ovs_header,
+                                 const struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
 {
        struct datapath *dp;
@@ -1800,10 +1855,6 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        if (IS_ERR(vport))
                goto exit_unlock_free;
 
-       err = 0;
-       if (a[OVS_VPORT_ATTR_STATS])
-               ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
-
        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);
@@ -1847,10 +1898,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
                        goto exit_unlock_free;
        }
 
-       if (a[OVS_VPORT_ATTR_STATS])
-               ovs_vport_set_stats(vport, nla_data(a[OVS_VPORT_ATTR_STATS]));
-
-
        if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
                err = ovs_vport_set_upcall_portids(vport,
                                                   a[OVS_VPORT_ATTR_UPCALL_PID]);
@@ -1947,7 +1994,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int i, j = 0;
 
        rcu_read_lock();
-       dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
+       dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
@@ -2099,10 +2146,14 @@ static int __init dp_init(void)
        pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
                VERSION);
 
-       err = ovs_flow_init();
+       err = action_fifos_init();
        if (err)
                goto error;
 
+       err = ovs_flow_init();
+       if (err)
+               goto error_action_fifos_exit;
+
        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;
@@ -2129,6 +2180,8 @@ error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
        ovs_flow_exit();
+error_action_fifos_exit:
+       action_fifos_exit();
 error:
        return err;
 }
@@ -2141,6 +2194,7 @@ static void dp_cleanup(void)
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
+       action_fifos_exit();
 }
 
 module_init(dp_init);