net/openvswitch/datapath.c
/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;
EXPORT_SYMBOL_GPL(ovs_net_id);

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static const struct nla_policy flow_policy[];

static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP,
};

static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP,
};
/* Check whether we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
                            unsigned int group)
{
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
               genl_has_listeners(family, genl_info_net(info), group);
}

static void ovs_notify(struct genl_family *family,
                       struct sk_buff *skb, struct genl_info *info)
{
        genl_notify(family, skb, info, 0, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes to device state (add/remove datapath or port, set operations
 * on vports, etc.) and writes to other state (flow table modifications,
 * miscellaneous datapath parameters, etc.) are protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact
 * with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}
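
/*
 * A minimal usage sketch (not code from this file): writers bracket
 * modifications with ovs_lock()/ovs_unlock(), while readers rely on
 * rcu_read_lock()/rcu_read_unlock() around lookups such as get_dp_rcu().
 */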

#ifdef CONFIG_LOCKDEP
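/* Once lockdep has turned itself off (debug_locks == 0) it can no longer
 * prove anything, so err on the side of letting the asserts pass.
 */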
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held);
#endif

static struct vport *new_vport(const struct vport_parms *);
static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
                             const struct sw_flow_key *,
                             const struct dp_upcall_info *,
                             uint32_t cutlen);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct sw_flow_key *,
                                  const struct dp_upcall_info *,
                                  uint32_t cutlen);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
        struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        return vport->dp;
        }

        return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp;

        WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
        rcu_read_lock();
        dp = get_dp_rcu(net, dp_ifindex);
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return ovs_vport_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = local->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy(&dp->table);
        free_percpu(dp->stats_percpu);
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
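        /* DP_VPORT_HASH_BUCKETS is a power of two, so the mask below is a
         * cheap modulo.
         */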
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
        const struct vport *p = OVS_CB(skb)->input_vport;
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
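        /* Number of masks probed during flow lookup; feeds megaflow stats. */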
        u32 n_mask_hit;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup_stats(&dp->table, key, &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;
                int error;

                memset(&upcall, 0, sizeof(upcall));
                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
                upcall.mru = OVS_CB(skb)->mru;
                error = ovs_dp_upcall(dp, skb, key, &upcall, 0);
                if (unlikely(error))
                        kfree_skb(skb);
                else
                        consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        ovs_flow_stats_update(flow, key->tp.flags, skb);
        sf_acts = rcu_dereference(flow->sf_acts);
        ovs_execute_actions(dp, skb, sf_acts, key);

        stats_counter = &stats->n_hit;

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
        u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct sw_flow_key *key,
                  const struct dp_upcall_info *upcall_info,
                  uint32_t cutlen)
{
        struct dp_stats_percpu *stats;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
        else
                err = queue_gso_packets(dp, skb, key, upcall_info, cutlen);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
        u64_stats_update_end(&stats->syncp);

        return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct sw_flow_key *key,
                             const struct dp_upcall_info *upcall_info,
                             uint32_t cutlen)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        int err;

        BUILD_BUG_ON(sizeof(*OVS_CB(skb)) > SKB_SGO_CB_OFFSET);
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
                return -EINVAL;

        if (gso_type & SKB_GSO_UDP) {
                /* The initial flow key extracted by ovs_flow_key_extract()
                 * in this case is for a first fragment, so we need to
                 * properly mark later fragments.
                 */
                later_key = *key;
                later_key.ip.frag = OVS_FRAG_TYPE_LATER;
        }

        /* Queue all of the segments. */
        skb = segs;
        do {
                if (gso_type & SKB_GSO_UDP && skb != segs)
                        key = &later_key;

                err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
                if (err)
                        break;

        } while ((skb = skb->next));

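        /* 'err' from the queueing loop above decides whether the segments
         * count as dropped or as consumed.
         */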
        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
                              unsigned int hdrlen)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(ovs_key_attr_size()) /* OVS_PACKET_ATTR_KEY */
                + nla_total_size(sizeof(unsigned int)); /* OVS_PACKET_ATTR_LEN */

        /* OVS_PACKET_ATTR_USERDATA */
        if (upcall_info->userdata)
                size += NLA_ALIGN(upcall_info->userdata->nla_len);

        /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
        if (upcall_info->egress_tun_info)
                size += nla_total_size(ovs_tun_key_attr_size());

        /* OVS_PACKET_ATTR_ACTIONS */
        if (upcall_info->actions_len)
                size += nla_total_size(upcall_info->actions_len);

        /* OVS_PACKET_ATTR_MRU */
        if (upcall_info->mru)
                size += nla_total_size(sizeof(upcall_info->mru));

        return size;
}

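/* Zero-pad the skb tail out to the next NLA_ALIGNTO boundary unless
 * userspace negotiated unaligned Netlink payloads (OVS_DP_F_UNALIGNED).
 */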
static void pad_packet(struct datapath *dp, struct sk_buff *skb)
{
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
                size_t plen = NLA_ALIGN(skb->len) - skb->len;

                if (plen > 0)
                        memset(skb_put(skb, plen), 0, plen);
        }
}

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                                  const struct sw_flow_key *key,
                                  const struct dp_upcall_info *upcall_info,
                                  uint32_t cutlen)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        size_t len;
        unsigned int hlen;
        int err, dp_ifindex;

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex)
                return -ENODEV;

        if (skb_vlan_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_hwaccel_push_inside(nskb);
                if (!nskb)
                        return -ENOMEM;

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        /* Complete checksum if needed */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (err = skb_checksum_help(skb)))
                goto out;

        /* Older versions of OVS user space enforce alignment of the last
         * Netlink attribute to NLA_ALIGNTO which would require extensive
         * padding logic. Only perform zerocopy if padding is not required.
         */
        if (dp->user_features & OVS_DP_F_UNALIGNED)
                hlen = skb_zerocopy_headlen(skb);
        else
                hlen = skb->len;

        len = upcall_msg_size(upcall_info, hlen - cutlen);
        user_skb = genlmsg_new(len, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        if (!upcall) {          /* no room in user_skb for the genl header */
                err = -EINVAL;
                goto out;
        }
        upcall->dp_ifindex = dp_ifindex;

        err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
        BUG_ON(err);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
                err = ovs_nla_put_tunnel_info(user_skb,
                                              upcall_info->egress_tun_info);
                BUG_ON(err);
                nla_nest_end(user_skb, nla);
        }

        if (upcall_info->actions_len) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
                err = ovs_nla_put_actions(upcall_info->actions,
                                          upcall_info->actions_len,
                                          user_skb);
                if (!err)
                        nla_nest_end(user_skb, nla);
                else
                        nla_nest_cancel(user_skb, nla);
        }

        /* Add OVS_PACKET_ATTR_MRU */
        if (upcall_info->mru) {
                if (nla_put_u16(user_skb, OVS_PACKET_ATTR_MRU,
                                upcall_info->mru)) {
                        err = -ENOBUFS;
                        goto out;
                }
                pad_packet(dp, user_skb);
        }

        /* Add OVS_PACKET_ATTR_LEN when packet is truncated */
        if (cutlen > 0) {
                if (nla_put_u32(user_skb, OVS_PACKET_ATTR_LEN,
                                skb->len)) {
                        err = -ENOBUFS;
                        goto out;
                }
                pad_packet(dp, user_skb);
        }

        /* Only reserve room for the attribute header; packet data is added
         * in skb_zerocopy(). */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
                err = -ENOBUFS;
                goto out;
        }
        nla->nla_len = nla_attr_size(skb->len - cutlen);

        err = skb_zerocopy(user_skb, skb, skb->len - cutlen, hlen);
        if (err)
                goto out;

        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        pad_packet(dp, user_skb);

        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
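        /* genlmsg_unicast() consumes user_skb even on failure; clear it so
         * the shared error path below does not free it again.
         */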
        user_skb = NULL;
out:
        if (err)
                skb_tx_error(skb);
        kfree_skb(user_skb);
        kfree_skb(nskb);
        return err;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct datapath *dp;
        struct ethhdr *eth;
        struct vport *input_vport;
        u16 mru = 0;
        int len;
        int err;
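        /* Userspace feature probes set OVS_PACKET_ATTR_PROBE so that
         * expected failures are not logged.
         */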
        bool log = !a[OVS_PACKET_ATTR_PROBE];

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have. */
        if (eth_proto_is_802_3(eth->h_proto))
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Set packet's mru */
        if (a[OVS_PACKET_ATTR_MRU]) {
                mru = nla_get_u16(a[OVS_PACKET_ATTR_MRU]);
                packet->ignore_df = 1;
        }
        OVS_CB(packet)->mru = mru;

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_key_extract_userspace(net, a[OVS_PACKET_ATTR_KEY],
                                             packet, &flow->key, log);
        if (err)
                goto err_flow_free;

        err = ovs_nla_copy_actions(net, a[OVS_PACKET_ATTR_ACTIONS],
                                   &flow->key, &acts, log);
        if (err)
                goto err_flow_free;

        rcu_assign_pointer(flow->sf_acts, acts);
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp_rcu(net, ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
        if (!input_vport)
                input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

        if (!input_vport)
                goto err_unlock;

        packet->dev = input_vport->dev;
        OVS_CB(packet)->input_vport = input_vport;
        sf_acts = rcu_dereference(flow->sf_acts);

        local_bh_disable();
        err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
};

static const struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_packet_genl_ops,
        .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
{
        int i;

        memset(mega_stats, 0, sizeof(*mega_stats));

        stats->n_flows = ovs_flow_tbl_count(&dp->table);
        mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;

        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

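                /* Retry until the seqcount is stable so that the 64-bit
                 * counters read consistently, even on 32-bit SMP.
                 */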
                do {
                        start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
                mega_stats->n_mask_hit += local_stats.n_mask_hit;
        }
}

static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags)
{
        return ovs_identifier_is_ufid(sfid) &&
               !(ufid_flags & OVS_UFID_F_OMIT_KEY);
}

static bool should_fill_mask(uint32_t ufid_flags)
{
        return !(ufid_flags & OVS_UFID_F_OMIT_MASK);
}

static bool should_fill_actions(uint32_t ufid_flags)
{
        return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS);
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts,
                                    const struct sw_flow_id *sfid,
                                    uint32_t ufid_flags)
{
        size_t len = NLMSG_ALIGN(sizeof(struct ovs_header));

        /* OVS_FLOW_ATTR_UFID */
        if (sfid && ovs_identifier_is_ufid(sfid))
                len += nla_total_size(sfid->ufid_len);

        /* OVS_FLOW_ATTR_KEY */
        if (!sfid || should_fill_key(sfid, ufid_flags))
                len += nla_total_size(ovs_key_attr_size());

        /* OVS_FLOW_ATTR_MASK */
        if (should_fill_mask(ufid_flags))
                len += nla_total_size(ovs_key_attr_size());

        /* OVS_FLOW_ATTR_ACTIONS */
        if (should_fill_actions(ufid_flags))
                len += nla_total_size(acts->orig_len);

        return len
                + nla_total_size_64bit(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size_64bit(8); /* OVS_FLOW_ATTR_USED */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
                                   struct sk_buff *skb)
{
        struct ovs_flow_stats stats;
        __be16 tcp_flags;
        unsigned long used;

        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

        if (used &&
            nla_put_u64_64bit(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used),
                              OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;

        if (stats.n_packets &&
            nla_put_64bit(skb, OVS_FLOW_ATTR_STATS,
                          sizeof(struct ovs_flow_stats), &stats,
                          OVS_FLOW_ATTR_PAD))
                return -EMSGSIZE;

        if ((u8)ntohs(tcp_flags) &&
             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
                return -EMSGSIZE;

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
                                     struct sk_buff *skb, int skb_orig_len)
{
        struct nlattr *start;
        int err;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_ovsl(flow->sf_acts);
                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);

                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                return err;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len) {
                return -EMSGSIZE;
        }

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd, u32 ufid_flags)
{
        const int skb_orig_len = skb->len;
        struct ovs_header *ovs_header;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = dp_ifindex;

        err = ovs_nla_put_identifier(flow, skb);
        if (err)
                goto error;

        if (should_fill_key(&flow->id, ufid_flags)) {
                err = ovs_nla_put_masked_key(flow, skb);
                if (err)
                        goto error;
        }

        if (should_fill_mask(ufid_flags)) {
                err = ovs_nla_put_mask(flow, skb);
                if (err)
                        goto error;
        }

        err = ovs_flow_cmd_fill_stats(flow, skb);
        if (err)
                goto error;

        if (should_fill_actions(ufid_flags)) {
                err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
                if (err)
                        goto error;
        }

        genlmsg_end(skb, ovs_header);
        return 0;

error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
                                               const struct sw_flow_id *sfid,
                                               struct genl_info *info,
                                               bool always,
                                               uint32_t ufid_flags)
{
        struct sk_buff *skb;
        size_t len;

        if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0))
                return NULL;

        len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags);
        skb = genlmsg_new(len, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
                                               bool always, u32 ufid_flags)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts),
                                      &flow->id, info, always, ufid_flags);
        if (IS_ERR_OR_NULL(skb))
                return skb;

        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
                                        info->snd_portid, info->snd_seq, 0,
                                        cmd, ufid_flags);
        BUG_ON(retval < 0);
        return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow *flow = NULL, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct sw_flow_key key;
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        /* Must have key and actions. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR(log, "Flow key attr not present in new flow.");
                goto error;
        }
        if (!a[OVS_FLOW_ATTR_ACTIONS]) {
                OVS_NLERR(log, "Flow actions attr not present in new flow.");
                goto error;
        }

        /* Most of the time we need to allocate a new flow, do it before
         * locking.
         */
        new_flow = ovs_flow_alloc();
        if (IS_ERR(new_flow)) {
                error = PTR_ERR(new_flow);
                goto error;
        }

        /* Extract key. */
        ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto err_kfree_flow;

        ovs_flow_mask_key(&new_flow->key, &key, true, &mask);

        /* Extract flow identifier. */
        error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
                                       &key, log);
        if (error)
                goto err_kfree_flow;

        /* Validate actions. */
        error = ovs_nla_copy_actions(net, a[OVS_FLOW_ATTR_ACTIONS],
                                     &new_flow->key, &acts, log);
        if (error) {
                OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
                goto err_kfree_flow;
        }

        reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false,
                                        ufid_flags);
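        /* A NULL reply (no listeners and no NLM_F_ECHO) is not an error;
         * only an ERR_PTR() is.
         */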
        if (IS_ERR(reply)) {
                error = PTR_ERR(reply);
                goto err_kfree_acts;
        }

        ovs_lock();
        dp = get_dp(net, ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }

        /* Check if this is a duplicate flow */
        if (ovs_identifier_is_ufid(&new_flow->id))
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id);
        if (!flow)
                flow = ovs_flow_tbl_lookup(&dp->table, &key);
        if (likely(!flow)) {
                rcu_assign_pointer(new_flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
                if (unlikely(error)) {
                        acts = NULL;
                        goto err_unlock_ovs;
                }

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(new_flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
        } else {
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
                                                         | NLM_F_EXCL))) {
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
                /* The flow identifier has to be the same for flow updates.
                 * Look for any overlapping flow.
                 */
                if (unlikely(!ovs_flow_cmp(flow, &match))) {
                        if (ovs_identifier_is_key(&flow->id))
                                flow = ovs_flow_tbl_lookup_exact(&dp->table,
                                                                 &match);
                        else /* UFID matches but key is different */
                                flow = NULL;
                        if (!flow) {
                                error = -ENOENT;
                                goto err_unlock_ovs;
                        }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
                ovs_unlock();

                ovs_nla_free_flow_actions_rcu(old_acts);
                ovs_flow_free(new_flow, false);
        }

        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        ovs_nla_free_flow_actions(acts);
err_kfree_flow:
        ovs_flow_free(new_flow, false);
error:
        return error;
}

/* Factor out action copy to avoid "-Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(struct net *net,
                                                const struct nlattr *a,
                                                const struct sw_flow_key *key,
                                                const struct sw_flow_mask *mask,
                                                bool log)
{
        struct sw_flow_actions *acts;
        struct sw_flow_key masked_key;
        int error;

        ovs_flow_mask_key(&masked_key, key, true, mask);
        error = ovs_nla_copy_actions(net, a, &masked_key, &acts, log);
        if (error) {
                OVS_NLERR(log,
                          "Actions may not be safe on all matching packets");
                return ERR_PTR(error);
        }

        return acts;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct net *net = sock_net(skb->sk);
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply = NULL;
        struct datapath *dp;
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
        struct sw_flow_id sfid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int error = 0;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log);
        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, &mask);
                error = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
                                          a[OVS_FLOW_ATTR_MASK], log);
        } else if (!ufid_present) {
                OVS_NLERR(log,
                          "Flow set message rejected, Key attribute missing.");
                error = -EINVAL;
        }
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                if (!a[OVS_FLOW_ATTR_KEY]) {
                        OVS_NLERR(log,
                                  "Flow key attribute not present in set flow.");
                        error = -EINVAL;
                        goto error;
                }

                acts = get_flow_actions(net, a[OVS_FLOW_ATTR_ACTIONS], &key,
                                        &mask, log);
                if (IS_ERR(acts)) {
                        error = PTR_ERR(acts);
                        goto error;
                }

                /* Can allocate before locking if have acts. */
                reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false,
                                                ufid_flags);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_kfree_acts;
                }
        }

        ovs_lock();
        dp = get_dp(net, ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }

        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW,
                                                       ufid_flags);
                        BUG_ON(error < 0);
                }
        } else {
                /* Could not alloc without acts before locking. */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
                                                info, OVS_FLOW_CMD_NEW, false,
                                                ufid_flags);

                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_unlock_ovs;
                }
        }

        /* Clear stats. */
        if (a[OVS_FLOW_ATTR_CLEAR])
                ovs_flow_stats_clear(flow);
        ovs_unlock();

        if (reply)
                ovs_notify(&dp_flow_genl_family, reply, info);
        if (old_acts)
                ovs_nla_free_flow_actions_rcu(old_acts);

        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        ovs_nla_free_flow_actions(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        struct sw_flow_id ufid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int err = 0;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, NULL);
                err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY], NULL,
                                        log);
        } else if (!ufid_present) {
                OVS_NLERR(log,
                          "Flow get message rejected, Key attribute missing.");
                err = -EINVAL;
        }
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
                                        OVS_FLOW_CMD_NEW, true, ufid_flags);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct net *net = sock_net(skb->sk);
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow = NULL;
        struct datapath *dp;
        struct sw_flow_match match;
        struct sw_flow_id ufid;
        u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);
        int err;
        bool log = !a[OVS_FLOW_ATTR_PROBE];
        bool ufid_present;

        ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log);
        if (a[OVS_FLOW_ATTR_KEY]) {
                ovs_match_init(&match, &key, NULL);
                err = ovs_nla_get_match(net, &match, a[OVS_FLOW_ATTR_KEY],
                                        NULL, log);
                if (unlikely(err))
                        return err;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                err = -ENODEV;
                goto unlock;
        }

        if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }

        if (ufid_present)
                flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid);
        else
                flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }

        ovs_flow_tbl_remove(&dp->table, flow);
        ovs_unlock();

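        /* The flow is off the table now, so we hold the only reference;
         * the __force cast below just silences sparse about reading
         * sf_acts without RCU protection.
         */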
        reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
                                        &flow->id, info, false, ufid_flags);
        if (likely(reply)) {
                if (likely(!IS_ERR(reply))) {
                        rcu_read_lock();        /* To keep RCU checker happy. */
                        err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
                                                     reply, info->snd_portid,
                                                     info->snd_seq, 0,
                                                     OVS_FLOW_CMD_DEL,
                                                     ufid_flags);
                        rcu_read_unlock();
                        BUG_ON(err < 0);

                        ovs_notify(&dp_flow_genl_family, reply, info);
                } else {
                        netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
                }
        }

        ovs_flow_free(flow, true);
        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlattr *a[__OVS_FLOW_ATTR_MAX];
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct table_instance *ti;
        struct datapath *dp;
        u32 ufid_flags;
        int err;

        err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a,
                            OVS_FLOW_ATTR_MAX, flow_policy);
        if (err)
                return err;
        ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]);

        rcu_read_lock();
        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }

        ti = rcu_dereference(dp->table.ti);
        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW, ufid_flags) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        rcu_read_unlock();
        return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
        [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 },
        [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};
1412
1413 static const struct genl_ops dp_flow_genl_ops[] = {
1414         { .cmd = OVS_FLOW_CMD_NEW,
1415           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1416           .policy = flow_policy,
1417           .doit = ovs_flow_cmd_new
1418         },
1419         { .cmd = OVS_FLOW_CMD_DEL,
1420           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1421           .policy = flow_policy,
1422           .doit = ovs_flow_cmd_del
1423         },
1424         { .cmd = OVS_FLOW_CMD_GET,
1425           .flags = 0,               /* OK for unprivileged users. */
1426           .policy = flow_policy,
1427           .doit = ovs_flow_cmd_get,
1428           .dumpit = ovs_flow_cmd_dump
1429         },
1430         { .cmd = OVS_FLOW_CMD_SET,
1431           .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1432           .policy = flow_policy,
1433           .doit = ovs_flow_cmd_set,
1434         },
1435 };
1436
1437 static struct genl_family dp_flow_genl_family = {
1438         .id = GENL_ID_GENERATE,
1439         .hdrsize = sizeof(struct ovs_header),
1440         .name = OVS_FLOW_FAMILY,
1441         .version = OVS_FLOW_VERSION,
1442         .maxattr = OVS_FLOW_ATTR_MAX,
1443         .netnsok = true,
1444         .parallel_ops = true,
1445         .ops = dp_flow_genl_ops,
1446         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1447         .mcgrps = &ovs_dp_flow_multicast_group,
1448         .n_mcgrps = 1,
1449 };
1450
static size_t ovs_dp_cmd_msg_size(void)
{
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

        msgsize += nla_total_size(IFNAMSIZ);
        msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_stats));
        msgsize += nla_total_size_64bit(sizeof(struct ovs_dp_megaflow_stats));
        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

        return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        struct ovs_dp_megaflow_stats dp_megaflow_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
        if (nla_put_64bit(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
                          &dp_stats, OVS_DP_ATTR_PAD))
                goto nla_put_failure;

        if (nla_put_64bit(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
                          sizeof(struct ovs_dp_megaflow_stats),
                          &dp_megaflow_stats, OVS_DP_ATTR_PAD))
                goto nla_put_failure;

        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
                goto nla_put_failure;

        genlmsg_end(skb, ovs_header);
        return 0;

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(void)
{
        return genlmsg_new(ovs_dp_cmd_msg_size(), GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
                                        const struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}

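/* Clear user_features on an existing datapath.  Called when an older
 * user space instance re-creates a datapath, so that features it does
 * not understand are not left enabled behind its back.
 */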
static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return;

        WARN(dp->user_features, "Dropping previously announced user features\n");
        dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
        if (a[OVS_DP_ATTR_USER_FEATURES])
                dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

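/* Create a datapath together with its OVSP_LOCAL internal vport.  The
 * reply skb is allocated before any state is changed, so the command
 * cannot fail once the new datapath is visible to others.
 */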
static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        reply = ovs_dp_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (!dp)
                goto err_free_reply;

        ovs_dp_set_net(dp, sock_net(skb->sk));

        /* Allocate table. */
        err = ovs_flow_tbl_init(&dp->table);
        if (err)
                goto err_free_dp;

        dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        dp->ports = kmalloc_array(DP_VPORT_HASH_BUCKETS,
                                  sizeof(struct hlist_head), GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];

        ovs_dp_change(dp, a);

        /* So far only local changes have been made, now need the lock. */
        ovs_lock();

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                if (err == -EEXIST) {
                        /* An outdated user space instance that does not
                         * understand the concept of user_features has
                         * attempted to create this datapath and is likely
                         * to reuse it.  Drop all user features so that it
                         * does not inherit any it cannot handle.
                         */
                        if (info->genlhdr->version < OVS_DP_VER_FEATURES)
                                ovs_dp_reset_user_features(skb, info);
                }

                goto err_destroy_ports_array;
        }

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);

        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, reply, info);
        return 0;

err_destroy_ports_array:
        ovs_unlock();
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
        kfree(dp);
err_free_reply:
        kfree_skb(reply);
err:
        return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del_rcu(&dp->list_node);

        /* OVSP_LOCAL is the datapath's internal port.  It must be detached
         * last, after every other port, and before the datapath itself is
         * freed.
         */
        ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));

        /* Destroy the flow table and free the datapath after an RCU
         * grace period.
         */
        call_rcu(&dp->rcu, destroy_dp_rcu);
}

static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto err_unlock_free;

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_DEL);
        BUG_ON(err < 0);

        __dp_destroy(dp);
        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, reply, info);

        return 0;

err_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        err = PTR_ERR(dp);
        if (IS_ERR(dp))
                goto err_unlock_free;

        ovs_dp_change(dp, info->attrs);

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);

        ovs_unlock();
        ovs_notify(&dp_datapath_genl_family, reply, info);

        return 0;

err_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct sk_buff *reply;
        struct datapath *dp;
        int err;

        reply = ovs_dp_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp)) {
                err = PTR_ERR(dp);
                goto err_unlock_free;
        }
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);
        ovs_unlock();

        return genlmsg_reply(reply, info);

err_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

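/* Dump every datapath in the caller's net namespace.  cb->args[0]
 * counts the entries already emitted so an interrupted dump can
 * resume where it left off.
 */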
static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
        struct datapath *dp;
        int skip = cb->args[0];
        int i = 0;

        ovs_lock();
        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                if (i >= skip &&
                    ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
                i++;
        }
        ovs_unlock();

        cb->args[0] = i;

        return skb->len;
}

static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
};

static const struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_new
        },
        { .cmd = OVS_DP_CMD_DEL,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_del
        },
        { .cmd = OVS_DP_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_get,
          .dumpit = ovs_dp_cmd_dump
        },
        { .cmd = OVS_DP_CMD_SET,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
          .doit = ovs_dp_cmd_set,
        },
};

static struct genl_family dp_datapath_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_DATAPATH_FAMILY,
        .version = OVS_DATAPATH_VERSION,
        .maxattr = OVS_DP_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_datapath_genl_ops,
        .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
        .mcgrps = &ovs_dp_datapath_multicast_group,
        .n_mcgrps = 1,
};
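
/*
 * For reference, user space reaches this family over generic netlink.
 * A minimal sketch, assuming libnl-3 (the datapath name "dp0" and the
 * lack of error handling are illustrative only):
 *
 *      struct nl_sock *sk = nl_socket_alloc();
 *      struct ovs_header *hdr;
 *      struct nl_msg *msg;
 *      int family;
 *
 *      genl_connect(sk);
 *      family = genl_ctrl_resolve(sk, OVS_DATAPATH_FAMILY);
 *      msg = nlmsg_alloc();
 *      hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *                        sizeof(struct ovs_header), 0,
 *                        OVS_DP_CMD_GET, OVS_DATAPATH_VERSION);
 *      hdr->dp_ifindex = 0;
 *      nla_put_string(msg, OVS_DP_ATTR_NAME, "dp0");
 *      nl_send_auto(sk, msg);
 *
 * Such a request is resolved by lookup_datapath() above, either by
 * dp_ifindex or, as here, by OVS_DP_ATTR_NAME.
 */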

/* Called with ovs_mutex or RCU read lock. */
static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = get_dpifindex(vport->dp);

        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
            nla_put_string(skb, OVS_VPORT_ATTR_NAME,
                           ovs_vport_name(vport)))
                goto nla_put_failure;

        ovs_vport_get_stats(vport, &vport_stats);
        if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS,
                          sizeof(struct ovs_vport_stats), &vport_stats,
                          OVS_VPORT_ATTR_PAD))
                goto nla_put_failure;

        if (ovs_vport_get_upcall_portids(vport, skb))
                goto nla_put_failure;

        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;

        genlmsg_end(skb, ovs_header);
        return 0;

nla_put_failure:
        err = -EMSGSIZE;
error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

static struct sk_buff *ovs_vport_cmd_alloc_info(void)
{
        return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
}

/* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
                                         u32 seq, u8 cmd)
{
        struct sk_buff *skb;
        int retval;

        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
        BUG_ON(retval < 0);

        return skb;
}

/* Called with ovs_mutex or RCU read lock. */
static struct vport *lookup_vport(struct net *net,
                                  const struct ovs_header *ovs_header,
                                  struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
        struct datapath *dp;
        struct vport *vport;

        if (a[OVS_VPORT_ATTR_NAME]) {
                vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
                if (!vport)
                        return ERR_PTR(-ENODEV);
                if (ovs_header->dp_ifindex &&
                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
                        return ERR_PTR(-ENODEV);
                return vport;
        } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);

                if (port_no >= DP_MAX_PORTS)
                        return ERR_PTR(-EFBIG);

                dp = get_dp(net, ovs_header->dp_ifindex);
                if (!dp)
                        return ERR_PTR(-ENODEV);

                vport = ovs_vport_ovsl_rcu(dp, port_no);
                if (!vport)
                        return ERR_PTR(-ENODEV);
                return vport;
        } else
                return ERR_PTR(-EINVAL);
}

/* Called with ovs_mutex.  Recompute dp->max_headroom as the maximum
 * forwarding headroom over all vports and propagate the result to
 * every vport's netdev.
 */
static void update_headroom(struct datapath *dp)
{
        unsigned int dev_headroom, max_headroom = 0;
        struct net_device *dev;
        struct vport *vport;
        int i;

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        dev = vport->dev;
                        dev_headroom = netdev_get_fwd_headroom(dev);
                        if (dev_headroom > max_headroom)
                                max_headroom = dev_headroom;
                }
        }

        dp->max_headroom = max_headroom;
        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
                        netdev_set_rx_headroom(vport->dev, max_headroom);
}

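/* Create a vport.  If OVS_VPORT_ATTR_PORT_NO is not given, the lowest
 * free port number is chosen.  new_vport() may drop ovs_mutex to load
 * a vport module and then return -EAGAIN, in which case the datapath
 * lookup is restarted from scratch.
 */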
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct vport *vport;
        struct datapath *dp;
        u32 port_no;
        int err;

        if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
            !a[OVS_VPORT_ATTR_UPCALL_PID])
                return -EINVAL;

        port_no = a[OVS_VPORT_ATTR_PORT_NO]
                ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
        if (port_no >= DP_MAX_PORTS)
                return -EFBIG;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
restart:
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto exit_unlock_free;

        if (port_no) {
                vport = ovs_vport_ovsl(dp, port_no);
                err = -EBUSY;
                if (vport)
                        goto exit_unlock_free;
        } else {
                for (port_no = 1; ; port_no++) {
                        if (port_no >= DP_MAX_PORTS) {
                                err = -EFBIG;
                                goto exit_unlock_free;
                        }
                        vport = ovs_vport_ovsl(dp, port_no);
                        if (!vport)
                                break;
                }
        }

        parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
        parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
        parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];

        vport = new_vport(&parms);
        err = PTR_ERR(vport);
        if (IS_ERR(vport)) {
                if (err == -EAGAIN)
                        goto restart;
                goto exit_unlock_free;
        }

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);

        if (netdev_get_fwd_headroom(vport->dev) > dp->max_headroom)
                update_headroom(dp);
        else
                netdev_set_rx_headroom(vport->dev, dp->max_headroom);

        BUG_ON(err < 0);
        ovs_unlock();

        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;

        if (a[OVS_VPORT_ATTR_TYPE] &&
            nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
                err = -EINVAL;
                goto exit_unlock_free;
        }

        if (a[OVS_VPORT_ATTR_OPTIONS]) {
                err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
                if (err)
                        goto exit_unlock_free;
        }

        if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
                struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];

                err = ovs_vport_set_upcall_portids(vport, ids);
                if (err)
                        goto exit_unlock_free;
        }

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);

        ovs_unlock();
        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        bool must_update_headroom = false;
        struct nlattr **a = info->attrs;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        ovs_lock();
        vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;

        if (vport->port_no == OVSP_LOCAL) {
                err = -EINVAL;
                goto exit_unlock_free;
        }

        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_DEL);
        BUG_ON(err < 0);

        /* Deleting a vport may require a dp headroom update. */
        dp = vport->dp;
        if (netdev_get_fwd_headroom(vport->dev) == dp->max_headroom)
                must_update_headroom = true;
        netdev_reset_rx_headroom(vport->dev);
        ovs_dp_detach_port(vport);

        if (must_update_headroom)
                update_headroom(dp);
        ovs_unlock();

        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;

exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
        return err;
}

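/* Read-only vport query; runs under the RCU read lock rather than
 * ovs_mutex since nothing is modified.
 */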
static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sk_buff *reply;
        struct vport *vport;
        int err;

        reply = ovs_vport_cmd_alloc_info();
        if (!reply)
                return -ENOMEM;

        rcu_read_lock();
        vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
        err = PTR_ERR(vport);
        if (IS_ERR(vport))
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
        BUG_ON(err < 0);
        rcu_read_unlock();

        return genlmsg_reply(reply, info);

exit_unlock_free:
        rcu_read_unlock();
        kfree_skb(reply);
        return err;
}

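/* Dump all vports of one datapath.  The resume cursor has two levels:
 * cb->args[0] is the hash bucket, cb->args[1] the offset within that
 * bucket.
 */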
static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct datapath *dp;
        int bucket = cb->args[0], skip = cb->args[1];
        int i, j = 0;

        rcu_read_lock();
        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }
        for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;

                j = 0;
                hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
                        if (j >= skip &&
                            ovs_vport_cmd_fill_info(vport, skb,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
                                                    OVS_VPORT_CMD_NEW) < 0)
                                goto out;

                        j++;
                }
                skip = 0;
        }
out:
        rcu_read_unlock();

        cb->args[0] = i;
        cb->args[1] = j;

        return skb->len;
}

static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
        [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
        [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};

static const struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_new
        },
        { .cmd = OVS_VPORT_CMD_DEL,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_del
        },
        { .cmd = OVS_VPORT_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_get,
          .dumpit = ovs_vport_cmd_dump
        },
        { .cmd = OVS_VPORT_CMD_SET,
          .flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
          .doit = ovs_vport_cmd_set,
        },
};

struct genl_family dp_vport_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_VPORT_FAMILY,
        .version = OVS_VPORT_VERSION,
        .maxattr = OVS_VPORT_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_vport_genl_ops,
        .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
        .mcgrps = &ovs_dp_vport_multicast_group,
        .n_mcgrps = 1,
};

static struct genl_family * const dp_genl_families[] = {
        &dp_datapath_genl_family,
        &dp_vport_genl_family,
        &dp_flow_genl_family,
        &dp_packet_genl_family,
};

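/* Unregister the first n_families entries of dp_genl_families[].  The
 * partial count lets dp_register_genl() unwind exactly the families it
 * managed to register before a failure.
 */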
static void dp_unregister_genl(int n_families)
{
        int i;

        for (i = 0; i < n_families; i++)
                genl_unregister_family(dp_genl_families[i]);
}

static int dp_register_genl(void)
{
        int err;
        int i;

        for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
                err = genl_register_family(dp_genl_families[i]);
                if (err)
                        goto error;
        }

        return 0;

error:
        dp_unregister_genl(i);
        return err;
}

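/* Per-namespace setup: start with an empty datapath list, prepare the
 * deferred notification work and initialize connection tracking state
 * via ovs_ct_init().
 */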
static int __net_init ovs_init_net(struct net *net)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);

        INIT_LIST_HEAD(&ovs_net->dps);
        INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
        ovs_ct_init(net);
        return 0;
}

static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
                                            struct list_head *head)
{
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
        struct datapath *dp;

        list_for_each_entry(dp, &ovs_net->dps, list_node) {
                int i;

                for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                        struct vport *vport;

                        hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
                                if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
                                        continue;

                                if (dev_net(vport->dev) == dnet)
                                        list_add(&vport->detach_list, head);
                        }
                }
        }
}

static void __net_exit ovs_exit_net(struct net *dnet)
{
        struct datapath *dp, *dp_next;
        struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
        struct vport *vport, *vport_next;
        struct net *net;
        LIST_HEAD(head);

        ovs_ct_exit(dnet);
        ovs_lock();
        list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
                __dp_destroy(dp);

        rtnl_lock();
        for_each_net(net)
                list_vports_from_net(net, dnet, &head);
        rtnl_unlock();

        /* Detach all vports whose device lives in the exiting namespace. */
        list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
                list_del(&vport->detach_list);
                ovs_dp_detach_port(vport);
        }

        ovs_unlock();

        cancel_work_sync(&ovs_net->dp_notify_work);
}

static struct pernet_operations ovs_net_ops = {
        .init = ovs_init_net,
        .exit = ovs_exit_net,
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};

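/* Module init.  Each successful step has a matching unwind label below,
 * in reverse order, mirroring dp_cleanup().
 */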
static int __init dp_init(void)
{
        int err;

        BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));

        pr_info("Open vSwitch switching datapath\n");

        err = action_fifos_init();
        if (err)
                goto error;

        err = ovs_internal_dev_rtnl_link_register();
        if (err)
                goto error_action_fifos_exit;

        err = ovs_flow_init();
        if (err)
                goto error_unreg_rtnl_link;

        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;

        err = register_pernet_device(&ovs_net_ops);
        if (err)
                goto error_vport_exit;

        err = register_netdevice_notifier(&ovs_dp_device_notifier);
        if (err)
                goto error_netns_exit;

        err = ovs_netdev_init();
        if (err)
                goto error_unreg_notifier;

        err = dp_register_genl();
        if (err < 0)
                goto error_unreg_netdev;

        return 0;

error_unreg_netdev:
        ovs_netdev_exit();
error_unreg_notifier:
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
error_netns_exit:
        unregister_pernet_device(&ovs_net_ops);
error_vport_exit:
        ovs_vport_exit();
error_flow_exit:
        ovs_flow_exit();
error_unreg_rtnl_link:
        ovs_internal_dev_rtnl_link_unregister();
error_action_fifos_exit:
        action_fifos_exit();
error:
        return err;
}

static void dp_cleanup(void)
{
        dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
        ovs_netdev_exit();
        unregister_netdevice_notifier(&ovs_dp_device_notifier);
        unregister_pernet_device(&ovs_net_ops);
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
        ovs_internal_dev_rtnl_link_unregister();
        action_fifos_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");