/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "datapath.h"
#include "flow.h"
#include "flow_table.h"
#include "flow_netlink.h"
#include "vlan.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

int ovs_net_id __read_mostly;

static struct genl_family dp_packet_genl_family;
static struct genl_family dp_flow_genl_family;
static struct genl_family dp_datapath_genl_family;

static struct genl_multicast_group ovs_dp_flow_multicast_group = {
        .name = OVS_FLOW_MCGROUP
};

static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
        .name = OVS_DATAPATH_MCGROUP
};

struct genl_multicast_group ovs_dp_vport_multicast_group = {
        .name = OVS_VPORT_MCGROUP
};

/* Check if we need to build a reply message.
 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply.
 */
static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
                            unsigned int group)
{
        return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
               genl_has_listeners(family, genl_info_net(info)->genl_sock,
                                  group);
}
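
/* Illustrative sketch only: callers use ovs_must_notify() to skip
 * allocating a reply when nobody asked for one, as in
 * ovs_flow_cmd_alloc_info() below:
 *
 *      if (!always && !ovs_must_notify(family, info, group))
 *              return NULL;
 */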

static void ovs_notify(struct genl_family *family, struct genl_multicast_group *grp,
                       struct sk_buff *skb, struct genl_info *info)
{
        genl_notify(family, skb, genl_info_net(info),
                    info->snd_portid, GROUP_ID(grp), info->nlhdr, GFP_KERNEL);
}

/**
 * DOC: Locking:
 *
 * All writes, e.g. writes to device state (add/remove datapath or port,
 * set operations on vports, etc.) and writes to other state (flow table
 * modifications, setting miscellaneous datapath parameters, etc.), are
 * protected by ovs_lock.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't
 * interact with each other.
 *
 * The RTNL lock nests inside ovs_mutex.
 */
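
/* A minimal usage sketch (illustrative, not part of this file): writers
 * take ovs_lock() around modifications, while readers rely on RCU:
 *
 *      ovs_lock();
 *      ... modify datapath state, e.g. via ovs_vport_ovsl() ...
 *      ovs_unlock();
 *
 *      rcu_read_lock();
 *      ... read-only access, e.g. via ovs_vport_rcu() ...
 *      rcu_read_unlock();
 */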

static DEFINE_MUTEX(ovs_mutex);

void ovs_lock(void)
{
        mutex_lock(&ovs_mutex);
}

void ovs_unlock(void)
{
        mutex_unlock(&ovs_mutex);
}

#ifdef CONFIG_LOCKDEP
int lockdep_ovsl_is_held(void)
{
        if (debug_locks)
                return lockdep_is_held(&ovs_mutex);
        else
                return 1;
}
#endif

static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
                             const struct sw_flow_key *,
                             const struct dp_upcall_info *);
static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
                                  const struct sw_flow_key *,
                                  const struct dp_upcall_info *);

/* Must be called with rcu_read_lock. */
static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex)
{
        struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex);

        if (dev) {
                struct vport *vport = ovs_internal_dev_get_vport(dev);
                if (vport)
                        return vport->dp;
        }

        return NULL;
}

/* The caller must hold either ovs_mutex or rcu_read_lock to keep the
 * returned dp pointer valid.
 */
static inline struct datapath *get_dp(struct net *net, int dp_ifindex)
{
        struct datapath *dp;

        WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held());
        rcu_read_lock();
        dp = get_dp_rcu(net, dp_ifindex);
        rcu_read_unlock();

        return dp;
}

/* Must be called with rcu_read_lock or ovs_mutex. */
const char *ovs_dp_name(const struct datapath *dp)
{
        struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
        return vport->ops->get_name(vport);
}

static int get_dpifindex(const struct datapath *dp)
{
        struct vport *local;
        int ifindex;

        rcu_read_lock();

        local = ovs_vport_rcu(dp, OVSP_LOCAL);
        if (local)
                ifindex = netdev_vport_priv(local)->dev->ifindex;
        else
                ifindex = 0;

        rcu_read_unlock();

        return ifindex;
}

static void destroy_dp_rcu(struct rcu_head *rcu)
{
        struct datapath *dp = container_of(rcu, struct datapath, rcu);

        ovs_flow_tbl_destroy(&dp->table);
        free_percpu(dp->stats_percpu);
        release_net(ovs_dp_get_net(dp));
        kfree(dp->ports);
        kfree(dp);
}

static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
                                            u16 port_no)
{
        return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
}
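
/* Example (assuming DP_VPORT_HASH_BUCKETS keeps its usual value of 1024,
 * a power of two): port_no 1025 maps to bucket 1025 & 1023 == 1.  The
 * masking above is only a valid modulo while the bucket count remains a
 * power of two.
 */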

/* Called with ovs_mutex or RCU read lock. */
struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
{
        struct vport *vport;
        struct hlist_head *head;

        head = vport_hash_bucket(dp, port_no);
        hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
                if (vport->port_no == port_no)
                        return vport;
        }
        return NULL;
}

/* Called with ovs_mutex. */
static struct vport *new_vport(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_add(parms);
        if (!IS_ERR(vport)) {
                struct datapath *dp = parms->dp;
                struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);

                hlist_add_head_rcu(&vport->dp_hash_node, head);
        }
        return vport;
}

void ovs_dp_detach_port(struct vport *p)
{
        ASSERT_OVSL();

        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);

        /* Then destroy it. */
        ovs_vport_del(p);
}

/* Must be called with rcu_read_lock. */
void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
{
        const struct vport *p = OVS_CB(skb)->input_vport;
        struct datapath *dp = p->dp;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct dp_stats_percpu *stats;
        u64 *stats_counter;
        u32 n_mask_hit;

        stats = this_cpu_ptr(dp->stats_percpu);

        /* Look up flow. */
        flow = ovs_flow_tbl_lookup_stats(&dp->table, key, skb_get_hash(skb),
                                         &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;
                int error;

                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.userdata = NULL;
                upcall.portid = ovs_vport_find_upcall_portid(p, skb);
                upcall.egress_tun_info = NULL;
                error = ovs_dp_upcall(dp, skb, key, &upcall);
                if (unlikely(error))
                        kfree_skb(skb);
                else
                        consume_skb(skb);
                stats_counter = &stats->n_missed;
                goto out;
        }

        ovs_flow_stats_update(flow, key->tp.flags, skb);
        sf_acts = rcu_dereference(flow->sf_acts);
        ovs_execute_actions(dp, skb, sf_acts, key);

        stats_counter = &stats->n_hit;

out:
        /* Update datapath statistics. */
        u64_stats_update_begin(&stats->syncp);
        (*stats_counter)++;
        stats->n_mask_hit += n_mask_hit;
        u64_stats_update_end(&stats->syncp);
}

int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
                  const struct sw_flow_key *key,
                  const struct dp_upcall_info *upcall_info)
{
        struct dp_stats_percpu *stats;
        int err;

        if (upcall_info->portid == 0) {
                err = -ENOTCONN;
                goto err;
        }

        if (!skb_is_gso(skb))
                err = queue_userspace_packet(dp, skb, key, upcall_info);
        else
                err = queue_gso_packets(dp, skb, key, upcall_info);
        if (err)
                goto err;

        return 0;

err:
        stats = this_cpu_ptr(dp->stats_percpu);

        u64_stats_update_begin(&stats->syncp);
        stats->n_lost++;
        u64_stats_update_end(&stats->syncp);

        return err;
}

static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
                             const struct sw_flow_key *key,
                             const struct dp_upcall_info *upcall_info)
{
        unsigned short gso_type = skb_shinfo(skb)->gso_type;
        struct sw_flow_key later_key;
        struct sk_buff *segs, *nskb;
        struct ovs_skb_cb ovs_cb;
        int err;

        ovs_cb = *OVS_CB(skb);
        segs = __skb_gso_segment(skb, NETIF_F_SG, false);
        *OVS_CB(skb) = ovs_cb;
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (segs == NULL)
                return -EINVAL;

        if (gso_type & SKB_GSO_UDP) {
                /* The initial flow key extracted by ovs_flow_key_extract()
                 * in this case is for a first fragment, so we need to
                 * properly mark later fragments.
                 */
                later_key = *key;
                later_key.ip.frag = OVS_FRAG_TYPE_LATER;
        }

        /* Queue all of the segments. */
        skb = segs;
        do {
                *OVS_CB(skb) = ovs_cb;
                if (gso_type & SKB_GSO_UDP && skb != segs)
                        key = &later_key;

                err = queue_userspace_packet(dp, skb, key, upcall_info);
                if (err)
                        break;

        } while ((skb = skb->next));

        /* Free all of the segments. */
        skb = segs;
        do {
                nskb = skb->next;
                if (err)
                        kfree_skb(skb);
                else
                        consume_skb(skb);
        } while ((skb = nskb));
        return err;
}

static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
                              unsigned int hdrlen)
{
        size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
                + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */

        /* OVS_PACKET_ATTR_USERDATA */
        if (upcall_info->userdata)
                size += NLA_ALIGN(upcall_info->userdata->nla_len);

        /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */
        if (upcall_info->egress_tun_info)
                size += nla_total_size(ovs_tun_key_attr_size());

        return size;
}
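
/* A sketch of the upcall message this estimate corresponds to, as
 * assembled by queue_userspace_packet() below (layout only):
 *
 *      struct ovs_header               dp_ifindex
 *      OVS_PACKET_ATTR_KEY             nested flow key
 *      OVS_PACKET_ATTR_USERDATA        optional, copied verbatim
 *      OVS_PACKET_ATTR_EGRESS_TUN_KEY  optional, nested tunnel key
 *      OVS_PACKET_ATTR_PACKET          packet data, zerocopied if allowed
 */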

static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
                                  const struct sw_flow_key *key,
                                  const struct dp_upcall_info *upcall_info)
{
        struct ovs_header *upcall;
        struct sk_buff *nskb = NULL;
        struct sk_buff *user_skb = NULL; /* to be queued to userspace */
        struct nlattr *nla;
        struct genl_info info = {
#ifdef HAVE_GENLMSG_NEW_UNICAST
                .dst_sk = ovs_dp_get_net(dp)->genl_sock,
#endif
                .snd_portid = upcall_info->portid,
        };
        size_t len;
        unsigned int hlen;
        int err, dp_ifindex;

        dp_ifindex = get_dpifindex(dp);
        if (!dp_ifindex)
                return -ENODEV;

        if (vlan_tx_tag_present(skb)) {
                nskb = skb_clone(skb, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;

                nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
                if (!nskb)
                        return -ENOMEM;

                vlan_set_tci(nskb, 0);

                skb = nskb;
        }

        if (nla_attr_size(skb->len) > USHRT_MAX) {
                err = -EFBIG;
                goto out;
        }

        /* Complete checksum if needed */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (err = skb_checksum_help(skb)))
                goto out;

        /* Older versions of OVS user space enforce alignment of the last
         * Netlink attribute to NLA_ALIGNTO which would require extensive
         * padding logic. Only perform zerocopy if padding is not required.
         */
        if (dp->user_features & OVS_DP_F_UNALIGNED)
                hlen = skb_zerocopy_headlen(skb);
        else
                hlen = skb->len;

        len = upcall_msg_size(upcall_info, hlen);
        user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
        if (!user_skb) {
                err = -ENOMEM;
                goto out;
        }

        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
        upcall->dp_ifindex = dp_ifindex;

        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
        err = ovs_nla_put_flow(key, key, user_skb);
        BUG_ON(err);
        nla_nest_end(user_skb, nla);

        if (upcall_info->userdata)
                __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
                          nla_len(upcall_info->userdata),
                          nla_data(upcall_info->userdata));

        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
                err = ovs_nla_put_egress_tunnel_key(user_skb,
                                                    upcall_info->egress_tun_info);
                BUG_ON(err);
                nla_nest_end(user_skb, nla);
        }

        /* Only reserve room for attribute header, packet data is added
         * in skb_zerocopy()
         */
        if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
                err = -ENOBUFS;
                goto out;
        }
        nla->nla_len = nla_attr_size(skb->len);

        err = skb_zerocopy(user_skb, skb, skb->len, hlen);
        if (err)
                goto out;

        /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
        if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
                size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;

                if (plen > 0)
                        memset(skb_put(user_skb, plen), 0, plen);
        }

        ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;

        err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
        user_skb = NULL;
out:
        if (err)
                skb_tx_error(skb);
        kfree_skb(user_skb);
        kfree_skb(nskb);
        return err;
}

static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
        struct ovs_header *ovs_header = info->userhdr;
        struct nlattr **a = info->attrs;
        struct sw_flow_actions *acts;
        struct sk_buff *packet;
        struct sw_flow *flow;
        struct sw_flow_actions *sf_acts;
        struct datapath *dp;
        struct ethhdr *eth;
        struct vport *input_vport;
        int len;
        int err;
        bool log = !a[OVS_PACKET_ATTR_PROBE];

        err = -EINVAL;
        if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
            !a[OVS_PACKET_ATTR_ACTIONS])
                goto err;

        len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
        packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
        err = -ENOMEM;
        if (!packet)
                goto err;
        skb_reserve(packet, NET_IP_ALIGN);

        nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);

        skb_reset_mac_header(packet);
        eth = eth_hdr(packet);

        /* Normally, setting the skb 'protocol' field would be handled by a
         * call to eth_type_trans(), but it assumes there's a sending
         * device, which we may not have.
         */
        if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
                packet->protocol = eth->h_proto;
        else
                packet->protocol = htons(ETH_P_802_2);

        /* Build an sw_flow for sending this packet. */
        flow = ovs_flow_alloc();
        err = PTR_ERR(flow);
        if (IS_ERR(flow))
                goto err_kfree_skb;

        err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet,
                                             &flow->key, log);
        if (err)
                goto err_flow_free;

        err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
                                   &flow->key, &acts, log);
        if (err)
                goto err_flow_free;

        rcu_assign_pointer(flow->sf_acts, acts);
        OVS_CB(packet)->egress_tun_info = NULL;
        packet->priority = flow->key.phy.priority;
        packet->mark = flow->key.phy.skb_mark;

        rcu_read_lock();
        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        err = -ENODEV;
        if (!dp)
                goto err_unlock;

        input_vport = ovs_vport_rcu(dp, flow->key.phy.in_port);
        if (!input_vport)
                input_vport = ovs_vport_rcu(dp, OVSP_LOCAL);

        if (!input_vport)
                goto err_unlock;

        OVS_CB(packet)->input_vport = input_vport;
        sf_acts = rcu_dereference(flow->sf_acts);

        local_bh_disable();
        err = ovs_execute_actions(dp, packet, sf_acts, &flow->key);
        local_bh_enable();
        rcu_read_unlock();

        ovs_flow_free(flow, false);
        return err;

err_unlock:
        rcu_read_unlock();
err_flow_free:
        ovs_flow_free(flow, false);
err_kfree_skb:
        kfree_skb(packet);
err:
        return err;
}

static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
        [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
        { .cmd = OVS_PACKET_CMD_EXECUTE,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = packet_policy,
          .doit = ovs_packet_cmd_execute
        }
};

static struct genl_family dp_packet_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_PACKET_FAMILY,
        .version = OVS_PACKET_VERSION,
        .maxattr = OVS_PACKET_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_packet_genl_ops,
        .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
};
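
/* Illustrative only: an OVS_PACKET_CMD_EXECUTE request served by this
 * family carries OVS_PACKET_ATTR_PACKET, OVS_PACKET_ATTR_KEY and
 * OVS_PACKET_ATTR_ACTIONS in a single Generic Netlink message, which is
 * dispatched to ovs_packet_cmd_execute() above.
 */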

static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats,
                         struct ovs_dp_megaflow_stats *mega_stats)
{
        int i;

        memset(mega_stats, 0, sizeof(*mega_stats));

        stats->n_flows = ovs_flow_tbl_count(&dp->table);
        mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);

        stats->n_hit = stats->n_missed = stats->n_lost = 0;

        for_each_possible_cpu(i) {
                const struct dp_stats_percpu *percpu_stats;
                struct dp_stats_percpu local_stats;
                unsigned int start;

                percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

                do {
                        start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
                        local_stats = *percpu_stats;
                } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

                stats->n_hit += local_stats.n_hit;
                stats->n_missed += local_stats.n_missed;
                stats->n_lost += local_stats.n_lost;
                mega_stats->n_mask_hit += local_stats.n_mask_hit;
        }
}

static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
        return NLMSG_ALIGN(sizeof(struct ovs_header))
                + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */
                + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */
                + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
                + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
                + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
                + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_match(const struct sw_flow *flow,
                                   struct sk_buff *skb)
{
        struct nlattr *nla;
        int err;

        /* Fill flow key. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
        if (!nla)
                return -EMSGSIZE;

        err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
        if (err)
                return err;

        nla_nest_end(skb, nla);

        /* Fill flow mask. */
        nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
        if (!nla)
                return -EMSGSIZE;

        err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
        if (err)
                return err;

        nla_nest_end(skb, nla);
        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow,
                                   struct sk_buff *skb)
{
        struct ovs_flow_stats stats;
        __be16 tcp_flags;
        unsigned long used;

        ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);

        if (used &&
            nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
                return -EMSGSIZE;

        if (stats.n_packets &&
            nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
                return -EMSGSIZE;

        if ((u8)ntohs(tcp_flags) &&
             nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
                return -EMSGSIZE;

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow,
                                     struct sk_buff *skb, int skb_orig_len)
{
        struct nlattr *start;
        int err;

        /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
         * this is the first flow to be dumped into 'skb'.  This is unusual for
         * Netlink but individual action lists can be longer than
         * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
         * The userspace caller can always fetch the actions separately if it
         * really wants them.  (Most userspace callers in fact don't care.)
         *
         * This can only fail for dump operations because the skb is always
         * properly sized for single flows.
         */
        start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
        if (start) {
                const struct sw_flow_actions *sf_acts;

                sf_acts = rcu_dereference_ovsl(flow->sf_acts);
                err = ovs_nla_put_actions(sf_acts->actions,
                                          sf_acts->actions_len, skb);

                if (!err)
                        nla_nest_end(skb, start);
                else {
                        if (skb_orig_len)
                                return err;

                        nla_nest_cancel(skb, start);
                }
        } else if (skb_orig_len) {
                return -EMSGSIZE;
        }

        return 0;
}

/* Called with ovs_mutex or RCU read lock. */
static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
                                  struct sk_buff *skb, u32 portid,
                                  u32 seq, u32 flags, u8 cmd)
{
        const int skb_orig_len = skb->len;
        struct ovs_header *ovs_header;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family,
                                 flags, cmd);
        if (!ovs_header)
                return -EMSGSIZE;

        ovs_header->dp_ifindex = dp_ifindex;

        err = ovs_flow_cmd_fill_match(flow, skb);
        if (err)
                goto error;

        err = ovs_flow_cmd_fill_stats(flow, skb);
        if (err)
                goto error;

        err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len);
        if (err)
                goto error;

        return genlmsg_end(skb, ovs_header);

error:
        genlmsg_cancel(skb, ovs_header);
        return err;
}

/* May not be called with RCU read lock. */
static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
                                               struct genl_info *info,
                                               bool always)
{
        struct sk_buff *skb;

        if (!always && !ovs_must_notify(&dp_flow_genl_family, info,
                                        GROUP_ID(&ovs_dp_flow_multicast_group)))
                return NULL;

        skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);

        return skb;
}

/* Called with ovs_mutex. */
static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
                                               int dp_ifindex,
                                               struct genl_info *info, u8 cmd,
                                               bool always)
{
        struct sk_buff *skb;
        int retval;

        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
                                      always);
        if (IS_ERR_OR_NULL(skb))
                return skb;

        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
                                        info->snd_portid, info->snd_seq, 0,
                                        cmd);
        BUG_ON(retval < 0);
        return skb;
}

static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow *flow, *new_flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply;
        struct datapath *dp;
        struct sw_flow_actions *acts;
        struct sw_flow_match match;
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        /* Must have key and actions. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR(log, "Flow key attr not present in new flow.");
                goto error;
        }
        if (!a[OVS_FLOW_ATTR_ACTIONS]) {
                OVS_NLERR(log, "Flow actions attr not present in new flow.");
                goto error;
        }

        /* Most of the time we need to allocate a new flow, do it before
         * locking.
         */
        new_flow = ovs_flow_alloc();
        if (IS_ERR(new_flow)) {
                error = PTR_ERR(new_flow);
                goto error;
        }

        /* Extract key. */
        ovs_match_init(&match, &new_flow->unmasked_key, &mask);
        error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto err_kfree_flow;

        ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);

        /* Validate actions. */
        error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
                                     &acts, log);
        if (error) {
                OVS_NLERR(log, "Flow actions may not be safe on all matching packets.");
                goto err_kfree_flow;
        }

        reply = ovs_flow_cmd_alloc_info(acts, info, false);
        if (IS_ERR(reply)) {
                error = PTR_ERR(reply);
                goto err_kfree_acts;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check if this is a duplicate flow */
        flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
        if (likely(!flow)) {
                rcu_assign_pointer(new_flow->sf_acts, acts);

                /* Put flow in bucket. */
                error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
                if (unlikely(error)) {
                        acts = NULL;
                        goto err_unlock_ovs;
                }

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(new_flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
                ovs_unlock();
        } else {
                struct sw_flow_actions *old_acts;

                /* Bail out if we're not allowed to modify an existing flow.
                 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
                 * because Generic Netlink treats the latter as a dump
                 * request.  We also accept NLM_F_EXCL in case that bug ever
                 * gets fixed.
                 */
                if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
                                                         | NLM_F_EXCL))) {
                        error = -EEXIST;
                        goto err_unlock_ovs;
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
                        /* Look for any overlapping flow. */
                        flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
                        if (!flow) {
                                error = -ENOENT;
                                goto err_unlock_ovs;
                        }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
                ovs_unlock();

                ovs_nla_free_flow_actions(old_acts);
                ovs_flow_free(new_flow, false);
        }

        if (reply)
                ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        kfree(acts);
err_kfree_flow:
        ovs_flow_free(new_flow, false);
error:
        return error;
}

/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
                                                const struct sw_flow_key *key,
                                                const struct sw_flow_mask *mask,
                                                bool log)
{
        struct sw_flow_actions *acts;
        struct sw_flow_key masked_key;
        int error;

        ovs_flow_mask_key(&masked_key, key, mask);
        error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
        if (error) {
                OVS_NLERR(log,
                          "Actions may not be safe on all matching packets");
                return ERR_PTR(error);
        }

        return acts;
}

static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sw_flow *flow;
        struct sw_flow_mask mask;
        struct sk_buff *reply = NULL;
        struct datapath *dp;
        struct sw_flow_actions *old_acts = NULL, *acts = NULL;
        struct sw_flow_match match;
        int error;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        /* Extract key. */
        error = -EINVAL;
        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR(log, "Flow key attribute not present in set flow.");
                goto error;
        }

        ovs_match_init(&match, &key, &mask);
        error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY],
                                  a[OVS_FLOW_ATTR_MASK], log);
        if (error)
                goto error;

        /* Validate actions. */
        if (a[OVS_FLOW_ATTR_ACTIONS]) {
                acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask,
                                        log);
                if (IS_ERR(acts)) {
                        error = PTR_ERR(acts);
                        goto error;
                }

                /* Can allocate before locking if have acts. */
                reply = ovs_flow_cmd_alloc_info(acts, info, false);
                if (IS_ERR(reply)) {
                        error = PTR_ERR(reply);
                        goto err_kfree_acts;
                }
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                error = -ENODEV;
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
        flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }

        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
                rcu_assign_pointer(flow->sf_acts, acts);

                if (unlikely(reply)) {
                        error = ovs_flow_cmd_fill_info(flow,
                                                       ovs_header->dp_ifindex,
                                                       reply, info->snd_portid,
                                                       info->snd_seq, 0,
                                                       OVS_FLOW_CMD_NEW);
                        BUG_ON(error < 0);
                }
        } else {
                /* Could not alloc without acts before locking. */
                reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
                                                info, OVS_FLOW_CMD_NEW, false);
                if (unlikely(IS_ERR(reply))) {
                        error = PTR_ERR(reply);
                        goto err_unlock_ovs;
                }
        }

        /* Clear stats. */
        if (a[OVS_FLOW_ATTR_CLEAR])
                ovs_flow_stats_clear(flow);
        ovs_unlock();

        if (reply)
                ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
        if (old_acts)
                ovs_nla_free_flow_actions(old_acts);

        return 0;

err_unlock_ovs:
        ovs_unlock();
        kfree_skb(reply);
err_kfree_acts:
        kfree(acts);
error:
        return error;
}

static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        if (!a[OVS_FLOW_ATTR_KEY]) {
                OVS_NLERR(log,
                          "Flow get message rejected, Key attribute missing.");
                return -EINVAL;
        }

        ovs_match_init(&match, &key, NULL);
        err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log);
        if (err)
                return err;

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                err = -ENODEV;
                goto unlock;
        }

        flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (!flow) {
                err = -ENOENT;
                goto unlock;
        }

        reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
                                        OVS_FLOW_CMD_NEW, true);
        if (IS_ERR(reply)) {
                err = PTR_ERR(reply);
                goto unlock;
        }

        ovs_unlock();
        return genlmsg_reply(reply, info);
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct ovs_header *ovs_header = info->userhdr;
        struct sw_flow_key key;
        struct sk_buff *reply;
        struct sw_flow *flow;
        struct datapath *dp;
        struct sw_flow_match match;
        int err;
        bool log = !a[OVS_FLOW_ATTR_PROBE];

        if (likely(a[OVS_FLOW_ATTR_KEY])) {
                ovs_match_init(&match, &key, NULL);
                err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL,
                                        log);
                if (unlikely(err))
                        return err;
        }

        ovs_lock();
        dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (unlikely(!dp)) {
                err = -ENODEV;
                goto unlock;
        }

        if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
                err = ovs_flow_tbl_flush(&dp->table);
                goto unlock;
        }

        flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }

        ovs_flow_tbl_remove(&dp->table, flow);
        ovs_unlock();

        reply = ovs_flow_cmd_alloc_info(rcu_dereference_raw(flow->sf_acts),
                                        info, false);

        if (likely(reply)) {
                if (likely(!IS_ERR(reply))) {
                        rcu_read_lock();        /* To keep RCU checker happy. */
                        err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
                                                     reply, info->snd_portid,
                                                     info->snd_seq, 0,
                                                     OVS_FLOW_CMD_DEL);
                        rcu_read_unlock();
                        BUG_ON(err < 0);
                        ovs_notify(&dp_flow_genl_family, &ovs_dp_flow_multicast_group, reply, info);
                } else {
                        genl_set_err(&dp_flow_genl_family, sock_net(skb->sk), 0,
                                     GROUP_ID(&ovs_dp_flow_multicast_group), PTR_ERR(reply));

                }
        }

        ovs_flow_free(flow, true);
        return 0;
unlock:
        ovs_unlock();
        return err;
}

static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
        struct table_instance *ti;
        struct datapath *dp;

        rcu_read_lock();
        dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
        if (!dp) {
                rcu_read_unlock();
                return -ENODEV;
        }

        ti = rcu_dereference(dp->table.ti);
        for (;;) {
                struct sw_flow *flow;
                u32 bucket, obj;

                bucket = cb->args[0];
                obj = cb->args[1];
                flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
                if (!flow)
                        break;

                if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
                                           NETLINK_CB(cb->skb).portid,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           OVS_FLOW_CMD_NEW) < 0)
                        break;

                cb->args[0] = bucket;
                cb->args[1] = obj;
        }
        rcu_read_unlock();
        return skb->len;
}

static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
        [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG },
};

static struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_new
        },
        { .cmd = OVS_FLOW_CMD_DEL,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_del
        },
        { .cmd = OVS_FLOW_CMD_GET,
          .flags = 0,               /* OK for unprivileged users. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_get,
          .dumpit = ovs_flow_cmd_dump
        },
        { .cmd = OVS_FLOW_CMD_SET,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
          .doit = ovs_flow_cmd_set,
        },
};

static struct genl_family dp_flow_genl_family = {
        .id = GENL_ID_GENERATE,
        .hdrsize = sizeof(struct ovs_header),
        .name = OVS_FLOW_FAMILY,
        .version = OVS_FLOW_VERSION,
        .maxattr = OVS_FLOW_ATTR_MAX,
        .netnsok = true,
        .parallel_ops = true,
        .ops = dp_flow_genl_ops,
        .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
        .mcgrps = &ovs_dp_flow_multicast_group,
        .n_mcgrps = 1,
};

static size_t ovs_dp_cmd_msg_size(void)
{
        size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));

        msgsize += nla_total_size(IFNAMSIZ);
        msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
        msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
        msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */

        return msgsize;
}

/* Called with ovs_mutex. */
static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
                                u32 portid, u32 seq, u32 flags, u8 cmd)
{
        struct ovs_header *ovs_header;
        struct ovs_dp_stats dp_stats;
        struct ovs_dp_megaflow_stats dp_megaflow_stats;
        int err;

        ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
                                   flags, cmd);
        if (!ovs_header)
                goto error;

        ovs_header->dp_ifindex = get_dpifindex(dp);

        err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
        if (err)
                goto nla_put_failure;

        get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
        if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
                        &dp_stats))
                goto nla_put_failure;

        if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
                        sizeof(struct ovs_dp_megaflow_stats),
                        &dp_megaflow_stats))
                goto nla_put_failure;

        if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
                goto nla_put_failure;

        return genlmsg_end(skb, ovs_header);

nla_put_failure:
        genlmsg_cancel(skb, ovs_header);
error:
        return -EMSGSIZE;
}

static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
{
        return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
}

/* Called with rcu_read_lock or ovs_mutex. */
static struct datapath *lookup_datapath(struct net *net,
                                        const struct ovs_header *ovs_header,
                                        struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
        struct datapath *dp;

        if (!a[OVS_DP_ATTR_NAME])
                dp = get_dp(net, ovs_header->dp_ifindex);
        else {
                struct vport *vport;

                vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
                dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
        }
        return dp ? dp : ERR_PTR(-ENODEV);
}

static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
{
        struct datapath *dp;

        dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
        if (IS_ERR(dp))
                return;

        WARN(dp->user_features, "Dropping previously announced user features\n");
        dp->user_features = 0;
}

static void ovs_dp_change(struct datapath *dp, struct nlattr *a[])
{
        if (a[OVS_DP_ATTR_USER_FEATURES])
                dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
}

static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
{
        struct nlattr **a = info->attrs;
        struct vport_parms parms;
        struct sk_buff *reply;
        struct datapath *dp;
        struct vport *vport;
        struct ovs_net *ovs_net;
        int err, i;

        err = -EINVAL;
        if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
                goto err;

        reply = ovs_dp_cmd_alloc_info(info);
        if (!reply)
                return -ENOMEM;

        err = -ENOMEM;
        dp = kzalloc(sizeof(*dp), GFP_KERNEL);
        if (dp == NULL)
                goto err_free_reply;

        ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));

        /* Allocate table. */
        err = ovs_flow_tbl_init(&dp->table);
        if (err)
                goto err_free_dp;

        dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
        if (!dp->stats_percpu) {
                err = -ENOMEM;
                goto err_destroy_table;
        }

        dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
                            GFP_KERNEL);
        if (!dp->ports) {
                err = -ENOMEM;
                goto err_destroy_percpu;
        }

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(&dp->ports[i]);

        /* Set up our datapath device. */
        parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
        parms.type = OVS_VPORT_TYPE_INTERNAL;
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
        parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];

        ovs_dp_change(dp, a);

        /* So far only local changes have been made, now need the lock. */
        ovs_lock();

        vport = new_vport(&parms);
        if (IS_ERR(vport)) {
                err = PTR_ERR(vport);
                if (err == -EBUSY)
                        err = -EEXIST;

                if (err == -EEXIST) {
                        /* An outdated user space instance that does not understand
                         * the concept of user_features has attempted to create a new
                         * datapath and is likely to reuse it. Drop all user features.
                         */
                        if (info->genlhdr->version < OVS_DP_VER_FEATURES)
                                ovs_dp_reset_user_features(skb, info);
                }

                goto err_destroy_ports_array;
        }

        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);

        ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
        list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
        ovs_unlock();

        ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
        return 0;

err_destroy_ports_array:
        ovs_unlock();
        kfree(dp->ports);
err_destroy_percpu:
        free_percpu(dp->stats_percpu);
err_destroy_table:
        ovs_flow_tbl_destroy(&dp->table);
err_free_dp:
        release_net(ovs_dp_get_net(dp));
        kfree(dp);
err_free_reply:
        kfree_skb(reply);
err:
        return err;
}

/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
        int i;

        for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
                struct vport *vport;
                struct hlist_node *n;

                hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
                        if (vport->port_no != OVSP_LOCAL)
                                ovs_dp_detach_port(vport);
        }

        list_del_rcu(&dp->list_node);

        /* OVSP_LOCAL is the datapath's internal port.  Make sure that all
         * ports in the datapath are destroyed before the datapath itself
         * is freed.
         */
1534         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1535
1536         /* RCU destroy the flow table */
1537         call_rcu(&dp->rcu, destroy_dp_rcu);
1538 }
1539
1540 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1541 {
1542         struct sk_buff *reply;
1543         struct datapath *dp;
1544         int err;
1545
1546         reply = ovs_dp_cmd_alloc_info(info);
1547         if (!reply)
1548                 return -ENOMEM;
1549
1550         ovs_lock();
1551         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1552         err = PTR_ERR(dp);
1553         if (IS_ERR(dp))
1554                 goto err_unlock_free;
1555
1556         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1557                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1558         BUG_ON(err < 0);
1559
1560         __dp_destroy(dp);
1561         ovs_unlock();
1562
1563         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1564         return 0;
1565
1566 err_unlock_free:
1567         ovs_unlock();
1568         kfree_skb(reply);
1569         return err;
1570 }
1571
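/* Handler for OVS_DP_CMD_SET. ovs_dp_change() applies the new settings
 * (the user_features flags); note that the reply is issued with command
 * OVS_DP_CMD_NEW.
 */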
1572 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1573 {
1574         struct sk_buff *reply;
1575         struct datapath *dp;
1576         int err;
1577
1578         reply = ovs_dp_cmd_alloc_info(info);
1579         if (!reply)
1580                 return -ENOMEM;
1581
1582         ovs_lock();
1583         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1584         err = PTR_ERR(dp);
1585         if (IS_ERR(dp))
1586                 goto err_unlock_free;
1587
1588         ovs_dp_change(dp, info->attrs);
1589
1590         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1591                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1592         BUG_ON(err < 0);
1593         ovs_unlock();
1594
1595         ovs_notify(&dp_datapath_genl_family, &ovs_dp_datapath_multicast_group, reply, info);
1596         return 0;
1597
1598 err_unlock_free:
1599         ovs_unlock();
1600         kfree_skb(reply);
1601         return err;
1602 }
1603
1604 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1605 {
1606         struct sk_buff *reply;
1607         struct datapath *dp;
1608         int err;
1609
1610         reply = ovs_dp_cmd_alloc_info(info);
1611         if (!reply)
1612                 return -ENOMEM;
1613
1614         ovs_lock();
1615         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1616         if (IS_ERR(dp)) {
1617                 err = PTR_ERR(dp);
1618                 goto err_unlock_free;
1619         }
1620         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1621                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1622         BUG_ON(err < 0);
1623         ovs_unlock();
1624
1625         return genlmsg_reply(reply, info);
1626
1627 err_unlock_free:
1628         ovs_unlock();
1629         kfree_skb(reply);
1630         return err;
1631 }
1632
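/* Dump handler for OVS_DP_CMD_GET. cb->args[0] records how many datapaths
 * have been emitted so far, so a multi-part dump resumes after them.
 */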
1633 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1634 {
1635         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1636         struct datapath *dp;
1637         int skip = cb->args[0];
1638         int i = 0;
1639
1640         ovs_lock();
1641         list_for_each_entry(dp, &ovs_net->dps, list_node) {
1642                 if (i >= skip &&
1643                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1644                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1645                                          OVS_DP_CMD_NEW) < 0)
1646                         break;
1647                 i++;
1648         }
1649         ovs_unlock();
1650
1651         cb->args[0] = i;
1652
1653         return skb->len;
1654 }
1655
1656 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1657         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1658         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1659         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1660 };
1661
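/* Generic netlink operations for the datapath family: every modifying
 * command needs CAP_NET_ADMIN, while GET and the dump are open to
 * unprivileged users (e.g. "ovs-dpctl show" relies on this).
 */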
1662 static struct genl_ops dp_datapath_genl_ops[] = {
1663         { .cmd = OVS_DP_CMD_NEW,
1664           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1665           .policy = datapath_policy,
1666           .doit = ovs_dp_cmd_new
1667         },
1668         { .cmd = OVS_DP_CMD_DEL,
1669           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1670           .policy = datapath_policy,
1671           .doit = ovs_dp_cmd_del
1672         },
1673         { .cmd = OVS_DP_CMD_GET,
1674           .flags = 0,               /* OK for unprivileged users. */
1675           .policy = datapath_policy,
1676           .doit = ovs_dp_cmd_get,
1677           .dumpit = ovs_dp_cmd_dump
1678         },
1679         { .cmd = OVS_DP_CMD_SET,
1680           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1681           .policy = datapath_policy,
1682           .doit = ovs_dp_cmd_set,
1683         },
1684 };
1685
1686 static struct genl_family dp_datapath_genl_family = {
1687         .id = GENL_ID_GENERATE,
1688         .hdrsize = sizeof(struct ovs_header),
1689         .name = OVS_DATAPATH_FAMILY,
1690         .version = OVS_DATAPATH_VERSION,
1691         .maxattr = OVS_DP_ATTR_MAX,
1692         .netnsok = true,
1693         .parallel_ops = true,
1694         .ops = dp_datapath_genl_ops,
1695         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1696         .mcgrps = &ovs_dp_datapath_multicast_group,
1697         .n_mcgrps = 1,
1698 };
1699
1700 /* Called with ovs_mutex or RCU read lock. */
1701 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1702                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1703 {
1704         struct ovs_header *ovs_header;
1705         struct ovs_vport_stats vport_stats;
1706         int err;
1707
1708         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1709                                  flags, cmd);
1710         if (!ovs_header)
1711                 return -EMSGSIZE;
1712
1713         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1714
1715         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1716             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1717             nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)))
1718                 goto nla_put_failure;
1719
1720         ovs_vport_get_stats(vport, &vport_stats);
1721         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1722                     &vport_stats))
1723                 goto nla_put_failure;
1724
1725         if (ovs_vport_get_upcall_portids(vport, skb))
1726                 goto nla_put_failure;
1727
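        /* Errors other than -EMSGSIZE are deliberately ignored: if the
         * options do not fit we must abort, but otherwise the message is
         * still useful without them.
         */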
1728         err = ovs_vport_get_options(vport, skb);
1729         if (err == -EMSGSIZE)
1730                 goto error;
1731
1732         return genlmsg_end(skb, ovs_header);
1733
1734 nla_put_failure:
1735         err = -EMSGSIZE;
1736 error:
1737         genlmsg_cancel(skb, ovs_header);
1738         return err;
1739 }
1740
1741 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1742 {
1743         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1744 }
1745
1746 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1747 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1748                                          u32 seq, u8 cmd)
1749 {
1750         struct sk_buff *skb;
1751         int retval;
1752
1753         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1754         if (!skb)
1755                 return ERR_PTR(-ENOMEM);
1756
1757         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1758         BUG_ON(retval < 0);
1759
1760         return skb;
1761 }
1762
1763 /* Called with ovs_mutex or RCU read lock. */
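/* A lookup by OVS_VPORT_ATTR_NAME is namespace-wide and, when dp_ifindex is
 * nonzero, cross-checked against the vport's datapath. A lookup by
 * OVS_VPORT_ATTR_PORT_NO needs dp_ifindex to find the datapath first.
 */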
1764 static struct vport *lookup_vport(struct net *net,
1765                                   const struct ovs_header *ovs_header,
1766                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1767 {
1768         struct datapath *dp;
1769         struct vport *vport;
1770
1771         if (a[OVS_VPORT_ATTR_NAME]) {
1772                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1773                 if (!vport)
1774                         return ERR_PTR(-ENODEV);
1775                 if (ovs_header->dp_ifindex &&
1776                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1777                         return ERR_PTR(-ENODEV);
1778                 return vport;
1779         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1780                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1781
1782                 if (port_no >= DP_MAX_PORTS)
1783                         return ERR_PTR(-EFBIG);
1784
1785                 dp = get_dp(net, ovs_header->dp_ifindex);
1786                 if (!dp)
1787                         return ERR_PTR(-ENODEV);
1788
1789                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1790                 if (!vport)
1791                         return ERR_PTR(-ENODEV);
1792                 return vport;
1793         } else
1794                 return ERR_PTR(-EINVAL);
1795 }
1796
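/* Handler for OVS_VPORT_CMD_NEW: a name, a type and the upcall PID(s) are
 * mandatory; OVS_VPORT_ATTR_PORT_NO is optional, and omitting it (or
 * passing 0) asks the kernel to pick a free port number.
 */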
1797 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1798 {
1799         struct nlattr **a = info->attrs;
1800         struct ovs_header *ovs_header = info->userhdr;
1801         struct vport_parms parms;
1802         struct sk_buff *reply;
1803         struct vport *vport;
1804         struct datapath *dp;
1805         u32 port_no;
1806         int err;
1807
1808         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1809             !a[OVS_VPORT_ATTR_UPCALL_PID])
1810                 return -EINVAL;
1811
1812         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1813                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1814         if (port_no >= DP_MAX_PORTS)
1815                 return -EFBIG;
1816
1817         reply = ovs_vport_cmd_alloc_info();
1818         if (!reply)
1819                 return -ENOMEM;
1820
1821         ovs_lock();
1822         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1823         err = -ENODEV;
1824         if (!dp)
1825                 goto exit_unlock_free;
1826
1827         if (port_no) {
1828                 vport = ovs_vport_ovsl(dp, port_no);
1829                 err = -EBUSY;
1830                 if (vport)
1831                         goto exit_unlock_free;
1832         } else {
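                /* No specific port was requested: take the lowest free
                 * number, starting at 1 since OVSP_LOCAL (0) is the
                 * internal port.
                 */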
1833                 for (port_no = 1; ; port_no++) {
1834                         if (port_no >= DP_MAX_PORTS) {
1835                                 err = -EFBIG;
1836                                 goto exit_unlock_free;
1837                         }
1838                         vport = ovs_vport_ovsl(dp, port_no);
1839                         if (!vport)
1840                                 break;
1841                 }
1842         }
1843
1844         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1845         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1846         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1847         parms.dp = dp;
1848         parms.port_no = port_no;
1849         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1850
1851         vport = new_vport(&parms);
1852         err = PTR_ERR(vport);
1853         if (IS_ERR(vport))
1854                 goto exit_unlock_free;
1855
1856         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1857                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1858         BUG_ON(err < 0);
1859         ovs_unlock();
1860
1861         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1862         return 0;
1863
1864 exit_unlock_free:
1865         ovs_unlock();
1866         kfree_skb(reply);
1867         return err;
1868 }
1869
1870 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1871 {
1872         struct nlattr **a = info->attrs;
1873         struct sk_buff *reply;
1874         struct vport *vport;
1875         int err;
1876
1877         reply = ovs_vport_cmd_alloc_info();
1878         if (!reply)
1879                 return -ENOMEM;
1880
1881         ovs_lock();
1882         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1883         err = PTR_ERR(vport);
1884         if (IS_ERR(vport))
1885                 goto exit_unlock_free;
1886
1887         if (a[OVS_VPORT_ATTR_TYPE] &&
1888             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1889                 err = -EINVAL;
1890                 goto exit_unlock_free;
1891         }
1892
1893         if (a[OVS_VPORT_ATTR_OPTIONS]) {
1894                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1895                 if (err)
1896                         goto exit_unlock_free;
1897         }
1898
1899         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1900                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
1901
1902                 err = ovs_vport_set_upcall_portids(vport, ids);
1903                 if (err)
1904                         goto exit_unlock_free;
1905         }
1906
1907         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1908                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1909         BUG_ON(err < 0);
1910         ovs_unlock();
1911
1912         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1913         return 0;
1914
1915 exit_unlock_free:
1916         ovs_unlock();
1917         kfree_skb(reply);
1918         return err;
1919 }
1920
1921 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1922 {
1923         struct nlattr **a = info->attrs;
1924         struct sk_buff *reply;
1925         struct vport *vport;
1926         int err;
1927
1928         reply = ovs_vport_cmd_alloc_info();
1929         if (!reply)
1930                 return -ENOMEM;
1931
1932         ovs_lock();
1933         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1934         err = PTR_ERR(vport);
1935         if (IS_ERR(vport))
1936                 goto exit_unlock_free;
1937
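        /* The local port lives and dies with the datapath itself; it can
         * never be deleted on its own.
         */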
1938         if (vport->port_no == OVSP_LOCAL) {
1939                 err = -EINVAL;
1940                 goto exit_unlock_free;
1941         }
1942
1943         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1944                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1945         BUG_ON(err < 0);
1946         ovs_dp_detach_port(vport);
1947         ovs_unlock();
1948
1949         ovs_notify(&dp_vport_genl_family, &ovs_dp_vport_multicast_group, reply, info);
1950         return 0;
1951
1952 exit_unlock_free:
1953         ovs_unlock();
1954         kfree_skb(reply);
1955         return err;
1956 }
1957
1958 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1959 {
1960         struct nlattr **a = info->attrs;
1961         struct ovs_header *ovs_header = info->userhdr;
1962         struct sk_buff *reply;
1963         struct vport *vport;
1964         int err;
1965
1966         reply = ovs_vport_cmd_alloc_info();
1967         if (!reply)
1968                 return -ENOMEM;
1969
1970         rcu_read_lock();
1971         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1972         err = PTR_ERR(vport);
1973         if (IS_ERR(vport))
1974                 goto exit_unlock_free;
1975         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1976                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1977         BUG_ON(err < 0);
1978         rcu_read_unlock();
1979
1980         return genlmsg_reply(reply, info);
1981
1982 exit_unlock_free:
1983         rcu_read_unlock();
1984         kfree_skb(reply);
1985         return err;
1986 }
1987
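/* Vport dump: cb->args[0] holds the hash bucket and cb->args[1] the offset
 * within it, so the dump resumes exactly where the previous skb ran out of
 * room.
 */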
1988 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1989 {
1990         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1991         struct datapath *dp;
1992         int bucket = cb->args[0], skip = cb->args[1];
1993         int i, j = 0;
1994
1995         rcu_read_lock();
1996         dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex);
1997         if (!dp) {
1998                 rcu_read_unlock();
1999                 return -ENODEV;
2000         }
2001         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
2002                 struct vport *vport;
2003
2004                 j = 0;
2005                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
2006                         if (j >= skip &&
2007                             ovs_vport_cmd_fill_info(vport, skb,
2008                                                     NETLINK_CB(cb->skb).portid,
2009                                                     cb->nlh->nlmsg_seq,
2010                                                     NLM_F_MULTI,
2011                                                     OVS_VPORT_CMD_NEW) < 0)
2012                                 goto out;
2013
2014                         j++;
2015                 }
2016                 skip = 0;
2017         }
2018 out:
2019         rcu_read_unlock();
2020
2021         cb->args[0] = i;
2022         cb->args[1] = j;
2023
2024         return skb->len;
2025 }
2026
2027 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
2028         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
2029         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
2030         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
2031         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
2032         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
2033         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
2034 };
2035
2036 static struct genl_ops dp_vport_genl_ops[] = {
2037         { .cmd = OVS_VPORT_CMD_NEW,
2038           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2039           .policy = vport_policy,
2040           .doit = ovs_vport_cmd_new
2041         },
2042         { .cmd = OVS_VPORT_CMD_DEL,
2043           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2044           .policy = vport_policy,
2045           .doit = ovs_vport_cmd_del
2046         },
2047         { .cmd = OVS_VPORT_CMD_GET,
2048           .flags = 0,               /* OK for unprivileged users. */
2049           .policy = vport_policy,
2050           .doit = ovs_vport_cmd_get,
2051           .dumpit = ovs_vport_cmd_dump
2052         },
2053         { .cmd = OVS_VPORT_CMD_SET,
2054           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
2055           .policy = vport_policy,
2056           .doit = ovs_vport_cmd_set,
2057         },
2058 };
2059
2060 struct genl_family dp_vport_genl_family = {
2061         .id = GENL_ID_GENERATE,
2062         .hdrsize = sizeof(struct ovs_header),
2063         .name = OVS_VPORT_FAMILY,
2064         .version = OVS_VPORT_VERSION,
2065         .maxattr = OVS_VPORT_ATTR_MAX,
2066         .netnsok = true,
2067         .parallel_ops = true,
2068         .ops = dp_vport_genl_ops,
2069         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
2070         .mcgrps = &ovs_dp_vport_multicast_group,
2071         .n_mcgrps = 1,
2072 };
2073
2074 static struct genl_family *dp_genl_families[] = {
2075         &dp_datapath_genl_family,
2076         &dp_vport_genl_family,
2077         &dp_flow_genl_family,
2078         &dp_packet_genl_family,
2079 };
2080
2081 static void dp_unregister_genl(int n_families)
2082 {
2083         int i;
2084
2085         for (i = 0; i < n_families; i++)
2086                 genl_unregister_family(dp_genl_families[i]);
2087 }
2088
2089 static int dp_register_genl(void)
2090 {
2091         int err;
2092         int i;
2093
2094         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2095
2096                 err = genl_register_family(dp_genl_families[i]);
2097                 if (err)
2098                         goto error;
2099         }
2100
2101         return 0;
2102
2103 error:
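        /* Unregister only the families registered so far; i counts the
         * successful registrations.
         */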
2104         dp_unregister_genl(i);
2105         return err;
2106 }
2107
2108 static int __net_init ovs_init_net(struct net *net)
2109 {
2110         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2111
2112         INIT_LIST_HEAD(&ovs_net->dps);
2113         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2114         return 0;
2115 }
2116
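/* Destroy every datapath in the exiting namespace, then flush any notifier
 * work that might still reference it.
 */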
2117 static void __net_exit ovs_exit_net(struct net *net)
2118 {
2119         struct datapath *dp, *dp_next;
2120         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2121
2122         ovs_lock();
2123         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2124                 __dp_destroy(dp);
2125         ovs_unlock();
2126
2127         cancel_work_sync(&ovs_net->dp_notify_work);
2128 }
2129
2130 static struct pernet_operations ovs_net_ops = {
2131         .init = ovs_init_net,
2132         .exit = ovs_exit_net,
2133         .id   = &ovs_net_id,
2134         .size = sizeof(struct ovs_net),
2135 };
2136
2137 DEFINE_COMPAT_PNET_REG_FUNC(device);
2138
2139 static int __init dp_init(void)
2140 {
2141         int err;
2142
2143         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2144
2145         pr_info("Open vSwitch switching datapath %s, built "__DATE__" "__TIME__"\n",
2146                 VERSION);
2147
2148         err = action_fifos_init();
2149         if (err)
2150                 goto error;
2151
2152         err = ovs_flow_init();
2153         if (err)
2154                 goto error_action_fifos_exit;
2155
2156         err = ovs_vport_init();
2157         if (err)
2158                 goto error_flow_exit;
2159
2160         err = register_pernet_device(&ovs_net_ops);
2161         if (err)
2162                 goto error_vport_exit;
2163
2164         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2165         if (err)
2166                 goto error_netns_exit;
2167
2168         err = dp_register_genl();
2169         if (err < 0)
2170                 goto error_unreg_notifier;
2171
2172         return 0;
2173
2174 error_unreg_notifier:
2175         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2176 error_netns_exit:
2177         unregister_pernet_device(&ovs_net_ops);
2178 error_vport_exit:
2179         ovs_vport_exit();
2180 error_flow_exit:
2181         ovs_flow_exit();
2182 error_action_fifos_exit:
2183         action_fifos_exit();
2184 error:
2185         return err;
2186 }
2187
2188 static void dp_cleanup(void)
2189 {
2190         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2191         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2192         unregister_pernet_device(&ovs_net_ops);
2193         rcu_barrier();
2194         ovs_vport_exit();
2195         ovs_flow_exit();
2196         action_fifos_exit();
2197 }
2198
2199 module_init(dp_init);
2200 module_exit(dp_cleanup);
2201
2202 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2203 MODULE_LICENSE("GPL");
2204 MODULE_VERSION(VERSION);