/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "conntrack.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define MAX_L2_LEN	(VLAN_ETH_HLEN + 3 * MPLS_HLEN)
struct ovs_frag_data {
	unsigned long dst;
	struct vport *vport;
	struct ovs_gso_cb cb;
	__be16 inner_protocol;
	__u16 vlan_tci;
	__be16 vlan_proto;
	unsigned int l2_len;
	u8 l2_data[MAX_L2_LEN];
};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#endif
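/* A single scratch slot per CPU suffices here because prepare_frag() and
 * the ovs_vport_output() callbacks that consume the slot run back to back
 * on the same CPU within one fragmentation call, so the data never needs
 * to survive across invocations.
 */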
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4	/* limit used to detect packet
					 * looping by the network stack
					 */
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}
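/* Deferral is how recursion is kept bounded: instead of re-entering
 * do_execute_actions() for recirculation or nested sample() actions, the
 * skb, its key, and the remaining actions are queued here and drained by
 * process_deferred_actions() after the outermost action list completes.
 */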
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	/* The networking stack does not allow simultaneous tunnel and
	 * MPLS GSO.
	 */
	if (skb_encapsulation(skb))
		return -ENOTSUPP;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)skb_mpls_header(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	invalidate_flow_key(key);
	return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* skb_mpls_header() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const __be32 *mpls_lse, const __be32 *mask)
{
	__be32 *stack;
	__be32 lse;
	int err;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	stack = (__be32 *)skb_mpls_header(skb);
	lse = OVS_MASKED(*stack, *mpls_lse, *mask);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = lse;
	flow_key->mpls.top_lse = lse;
	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	int err;

	err = skb_vlan_pop(skb);
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = 0;
	return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (skb_vlan_tag_present(skb))
		invalidate_flow_key(key);
	else
		key->eth.tci = vlan->vlan_tci;
	return skb_vlan_push(skb, vlan->vlan_tpid,
			     ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
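/* Note: userspace indicates "tag present" by setting VLAN_TAG_PRESENT (the
 * CFI bit position) in vlan_tci, which is why the bit is masked out before
 * the raw TCI is handed to skb_vlan_push().
 */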
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
	u16 *dst = (u16 *)dst_;
	const u16 *src = (const u16 *)src_;
	const u16 *mask = (const u16 *)mask_;

	OVS_SET_MASKED(dst[0], src[0], mask[0]);
	OVS_SET_MASKED(dst[1], src[1], mask[1]);
	OVS_SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
			const struct ovs_key_ethernet *key,
			const struct ovs_key_ethernet *mask)
{
	int err;

	err = skb_ensure_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
			       mask->eth_src);
	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
			       mask->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
	return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
				  __be32 addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->frag_off & htons(IP_OFFSET))
		return;

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	update_ip_l4_checksum(skb, nh, *addr, new_addr);
	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
			   const __be32 mask[4], __be32 masked[4])
{
	masked[0] = OVS_MASKED(old[0], addr[0], mask[0]);
	masked[1] = OVS_MASKED(old[1], addr[1], mask[1]);
	masked[2] = OVS_MASKED(old[2], addr[2], mask[2]);
	masked[3] = OVS_MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
	/* Bits 21-24 are always unmasked, so this retains their values. */
	OVS_SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
	OVS_SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
	OVS_SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
		       u8 mask)
{
	new_ttl = OVS_MASKED(nh->ttl, new_ttl, mask);

	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
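/* The TTL shares a 16-bit checksum word with the protocol byte; since the
 * protocol byte is unchanged it can be treated as zero on both sides of
 * the csum_replace2() above, where htons(ttl << 8) places the TTL in the
 * high byte of that word.
 */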
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv4 *key,
		    const struct ovs_key_ipv4 *mask)
{
	struct iphdr *nh;
	__be32 new_addr;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (mask->ipv4_src) {
		new_addr = OVS_MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

		if (unlikely(new_addr != nh->saddr)) {
			set_ip_addr(skb, nh, &nh->saddr, new_addr);
			flow_key->ipv4.addr.src = new_addr;
		}
	}
	if (mask->ipv4_dst) {
		new_addr = OVS_MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

		if (unlikely(new_addr != nh->daddr)) {
			set_ip_addr(skb, nh, &nh->daddr, new_addr);
			flow_key->ipv4.addr.dst = new_addr;
		}
	}
	if (mask->ipv4_tos) {
		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
		flow_key->ip.tos = nh->tos;
	}
	if (mask->ipv4_ttl) {
		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
		flow_key->ip.ttl = nh->ttl;
	}

	return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_ipv6 *key,
		    const struct ovs_key_ipv6 *mask)
{
	struct ipv6hdr *nh;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				  sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);

	/* Setting an IP address is typically only a side effect of
	 * matching on it in the current userspace implementation, so it
	 * makes sense to check if the value actually changed.
	 */
	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
		__be32 *saddr = (__be32 *)&nh->saddr;
		__be32 masked[4];

		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
				      true);
			memcpy(&flow_key->ipv6.addr.src, masked,
			       sizeof(flow_key->ipv6.addr.src));
		}
	}
	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
		unsigned int offset = 0;
		int flags = IP6_FH_F_SKIP_RH;
		bool recalc_csum = true;
		__be32 *daddr = (__be32 *)&nh->daddr;
		__be32 masked[4];

		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
			if (ipv6_ext_hdr(nh->nexthdr))
				recalc_csum = (ipv6_find_hdr(skb, &offset,
							     NEXTHDR_ROUTING,
							     NULL, &flags)
					       != NEXTHDR_ROUTING);

			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
				      recalc_csum);
			memcpy(&flow_key->ipv6.addr.dst, masked,
			       sizeof(flow_key->ipv6.addr.dst));
		}
	}
	if (mask->ipv6_tclass) {
		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
		flow_key->ip.tos = ipv6_get_dsfield(nh);
	}
	if (mask->ipv6_label) {
		set_ipv6_fl(nh, ntohl(key->ipv6_label),
			    ntohl(mask->ipv6_label));
		flow_key->ipv6.label =
		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
	}
	if (mask->ipv6_hlimit) {
		OVS_SET_MASKED(nh->hop_limit, key->ipv6_hlimit,
			       mask->ipv6_hlimit);
		flow_key->ip.ttl = nh->hop_limit;
	}
	return 0;
}
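/* The routing-header probe above is needed because, when a routing header
 * is present, the receiver computes the L4 pseudo-header over the final
 * destination taken from that header rather than the address rewritten
 * here, in which case recalculating the checksum would corrupt it.
 */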
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
}
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_udp *key,
		   const struct ovs_key_udp *mask)
{
	struct udphdr *uh;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	/* Either of the masks is non-zero, so do not bother checking them. */
	src = OVS_MASKED(uh->source, key->udp_src, mask->udp_src);
	dst = OVS_MASKED(uh->dest, key->udp_dst, mask->udp_dst);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		if (likely(src != uh->source)) {
			set_tp_port(skb, &uh->source, src, &uh->check);
			flow_key->tp.src = src;
		}
		if (likely(dst != uh->dest)) {
			set_tp_port(skb, &uh->dest, dst, &uh->check);
			flow_key->tp.dst = dst;
		}

		if (unlikely(!uh->check))
			uh->check = CSUM_MANGLED_0;
	} else {
		uh->source = src;
		uh->dest = dst;
		flow_key->tp.src = src;
		flow_key->tp.dst = dst;
	}

	skb_clear_hash(skb);
	return 0;
}
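/* For UDP over IPv4, a checksum field of zero means "no checksum", so a
 * freshly computed checksum that happens to be zero is stored as
 * CSUM_MANGLED_0 (0xffff), the equivalent encoding in one's-complement
 * arithmetic.
 */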
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		   const struct ovs_key_tcp *key,
		   const struct ovs_key_tcp *mask)
{
	struct tcphdr *th;
	__be16 src, dst;
	int err;

	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
				  sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	src = OVS_MASKED(th->source, key->tcp_src, mask->tcp_src);
	if (likely(src != th->source)) {
		set_tp_port(skb, &th->source, src, &th->check);
		flow_key->tp.src = src;
	}
	dst = OVS_MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
	if (likely(dst != th->dest)) {
		set_tp_port(skb, &th->dest, dst, &th->check);
		flow_key->tp.dst = dst;
	}
	skb_clear_hash(skb);

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
		    const struct ovs_key_sctp *key,
		    const struct ovs_key_sctp *mask)
{
	unsigned int sctphoff = skb_transport_offset(skb);
	struct sctphdr *sh;
	__le32 old_correct_csum, new_csum, old_csum;
	int err;

	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	old_csum = sh->checksum;
	old_correct_csum = sctp_compute_cksum(skb, sctphoff);

	sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src);
	sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

	new_csum = sctp_compute_cksum(skb, sctphoff);

	/* Carry any checksum errors through. */
	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

	skb_clear_hash(skb);
	flow_key->tp.src = sh->source;
	flow_key->tp.dst = sh->dest;

	return 0;
}
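/* The XOR above carries the received checksum's state through exactly: if
 * the packet arrived with old_csum == old_correct_csum, the stored result
 * is simply new_csum; otherwise the original error bits are reproduced in
 * the outgoing header, so pre-existing corruption stays detectable.
 */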
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static int ovs_vport_output(OVS_VPORT_OUTPUT_PARAMS)
{
	struct ovs_frag_data *data = get_pcpu_ptr(ovs_frag_data_storage);
	struct vport *vport = data->vport;

	if (skb_cow_head(skb, data->l2_len) < 0) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	__skb_dst_copy(skb, data->dst);
	*OVS_GSO_CB(skb) = data->cb;
	ovs_skb_set_inner_protocol(skb, data->inner_protocol);
	skb->vlan_tci = data->vlan_tci;
	skb->vlan_proto = data->vlan_proto;

	/* Reconstruct the MAC header. */
	skb_push(skb, data->l2_len);
	memcpy(skb->data, &data->l2_data, data->l2_len);
	ovs_skb_postpush_rcsum(skb, skb->data, data->l2_len);
	skb_reset_mac_header(skb);

	ovs_vport_send(vport, skb);
	return 0;
}
static unsigned int
ovs_dst_get_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops ovs_dst_ops = {
	.family = AF_UNSPEC,
	.mtu = ovs_dst_get_mtu,
};
/* prepare_frag() is called once per larger-than-MTU frame; its inverse is
 * ovs_vport_output(), which is called once per output fragment.
 */
static void prepare_frag(struct vport *vport, struct sk_buff *skb)
{
	unsigned int hlen = skb_network_offset(skb);
	struct ovs_frag_data *data;

	data = get_pcpu_ptr(ovs_frag_data_storage);
	data->dst = (unsigned long) skb_dst(skb);
	data->vport = vport;
	data->cb = *OVS_GSO_CB(skb);
	data->inner_protocol = ovs_skb_get_inner_protocol(skb);
	data->vlan_tci = skb->vlan_tci;
	data->vlan_proto = skb->vlan_proto;
	data->l2_len = hlen;
	memcpy(&data->l2_data, skb->data, hlen);

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	skb_pull(skb, hlen);
}
static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
			 __be16 ethertype)
{
	if (skb_network_offset(skb) > MAX_L2_LEN) {
		OVS_NLERR(1, "L2 header too long to fragment");
		goto err;
	}

	if (ethertype == htons(ETH_P_IP)) {
		struct dst_entry ovs_dst;
		unsigned long orig_dst;

		prepare_frag(vport, skb);
		dst_init(&ovs_dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_dst);
		IPCB(skb)->frag_max_size = mru;

		ip_do_fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else if (ethertype == htons(ETH_P_IPV6)) {
		const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
		unsigned long orig_dst;
		struct rt6_info ovs_rt;

		if (!v6ops)
			goto err;

		prepare_frag(vport, skb);
		memset(&ovs_rt, 0, sizeof(ovs_rt));
		dst_init(&ovs_rt.dst, &ovs_dst_ops, NULL, 1,
			 DST_OBSOLETE_NONE, DST_NOCOUNT);
		ovs_rt.dst.dev = vport->dev;

		orig_dst = (unsigned long) skb_dst(skb);
		skb_dst_set_noref(skb, &ovs_rt.dst);
		IP6CB(skb)->frag_max_size = mru;

		v6ops->fragment(skb->sk, skb, ovs_vport_output);
		refdst_drop(orig_dst);
	} else {
		WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
			  ovs_vport_name(vport), ntohs(ethertype), mru,
			  vport->dev->mtu);
		goto err;
	}

	return;
err:
	kfree_skb(skb);
}
#else /* !(LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)) */
static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
			 __be16 ethertype)
{
	WARN_ONCE(1, "Fragment unavailable ->%s: eth=%04x, MRU=%d, MTU=%d.",
		  ovs_vport_name(vport), ntohs(ethertype), mru,
		  vport->dev->mtu);
	kfree_skb(skb);
}
#endif
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
		      struct sw_flow_key *key)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport)) {
		u16 mru = OVS_CB(skb)->mru;

		if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
			ovs_vport_send(vport, skb);
		} else if (mru <= vport->dev->mtu) {
			__be16 ethertype = key->eth.type;

			if (!is_flow_key_valid(key)) {
				if (eth_p_mpls(skb->protocol))
					ethertype = ovs_skb_get_inner_protocol(skb);
				else
					ethertype = vlan_get_protocol(skb);
			}

			ovs_fragment(vport, skb, mru, ethertype);
		} else {
			OVS_NLERR(true, "Cannot fragment IP frames");
			kfree_skb(skb);
		}
	} else {
		kfree_skb(skb);
	}
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr,
			    const struct nlattr *actions, int actions_len)
{
	struct ip_tunnel_info info;
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	memset(&upcall, 0, sizeof(upcall));
	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.mru = OVS_CB(skb)->mru;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				upcall.egress_tun_info = &info;
				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &upcall);
				if (err)
					upcall.egress_tun_info = NULL;
			}
			break;
		}

		case OVS_USERSPACE_ATTR_ACTIONS: {
			/* Include actions. */
			upcall.actions = actions;
			upcall.actions_len = actions_len;
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr,
		  const struct nlattr *actions, int actions_len)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		u32 probability;

		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			probability = nla_get_u32(a);
			if (!probability || prandom_u32() > probability)
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   nla_is_last(a, rem)))
		return output_userspace(dp, skb, key, a, actions, actions_len);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      struct sw_flow_key *flow_key,
			      const struct nlattr *a)
{
	/* Only tunnel set execution is supported without a mask. */
	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
		struct ovs_tunnel_info *tun = nla_data(a);

		ovs_skb_dst_drop(skb);
		ovs_dst_hold((struct dst_entry *)tun->tun_dst);
		ovs_skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
		return 0;
	}

	return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
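/* Illustration: a masked-set attribute carries the value immediately
 * followed by a mask of the same size. For OVS_KEY_ATTR_IPV4 the payload
 * is a struct ovs_key_ipv4 value followed by a struct ovs_key_ipv4 mask,
 * and get_mask(a, struct ovs_key_ipv4 *) points at the second half.
 */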
static int execute_masked_set_action(struct sk_buff *skb,
				     struct sw_flow_key *flow_key,
				     const struct nlattr *a)
{
	int err = 0;

	switch (nla_type(a)) {
	case OVS_KEY_ATTR_PRIORITY:
		OVS_SET_MASKED(skb->priority, nla_get_u32(a),
			       *get_mask(a, u32 *));
		flow_key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		OVS_SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
		flow_key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		/* Masked data not supported for tunnel. */
		err = -EINVAL;
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, flow_key, nla_data(a),
				   get_mask(a, struct ovs_key_ethernet *));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv4 *));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_ipv6 *));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_tcp *));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, flow_key, nla_data(a),
			      get_mask(a, struct ovs_key_udp *));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, flow_key, nla_data(a),
			       get_mask(a, struct ovs_key_sctp *));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
								    __be32 *));
		break;

	case OVS_KEY_ATTR_CT_STATE:
	case OVS_KEY_ATTR_CT_ZONE:
	case OVS_KEY_ATTR_CT_MARK:
	case OVS_KEY_ATTR_CT_LABELS:
		err = -EINVAL;
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key,
			  const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!nla_is_last(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);

		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
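/* A recirculation is deferred with a NULL actions pointer; when the FIFO
 * is drained, process_deferred_actions() interprets that as "redo the
 * flow-table lookup" via ovs_dp_process_packet(), using the cloned key
 * whose recirc_id was set above.
 */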
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so doing a clone and then
	 * freeing the original skbuff is wasteful. The following code is
	 * slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port, key);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;
		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a, attr, len);
			break;
		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;
		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;
		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;
		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (nla_is_last(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_SET_MASKED:
		case OVS_ACTION_ATTR_SET_TO_MASKED:
			err = execute_masked_set_action(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a, attr, len);
			break;
		case OVS_ACTION_ATTR_CT:
			if (!is_flow_key_valid(key)) {
				err = ovs_flow_key_update(skb, key);
				if (err)
					return err;
			}

			err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
					     nla_data(a));

			/* Hide stolen IP fragments from user space. */
			if (err)
				return err == -EINPROGRESS ? 0 : err;
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port, key);
	else
		consume_skb(skb);

	return 0;
}
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		struct sw_flow_key *key = &da->pkt_key;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			const struct sw_flow_actions *acts,
			struct sw_flow_key *key)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
		if (net_ratelimit())
			pr_warn("%s: packet loop detected, dropping.\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
		return -ELOOP;
	}

	this_cpu_inc(exec_actions_level);
	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);

	/* This return status currently does not reflect the errors
	 * encountered during deferred actions execution. Probably needs to
	 * be fixed in the future.
	 */
	return err;
}
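/* Only the outermost invocation (level == 0 on entry) drains the deferred
 * FIFO; nested invocations only enqueue. Together with
 * EXEC_ACTIONS_LEVEL_LIMIT this bounds both the stack depth and the total
 * work done per packet.
 */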
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}