/*
- * Distributed under the terms of the GNU GPL version 2.
- * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
+ * Copyright (c) 2007-2014 Nicira, Inc.
*
- * Significant portions of this file may be copied from parts of the Linux
- * kernel, by Linus Torvalds and others.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
*/
-/* Functions for executing flow actions. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/openvswitch.h>
+#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
-#include <net/inet_ecn.h>
+
#include <net/ip.h>
+#include <net/ipv6.h>
#include <net/checksum.h>
+#include <net/dsfield.h>
+#include <net/mpls.h>
+#include <net/sctp/checksum.h>
-#include "actions.h"
-#include "checksum.h"
#include "datapath.h"
-#include "loop_counter.h"
-#include "openvswitch/datapath-protocol.h"
+#include "gso.h"
#include "vlan.h"
#include "vport.h"
-static int do_execute_actions(struct datapath *, struct sk_buff *,
- struct sw_flow_actions *acts);
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *attr, int len);
+
+struct deferred_action {
+ struct sk_buff *skb;
+ const struct nlattr *actions;
+
+	/* Clone of the packet key, captured when the deferred action is
+	 * created.
+	 */
+ struct sw_flow_key pkt_key;
+};
+
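+/* Recirc and nested sample actions are not executed immediately.  They
+ * are queued on a per-CPU FIFO and run by process_deferred_actions()
+ * once the current action list has finished, which keeps kernel stack
+ * usage bounded.
+ */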
+#define DEFERRED_ACTION_FIFO_SIZE 10
+struct action_fifo {
+ int head;
+ int tail;
+	/* Deferred action FIFO storage. */
+ struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
+};
+
+static struct action_fifo __percpu *action_fifos;
+#define EXEC_ACTIONS_LEVEL_LIMIT 4 /* limit used to detect packet
+ * looping by the network stack
+ */
+static DEFINE_PER_CPU(int, exec_actions_level);
+
+static void action_fifo_init(struct action_fifo *fifo)
+{
+ fifo->head = 0;
+ fifo->tail = 0;
+}
-static struct sk_buff *make_writable(struct sk_buff *skb, unsigned min_headroom)
+static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
- if (skb_cloned(skb)) {
- struct sk_buff *nskb;
- unsigned headroom = max(min_headroom, skb_headroom(skb));
+ return (fifo->head == fifo->tail);
+}
- nskb = skb_copy_expand(skb, headroom, skb_tailroom(skb), GFP_ATOMIC);
- if (nskb) {
- set_skb_csum_bits(skb, nskb);
- kfree_skb(skb);
- return nskb;
- }
- } else {
- unsigned int hdr_len = (skb_transport_offset(skb)
- + sizeof(struct tcphdr));
- if (pskb_may_pull(skb, min(hdr_len, skb->len)))
- return skb;
- }
- kfree_skb(skb);
- return NULL;
+static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
+{
+ if (action_fifo_is_empty(fifo))
+ return NULL;
+
+ return &fifo->fifo[fifo->tail++];
}
-static struct sk_buff *strip_vlan(struct sk_buff *skb)
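+/* Reserve the next FIFO slot; at most DEFERRED_ACTION_FIFO_SIZE - 1
+ * entries can be queued before NULL is returned.
+ */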
+static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
- struct ethhdr *eh;
+ if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
+ return NULL;
+
+ return &fifo->fifo[fifo->head++];
+}
- if (vlan_tx_tag_present(skb)) {
- vlan_set_tci(skb, 0);
- return skb;
+/* Return a queue entry if the FIFO is not full, otherwise NULL. */
+static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
+ const struct sw_flow_key *key,
+ const struct nlattr *attr)
+{
+ struct action_fifo *fifo;
+ struct deferred_action *da;
+
+ fifo = this_cpu_ptr(action_fifos);
+ da = action_fifo_put(fifo);
+ if (da) {
+ da->skb = skb;
+ da->actions = attr;
+ da->pkt_key = *key;
}
- if (unlikely(vlan_eth_hdr(skb)->h_vlan_proto != htons(ETH_P_8021Q) ||
- skb->len < VLAN_ETH_HLEN))
- return skb;
+ return da;
+}
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
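+/* The flow key mirrors the packet headers.  Actions that restructure the
+ * packet (e.g. MPLS and VLAN push/pop) zero eth.type to mark the key
+ * invalid; it is rebuilt from the packet when next needed (see
+ * execute_recirc()).
+ */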
+static void invalidate_flow_key(struct sw_flow_key *key)
+{
+ key->eth.type = htons(0);
+}
+
+static bool is_flow_key_valid(const struct sw_flow_key *key)
+{
+ return !!key->eth.type;
+}
+
+static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_action_push_mpls *mpls)
+{
+ __be32 *new_mpls_lse;
+ struct ethhdr *hdr;
+
+	/* The networking stack does not allow simultaneous tunnel and
+	 * MPLS GSO.
+	 */
+ if (skb_encapsulation(skb))
+ return -ENOTSUPP;
- if (get_ip_summed(skb) == OVS_CSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(skb->data
- + ETH_HLEN, VLAN_HLEN, 0));
+ if (skb_cow_head(skb, MPLS_HLEN) < 0)
+ return -ENOMEM;
+
+ skb_push(skb, MPLS_HLEN);
+ memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
+ skb->mac_len);
+ skb_reset_mac_header(skb);
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ new_mpls_lse = (__be32 *)skb_mpls_header(skb);
+ *new_mpls_lse = mpls->mpls_lse;
- eh = (struct ethhdr *)skb_pull(skb, VLAN_HLEN);
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
+ MPLS_HLEN, 0));
- skb->protocol = eh->h_proto;
- skb->mac_header += VLAN_HLEN;
+ hdr = eth_hdr(skb);
+ hdr->h_proto = mpls->mpls_ethertype;
+ if (!ovs_skb_get_inner_protocol(skb))
+ ovs_skb_set_inner_protocol(skb, skb->protocol);
+ skb->protocol = mpls->mpls_ethertype;
- return skb;
+ invalidate_flow_key(key);
+ return 0;
}
-static struct sk_buff *modify_vlan_tci(struct sk_buff *skb, __be16 tci)
+static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const __be16 ethertype)
{
- struct vlan_ethhdr *vh;
- __be16 old_tci;
+ struct ethhdr *hdr;
+ int err;
- if (vlan_tx_tag_present(skb) || skb->protocol != htons(ETH_P_8021Q))
- return __vlan_hwaccel_put_tag(skb, ntohs(tci));
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+ if (unlikely(err))
+ return err;
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
+ skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);
- if (unlikely(skb->len < VLAN_ETH_HLEN))
- return skb;
+ memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
+ skb->mac_len);
- vh = vlan_eth_hdr(skb);
+ __skb_pull(skb, MPLS_HLEN);
+ skb_reset_mac_header(skb);
- old_tci = vh->h_vlan_TCI;
- vh->h_vlan_TCI = tci;
+ /* skb_mpls_header() is used to locate the ethertype
+ * field correctly in the presence of VLAN tags.
+ */
+ hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
+ hdr->h_proto = ethertype;
+ if (eth_p_mpls(skb->protocol))
+ skb->protocol = ethertype;
+
+ invalidate_flow_key(key);
+ return 0;
+}
+
+/* 'KEY' must not have any bits set outside of the 'MASK'. */
+#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
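+/* e.g. MASKED(0xab, 0x0c, 0x0f) == 0xac: 'KEY' bits where the mask is
+ * set, 'OLD' bits elsewhere.
+ */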
+
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const __be32 *mpls_lse, const __be32 *mask)
+{
+ __be32 *stack;
+ __be32 lse;
+ int err;
+
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+ if (unlikely(err))
+ return err;
- if (get_ip_summed(skb) == OVS_CSUM_COMPLETE) {
- __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
- skb->csum = ~csum_partial((char *)diff, sizeof(diff), ~skb->csum);
+ stack = (__be32 *)skb_mpls_header(skb);
+ lse = MASKED(*stack, *mpls_lse, *mask);
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ __be32 diff[] = { ~(*stack), lse };
+
+ skb->csum = ~csum_partial((char *)diff, sizeof(diff),
+ ~skb->csum);
}
- return skb;
+ *stack = lse;
+ flow_key->mpls.top_lse = lse;
+ return 0;
+}
+
+static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
+{
+ int err;
+
+ err = skb_vlan_pop(skb);
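+	/* If a second 802.1Q tag is now the outermost tag, the cached key
+	 * no longer describes the packet.
+	 */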
+ if (skb_vlan_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = 0;
+ return err;
}
-static bool is_ip(struct sk_buff *skb)
+static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_action_push_vlan *vlan)
{
- return (OVS_CB(skb)->flow->key.eth.type == htons(ETH_P_IP) &&
- skb->transport_header > skb->network_header);
+ if (skb_vlan_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = vlan->vlan_tci;
+ return skb_vlan_push(skb, vlan->vlan_tpid,
+ ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
-static __sum16 *get_l4_checksum(struct sk_buff *skb)
+/* 'src' is already properly masked. */
+static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
+{
+ u16 *dst = (u16 *)dst_;
+ const u16 *src = (const u16 *)src_;
+ const u16 *mask = (const u16 *)mask_;
+
+ SET_MASKED(dst[0], src[0], mask[0]);
+ SET_MASKED(dst[1], src[1], mask[1]);
+ SET_MASKED(dst[2], src[2], mask[2]);
+}
+
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ethernet *key,
+ const struct ovs_key_ethernet *mask)
+{
+ int err;
+
+ err = skb_ensure_writable(skb, ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
+ ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
+ mask->eth_src);
+ ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+ mask->eth_dst);
+
+ ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
+
+ ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+ ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
+ return 0;
+}
+
+static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
+ __be32 addr, __be32 new_addr)
{
- u8 nw_proto = OVS_CB(skb)->flow->key.ip.nw_proto;
int transport_len = skb->len - skb_transport_offset(skb);
- if (nw_proto == IPPROTO_TCP) {
+
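+	/* Non-first fragments carry no L4 header, so there is no
+	 * transport checksum to update.
+	 */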
+ if (nh->frag_off & htons(IP_OFFSET))
+ return;
+
+ if (nh->protocol == IPPROTO_TCP) {
if (likely(transport_len >= sizeof(struct tcphdr)))
- return &tcp_hdr(skb)->check;
- } else if (nw_proto == IPPROTO_UDP) {
- if (likely(transport_len >= sizeof(struct udphdr)))
- return &udp_hdr(skb)->check;
+ inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
+ addr, new_addr, 1);
+ } else if (nh->protocol == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
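+			/* A zero UDP checksum means "no checksum"; a
+			 * recomputed value of zero must be sent as
+			 * all-ones (CSUM_MANGLED_0) instead.
+			 */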
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&uh->check, skb,
+ addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
}
- return NULL;
}
-static struct sk_buff *set_nw_addr(struct sk_buff *skb, const struct nlattr *a)
+static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
+ __be32 *addr, __be32 new_addr)
{
- __be32 new_nwaddr = nla_get_be32(a);
- struct iphdr *nh;
- __sum16 *check;
- __be32 *nwaddr;
+ update_ip_l4_checksum(skb, nh, *addr, new_addr);
+ csum_replace4(&nh->check, *addr, new_addr);
+ skb_clear_hash(skb);
+ *addr = new_addr;
+}
- if (unlikely(!is_ip(skb)))
- return skb;
+static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
+ __be32 addr[4], const __be32 new_addr[4])
+{
+ int transport_len = skb->len - skb_transport_offset(skb);
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
+ if (l4_proto == NEXTHDR_TCP) {
+ if (likely(transport_len >= sizeof(struct tcphdr)))
+ inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
+ addr, new_addr, 1);
+ } else if (l4_proto == NEXTHDR_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr))) {
+ struct udphdr *uh = udp_hdr(skb);
+
+ if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace16(&uh->check, skb,
+ addr, new_addr, 1);
+ if (!uh->check)
+ uh->check = CSUM_MANGLED_0;
+ }
+ }
+ } else if (l4_proto == NEXTHDR_ICMP) {
+ if (likely(transport_len >= sizeof(struct icmp6hdr)))
+ inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
+ skb, addr, new_addr, 1);
+ }
+}
- nh = ip_hdr(skb);
- nwaddr = nla_type(a) == ODP_ACTION_ATTR_SET_NW_SRC ? &nh->saddr : &nh->daddr;
+static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
+ const __be32 mask[4], __be32 masked[4])
+{
+ masked[0] = MASKED(old[0], addr[0], mask[0]);
+ masked[1] = MASKED(old[1], addr[1], mask[1]);
+ masked[2] = MASKED(old[2], addr[2], mask[2]);
+ masked[3] = MASKED(old[3], addr[3], mask[3]);
+}
+
+static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
+ __be32 addr[4], const __be32 new_addr[4],
+ bool recalculate_csum)
+{
+ if (likely(recalculate_csum))
+ update_ipv6_checksum(skb, l4_proto, addr, new_addr);
- check = get_l4_checksum(skb);
- if (likely(check))
- inet_proto_csum_replace4(check, skb, *nwaddr, new_nwaddr, 1);
- csum_replace4(&nh->check, *nwaddr, new_nwaddr);
+ skb_clear_hash(skb);
+ memcpy(addr, new_addr, sizeof(__be32[4]));
+}
- skb_clear_rxhash(skb);
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
+{
+ /* Bits 21-24 are always unmasked, so this retains their values. */
+ SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+ SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+ SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
+}
- *nwaddr = new_nwaddr;
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+ u8 mask)
+{
+ new_ttl = MASKED(nh->ttl, new_ttl, mask);
- return skb;
+ csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
+ nh->ttl = new_ttl;
}
-static struct sk_buff *set_nw_tos(struct sk_buff *skb, u8 nw_tos)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ipv4 *key,
+ const struct ovs_key_ipv4 *mask)
{
- if (unlikely(!is_ip(skb)))
- return skb;
+ struct iphdr *nh;
+ __be32 new_addr;
+ int err;
- skb = make_writable(skb, 0);
- if (skb) {
- struct iphdr *nh = ip_hdr(skb);
- u8 *f = &nh->tos;
- u8 old = *f;
- u8 new;
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct iphdr));
+ if (unlikely(err))
+ return err;
- /* Set the DSCP bits and preserve the ECN bits. */
- new = nw_tos | (nh->tos & INET_ECN_MASK);
- csum_replace4(&nh->check, (__force __be32)old,
- (__force __be32)new);
- *f = new;
+ nh = ip_hdr(skb);
+
+	/* Setting an IP address is typically only a side effect of
+	 * matching on it in the current userspace implementation, so it
+	 * makes sense to check whether the value actually changed.
+	 */
+ if (mask->ipv4_src) {
+ new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
+
+ if (unlikely(new_addr != nh->saddr)) {
+ set_ip_addr(skb, nh, &nh->saddr, new_addr);
+ flow_key->ipv4.addr.src = new_addr;
+ }
+ }
+ if (mask->ipv4_dst) {
+ new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
+
+ if (unlikely(new_addr != nh->daddr)) {
+ set_ip_addr(skb, nh, &nh->daddr, new_addr);
+ flow_key->ipv4.addr.dst = new_addr;
+ }
}
- return skb;
+ if (mask->ipv4_tos) {
+ ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
+ flow_key->ip.tos = nh->tos;
+ }
+ if (mask->ipv4_ttl) {
+ set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
+ flow_key->ip.ttl = nh->ttl;
+ }
+
+ return 0;
}
-static struct sk_buff *set_tp_port(struct sk_buff *skb, const struct nlattr *a)
+static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
- struct udphdr *th;
- __sum16 *check;
- __be16 *port;
+ return !!(addr[0] | addr[1] | addr[2] | addr[3]);
+}
+
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_ipv6 *key,
+ const struct ovs_key_ipv6 *mask)
+{
+ struct ipv6hdr *nh;
+ int err;
- if (unlikely(!is_ip(skb)))
- return skb;
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct ipv6hdr));
+ if (unlikely(err))
+ return err;
- skb = make_writable(skb, 0);
- if (unlikely(!skb))
- return NULL;
+ nh = ipv6_hdr(skb);
- /* Must follow make_writable() since that can move the skb data. */
- check = get_l4_checksum(skb);
- if (unlikely(!check))
- return skb;
-
- /*
- * Update port and checksum.
- *
- * This is OK because source and destination port numbers are at the
- * same offsets in both UDP and TCP headers, and get_l4_checksum() only
- * supports those protocols.
+	/* Setting an IP address is typically only a side effect of
+	 * matching on it in the current userspace implementation, so it
+	 * makes sense to check whether the value actually changed.
*/
- th = udp_hdr(skb);
- port = nla_type(a) == ODP_ACTION_ATTR_SET_TP_SRC ? &th->source : &th->dest;
- inet_proto_csum_replace2(check, skb, *port, nla_get_be16(a), 0);
- *port = nla_get_be16(a);
- skb_clear_rxhash(skb);
+ if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
+ __be32 *saddr = (__be32 *)&nh->saddr;
+ __be32 masked[4];
- return skb;
+ mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+
+ if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+ set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+ true);
+ memcpy(&flow_key->ipv6.addr.src, masked,
+ sizeof(flow_key->ipv6.addr.src));
+ }
+ }
+ if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
+ unsigned int offset = 0;
+ int flags = IP6_FH_F_SKIP_RH;
+ bool recalc_csum = true;
+ __be32 *daddr = (__be32 *)&nh->daddr;
+ __be32 masked[4];
+
+ mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
+
+ if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
+ if (ipv6_ext_hdr(nh->nexthdr))
+ recalc_csum = (ipv6_find_hdr(skb, &offset,
+ NEXTHDR_ROUTING,
+ NULL, &flags)
+ != NEXTHDR_ROUTING);
+
+ set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+ recalc_csum);
+ memcpy(&flow_key->ipv6.addr.dst, masked,
+ sizeof(flow_key->ipv6.addr.dst));
+ }
+ }
+ if (mask->ipv6_tclass) {
+ ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+ flow_key->ip.tos = ipv6_get_dsfield(nh);
+ }
+ if (mask->ipv6_label) {
+ set_ipv6_fl(nh, ntohl(key->ipv6_label),
+ ntohl(mask->ipv6_label));
+ flow_key->ipv6.label =
+ *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+ }
+ if (mask->ipv6_hlimit) {
+ SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+ flow_key->ip.ttl = nh->hop_limit;
+ }
+ return 0;
}
-/**
- * is_spoofed_arp - check for invalid ARP packet
- *
- * @skb: skbuff containing an Ethernet packet, with network header pointing
- * just past the Ethernet and optional 802.1Q header.
- *
- * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy
- * or truncated header fields or one whose inner and outer Ethernet address
- * differ.
- */
-static bool is_spoofed_arp(struct sk_buff *skb)
+/* Must follow skb_ensure_writable() since that can move the skb data. */
+static void set_tp_port(struct sk_buff *skb, __be16 *port,
+ __be16 new_port, __sum16 *check)
{
- struct arp_eth_header *arp;
+ inet_proto_csum_replace2(check, skb, *port, new_port, 0);
+ *port = new_port;
+}
- if (OVS_CB(skb)->flow->key.eth.type != htons(ETH_P_ARP))
- return false;
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_udp *key,
+ const struct ovs_key_udp *mask)
+{
+ struct udphdr *uh;
+ __be16 src, dst;
+ int err;
+
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
+ if (unlikely(err))
+ return err;
+
+ uh = udp_hdr(skb);
+	/* At least one of the masks is non-zero, so do not bother checking
+	 * them individually.
+	 */
+ src = MASKED(uh->source, key->udp_src, mask->udp_src);
+ dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
+
+ if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (likely(src != uh->source)) {
+ set_tp_port(skb, &uh->source, src, &uh->check);
+ flow_key->tp.src = src;
+ }
+ if (likely(dst != uh->dest)) {
+ set_tp_port(skb, &uh->dest, dst, &uh->check);
+ flow_key->tp.dst = dst;
+ }
- if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len)
- return true;
+ if (unlikely(!uh->check))
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ uh->source = src;
+ uh->dest = dst;
+ flow_key->tp.src = src;
+ flow_key->tp.dst = dst;
+ }
- arp = (struct arp_eth_header *)skb_network_header(skb);
- return (arp->ar_hrd != htons(ARPHRD_ETHER) ||
- arp->ar_pro != htons(ETH_P_IP) ||
- arp->ar_hln != ETH_ALEN ||
- arp->ar_pln != 4 ||
- compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source));
+ skb_clear_hash(skb);
+
+ return 0;
}
-static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_tcp *key,
+ const struct ovs_key_tcp *mask)
{
- struct vport *p;
+ struct tcphdr *th;
+ __be16 src, dst;
+ int err;
+
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct tcphdr));
+ if (unlikely(err))
+ return err;
+
+ th = tcp_hdr(skb);
+ src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+ if (likely(src != th->source)) {
+ set_tp_port(skb, &th->source, src, &th->check);
+ flow_key->tp.src = src;
+ }
+ dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+ if (likely(dst != th->dest)) {
+ set_tp_port(skb, &th->dest, dst, &th->check);
+ flow_key->tp.dst = dst;
+ }
+ skb_clear_hash(skb);
- if (!skb)
- goto error;
+ return 0;
+}
+
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+ const struct ovs_key_sctp *key,
+ const struct ovs_key_sctp *mask)
+{
+ unsigned int sctphoff = skb_transport_offset(skb);
+ struct sctphdr *sh;
+ __le32 old_correct_csum, new_csum, old_csum;
+ int err;
+
+ err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
+ if (unlikely(err))
+ return err;
+
+ sh = sctp_hdr(skb);
+ old_csum = sh->checksum;
+ old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+
+ sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
+ sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
- p = rcu_dereference(dp->ports[out_port]);
- if (!p)
- goto error;
+ new_csum = sctp_compute_cksum(skb, sctphoff);
- vport_send(p, skb);
- return;
+ /* Carry any checksum errors through. */
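+	/* old_csum ^ old_correct_csum captures any pre-existing checksum
+	 * error; XORing it into the recomputed checksum preserves that
+	 * error instead of silently repairing a corrupted packet.
+	 */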
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
-error:
- kfree_skb(skb);
+ skb_clear_hash(skb);
+ flow_key->tp.src = sh->source;
+ flow_key->tp.dst = sh->dest;
+
+ return 0;
+}
+
+static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
+{
+ struct vport *vport = ovs_vport_rcu(dp, out_port);
+
+ if (likely(vport))
+ ovs_vport_send(vport, skb);
+ else
+ kfree_skb(skb);
}
-static int output_control(struct datapath *dp, struct sk_buff *skb, u64 arg)
+static int output_userspace(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key, const struct nlattr *attr,
+ const struct nlattr *actions, int actions_len)
{
+ struct ovs_tunnel_info info;
struct dp_upcall_info upcall;
+ const struct nlattr *a;
+ int rem;
+
+ memset(&upcall, 0, sizeof(upcall));
+ upcall.cmd = OVS_PACKET_CMD_ACTION;
+
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ switch (nla_type(a)) {
+ case OVS_USERSPACE_ATTR_USERDATA:
+ upcall.userdata = a;
+ break;
+
+ case OVS_USERSPACE_ATTR_PID:
+ upcall.portid = nla_get_u32(a);
+ break;
+
+ case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
+			/* Get the egress tunnel info. */
+ struct vport *vport;
+
+ vport = ovs_vport_rcu(dp, nla_get_u32(a));
+ if (vport) {
+ int err;
+
+ err = ovs_vport_get_egress_tun_info(vport, skb,
+ &info);
+ if (!err)
+ upcall.egress_tun_info = &info;
+ }
+ break;
+ }
+
+ case OVS_USERSPACE_ATTR_ACTIONS: {
+ /* Include actions. */
+ upcall.actions = actions;
+ upcall.actions_len = actions_len;
+ break;
+ }
+
+ } /* End of switch. */
+ }
+
+ return ovs_dp_upcall(dp, skb, key, &upcall);
+}
+
+static int sample(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key, const struct nlattr *attr,
+ const struct nlattr *actions, int actions_len)
+{
+ const struct nlattr *acts_list = NULL;
+ const struct nlattr *a;
+ int rem;
+
+ for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
+ a = nla_next(a, &rem)) {
+ u32 probability;
+
+ switch (nla_type(a)) {
+ case OVS_SAMPLE_ATTR_PROBABILITY:
+ probability = nla_get_u32(a);
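+			/* The probability is expressed as a fraction of
+			 * UINT32_MAX; zero means never sample.
+			 */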
+ if (!probability || prandom_u32() > probability)
+ return 0;
+ break;
+
+ case OVS_SAMPLE_ATTR_ACTIONS:
+ acts_list = a;
+ break;
+ }
+ }
+
+ rem = nla_len(acts_list);
+ a = nla_data(acts_list);
+
+	/* The actions list is empty; do nothing. */
+ if (unlikely(!rem))
+ return 0;
+
+	/* The only known usage of the sample action is having a single
+	 * userspace action, so treat this usage as a special case:
+	 * output_userspace() should clone the skb to be sent to
+	 * userspace, and this skb will be consumed by its caller.
+	 */
+ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
+ nla_is_last(a, rem)))
+ return output_userspace(dp, skb, key, a, actions, actions_len);
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
- return -ENOMEM;
+ /* Skip the sample action when out of memory. */
+ return 0;
+
+ if (!add_deferred_actions(skb, key, a)) {
+ if (net_ratelimit())
+ pr_warn("%s: deferred actions limit reached, dropping sample action\n",
+ ovs_dp_name(dp));
- upcall.cmd = ODP_PACKET_CMD_ACTION;
- upcall.key = &OVS_CB(skb)->flow->key;
- upcall.userdata = arg;
- upcall.sample_pool = 0;
- upcall.actions = NULL;
- upcall.actions_len = 0;
- return dp_upcall(dp, skb, &upcall);
+ kfree_skb(skb);
+ }
+ return 0;
+}
+
+static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct nlattr *attr)
+{
+ struct ovs_action_hash *hash_act = nla_data(attr);
+ u32 hash = 0;
+
+ /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
+ hash = skb_get_hash(skb);
+ hash = jhash_1word(hash, hash_act->hash_basis);
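+	/* A hash of zero is indistinguishable from "no hash computed",
+	 * so avoid it.
+	 */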
+ if (!hash)
+ hash = 0x1;
+
+ key->ovs_flow_hash = hash;
+}
+
+static int execute_set_action(struct sk_buff *skb,
+ struct sw_flow_key *flow_key,
+ const struct nlattr *a)
+{
+ /* Only tunnel set execution is supported without a mask. */
+ if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
+ OVS_CB(skb)->egress_tun_info = nla_data(a);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/* Mask is at the midpoint of the data. */
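+/* A masked set action's payload is the key immediately followed by an
+ * equally sized mask, so the mask begins one key-length into the data.
+ */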
+#define get_mask(a, type) ((const type)nla_data(a) + 1)
+
+static int execute_masked_set_action(struct sk_buff *skb,
+ struct sw_flow_key *flow_key,
+ const struct nlattr *a)
+{
+ int err = 0;
+
+ switch (nla_type(a)) {
+ case OVS_KEY_ATTR_PRIORITY:
+ SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+ flow_key->phy.priority = skb->priority;
+ break;
+
+ case OVS_KEY_ATTR_SKB_MARK:
+ SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+ flow_key->phy.skb_mark = skb->mark;
+ break;
+
+ case OVS_KEY_ATTR_TUNNEL_INFO:
+ /* Masked data not supported for tunnel. */
+ err = -EINVAL;
+ break;
+
+ case OVS_KEY_ATTR_ETHERNET:
+ err = set_eth_addr(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ethernet *));
+ break;
+
+ case OVS_KEY_ATTR_IPV4:
+ err = set_ipv4(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ipv4 *));
+ break;
+
+ case OVS_KEY_ATTR_IPV6:
+ err = set_ipv6(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_ipv6 *));
+ break;
+
+ case OVS_KEY_ATTR_TCP:
+ err = set_tcp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_tcp *));
+ break;
+
+ case OVS_KEY_ATTR_UDP:
+ err = set_udp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_udp *));
+ break;
+
+ case OVS_KEY_ATTR_SCTP:
+ err = set_sctp(skb, flow_key, nla_data(a),
+ get_mask(a, struct ovs_key_sctp *));
+ break;
+
+ case OVS_KEY_ATTR_MPLS:
+ err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
+ __be32 *));
+ break;
+ }
+
+ return err;
+}
+
+static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *a, int rem)
+{
+ struct deferred_action *da;
+
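+	/* Actions such as MPLS and VLAN push/pop may have invalidated the
+	 * cached flow key; recirculation re-matches on the key, so rebuild
+	 * it from the packet first.
+	 */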
+ if (!is_flow_key_valid(key)) {
+ int err;
+
+ err = ovs_flow_key_update(skb, key);
+ if (err)
+ return err;
+ }
+ BUG_ON(!is_flow_key_valid(key));
+
+ if (!nla_is_last(a, rem)) {
+		/* The recirc action is not the last action
+		 * of the action list, so we need to clone the skb.
+		 */
+ skb = skb_clone(skb, GFP_ATOMIC);
+
+ /* Skip the recirc action when out of memory, but
+ * continue on with the rest of the action list.
+ */
+ if (!skb)
+ return 0;
+ }
+
+ da = add_deferred_actions(skb, key, NULL);
+ if (da) {
+ da->pkt_key.recirc_id = nla_get_u32(a);
+ } else {
+ kfree_skb(skb);
+
+ if (net_ratelimit())
+			pr_warn("%s: deferred action limit reached, dropping recirc action\n",
+ ovs_dp_name(dp));
+ }
+
+ return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts)
+ struct sw_flow_key *key,
+ const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, in which case doing a clone and
	 * then freeing the original skbuff is wasteful. So the following code
- * is slightly obscure just to avoid that. */
+ * is slightly obscure just to avoid that.
+ */
int prev_port = -1;
- u32 priority = skb->priority;
const struct nlattr *a;
- int rem, err;
+ int rem;
- for (a = acts->actions, rem = acts->actions_len; rem > 0;
+ for (a = attr, rem = len; rem > 0;
a = nla_next(a, &rem)) {
- if (prev_port != -1) {
- do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
+ int err = 0;
+
+ if (unlikely(prev_port != -1)) {
+ struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (out_skb)
+ do_output(dp, out_skb, prev_port);
+
prev_port = -1;
}
switch (nla_type(a)) {
- case ODP_ACTION_ATTR_OUTPUT:
+ case OVS_ACTION_ATTR_OUTPUT:
prev_port = nla_get_u32(a);
break;
- case ODP_ACTION_ATTR_CONTROLLER:
- err = output_control(dp, skb, nla_get_u64(a));
- if (err) {
- kfree_skb(skb);
- return err;
- }
- break;
-
- case ODP_ACTION_ATTR_SET_TUNNEL:
- OVS_CB(skb)->tun_id = nla_get_be64(a);
+ case OVS_ACTION_ATTR_USERSPACE:
+ output_userspace(dp, skb, key, a, attr, len);
break;
- case ODP_ACTION_ATTR_SET_DL_TCI:
- skb = modify_vlan_tci(skb, nla_get_be16(a));
+ case OVS_ACTION_ATTR_HASH:
+ execute_hash(skb, key, a);
break;
- case ODP_ACTION_ATTR_STRIP_VLAN:
- skb = strip_vlan(skb);
+ case OVS_ACTION_ATTR_PUSH_MPLS:
+ err = push_mpls(skb, key, nla_data(a));
break;
- case ODP_ACTION_ATTR_SET_DL_SRC:
- skb = make_writable(skb, 0);
- if (!skb)
- return -ENOMEM;
- memcpy(eth_hdr(skb)->h_source, nla_data(a), ETH_ALEN);
+ case OVS_ACTION_ATTR_POP_MPLS:
+ err = pop_mpls(skb, key, nla_get_be16(a));
break;
- case ODP_ACTION_ATTR_SET_DL_DST:
- skb = make_writable(skb, 0);
- if (!skb)
- return -ENOMEM;
- memcpy(eth_hdr(skb)->h_dest, nla_data(a), ETH_ALEN);
+ case OVS_ACTION_ATTR_PUSH_VLAN:
+ err = push_vlan(skb, key, nla_data(a));
break;
- case ODP_ACTION_ATTR_SET_NW_SRC:
- case ODP_ACTION_ATTR_SET_NW_DST:
- skb = set_nw_addr(skb, a);
+ case OVS_ACTION_ATTR_POP_VLAN:
+ err = pop_vlan(skb, key);
break;
- case ODP_ACTION_ATTR_SET_NW_TOS:
- skb = set_nw_tos(skb, nla_get_u8(a));
+ case OVS_ACTION_ATTR_RECIRC:
+ err = execute_recirc(dp, skb, key, a, rem);
+ if (nla_is_last(a, rem)) {
+ /* If this is the last action, the skb has
+ * been consumed or freed.
+ * Return immediately.
+ */
+ return err;
+ }
break;
- case ODP_ACTION_ATTR_SET_TP_SRC:
- case ODP_ACTION_ATTR_SET_TP_DST:
- skb = set_tp_port(skb, a);
+ case OVS_ACTION_ATTR_SET:
+ err = execute_set_action(skb, key, nla_data(a));
break;
- case ODP_ACTION_ATTR_SET_PRIORITY:
- skb->priority = nla_get_u32(a);
+ case OVS_ACTION_ATTR_SET_MASKED:
+ case OVS_ACTION_ATTR_SET_TO_MASKED:
+ err = execute_masked_set_action(skb, key, nla_data(a));
break;
- case ODP_ACTION_ATTR_POP_PRIORITY:
- skb->priority = priority;
+ case OVS_ACTION_ATTR_SAMPLE:
+ err = sample(dp, skb, key, a, attr, len);
break;
+ }
- case ODP_ACTION_ATTR_DROP_SPOOFED_ARP:
- if (unlikely(is_spoofed_arp(skb)))
- goto exit;
- break;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ return err;
}
- if (!skb)
- return -ENOMEM;
}
-exit:
+
if (prev_port != -1)
do_output(dp, skb, prev_port);
else
- kfree_skb(skb);
+ consume_skb(skb);
+
return 0;
}
-static void sflow_sample(struct datapath *dp, struct sk_buff *skb,
- struct sw_flow_actions *acts)
+static void process_deferred_actions(struct datapath *dp)
{
- struct sk_buff *nskb;
- struct vport *p = OVS_CB(skb)->vport;
- struct dp_upcall_info upcall;
-
- if (unlikely(!p))
- return;
+ struct action_fifo *fifo = this_cpu_ptr(action_fifos);
- atomic_inc(&p->sflow_pool);
- if (net_random() >= dp->sflow_probability)
+	/* Do not touch the FIFO if there are no deferred actions. */
+ if (action_fifo_is_empty(fifo))
return;
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!nskb))
- return;
-
- upcall.cmd = ODP_PACKET_CMD_SAMPLE;
- upcall.key = &OVS_CB(skb)->flow->key;
- upcall.userdata = 0;
- upcall.sample_pool = atomic_read(&p->sflow_pool);
- upcall.actions = acts->actions;
- upcall.actions_len = acts->actions_len;
- dp_upcall(dp, nskb, &upcall);
+	/* Finish executing all deferred actions. */
+ do {
+ struct deferred_action *da = action_fifo_get(fifo);
+ struct sk_buff *skb = da->skb;
+ struct sw_flow_key *key = &da->pkt_key;
+ const struct nlattr *actions = da->actions;
+
+ if (actions)
+ do_execute_actions(dp, skb, key, actions,
+ nla_len(actions));
+ else
+ ovs_dp_process_packet(skb, key);
+ } while (!action_fifo_is_empty(fifo));
+
+ /* Reset FIFO for the next packet. */
+ action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
-int execute_actions(struct datapath *dp, struct sk_buff *skb)
-{
- struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
- struct loop_counter *loop;
- int error;
-
- /* Check whether we've looped too much. */
- loop = loop_get_counter();
- if (unlikely(++loop->count > MAX_LOOPS))
- loop->looping = true;
- if (unlikely(loop->looping)) {
- error = loop_suppress(dp, acts);
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_actions *acts,
+ struct sw_flow_key *key)
+{
+ int level = this_cpu_read(exec_actions_level);
+ int err;
+
+ if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
+ if (net_ratelimit())
+ pr_warn("%s: packet loop detected, dropping.\n",
+ ovs_dp_name(dp));
+
kfree_skb(skb);
- goto out_loop;
+ return -ELOOP;
}
- /* Really execute actions. */
- if (dp->sflow_probability)
- sflow_sample(dp, skb, acts);
- OVS_CB(skb)->tun_id = 0;
- error = do_execute_actions(dp, skb, acts);
+ this_cpu_inc(exec_actions_level);
+ err = do_execute_actions(dp, skb, key,
+ acts->actions, acts->actions_len);
+
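+	/* Only the outermost call drains the per-CPU FIFO; nested calls
+	 * merely queue onto it.
+	 */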
+ if (!level)
+ process_deferred_actions(dp);
- /* Check whether sub-actions looped too much. */
- if (unlikely(loop->looping))
- error = loop_suppress(dp, acts);
+ this_cpu_dec(exec_actions_level);
-out_loop:
- /* Decrement loop counter. */
- if (!--loop->count)
- loop->looping = false;
- loop_put_counter();
+	/* This return status currently does not reflect the errors
+	 * encountered during deferred action execution. It probably needs
+	 * to be fixed in the future.
+ */
+ return err;
+}
- return error;
+int action_fifos_init(void)
+{
+ action_fifos = alloc_percpu(struct action_fifo);
+ if (!action_fifos)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void action_fifos_exit(void)
+{
+ free_percpu(action_fifos);
}