#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
+#include <net/mpls.h>
#include <net/sctp/checksum.h>
#include "datapath.h"
#include "gso.h"
-#include "mpls.h"
#include "vlan.h"
#include "vport.h"
-static void flow_key_clone(struct sk_buff *skb, struct sw_flow_key *new_key)
-{
- *new_key = *OVS_CB(skb)->pkt_key;
- OVS_CB(skb)->pkt_key = new_key;
-}
-
-static void flow_key_set_recirc_id(struct sk_buff *skb, u32 recirc_id)
-{
- OVS_CB(skb)->pkt_key->recirc_id = recirc_id;
-}
-
-static void flow_key_set_priority(struct sk_buff *skb, u32 priority)
-{
- OVS_CB(skb)->pkt_key->phy.priority = priority;
-}
-
-static void flow_key_set_skb_mark(struct sk_buff *skb, u32 skb_mark)
-{
- OVS_CB(skb)->pkt_key->phy.skb_mark = skb_mark;
-}
-
-static void flow_key_set_eth_src(struct sk_buff *skb, const u8 addr[])
-{
- ether_addr_copy(OVS_CB(skb)->pkt_key->eth.src, addr);
-}
-
-static void flow_key_set_eth_dst(struct sk_buff *skb, const u8 addr[])
-{
- ether_addr_copy(OVS_CB(skb)->pkt_key->eth.dst, addr);
-}
+static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
+ const struct nlattr *attr, int len);
-static void flow_key_set_vlan_tci(struct sk_buff *skb, __be16 tci)
-{
- OVS_CB(skb)->pkt_key->eth.tci = tci;
-}
+struct deferred_action {
+ struct sk_buff *skb;
+ const struct nlattr *actions;
-static void flow_key_set_mpls_top_lse(struct sk_buff *skb, __be32 top_lse)
-{
- OVS_CB(skb)->pkt_key->mpls.top_lse = top_lse;
-}
+ /* Store pkt_key clone when creating deferred action. */
+ struct sw_flow_key pkt_key;
+};
-static void flow_key_set_ipv4_src(struct sk_buff *skb, __be32 addr)
-{
- OVS_CB(skb)->pkt_key->ipv4.addr.src = addr;
-}
+#define DEFERRED_ACTION_FIFO_SIZE 10
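+/* Recirc and sample actions are no longer executed recursively; they are
+ * queued on a per-CPU FIFO by add_deferred_actions() and run later by
+ * process_deferred_actions(), once the current action list has finished.
+ */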
+struct action_fifo {
+ int head;
+ int tail;
+ /* Deferred action FIFO storage. */
+ struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
+};
-static void flow_key_set_ipv4_dst(struct sk_buff *skb, __be32 addr)
-{
- OVS_CB(skb)->pkt_key->ipv4.addr.src = addr;
-}
+static struct action_fifo __percpu *action_fifos;
+#define EXEC_ACTIONS_LEVEL_LIMIT 4 /* limit used to detect packet
+ * looping by the network stack
+ */
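+/* Per-CPU nesting depth of ovs_execute_actions(); compared against
+ * EXEC_ACTIONS_LEVEL_LIMIT to drop packets that loop back into the datapath.
+ */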
+static DEFINE_PER_CPU(int, exec_actions_level);
-static void flow_key_set_ip_tos(struct sk_buff *skb, u8 tos)
+static void action_fifo_init(struct action_fifo *fifo)
{
- OVS_CB(skb)->pkt_key->ip.tos = tos;
+ fifo->head = 0;
+ fifo->tail = 0;
}
-static void flow_key_set_ip_ttl(struct sk_buff *skb, u8 ttl)
+static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
- OVS_CB(skb)->pkt_key->ip.ttl = ttl;
+ return (fifo->head == fifo->tail);
}
-static void flow_key_set_ipv6_src(struct sk_buff *skb,
- const __be32 addr[4])
+static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
- memcpy(&OVS_CB(skb)->pkt_key->ipv6.addr.src, addr, sizeof(__be32[4]));
-}
+ if (action_fifo_is_empty(fifo))
+ return NULL;
-static void flow_key_set_ipv6_dst(struct sk_buff *skb,
- const __be32 addr[4])
-{
- memcpy(&OVS_CB(skb)->pkt_key->ipv6.addr.dst, addr, sizeof(__be32[4]));
+ return &fifo->fifo[fifo->tail++];
}
-static void flow_key_set_ipv6_fl(struct sk_buff *skb,
- const struct ipv6hdr *nh)
+static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
- OVS_CB(skb)->pkt_key->ipv6.label = *(__be32 *)nh &
- htonl(IPV6_FLOWINFO_FLOWLABEL);
-}
+ if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
+ return NULL;
-static void flow_key_set_tp_src(struct sk_buff *skb, __be16 port)
-{
- OVS_CB(skb)->pkt_key->tp.src = port;
+ return &fifo->fifo[fifo->head++];
}
-static void flow_key_set_tp_dst(struct sk_buff *skb, __be16 port)
+/* Return queue entry if fifo is not full */
+static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
+ const struct sw_flow_key *key,
+ const struct nlattr *attr)
{
- OVS_CB(skb)->pkt_key->tp.dst = port;
-}
+ struct action_fifo *fifo;
+ struct deferred_action *da;
-static void invalidate_skb_flow_key(struct sk_buff *skb)
-{
- OVS_CB(skb)->pkt_key->eth.type = htons(0);
-}
+ fifo = this_cpu_ptr(action_fifos);
+ da = action_fifo_put(fifo);
+ if (da) {
+ da->skb = skb;
+ da->actions = attr;
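+ /* Copy the flow key by value; actions executed after this point
+ * may modify the original key before the deferred action runs.
+ */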
+ da->pkt_key = *key;
+ }
-static bool is_skb_flow_key_valid(struct sk_buff *skb)
-{
- return !!OVS_CB(skb)->pkt_key->eth.type;
+ return da;
}
-static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr, int len);
-
-static int make_writable(struct sk_buff *skb, int write_len)
+static void invalidate_flow_key(struct sw_flow_key *key)
{
- if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
- return 0;
-
- return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ key->eth.type = htons(0);
}
-/* The end of the mac header.
- *
- * For non-MPLS skbs this will correspond to the network header.
- * For MPLS skbs it will be before the network_header as the MPLS
- * label stack lies between the end of the mac header and the network
- * header. That is, for MPLS skbs the end of the mac header
- * is the top of the MPLS label stack.
- */
-static unsigned char *mac_header_end(const struct sk_buff *skb)
+static bool is_flow_key_valid(const struct sw_flow_key *key)
{
- return skb_mac_header(skb) + skb->mac_len;
+ return !!key->eth.type;
}
-static int push_mpls(struct sk_buff *skb,
+static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_mpls *mpls)
{
__be32 *new_mpls_lse;
struct ethhdr *hdr;
+ /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
+ if (skb_encapsulation(skb))
+ return -ENOTSUPP;
+
if (skb_cow_head(skb, MPLS_HLEN) < 0)
return -ENOMEM;
skb->mac_len);
skb_reset_mac_header(skb);
- new_mpls_lse = (__be32 *)mac_header_end(skb);
+ new_mpls_lse = (__be32 *)skb_mpls_header(skb);
*new_mpls_lse = mpls->mpls_lse;
if (skb->ip_summed == CHECKSUM_COMPLETE)
if (!ovs_skb_get_inner_protocol(skb))
ovs_skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
- invalidate_skb_flow_key(skb);
+
+ invalidate_flow_key(key);
return 0;
}
-static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
+static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const __be16 ethertype)
{
struct ethhdr *hdr;
int err;
- err = make_writable(skb, skb->mac_len + MPLS_HLEN);
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
if (skb->ip_summed == CHECKSUM_COMPLETE)
skb->csum = csum_sub(skb->csum,
- csum_partial(mac_header_end(skb),
+ csum_partial(skb_mpls_header(skb),
MPLS_HLEN, 0));
memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
__skb_pull(skb, MPLS_HLEN);
skb_reset_mac_header(skb);
- /* mac_header_end() is used to locate the ethertype
+ /* skb_mpls_header() is used to locate the ethertype
* field correctly in the presence of VLAN tags.
*/
- hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
+ hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
hdr->h_proto = ethertype;
if (eth_p_mpls(skb->protocol))
skb->protocol = ethertype;
- invalidate_skb_flow_key(skb);
+
+ invalidate_flow_key(key);
return 0;
}
-static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+ const __be32 *mpls_lse)
{
- __be32 *stack = (__be32 *)mac_header_end(skb);
+ __be32 *stack;
int err;
- err = make_writable(skb, skb->mac_len + MPLS_HLEN);
+ err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
if (unlikely(err))
return err;
+ stack = (__be32 *)skb_mpls_header(skb);
if (skb->ip_summed == CHECKSUM_COMPLETE) {
__be32 diff[] = { ~(*stack), *mpls_lse };
skb->csum = ~csum_partial((char *)diff, sizeof(diff),
}
*stack = *mpls_lse;
- flow_key_set_mpls_top_lse(skb, *stack);
+ key->mpls.top_lse = *mpls_lse;
return 0;
}
-/* remove VLAN header from packet and update csum accordingly. */
-static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
+static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
- struct vlan_hdr *vhdr;
int err;
- err = make_writable(skb, VLAN_ETH_HLEN);
- if (unlikely(err))
- return err;
-
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_sub(skb->csum, csum_partial(skb->data
- + (2 * ETH_ALEN), VLAN_HLEN, 0));
-
- vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
- *current_tci = vhdr->h_vlan_TCI;
-
- memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
- __skb_pull(skb, VLAN_HLEN);
-
- vlan_set_encap_proto(skb, vhdr);
- skb->mac_header += VLAN_HLEN;
- /* Update mac_len for subsequent MPLS actions */
- skb->mac_len -= VLAN_HLEN;
-
- return 0;
-}
-
-static int pop_vlan(struct sk_buff *skb)
-{
- __be16 tci;
- int err;
-
- if (likely(vlan_tx_tag_present(skb))) {
- vlan_set_tci(skb, 0);
- } else {
- if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
- skb->len < VLAN_ETH_HLEN))
- return 0;
-
- err = __pop_vlan_tci(skb, &tci);
- if (err)
- return err;
- }
- /* move next vlan tag to hw accel tag */
- if (likely(skb->protocol != htons(ETH_P_8021Q) ||
- skb->len < VLAN_ETH_HLEN)) {
- flow_key_set_vlan_tci(skb, 0);
- return 0;
- }
-
- invalidate_skb_flow_key(skb);
- err = __pop_vlan_tci(skb, &tci);
- if (unlikely(err))
- return err;
+ err = skb_vlan_pop(skb);
+ if (vlan_tx_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = 0;
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
- return 0;
+ return err;
}
-static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_action_push_vlan *vlan)
{
- if (unlikely(vlan_tx_tag_present(skb))) {
- u16 current_tag;
-
- /* push down current VLAN tag */
- current_tag = vlan_tx_tag_get(skb);
-
- if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
- return -ENOMEM;
-
- /* Update mac_len for subsequent MPLS actions */
- skb->mac_len += VLAN_HLEN;
-
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(skb->csum, csum_partial(skb->data
- + (2 * ETH_ALEN), VLAN_HLEN, 0));
+ if (vlan_tx_tag_present(skb))
+ invalidate_flow_key(key);
+ else
+ key->eth.tci = vlan->vlan_tci;
- invalidate_skb_flow_key(skb);
- } else {
- flow_key_set_vlan_tci(skb, vlan->vlan_tci);
- }
- __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
- return 0;
+ return skb_vlan_push(skb, vlan->vlan_tpid,
+ ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
-static int set_eth_addr(struct sk_buff *skb,
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_key_ethernet *eth_key)
{
int err;
- err = make_writable(skb, ETH_HLEN);
+ err = skb_ensure_writable(skb, ETH_HLEN);
if (unlikely(err))
return err;
ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
- flow_key_set_eth_src(skb, eth_key->eth_src);
- flow_key_set_eth_dst(skb, eth_key->eth_dst);
+ ether_addr_copy(key->eth.src, eth_key->eth_src);
+ ether_addr_copy(key->eth.dst, eth_key->eth_dst);
return 0;
}
nh->ttl = new_ttl;
}
-static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_ipv4 *ipv4_key)
{
struct iphdr *nh;
int err;
- err = make_writable(skb, skb_network_offset(skb) +
- sizeof(struct iphdr));
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct iphdr));
if (unlikely(err))
return err;
if (ipv4_key->ipv4_src != nh->saddr) {
set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
- flow_key_set_ipv4_src(skb, ipv4_key->ipv4_src);
+ key->ipv4.addr.src = ipv4_key->ipv4_src;
}
if (ipv4_key->ipv4_dst != nh->daddr) {
set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
- flow_key_set_ipv4_dst(skb, ipv4_key->ipv4_dst);
+ key->ipv4.addr.dst = ipv4_key->ipv4_dst;
}
if (ipv4_key->ipv4_tos != nh->tos) {
ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
- flow_key_set_ip_tos(skb, nh->tos);
+ key->ip.tos = nh->tos;
}
if (ipv4_key->ipv4_ttl != nh->ttl) {
set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
- flow_key_set_ip_ttl(skb, ipv4_key->ipv4_ttl);
+ key->ip.ttl = ipv4_key->ipv4_ttl;
}
return 0;
}
-static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_ipv6 *ipv6_key)
{
struct ipv6hdr *nh;
int err;
__be32 *saddr;
__be32 *daddr;
- err = make_writable(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr));
+ err = skb_ensure_writable(skb, skb_network_offset(skb) +
+ sizeof(struct ipv6hdr));
if (unlikely(err))
return err;
if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
ipv6_key->ipv6_src, true);
- flow_key_set_ipv6_src(skb, ipv6_key->ipv6_src);
+ memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
+ sizeof(ipv6_key->ipv6_src));
}
if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
unsigned int offset = 0;
- int flags = OVS_IP6T_FH_F_SKIP_RH;
+ int flags = IP6_FH_F_SKIP_RH;
bool recalc_csum = true;
if (ipv6_ext_hdr(nh->nexthdr))
set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
ipv6_key->ipv6_dst, recalc_csum);
- flow_key_set_ipv6_dst(skb, ipv6_key->ipv6_dst);
+ memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
+ sizeof(ipv6_key->ipv6_dst));
}
set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
- flow_key_set_ip_tos(skb, ipv6_get_dsfield(nh));
+ key->ip.tos = ipv6_get_dsfield(nh);
set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
- flow_key_set_ipv6_fl(skb, nh);
+ key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
nh->hop_limit = ipv6_key->ipv6_hlimit;
- flow_key_set_ip_ttl(skb, ipv6_key->ipv6_hlimit);
+ key->ip.ttl = ipv6_key->ipv6_hlimit;
return 0;
}
-/* Must follow make_writable() since that can move the skb data. */
+/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
__be16 new_port, __sum16 *check)
{
}
}
-static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_udp *udp_port_key)
{
struct udphdr *uh;
int err;
- err = make_writable(skb, skb_transport_offset(skb) +
- sizeof(struct udphdr));
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct udphdr));
if (unlikely(err))
return err;
uh = udp_hdr(skb);
if (udp_port_key->udp_src != uh->source) {
set_udp_port(skb, &uh->source, udp_port_key->udp_src);
- flow_key_set_tp_src(skb, udp_port_key->udp_src);
+ key->tp.src = udp_port_key->udp_src;
}
if (udp_port_key->udp_dst != uh->dest) {
set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
- flow_key_set_tp_dst(skb, udp_port_key->udp_dst);
+ key->tp.dst = udp_port_key->udp_dst;
}
return 0;
}
-static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct ovs_key_tcp *tcp_port_key)
{
struct tcphdr *th;
int err;
- err = make_writable(skb, skb_transport_offset(skb) +
- sizeof(struct tcphdr));
+ err = skb_ensure_writable(skb, skb_transport_offset(skb) +
+ sizeof(struct tcphdr));
if (unlikely(err))
return err;
th = tcp_hdr(skb);
if (tcp_port_key->tcp_src != th->source) {
set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
- flow_key_set_tp_src(skb, tcp_port_key->tcp_src);
+ key->tp.src = tcp_port_key->tcp_src;
}
if (tcp_port_key->tcp_dst != th->dest) {
set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
- flow_key_set_tp_dst(skb, tcp_port_key->tcp_dst);
+ key->tp.dst = tcp_port_key->tcp_dst;
}
return 0;
}
-static int set_sctp(struct sk_buff *skb,
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_key_sctp *sctp_port_key)
{
struct sctphdr *sh;
int err;
unsigned int sctphoff = skb_transport_offset(skb);
- err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+ err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
if (unlikely(err))
return err;
sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
skb_clear_hash(skb);
- flow_key_set_tp_src(skb, sctp_port_key->sctp_src);
- flow_key_set_tp_dst(skb, sctp_port_key->sctp_dst);
+ key->tp.src = sctp_port_key->sctp_src;
+ key->tp.dst = sctp_port_key->sctp_dst;
}
return 0;
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr)
{
+ struct ovs_tunnel_info info;
struct dp_upcall_info upcall;
const struct nlattr *a;
int rem;
- struct ovs_tunnel_info info;
upcall.cmd = OVS_PACKET_CMD_ACTION;
upcall.userdata = NULL;
} /* End of switch. */
}
- return ovs_dp_upcall(dp, skb, &upcall);
-}
-
-static bool last_action(const struct nlattr *a, int rem)
-{
- return a->nla_len == rem;
+ return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr)
+ struct sw_flow_key *key, const struct nlattr *attr)
{
- struct sw_flow_key sample_key;
const struct nlattr *acts_list = NULL;
const struct nlattr *a;
int rem;
/* The only known usage of sample action is having a single user-space
* action. Treat this usage as a special case.
* The output_userspace() should clone the skb to be sent to the
- * user space. This skb will be consumed by its caller. */
+ * user space. This skb will be consumed by its caller.
+ */
if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
- last_action(a, rem)))
- return output_userspace(dp, skb, a);
+ nla_is_last(a, rem)))
+ return output_userspace(dp, skb, key, a);
skb = skb_clone(skb, GFP_ATOMIC);
if (!skb)
/* Skip the sample action when out of memory. */
return 0;
- flow_key_clone(skb, &sample_key);
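+ /* Defer the nested sample actions; they are run from
+ * process_deferred_actions() after the current action list completes.
+ */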
+ if (!add_deferred_actions(skb, key, a)) {
+ if (net_ratelimit())
+ pr_warn("%s: deferred actions limit reached, dropping sample action\n",
+ ovs_dp_name(dp));
- /* do_execute_actions() will consume the cloned skb. */
- return do_execute_actions(dp, skb, a, rem);
+ kfree_skb(skb);
+ }
+ return 0;
}
-static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
+static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct nlattr *attr)
{
- struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
struct ovs_action_hash *hash_act = nla_data(attr);
u32 hash = 0;
key->ovs_flow_hash = hash;
}
-static int execute_set_action(struct sk_buff *skb,
- const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
+ const struct nlattr *nested_attr)
{
int err = 0;
switch (nla_type(nested_attr)) {
case OVS_KEY_ATTR_PRIORITY:
skb->priority = nla_get_u32(nested_attr);
- flow_key_set_priority(skb, skb->priority);
+ key->phy.priority = skb->priority;
break;
case OVS_KEY_ATTR_SKB_MARK:
skb->mark = nla_get_u32(nested_attr);
- flow_key_set_skb_mark(skb, skb->mark);
+ key->phy.skb_mark = skb->mark;
break;
case OVS_KEY_ATTR_TUNNEL_INFO:
break;
case OVS_KEY_ATTR_ETHERNET:
- err = set_eth_addr(skb, nla_data(nested_attr));
+ err = set_eth_addr(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_IPV4:
- err = set_ipv4(skb, nla_data(nested_attr));
+ err = set_ipv4(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_IPV6:
- err = set_ipv6(skb, nla_data(nested_attr));
+ err = set_ipv6(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_TCP:
- err = set_tcp(skb, nla_data(nested_attr));
+ err = set_tcp(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_UDP:
- err = set_udp(skb, nla_data(nested_attr));
+ err = set_udp(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_SCTP:
- err = set_sctp(skb, nla_data(nested_attr));
+ err = set_sctp(skb, key, nla_data(nested_attr));
break;
case OVS_KEY_ATTR_MPLS:
- err = set_mpls(skb, nla_data(nested_attr));
+ err = set_mpls(skb, key, nla_data(nested_attr));
break;
}
return err;
}
-
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
+ struct sw_flow_key *key,
const struct nlattr *a, int rem)
{
- struct sw_flow_key recirc_key;
+ struct deferred_action *da;
- if (!is_skb_flow_key_valid(skb)) {
+ if (!is_flow_key_valid(key)) {
int err;
- err = ovs_flow_key_update(skb, OVS_CB(skb)->pkt_key);
+ err = ovs_flow_key_update(skb, key);
if (err)
return err;
-
}
- BUG_ON(!is_skb_flow_key_valid(skb));
+ BUG_ON(!is_flow_key_valid(key));
- if (!last_action(a, rem)) {
+ if (!nla_is_last(a, rem)) {
 /* Recirc action is not the last action
- * of the action list. */
+ * of the action list, need to clone the skb.
+ */
skb = skb_clone(skb, GFP_ATOMIC);
/* Skip the recirc action when out of memory, but
- * continue on with the rest of the action list. */
+ * continue on with the rest of the action list.
+ */
if (!skb)
return 0;
+ }
- flow_key_clone(skb, &recirc_key);
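+ /* Queue the packet with a NULL action list so that
+ * process_deferred_actions() hands it back to ovs_dp_process_packet()
+ * with the new recirc_id.
+ */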
+ da = add_deferred_actions(skb, key, NULL);
+ if (da) {
+ da->pkt_key.recirc_id = nla_get_u32(a);
+ } else {
+ kfree_skb(skb);
+
+ if (net_ratelimit())
+ pr_warn("%s: deferred action limit reached, drop recirc action\n",
+ ovs_dp_name(dp));
}
- flow_key_set_recirc_id(skb, nla_get_u32(a));
- ovs_dp_process_packet(skb);
return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
- const struct nlattr *attr, int len)
+ struct sw_flow_key *key,
+ const struct nlattr *attr, int len)
{
/* Every output action needs a separate clone of 'skb', but the common
* case is just a single output action, so that doing a clone and
* then freeing the original skbuff is wasteful. So the following code
- * is slightly obscure just to avoid that. */
+ * is slightly obscure just to avoid that.
+ */
int prev_port = -1;
const struct nlattr *a;
int rem;
break;
case OVS_ACTION_ATTR_USERSPACE:
- output_userspace(dp, skb, a);
+ output_userspace(dp, skb, key, a);
break;
case OVS_ACTION_ATTR_HASH:
- execute_hash(skb, a);
+ execute_hash(skb, key, a);
break;
case OVS_ACTION_ATTR_PUSH_MPLS:
- err = push_mpls(skb, nla_data(a));
+ err = push_mpls(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_MPLS:
- err = pop_mpls(skb, nla_get_be16(a));
+ err = pop_mpls(skb, key, nla_get_be16(a));
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
- err = push_vlan(skb, nla_data(a));
- if (unlikely(err)) /* skb already freed. */
- return err;
+ err = push_vlan(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_POP_VLAN:
- err = pop_vlan(skb);
+ err = pop_vlan(skb, key);
break;
case OVS_ACTION_ATTR_RECIRC:
- err = execute_recirc(dp, skb, a, rem);
- if (last_action(a, rem)) {
+ err = execute_recirc(dp, skb, key, a, rem);
+ if (nla_is_last(a, rem)) {
/* If this is the last action, the skb has
* been consumed or freed.
- * Return immediately. */
+ * Return immediately.
+ */
return err;
}
break;
case OVS_ACTION_ATTR_SET:
- err = execute_set_action(skb, nla_data(a));
+ err = execute_set_action(skb, key, nla_data(a));
break;
case OVS_ACTION_ATTR_SAMPLE:
- err = sample(dp, skb, a);
+ err = sample(dp, skb, key, a);
break;
}
return 0;
}
+static void process_deferred_actions(struct datapath *dp)
+{
+ struct action_fifo *fifo = this_cpu_ptr(action_fifos);
+
+ /* Do not touch the FIFO if there are no deferred actions. */
+ if (action_fifo_is_empty(fifo))
+ return;
+
+ /* Finish executing all deferred actions. */
+ do {
+ struct deferred_action *da = action_fifo_get(fifo);
+ struct sk_buff *skb = da->skb;
+ struct sw_flow_key *key = &da->pkt_key;
+ const struct nlattr *actions = da->actions;
+
+ if (actions)
+ do_execute_actions(dp, skb, key, actions,
+ nla_len(actions));
+ else
+ ovs_dp_process_packet(skb, key);
+ } while (!action_fifo_is_empty(fifo));
+
+ /* Reset FIFO for the next packet. */
+ action_fifo_init(fifo);
+}
+
/* Execute a list of actions against 'skb'. */
-int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, struct sw_flow_actions *acts)
+int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
+ const struct sw_flow_actions *acts,
+ struct sw_flow_key *key)
+{
+ int level = this_cpu_read(exec_actions_level);
+ int err;
+
+ if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
+ if (net_ratelimit())
+ pr_warn("%s: packet loop detected, dropping.\n",
+ ovs_dp_name(dp));
+
+ kfree_skb(skb);
+ return -ELOOP;
+ }
+
+ this_cpu_inc(exec_actions_level);
+ err = do_execute_actions(dp, skb, key,
+ acts->actions, acts->actions_len);
+
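+ /* Only the outermost invocation (level == 0 on entry) drains the
+ * deferred action FIFO; nested invocations just queue onto it.
+ */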
+ if (!level)
+ process_deferred_actions(dp);
+
+ this_cpu_dec(exec_actions_level);
+
+ /* This return status currently does not reflect the errors
+ * encountered during deferred actions execution. Probably needs to
+ * be fixed in the future.
+ */
+ return err;
+}
+
+int action_fifos_init(void)
+{
+ action_fifos = alloc_percpu(struct action_fifo);
+ if (!action_fifos)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void action_fifos_exit(void)
{
- return do_execute_actions(dp, skb, acts->actions, acts->actions_len);
+ free_percpu(action_fifos);
}