/*
 * Copyright (c) 2007-2015 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/dst.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "flow.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);
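
/* Actions that re-enter the pipeline (recirculation and nested sample
 * actions) are not executed recursively.  Instead, the packet, a copy of
 * its flow key, and the pending action list are queued on a small per-CPU
 * FIFO and executed after the current action list completes; together with
 * exec_actions_level this bounds kernel stack usage.
 */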
struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4 /* limit used to detect packet
                                    * looping by the network stack
                                    */
static DEFINE_PER_CPU(int, exec_actions_level);
static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        /* One slot is left unused, so at most
         * DEFERRED_ACTION_FIFO_SIZE - 1 entries can be queued per packet.
         */
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}
/* Return the queue entry if the fifo is not full, otherwise NULL. */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *attr)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = attr;
                da->pkt_key = *key;
        }

        return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !!key->eth.type;
}
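
/* Header-rewriting actions keep the flow key in sync where they can.  When
 * an action changes fields the key no longer describes (e.g. pushing or
 * popping an MPLS label or a VLAN tag), it calls invalidate_flow_key();
 * execute_recirc() later recomputes the key via ovs_flow_key_update()
 * before the packet re-enters the pipeline.
 */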
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        __be32 *new_mpls_lse;
        struct ethhdr *hdr;

        /* The networking stack does not allow simultaneous tunnel and
         * MPLS GSO.
         */
        if (skb_encapsulation(skb))
                return -ENOTSUPP;

        if (skb_cow_head(skb, MPLS_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);
        skb_reset_mac_header(skb);

        new_mpls_lse = (__be32 *)skb_mpls_header(skb);
        *new_mpls_lse = mpls->mpls_lse;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
                                                             MPLS_HLEN, 0));

        hdr = eth_hdr(skb);
        hdr->h_proto = mpls->mpls_ethertype;

        if (!ovs_skb_get_inner_protocol(skb))
                ovs_skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
        return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        struct ethhdr *hdr;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN);

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);

        /* skb_mpls_header() is used to locate the ethertype
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
        hdr->h_proto = ethertype;
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
        return 0;
}
/* 'KEY' must not have any bits set outside of the 'MASK'. */
#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
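
/* Example: MASKED(0xaabb, 0x0011, 0x00ff) = 0x0011 | (0xaabb & 0xff00)
 * = 0xaa11: bits covered by the mask come from 'KEY', all other bits are
 * preserved from 'OLD'.
 */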
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const __be32 *mpls_lse, const __be32 *mask)
{
        __be32 *stack;
        __be32 lse;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        stack = (__be32 *)skb_mpls_header(skb);
        lse = MASKED(*stack, *mpls_lse, *mask);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                /* Incrementally update the complete checksum: subtract the
                 * old LSE (by adding its complement) and add the new one.
                 */
                __be32 diff[] = { ~(*stack), lse };

                skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                          ~skb->csum);
        }

        *stack = lse;
        flow_key->mpls.top_lse = lse;
        return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;
        return err;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;
        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}
/* 'src' is already properly masked. */
static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
{
        u16 *dst = (u16 *)dst_;
        const u16 *src = (const u16 *)src_;
        const u16 *mask = (const u16 *)mask_;

        SET_MASKED(dst[0], src[0], mask[0]);
        SET_MASKED(dst[1], src[1], mask[1]);
        SET_MASKED(dst[2], src[2], mask[2]);
}
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
                        const struct ovs_key_ethernet *key,
                        const struct ovs_key_ethernet *mask)
{
        int err;

        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
                               mask->eth_src);
        ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
                               mask->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
        ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
        return 0;
}
static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
                                  __be32 addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->frag_off & htons(IP_OFFSET))
                return;

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, 1);
        }
}
static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
                           const __be32 mask[4], __be32 masked[4])
{
        masked[0] = MASKED(old[0], addr[0], mask[0]);
        masked[1] = MASKED(old[1], addr[1], mask[1]);
        masked[2] = MASKED(old[2], addr[2], mask[2]);
        masked[3] = MASKED(old[3], addr[3], mask[3]);
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (likely(recalculate_csum))
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}
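
/* The 20-bit IPv6 flow label is spread across the low nibble of
 * flow_lbl[0] and all of flow_lbl[1] and flow_lbl[2].  The top nibble of
 * flow_lbl[0] holds the low bits of the traffic class, which is why the
 * mask passed to set_ipv6_fl() never covers it.
 */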
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
{
        /* Bits 21-24 are always unmasked, so this retains their values. */
        SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
        SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
        SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
}
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
                       u8 mask)
{
        new_ttl = MASKED(nh->ttl, new_ttl, mask);

        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv4 *key,
                    const struct ovs_key_ipv4 *mask)
{
        struct iphdr *nh;
        __be32 new_addr;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                   sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (mask->ipv4_src) {
                new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);

                if (unlikely(new_addr != nh->saddr)) {
                        set_ip_addr(skb, nh, &nh->saddr, new_addr);
                        flow_key->ipv4.addr.src = new_addr;
                }
        }
        if (mask->ipv4_dst) {
                new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);

                if (unlikely(new_addr != nh->daddr)) {
                        set_ip_addr(skb, nh, &nh->daddr, new_addr);
                        flow_key->ipv4.addr.dst = new_addr;
                }
        }
        if (mask->ipv4_tos) {
                ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
                flow_key->ip.tos = nh->tos;
        }
        if (mask->ipv4_ttl) {
                set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
                flow_key->ip.ttl = nh->ttl;
        }

        return 0;
}
static bool is_ipv6_mask_nonzero(const __be32 addr[4])
{
        return !!(addr[0] | addr[1] | addr[2] | addr[3]);
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_ipv6 *key,
                    const struct ovs_key_ipv6 *mask)
{
        struct ipv6hdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                   sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);

        /* Setting an IP address is typically only a side effect of
         * matching on it in the current userspace implementation, so it
         * makes sense to check if the value actually changed.
         */
        if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
                __be32 *saddr = (__be32 *)&nh->saddr;
                __be32 masked[4];

                mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);

                if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
                        set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
                                      true);
                        memcpy(&flow_key->ipv6.addr.src, masked,
                               sizeof(flow_key->ipv6.addr.src));
                }
        }
        if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;
                __be32 *daddr = (__be32 *)&nh->daddr;
                __be32 masked[4];

                mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);

                if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
                        if (ipv6_ext_hdr(nh->nexthdr))
                                recalc_csum = (ipv6_find_hdr(skb, &offset,
                                                             NEXTHDR_ROUTING,
                                                             NULL, &flags)
                                               != NEXTHDR_ROUTING);

                        set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
                                      recalc_csum);
                        memcpy(&flow_key->ipv6.addr.dst, masked,
                               sizeof(flow_key->ipv6.addr.dst));
                }
        }
        if (mask->ipv6_tclass) {
                ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
                flow_key->ip.tos = ipv6_get_dsfield(nh);
        }
        if (mask->ipv6_label) {
                set_ipv6_fl(nh, ntohl(key->ipv6_label),
                            ntohl(mask->ipv6_label));
                flow_key->ipv6.label =
                    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
        }
        if (mask->ipv6_hlimit) {
                SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
                flow_key->ip.ttl = nh->hop_limit;
        }
        return 0;
}
/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                        __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
}
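
/* A zero UDP checksum means "no checksum was computed" (RFC 768), so
 * set_udp() below only performs an incremental update when a checksum is
 * actually in use, and folds an updated checksum of zero to CSUM_MANGLED_0
 * (0xffff) so a valid checksum is never emitted as zero.
 */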
static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_udp *key,
                   const struct ovs_key_udp *mask)
{
        struct udphdr *uh;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                   sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        /* At least one of the masks is non-zero, so do not bother checking
         * them.
         */
        src = MASKED(uh->source, key->udp_src, mask->udp_src);
        dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                if (likely(src != uh->source)) {
                        set_tp_port(skb, &uh->source, src, &uh->check);
                        flow_key->tp.src = src;
                }
                if (likely(dst != uh->dest)) {
                        set_tp_port(skb, &uh->dest, dst, &uh->check);
                        flow_key->tp.dst = dst;
                }

                if (unlikely(!uh->check))
                        uh->check = CSUM_MANGLED_0;
        } else {
                uh->source = src;
                uh->dest = dst;
                flow_key->tp.src = src;
                flow_key->tp.dst = dst;
        }

        skb_clear_hash(skb);

        return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                   const struct ovs_key_tcp *key,
                   const struct ovs_key_tcp *mask)
{
        struct tcphdr *th;
        __be16 src, dst;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                   sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        src = MASKED(th->source, key->tcp_src, mask->tcp_src);
        if (likely(src != th->source)) {
                set_tp_port(skb, &th->source, src, &th->check);
                flow_key->tp.src = src;
        }
        dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
        if (likely(dst != th->dest)) {
                set_tp_port(skb, &th->dest, dst, &th->check);
                flow_key->tp.dst = dst;
        }
        skb_clear_hash(skb);

        return 0;
}
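
/* SCTP uses a CRC32c rather than an Internet checksum, so set_sctp() below
 * recomputes it over the whole packet.  XORing the old stored checksum,
 * the old correct checksum, and the new correct checksum preserves any
 * pre-existing checksum error instead of silently fixing it.
 */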
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
                    const struct ovs_key_sctp *key,
                    const struct ovs_key_sctp *mask)
{
        unsigned int sctphoff = skb_transport_offset(skb);
        struct sctphdr *sh;
        __le32 old_correct_csum, new_csum, old_csum;
        int err;

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        old_csum = sh->checksum;
        old_correct_csum = sctp_compute_cksum(skb, sctphoff);

        sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
        sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);

        new_csum = sctp_compute_cksum(skb, sctphoff);

        /* Carry any checksum errors through. */
        sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

        skb_clear_hash(skb);
        flow_key->tp.src = sh->source;
        flow_key->tp.dst = sh->dest;

        return 0;
}
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport))
                ovs_vport_send(vport, skb);
        else
                kfree_skb(skb);
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr,
                            const struct nlattr *actions, int actions_len)
{
        struct ip_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        memset(&upcall, 0, sizeof(upcall));
        upcall.cmd = OVS_PACKET_CMD_ACTION;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get egress tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                upcall.egress_tun_info = &info;
                                err = ovs_vport_get_egress_tun_info(vport, skb,
                                                                    &info);
                                if (err)
                                        upcall.egress_tun_info = NULL;
                        }
                        break;
                }

                case OVS_USERSPACE_ATTR_ACTIONS: {
                        /* Include actions. */
                        upcall.actions = actions;
                        upcall.actions_len = actions_len;
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall);
}
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr,
                  const struct nlattr *actions, int actions_len)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
             a = nla_next(a, &rem)) {
                u32 probability;

                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        probability = nla_get_u32(a);
                        if (!probability || prandom_u32() > probability)
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* Actions list is empty, do nothing. */
        if (unlikely(!rem))
                return 0;

        /* The only known usage of the sample action is having a single
         * userspace action.  Treat this usage as a special case.
         * output_userspace() should clone the skb to be sent to user
         * space; this skb will be consumed by its caller.
         */
        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
                return output_userspace(dp, skb, key, a, actions, actions_len);

        skb = skb_clone(skb, GFP_ATOMIC);
        /* Skip the sample action when out of memory. */
        if (!skb)
                return 0;

        if (!add_deferred_actions(skb, key, a)) {
                if (net_ratelimit())
                        pr_warn("%s: deferred actions limit reached, dropping sample action\n",
                                ovs_dp_name(dp));
                kfree_skb(skb);
        }
        return 0;
}
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1; /* Avoid reporting a zero hash. */

        key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
                              struct sw_flow_key *flow_key,
                              const struct nlattr *a)
{
        /* Only tunnel set execution is supported without a mask. */
        if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
                struct ovs_tunnel_info *tun = nla_data(a);

                ovs_skb_dst_drop(skb);
                ovs_dst_hold((struct dst_entry *)tun->tun_dst);
                ovs_skb_dst_set(skb, (struct dst_entry *)tun->tun_dst);
                return 0;
        }

        return -EINVAL;
}
/* Mask is at the midpoint of the data. */
#define get_mask(a, type) ((const type)nla_data(a) + 1)
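
/* A masked set action carries the key immediately followed by a mask of
 * equal size in a single netlink attribute; for example, an
 * OVS_KEY_ATTR_IPV4 payload is one struct ovs_key_ipv4 holding the key
 * followed by a second one holding the mask.  get_mask() finds the mask by
 * stepping one element past nla_data().
 */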
static int execute_masked_set_action(struct sk_buff *skb,
                                     struct sw_flow_key *flow_key,
                                     const struct nlattr *a)
{
        int err = 0;

        switch (nla_type(a)) {
        case OVS_KEY_ATTR_PRIORITY:
                SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
                flow_key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                /* Masked data not supported for tunnel. */
                err = -EINVAL;
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, flow_key, nla_data(a),
                                   get_mask(a, struct ovs_key_ethernet *));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv4 *));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_ipv6 *));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_tcp *));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, flow_key, nla_data(a),
                              get_mask(a, struct ovs_key_udp *));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, flow_key, nla_data(a),
                               get_mask(a, struct ovs_key_sctp *));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
                                                                    __be32 *));
                break;
        }

        return err;
}
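
/* Recirculation re-runs the packet through flow lookup with a new
 * recirc_id.  Rather than recursing, execute_recirc() queues the packet as
 * a deferred action (with NULL actions, meaning "go back to flow lookup"),
 * cloning the skb first if more actions follow in the current list.
 */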
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, int rem)
{
        struct deferred_action *da;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        if (!nla_is_last(a, rem)) {
                /* The recirc action is not the last action of the action
                 * list; the skb needs to be cloned.  Skip the recirc action
                 * when out of memory, but continue with the rest of the
                 * action list.
                 */
                skb = skb_clone(skb, GFP_ATOMIC);
                if (!skb)
                        return 0;
        }

        da = add_deferred_actions(skb, key, NULL);
        if (da) {
                da->pkt_key.recirc_id = nla_get_u32(a);
        } else {
                kfree_skb(skb);
                if (net_ratelimit())
                        pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                ovs_dp_name(dp));
        }
        return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        /* Every output action needs a separate clone of 'skb', but the
         * common case is just a single output action, where doing a clone
         * and then freeing the original skbuff is wasteful.  So the
         * following code is slightly obscure just to avoid that.
         */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (unlikely(prev_port != -1)) {
                        struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

                        if (out_skb)
                                do_output(dp, out_skb, prev_port);

                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;
                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a, attr, len);
                        break;
                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;
                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;
                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;
                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;
                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;
                case OVS_ACTION_ATTR_RECIRC:
                        err = execute_recirc(dp, skb, key, a, rem);
                        if (nla_is_last(a, rem)) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;
                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;
                case OVS_ACTION_ATTR_SET_MASKED:
                case OVS_ACTION_ATTR_SET_TO_MASKED:
                        err = execute_masked_set_action(skb, key, nla_data(a));
                        break;
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a, attr, len);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                consume_skb(skb);

        return 0;
}
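
/* Drain the per-CPU FIFO.  Entries may append further deferred actions
 * while they execute; the loop below re-checks for emptiness after each
 * entry, so those are processed too (subject to the FIFO's fixed size).
 */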
static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;

                if (actions)
                        do_execute_actions(dp, skb, key, actions,
                                           nla_len(actions));
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet. */
        action_fifo_init(fifo);
}
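
/* Entry point from the datapath receive path.  Only the outermost
 * invocation (level 0 on entry) drains the deferred-action FIFO; nested
 * invocations (e.g. via recirculation) just queue work that the outermost
 * loop picks up, rather than draining re-entrantly.
 */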
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int level = this_cpu_read(exec_actions_level);
        int err;

        if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
                if (net_ratelimit())
                        pr_warn("%s: packet loop detected, dropping.\n",
                                ovs_dp_name(dp));

                kfree_skb(skb);
                return -ELOOP;
        }

        this_cpu_inc(exec_actions_level);
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (!level)
                process_deferred_actions(dp);

        this_cpu_dec(exec_actions_level);

        /* This return status currently does not reflect the errors
         * encountered during deferred actions execution.  Probably needs to
         * be fixed in the future.
         */
        return err;
}
int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
}