/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len);
struct deferred_action {
	struct sk_buff *skb;
	const struct nlattr *actions;

	/* Store pkt_key clone when creating deferred action. */
	struct sw_flow_key pkt_key;
};
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
	int head;
	int tail;
	/* Deferred action fifo queue storage. */
	struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4   /* limit used to detect packet
				      * looping by the network stack
				      */
static DEFINE_PER_CPU(int, exec_actions_level);
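
/* Design note: recirculation and nested sample() actions are executed
 * by queueing the packet on the per-CPU FIFO above and draining it in
 * process_deferred_actions() once the current action list finishes,
 * rather than by recursing into the executor.  Together with
 * exec_actions_level this bounds kernel stack usage and catches
 * packets that loop through the datapath.
 */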
static void action_fifo_init(struct action_fifo *fifo)
{
	fifo->head = 0;
	fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
	return (fifo->head == fifo->tail);
}
static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
	if (action_fifo_is_empty(fifo))
		return NULL;

	return &fifo->fifo[fifo->tail++];
}
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
	if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
		return NULL;

	return &fifo->fifo[fifo->head++];
}
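
/* Note: because action_fifo_put() refuses to fill the last slot, at
 * most DEFERRED_ACTION_FIFO_SIZE - 1 actions can be deferred for one
 * packet; head and tail only ever grow until action_fifo_init()
 * resets them after the queue drains.
 */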
/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
						    const struct sw_flow_key *key,
						    const struct nlattr *attr)
{
	struct action_fifo *fifo;
	struct deferred_action *da;

	fifo = this_cpu_ptr(action_fifos);
	da = action_fifo_put(fifo);
	if (da) {
		da->skb = skb;
		da->actions = attr;
		da->pkt_key = *key;
	}

	return da;
}
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return !!key->eth.type;
}
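
/* A zero EtherType never appears in a key extracted from a real
 * packet, so it doubles as a "key is stale" marker: actions that
 * rewrite headers cached in the key call invalidate_flow_key(), and
 * execute_recirc() re-extracts the key before recirculation.
 */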
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/* The end of the mac header.
 *
 * For non-MPLS skbs this will correspond to the network header.
 * For MPLS skbs it will be before the network_header as the MPLS
 * label stack lies between the end of the mac header and the network
 * header. That is, for MPLS skbs the end of the mac header
 * is the top of the MPLS label stack.
 */
static unsigned char *mac_header_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb) + skb->mac_len;
}
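
/* Layout of an MPLS packet, for illustration:
 *
 *   | Ethernet header | MPLS label stack | IP header | ...
 *   ^                 ^                  ^
 *   skb_mac_header()  mac_header_end()   skb_network_header()
 *                     == mac_header + mac_len
 */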
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;
	invalidate_flow_key(key);
	return 0;
}
static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(mac_header_end(skb),
						  MPLS_HLEN, 0));

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* mac_header_end() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;
	invalidate_flow_key(key);
	return 0;
}
static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		    const __be32 *mpls_lse)
{
	__be32 *stack;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	/* Locate the stack only after make_writable(), which may
	 * reallocate and move the skb data.
	 */
	stack = (__be32 *)mac_header_end(skb);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), *mpls_lse };
		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = *mpls_lse;
	key->mpls.top_lse = *mpls_lse;
	return 0;
}
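
/* The CHECKSUM_COMPLETE update in set_mpls() is the usual incremental
 * form (cf. RFC 1624): folding { ~old_word, new_word } into the
 * complemented checksum subtracts the old 32-bit LSE and adds the new
 * one without re-summing the whole packet.
 */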
/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	/* Update mac_len for subsequent MPLS actions */
	skb->mac_len -= VLAN_HLEN;

	return 0;
}
static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN)) {
		key->eth.tci = 0;
		return 0;
	}

	invalidate_flow_key(key);
	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}
static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;
		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

		invalidate_flow_key(key);
	} else {
		key->eth.tci = vlan->vlan_tci;
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}
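
/* Note: the TCI supplied by userspace is expected to carry
 * VLAN_TAG_PRESENT to distinguish "tagged with TCI 0" from
 * "untagged"; the flag is masked out above before the TCI is stored
 * in the hardware-accelerated tag.
 */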
static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(key->eth.src, eth_key->eth_src);
	ether_addr_copy(key->eth.dst, eth_key->eth_dst);
	return 0;
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (likely(recalculate_csum))
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}
static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}
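
/* The 20-bit flow label spans the low nibble of flow_lbl[0] plus all
 * of flow_lbl[1] and flow_lbl[2].  Worked example: fl = 0x12345
 * stores 0x1 in flow_lbl[0] & 0x0F, 0x23 in flow_lbl[1] and 0x45 in
 * flow_lbl[2].  set_ipv6_tc() above splits the traffic class the same
 * way across nh->priority and the high nibble of flow_lbl[0].
 */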
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr) {
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
		key->ipv4.addr.src = ipv4_key->ipv4_src;
	}

	if (ipv4_key->ipv4_dst != nh->daddr) {
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
		key->ipv4.addr.dst = ipv4_key->ipv4_dst;
	}

	if (ipv4_key->ipv4_tos != nh->tos) {
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
		key->ip.tos = nh->tos;
	}

	if (ipv4_key->ipv4_ttl != nh->ttl) {
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
		key->ip.ttl = ipv4_key->ipv4_ttl;
	}

	return 0;
}
static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
			    sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);
		memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
		       sizeof(ipv6_key->ipv6_src));
	}

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = OVS_IP6T_FH_F_SKIP_RH;
		bool recalc_csum = true;

		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
		memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
		       sizeof(ipv6_key->ipv6_dst));
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	key->ip.tos = ipv6_get_dsfield(nh);

	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);

	nh->hop_limit = ipv6_key->ipv6_hlimit;
	key->ip.ttl = ipv6_key->ipv6_hlimit;
	return 0;
}
/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
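
/* In UDP over IPv4 a zero checksum field means "no checksum", so a
 * computed checksum that folds to zero must be sent as CSUM_MANGLED_0
 * (0xffff), its equivalent in one's-complement arithmetic.
 */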
static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
		   const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
			    sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source) {
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
		key->tp.src = udp_port_key->udp_src;
	}

	if (udp_port_key->udp_dst != uh->dest) {
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
		key->tp.dst = udp_port_key->udp_dst;
	}

	return 0;
}
static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
		   const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
			    sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source) {
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
		key->tp.src = tcp_port_key->tcp_src;
	}

	if (tcp_port_key->tcp_dst != th->dest) {
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
		key->tp.dst = tcp_port_key->tcp_dst;
	}

	return 0;
}
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
		key->tp.src = sctp_port_key->sctp_src;
		key->tp.dst = sctp_port_key->sctp_dst;
	}

	return 0;
}
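
/* The XOR in set_sctp() deliberately carries any pre-existing CRC32c
 * error through the rewrite: old_csum ^ old_correct_csum preserves
 * the corruption delta, so a packet that arrived broken still fails
 * verification at the receiver instead of being silently repaired.
 */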
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    struct sw_flow_key *key, const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;
	struct ovs_tunnel_info info;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.userdata = NULL;
	upcall.portid = 0;
	upcall.egress_tun_info = NULL;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;

		case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
			/* Get out tunnel info. */
			struct vport *vport;

			vport = ovs_vport_rcu(dp, nla_get_u32(a));
			if (vport) {
				int err;

				err = ovs_vport_get_egress_tun_info(vport, skb,
								    &info);
				if (!err)
					upcall.egress_tun_info = &info;
			}
			break;
		}

		} /* End of switch. */
	}

	return ovs_dp_upcall(dp, skb, key, &upcall);
}
static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}
static int sample(struct datapath *dp, struct sk_buff *skb,
		  struct sw_flow_key *key, const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is empty, do nothing */
	if (unlikely(!rem))
		return 0;

	/* The only known usage of sample action is having a single user-space
	 * action. Treat this usage as a special case.
	 * The output_userspace() should clone the skb to be sent to the
	 * user space. This skb will be consumed by its caller.
	 */
	if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		   last_action(a, rem)))
		return output_userspace(dp, skb, key, a);

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		/* Skip the sample action when out of memory. */
		return 0;

	if (!add_deferred_actions(skb, key, a)) {
		if (net_ratelimit())
			pr_warn("%s: deferred actions limit reached, dropping sample action\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
	}
	return 0;
}
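
/* OVS_SAMPLE_ATTR_PROBABILITY is a u32 compared against
 * prandom_u32(), so a value p samples roughly p / 2^32 of packets;
 * e.g. 0x80000000 samples about half, and UINT32_MAX effectively all.
 */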
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
			 const struct nlattr *attr)
{
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		key->phy.priority = skb->priority;
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		key->phy.skb_mark = skb->mark;
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, key, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, key, nla_data(nested_attr));
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  struct sw_flow_key *key, const struct nlattr *a, int rem)
{
	struct deferred_action *da;

	if (!is_flow_key_valid(key)) {
		int err;

		err = ovs_flow_key_update(skb, key);
		if (err)
			return err;
	}
	BUG_ON(!is_flow_key_valid(key));

	if (!last_action(a, rem)) {
		/* Recirc action is not the last action
		 * of the action list, need to clone the skb.
		 */
		skb = skb_clone(skb, GFP_ATOMIC);

		/* Skip the recirc action when out of memory, but
		 * continue on with the rest of the action list.
		 */
		if (!skb)
			return 0;
	}

	da = add_deferred_actions(skb, key, NULL);
	if (da) {
		da->pkt_key.recirc_id = nla_get_u32(a);
	} else {
		kfree_skb(skb);
		if (net_ratelimit())
			pr_warn("%s: deferred action limit reached, drop recirc action\n",
				ovs_dp_name(dp));
	}

	return 0;
}
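
/* Recirculation re-enters flow lookup with pkt_key.recirc_id set, but
 * always via the deferred FIFO rather than direct recursion, so a
 * chain of recirc actions consumes FIFO slots instead of kernel
 * stack.
 */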
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      struct sw_flow_key *key,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful. So the following code
	 * is slightly obscure just to avoid that.
	 */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);

			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;
		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, key, a);
			break;
		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, key, a);
			break;
		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, key, nla_get_be16(a));
			break;
		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, key, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;
		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb, key);
			break;
		case OVS_ACTION_ATTR_RECIRC:
			err = execute_recirc(dp, skb, key, a, rem);
			if (last_action(a, rem)) {
				/* If this is the last action, the skb has
				 * been consumed or freed.
				 * Return immediately.
				 */
				return err;
			}
			break;
		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, key, nla_data(a));
			break;
		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, key, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
static void process_deferred_actions(struct datapath *dp)
{
	struct action_fifo *fifo = this_cpu_ptr(action_fifos);

	/* Do not touch the FIFO if there are no deferred actions. */
	if (action_fifo_is_empty(fifo))
		return;

	/* Finish executing all deferred actions. */
	do {
		struct deferred_action *da = action_fifo_get(fifo);
		struct sk_buff *skb = da->skb;
		const struct nlattr *actions = da->actions;

		if (actions)
			do_execute_actions(dp, skb, &da->pkt_key, actions,
					   nla_len(actions));
		else
			ovs_dp_process_packet(skb, &da->pkt_key);
	} while (!action_fifo_is_empty(fifo));

	/* Reset FIFO for the next packet. */
	action_fifo_init(fifo);
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
			struct sw_flow_key *key,
			const struct sw_flow_actions *acts)
{
	int level = this_cpu_read(exec_actions_level);
	int err;

	if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
		if (net_ratelimit())
			pr_warn("%s: packet loop detected, dropping.\n",
				ovs_dp_name(dp));

		kfree_skb(skb);
		return -ELOOP;
	}

	this_cpu_inc(exec_actions_level);

	err = do_execute_actions(dp, skb, key,
				 acts->actions, acts->actions_len);

	if (!level)
		process_deferred_actions(dp);

	this_cpu_dec(exec_actions_level);

	/* This return status currently does not reflect the errors
	 * encountered during deferred actions execution. Probably needs to
	 * be fixed in the future.
	 */
	return err;
}
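
/* EXEC_ACTIONS_LEVEL_LIMIT caps per-CPU re-entry into
 * ovs_execute_actions(), e.g. when output to an internal port feeds
 * the packet back into the datapath; exceeding it is treated as a
 * forwarding loop and the packet is dropped with -ELOOP.
 */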
int action_fifos_init(void)
{
	action_fifos = alloc_percpu(struct action_fifo);
	if (!action_fifos)
		return -ENOMEM;

	return 0;
}

void action_fifos_exit(void)
{
	free_percpu(action_fifos);
}