/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "gso.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len);

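/* An action whose execution is deferred until the current action list
 * finishes: queued by sample() and recirculation, then run from a
 * per-CPU FIFO once the outermost do_execute_actions() returns.
 */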
struct deferred_action {
        struct sk_buff *skb;
        const struct nlattr *actions;

        /* Store pkt_key clone when creating deferred action. */
        struct sw_flow_key pkt_key;
};

#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
        int head;
        int tail;
        /* Deferred action fifo queue storage. */
        struct deferred_action fifo[DEFERRED_ACTION_FIFO_SIZE];
};

static struct action_fifo __percpu *action_fifos;
#define EXEC_ACTIONS_LEVEL_LIMIT 4   /* limit used to detect packet
                                      * looping by the network stack
                                      */
static DEFINE_PER_CPU(int, exec_actions_level);

static void action_fifo_init(struct action_fifo *fifo)
{
        fifo->head = 0;
        fifo->tail = 0;
}

static bool action_fifo_is_empty(const struct action_fifo *fifo)
{
        return (fifo->head == fifo->tail);
}

static struct deferred_action *action_fifo_get(struct action_fifo *fifo)
{
        if (action_fifo_is_empty(fifo))
                return NULL;

        return &fifo->fifo[fifo->tail++];
}

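/* Reserve the next free slot at the head of the FIFO, or return NULL when
 * it is full (at most DEFERRED_ACTION_FIFO_SIZE - 1 entries are ever used).
 */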
static struct deferred_action *action_fifo_put(struct action_fifo *fifo)
{
        if (fifo->head >= DEFERRED_ACTION_FIFO_SIZE - 1)
                return NULL;

        return &fifo->fifo[fifo->head++];
}

/* Return queue entry if fifo is not full */
static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
                                                    const struct sw_flow_key *key,
                                                    const struct nlattr *attr)
{
        struct action_fifo *fifo;
        struct deferred_action *da;

        fifo = this_cpu_ptr(action_fifos);
        da = action_fifo_put(fifo);
        if (da) {
                da->skb = skb;
                da->actions = attr;
                da->pkt_key = *key;
        }

        return da;
}

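/* A zero EtherType marks the flow key as stale.  Actions that rewrite
 * packet headers call this so that execute_recirc() re-extracts the key
 * before it is used again.
 */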
static void invalidate_flow_key(struct sw_flow_key *key)
{
        key->eth.type = htons(0);
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
        return !!key->eth.type;
}

static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_mpls *mpls)
{
        __be32 *new_mpls_lse;
        struct ethhdr *hdr;

        /* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
        if (skb_encapsulation(skb))
                return -ENOTSUPP;

        if (skb_cow_head(skb, MPLS_HLEN) < 0)
                return -ENOMEM;

        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);
        skb_reset_mac_header(skb);

        new_mpls_lse = (__be32 *)skb_mpls_header(skb);
        *new_mpls_lse = mpls->mpls_lse;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
                                                             MPLS_HLEN, 0));

        hdr = eth_hdr(skb);
        hdr->h_proto = mpls->mpls_ethertype;
        if (!ovs_skb_get_inner_protocol(skb))
                ovs_skb_set_inner_protocol(skb, skb->protocol);
        skb->protocol = mpls->mpls_ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be16 ethertype)
{
        struct ethhdr *hdr;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        if (skb->ip_summed == CHECKSUM_COMPLETE)
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(skb_mpls_header(skb),
                                                  MPLS_HLEN, 0));

        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
                skb->mac_len);

        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);

        /* skb_mpls_header() is used to locate the ethertype
         * field correctly in the presence of VLAN tags.
         */
        hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN);
        hdr->h_proto = ethertype;
        if (eth_p_mpls(skb->protocol))
                skb->protocol = ethertype;

        invalidate_flow_key(key);
        return 0;
}

static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
                    const __be32 *mpls_lse)
{
        __be32 *stack;
        int err;

        err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;

        stack = (__be32 *)skb_mpls_header(skb);
        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                __be32 diff[] = { ~(*stack), *mpls_lse };
                skb->csum = ~csum_partial((char *)diff, sizeof(diff),
                                          ~skb->csum);
        }

        *stack = *mpls_lse;
        key->mpls.top_lse = *mpls_lse;
        return 0;
}

static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
{
        int err;

        err = skb_vlan_pop(skb);
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = 0;

        return err;
}

static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
                     const struct ovs_action_push_vlan *vlan)
{
        if (skb_vlan_tag_present(skb))
                invalidate_flow_key(key);
        else
                key->eth.tci = vlan->vlan_tci;

        return skb_vlan_push(skb, vlan->vlan_tpid,
                             ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
}

static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
                        const struct ovs_key_ethernet *eth_key)
{
        int err;
        err = skb_ensure_writable(skb, ETH_HLEN);
        if (unlikely(err))
                return err;

        skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
        ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

        ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

        ether_addr_copy(key->eth.src, eth_key->eth_src);
        ether_addr_copy(key->eth.dst, eth_key->eth_dst);
        return 0;
}

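/* Rewrite one IPv4 address in place, updating the TCP/UDP checksum when a
 * full transport header is present, then the IP header checksum.
 */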
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
                                 __be32 addr[4], const __be32 new_addr[4])
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (l4_proto == NEXTHDR_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
                                                  addr, new_addr, 1);
        } else if (l4_proto == NEXTHDR_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace16(&uh->check, skb,
                                                          addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        } else if (l4_proto == NEXTHDR_ICMP) {
                if (likely(transport_len >= sizeof(struct icmp6hdr)))
                        inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
                                                  skb, addr, new_addr, 1);
        }
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (likely(recalculate_csum))
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}

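/* The IPv6 traffic class spans the low four bits of 'priority' and the
 * high four bits of flow_lbl[0]; the 20-bit flow label occupies the
 * remaining bits of flow_lbl[].
 */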
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
        nh->priority = tc >> 4;
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
        nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
        nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
        nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
        csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
        nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_ipv4 *ipv4_key)
{
        struct iphdr *nh;
        int err;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct iphdr));
        if (unlikely(err))
                return err;

        nh = ip_hdr(skb);

        if (ipv4_key->ipv4_src != nh->saddr) {
                set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
                key->ipv4.addr.src = ipv4_key->ipv4_src;
        }

        if (ipv4_key->ipv4_dst != nh->daddr) {
                set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
                key->ipv4.addr.dst = ipv4_key->ipv4_dst;
        }

        if (ipv4_key->ipv4_tos != nh->tos) {
                ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
                key->ip.tos = nh->tos;
        }

        if (ipv4_key->ipv4_ttl != nh->ttl) {
                set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
                key->ip.ttl = ipv4_key->ipv4_ttl;
        }

        return 0;
}

static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_ipv6 *ipv6_key)
{
        struct ipv6hdr *nh;
        int err;
        __be32 *saddr;
        __be32 *daddr;

        err = skb_ensure_writable(skb, skb_network_offset(skb) +
                                  sizeof(struct ipv6hdr));
        if (unlikely(err))
                return err;

        nh = ipv6_hdr(skb);
        saddr = (__be32 *)&nh->saddr;
        daddr = (__be32 *)&nh->daddr;

        if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
                set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
                              ipv6_key->ipv6_src, true);
                memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
                       sizeof(ipv6_key->ipv6_src));
        }

        if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
                unsigned int offset = 0;
                int flags = IP6_FH_F_SKIP_RH;
                bool recalc_csum = true;

                if (ipv6_ext_hdr(nh->nexthdr))
                        recalc_csum = ipv6_find_hdr(skb, &offset,
                                                    NEXTHDR_ROUTING, NULL,
                                                    &flags) != NEXTHDR_ROUTING;

                set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
                              ipv6_key->ipv6_dst, recalc_csum);
                memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
                       sizeof(ipv6_key->ipv6_dst));
        }

        set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
        key->ip.tos = ipv6_get_dsfield(nh);

        set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
        key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);

        nh->hop_limit = ipv6_key->ipv6_hlimit;
        key->ip.ttl = ipv6_key->ipv6_hlimit;
        return 0;
}

/* Must follow skb_ensure_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
                         __be16 new_port, __sum16 *check)
{
        inet_proto_csum_replace2(check, skb, *port, new_port, 0);
        *port = new_port;
        skb_clear_hash(skb);
}

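/* A UDP checksum of zero means "no checksum", so when a checksum is in use
 * a computed value of zero is stored as CSUM_MANGLED_0 instead.
 */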
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
        struct udphdr *uh = udp_hdr(skb);

        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
                set_tp_port(skb, port, new_port, &uh->check);

                if (!uh->check)
                        uh->check = CSUM_MANGLED_0;
        } else {
                *port = new_port;
                skb_clear_hash(skb);
        }
}

static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
                   const struct ovs_key_udp *udp_port_key)
{
        struct udphdr *uh;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct udphdr));
        if (unlikely(err))
                return err;

        uh = udp_hdr(skb);
        if (udp_port_key->udp_src != uh->source) {
                set_udp_port(skb, &uh->source, udp_port_key->udp_src);
                key->tp.src = udp_port_key->udp_src;
        }

        if (udp_port_key->udp_dst != uh->dest) {
                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
                key->tp.dst = udp_port_key->udp_dst;
        }

        return 0;
}

static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
                   const struct ovs_key_tcp *tcp_port_key)
{
        struct tcphdr *th;
        int err;

        err = skb_ensure_writable(skb, skb_transport_offset(skb) +
                                  sizeof(struct tcphdr));
        if (unlikely(err))
                return err;

        th = tcp_hdr(skb);
        if (tcp_port_key->tcp_src != th->source) {
                set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
                key->tp.src = tcp_port_key->tcp_src;
        }

        if (tcp_port_key->tcp_dst != th->dest) {
                set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
                key->tp.dst = tcp_port_key->tcp_dst;
        }

        return 0;
}

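/* SCTP uses a CRC32c over the whole packet, so the checksum is fully
 * recomputed rather than incrementally updated; XOR-ing the old and new
 * correct values into the stored checksum carries any pre-existing error
 * through unchanged.
 */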
static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
                    const struct ovs_key_sctp *sctp_port_key)
{
        struct sctphdr *sh;
        int err;
        unsigned int sctphoff = skb_transport_offset(skb);

        err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
        if (unlikely(err))
                return err;

        sh = sctp_hdr(skb);
        if (sctp_port_key->sctp_src != sh->source ||
            sctp_port_key->sctp_dst != sh->dest) {
                __le32 old_correct_csum, new_csum, old_csum;

                old_csum = sh->checksum;
                old_correct_csum = sctp_compute_cksum(skb, sctphoff);

                sh->source = sctp_port_key->sctp_src;
                sh->dest = sctp_port_key->sctp_dst;

                new_csum = sctp_compute_cksum(skb, sctphoff);

                /* Carry any checksum errors through. */
                sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

                skb_clear_hash(skb);
                key->tp.src = sctp_port_key->sctp_src;
                key->tp.dst = sctp_port_key->sctp_dst;
        }

        return 0;
}

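/* Transmit 'skb' on datapath port 'out_port', consuming it; the packet is
 * dropped if the port no longer exists.
 */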
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
        struct vport *vport = ovs_vport_rcu(dp, out_port);

        if (likely(vport))
                ovs_vport_send(vport, skb);
        else
                kfree_skb(skb);
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
                            struct sw_flow_key *key, const struct nlattr *attr)
{
        struct ovs_tunnel_info info;
        struct dp_upcall_info upcall;
        const struct nlattr *a;
        int rem;

        upcall.cmd = OVS_PACKET_CMD_ACTION;
        upcall.userdata = NULL;
        upcall.portid = 0;
        upcall.egress_tun_info = NULL;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_USERSPACE_ATTR_USERDATA:
                        upcall.userdata = a;
                        break;

                case OVS_USERSPACE_ATTR_PID:
                        upcall.portid = nla_get_u32(a);
                        break;

                case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: {
                        /* Get out tunnel info. */
                        struct vport *vport;

                        vport = ovs_vport_rcu(dp, nla_get_u32(a));
                        if (vport) {
                                int err;

                                err = ovs_vport_get_egress_tun_info(vport, skb,
                                                                    &info);
                                if (!err)
                                        upcall.egress_tun_info = &info;
                        }
                        break;
                }

                } /* End of switch. */
        }

        return ovs_dp_upcall(dp, skb, key, &upcall);
}

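/* Execute the nested action list of a sample action with the configured
 * probability; the common single-userspace-action case runs in place,
 * anything else is deferred on a cloned skb.
 */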
static int sample(struct datapath *dp, struct sk_buff *skb,
                  struct sw_flow_key *key, const struct nlattr *attr)
{
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
        int rem;

        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
                 a = nla_next(a, &rem)) {
                switch (nla_type(a)) {
                case OVS_SAMPLE_ATTR_PROBABILITY:
                        if (prandom_u32() >= nla_get_u32(a))
                                return 0;
                        break;

                case OVS_SAMPLE_ATTR_ACTIONS:
                        acts_list = a;
                        break;
                }
        }

        rem = nla_len(acts_list);
        a = nla_data(acts_list);

        /* Actions list is empty, do nothing */
        if (unlikely(!rem))
                return 0;

        /* The only known usage of sample action is having a single user-space
         * action. Treat this usage as a special case.
         * The output_userspace() should clone the skb to be sent to the
         * user space. This skb will be consumed by its caller.
         */
        if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
                   nla_is_last(a, rem)))
                return output_userspace(dp, skb, key, a);

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                /* Skip the sample action when out of memory. */
                return 0;

        if (!add_deferred_actions(skb, key, a)) {
                if (net_ratelimit())
                        pr_warn("%s: deferred actions limit reached, dropping sample action\n",
                                ovs_dp_name(dp));

                kfree_skb(skb);
        }
        return 0;
}

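/* Compute an L4 flow hash, fold in the caller-supplied basis and store the
 * result in the flow key; zero is reserved to mean "no hash".
 */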
static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
                         const struct nlattr *attr)
{
        struct ovs_action_hash *hash_act = nla_data(attr);
        u32 hash = 0;

        /* OVS_HASH_ALG_L4 is the only possible hash algorithm.  */
        hash = skb_get_hash(skb);
        hash = jhash_1word(hash, hash_act->hash_basis);
        if (!hash)
                hash = 0x1;

        key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
                              const struct nlattr *nested_attr)
{
        int err = 0;

        switch (nla_type(nested_attr)) {
        case OVS_KEY_ATTR_PRIORITY:
                skb->priority = nla_get_u32(nested_attr);
                key->phy.priority = skb->priority;
                break;

        case OVS_KEY_ATTR_SKB_MARK:
                skb->mark = nla_get_u32(nested_attr);
                key->phy.skb_mark = skb->mark;
                break;

        case OVS_KEY_ATTR_TUNNEL_INFO:
                OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
                break;

        case OVS_KEY_ATTR_ETHERNET:
                err = set_eth_addr(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV4:
                err = set_ipv4(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_IPV6:
                err = set_ipv6(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_TCP:
                err = set_tcp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_UDP:
                err = set_udp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_SCTP:
                err = set_sctp(skb, key, nla_data(nested_attr));
                break;

        case OVS_KEY_ATTR_MPLS:
                err = set_mpls(skb, key, nla_data(nested_attr));
                break;
        }

        return err;
}

static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
                          struct sw_flow_key *key,
                          const struct nlattr *a, int rem)
{
        struct deferred_action *da;

        if (!is_flow_key_valid(key)) {
                int err;

                err = ovs_flow_key_update(skb, key);
                if (err)
                        return err;
        }
        BUG_ON(!is_flow_key_valid(key));

        if (!nla_is_last(a, rem)) {
                /* The recirc action is not the last action in the list,
                 * so the skb needs to be cloned.
                 */
                skb = skb_clone(skb, GFP_ATOMIC);

                /* Skip the recirc action when out of memory, but
                 * continue on with the rest of the action list.
                 */
                if (!skb)
                        return 0;
        }

        da = add_deferred_actions(skb, key, NULL);
        if (da) {
                da->pkt_key.recirc_id = nla_get_u32(a);
        } else {
                kfree_skb(skb);

                if (net_ratelimit())
                        pr_warn("%s: deferred action limit reached, drop recirc action\n",
                                ovs_dp_name(dp));
        }

        return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                              struct sw_flow_key *key,
                              const struct nlattr *attr, int len)
{
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so that doing a clone and
         * then freeing the original skbuff is wasteful.  So the following code
         * is slightly obscure just to avoid that.
         */
        int prev_port = -1;
        const struct nlattr *a;
        int rem;

        for (a = attr, rem = len; rem > 0;
             a = nla_next(a, &rem)) {
                int err = 0;

                if (unlikely(prev_port != -1)) {
                        struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

                        if (out_skb)
                                do_output(dp, out_skb, prev_port);

                        prev_port = -1;
                }

                switch (nla_type(a)) {
                case OVS_ACTION_ATTR_OUTPUT:
                        prev_port = nla_get_u32(a);
                        break;

                case OVS_ACTION_ATTR_USERSPACE:
                        output_userspace(dp, skb, key, a);
                        break;

                case OVS_ACTION_ATTR_HASH:
                        execute_hash(skb, key, a);
                        break;

                case OVS_ACTION_ATTR_PUSH_MPLS:
                        err = push_mpls(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_MPLS:
                        err = pop_mpls(skb, key, nla_get_be16(a));
                        break;

                case OVS_ACTION_ATTR_PUSH_VLAN:
                        err = push_vlan(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_POP_VLAN:
                        err = pop_vlan(skb, key);
                        break;

                case OVS_ACTION_ATTR_RECIRC:
                        err = execute_recirc(dp, skb, key, a, rem);
                        if (nla_is_last(a, rem)) {
                                /* If this is the last action, the skb has
                                 * been consumed or freed.
                                 * Return immediately.
                                 */
                                return err;
                        }
                        break;

                case OVS_ACTION_ATTR_SET:
                        err = execute_set_action(skb, key, nla_data(a));
                        break;

                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, key, a);
                        break;
                }

                if (unlikely(err)) {
                        kfree_skb(skb);
                        return err;
                }
        }

        if (prev_port != -1)
                do_output(dp, skb, prev_port);
        else
                consume_skb(skb);

        return 0;
}

static void process_deferred_actions(struct datapath *dp)
{
        struct action_fifo *fifo = this_cpu_ptr(action_fifos);

        /* Do not touch the FIFO if there are no deferred actions. */
        if (action_fifo_is_empty(fifo))
                return;

        /* Finish executing all deferred actions. */
        do {
                struct deferred_action *da = action_fifo_get(fifo);
                struct sk_buff *skb = da->skb;
                struct sw_flow_key *key = &da->pkt_key;
                const struct nlattr *actions = da->actions;

                if (actions)
                        do_execute_actions(dp, skb, key, actions,
                                           nla_len(actions));
                else
                        ovs_dp_process_packet(skb, key);
        } while (!action_fifo_is_empty(fifo));

        /* Reset FIFO for the next packet.  */
        action_fifo_init(fifo);
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
                        const struct sw_flow_actions *acts,
                        struct sw_flow_key *key)
{
        int level = this_cpu_read(exec_actions_level);
        int err;

        if (unlikely(level >= EXEC_ACTIONS_LEVEL_LIMIT)) {
                if (net_ratelimit())
                        pr_warn("%s: packet loop detected, dropping.\n",
                                ovs_dp_name(dp));

                kfree_skb(skb);
                return -ELOOP;
        }

        this_cpu_inc(exec_actions_level);
        err = do_execute_actions(dp, skb, key,
                                 acts->actions, acts->actions_len);

        if (!level)
                process_deferred_actions(dp);

        this_cpu_dec(exec_actions_level);

        /* This return status currently does not reflect the errors
         * encountered during deferred actions execution.  It probably needs
         * to be fixed in the future.
         */
        return err;
}

int action_fifos_init(void)
{
        action_fifos = alloc_percpu(struct action_fifo);
        if (!action_fifos)
                return -ENOMEM;

        return 0;
}

void action_fifos_exit(void)
{
        free_percpu(action_fifos);
}