/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vport.h"
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len);
/* Ensure that the first 'write_len' bytes of 'skb' may be modified in
 * place, expanding the head if the skb is a shared clone. */
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
/* The end of the mac header.
 *
 * For non-MPLS skbs this will correspond to the network header.
 * For MPLS skbs it will be before the network_header as the MPLS
 * label stack lies between the end of the mac header and the network
 * header. That is, for MPLS skbs the end of the mac header
 * is the top of the MPLS label stack.
 */
static unsigned char *mac_header_end(const struct sk_buff *skb)
{
	return skb_mac_header(skb) + skb->mac_len;
}
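
/* A sketch of the layout this implies (added for illustration):
 *
 *   +------------------+-------------------+------------------+
 *   |  Ethernet header |  MPLS label stack |  network header  |
 *   +------------------+-------------------+------------------+
 *   ^                  ^
 *   skb_mac_header()   mac_header_end()
 *
 * skb->mac_len covers only the Ethernet (and any VLAN) bytes, so for
 * MPLS packets mac_header_end() lands on the top label stack entry.
 */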
static int push_mpls(struct sk_buff *skb,
		     const struct ovs_action_push_mpls *mpls)
{
	__be32 *new_mpls_lse;
	struct ethhdr *hdr;

	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	/* Open room for the new label by shifting the mac header up. */
	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);

	new_mpls_lse = (__be32 *)mac_header_end(skb);
	*new_mpls_lse = mpls->mpls_lse;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse,
							     MPLS_HLEN, 0));

	hdr = eth_hdr(skb);
	hdr->h_proto = mpls->mpls_ethertype;
	if (!ovs_skb_get_inner_protocol(skb))
		ovs_skb_set_inner_protocol(skb, skb->protocol);
	skb->protocol = mpls->mpls_ethertype;

	return 0;
}
static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
{
	struct ethhdr *hdr;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum,
				     csum_partial(mac_header_end(skb),
						  MPLS_HLEN, 0));

	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);

	/* mac_header_end() is used to locate the ethertype
	 * field correctly in the presence of VLAN tags.
	 */
	hdr = (struct ethhdr *)(mac_header_end(skb) - ETH_HLEN);
	hdr->h_proto = ethertype;
	if (eth_p_mpls(skb->protocol))
		skb->protocol = ethertype;

	return 0;
}
static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
{
	__be32 *stack;
	int err;

	err = make_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	/* Locate the label only after make_writable(), which can move the
	 * skb data and would otherwise leave a stale pointer. */
	stack = (__be32 *)mac_header_end(skb);
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~(*stack), *mpls_lse };

		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
					  ~skb->csum);
	}

	*stack = *mpls_lse;

	return 0;
}
/* remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	/* Update mac_len for subsequent MPLS actions */
	skb->mac_len -= VLAN_HLEN;

	return 0;
}
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));

	return 0;
}
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		/* Update mac_len for subsequent MPLS actions */
		skb->mac_len += VLAN_HLEN;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	/* The new tag is stored in the hw-accel field; mask off the
	 * VLAN_TAG_PRESENT bit that userspace encodes into the TCI. */
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);

	return 0;
}
static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				/* Zero means "no UDP checksum"; fold to the
				 * mangled non-zero representation instead. */
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}
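
/* The IPv6 header splits these fields across byte boundaries: 'priority'
 * holds the top four bits of the traffic class, flow_lbl[0] holds the low
 * four bits of the traffic class in its high nibble and flow-label bits
 * 19..16 in its low nibble, and flow_lbl[1..2] hold flow-label bits 15..0.
 * That shared flow_lbl[0] byte is why both helpers above mask before
 * merging in the new value. */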
static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}
static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}
static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	__be32 *saddr;
	__be32 *daddr;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = OVS_IP6T_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* The L4 checksum covers the final destination, so skip the
		 * recalculation when a routing header is present. */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}
/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}
static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}
static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}
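
/* The XOR in set_sctp() works because old_csum ^ old_correct_csum is zero
 * for a packet that arrived with a correct checksum, leaving exactly
 * new_csum; if the packet arrived corrupted, the same error delta is
 * preserved so the packet remains detectably bad after the rewrite. */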
static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport = ovs_vport_rcu(dp, out_port);

	if (likely(vport))
		ovs_vport_send(vport, skb);
	else
		kfree_skb(skb);
}
static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}
/* 'rem' is the number of attribute bytes left in the list, so the last
 * action is the one whose own length accounts for everything remaining. */
static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}
static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	struct sk_buff *sample_skb;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* Actions list is either empty or only contains a single user-space
	 * action, the latter being a special case as it is the only known
	 * usage of the sample action.
	 * In these special cases don't clone the skb as there are no
	 * side-effects in the nested actions.
	 * Otherwise, clone in case the nested actions have side effects. */
	if (likely(rem == 0 ||
		   (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		    last_action(a, rem)))) {
		sample_skb = skb;
		skb_get(skb);
	} else {
		sample_skb = skb_clone(skb, GFP_ATOMIC);
		if (!sample_skb)
			/* Skip the sample action when out of memory. */
			return 0;
	}

	/* Note that do_execute_actions() never consumes skb.
	 * In the case where skb has been cloned above it is the clone that
	 * is consumed.  Otherwise the skb_get(skb) call prevents
	 * consumption by do_execute_actions(). Thus, it is safe to simply
	 * return the error code and let the caller (also
	 * do_execute_actions()) free skb on error. */
	return do_execute_actions(dp, sample_skb, a, rem);
}
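
/* The probability test in sample() relies on OVS_SAMPLE_ATTR_PROBABILITY
 * being scaled to the full u32 range: 0 never samples and UINT32_MAX
 * samples (almost) always, so comparing against prandom_u32() needs no
 * division. */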
static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
{
	struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;

	key->ovs_flow_hash = hash;
}
static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_TUNNEL_INFO:
		OVS_CB(skb)->tun_info = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_MPLS:
		err = set_mpls(skb, nla_data(nested_attr));
		break;
	}

	return err;
}
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  const struct nlattr *a)
{
	struct sw_flow_key recirc_key;
	int err;

	err = ovs_flow_key_extract_recirc(nla_get_u32(a), OVS_CB(skb)->pkt_key,
					  skb, &recirc_key);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	ovs_dp_process_packet_with_key(skb, &recirc_key, true);

	return 0;
}
/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (unlikely(prev_port != -1)) {
			struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);

			if (out_skb)
				do_output(dp, out_skb, prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;
		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;
		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, a);
			break;
		case OVS_ACTION_ATTR_PUSH_MPLS:
			err = push_mpls(skb, nla_data(a));
			break;
		case OVS_ACTION_ATTR_POP_MPLS:
			err = pop_mpls(skb, nla_get_be16(a));
			break;
		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;
		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;
		case OVS_ACTION_ATTR_RECIRC: {
			struct sk_buff *recirc_skb;

			if (last_action(a, rem))
				return execute_recirc(dp, skb, a);

			/* Recirc action is not the last action
			 * of the action list. */
			recirc_skb = skb_clone(skb, GFP_ATOMIC);

			/* Skip the recirc action when out of memory, but
			 * continue on with the rest of the action list. */
			if (recirc_skb)
				err = execute_recirc(dp, recirc_skb, a);
			break;
		}
		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;
		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}
/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop.
 *
 * Each loop adds some (estimated) cost to the kernel stack.
 * The loop terminates when the max cost is exceeded.
 */
#define RECIRC_STACK_COST 1
#define DEFAULT_STACK_COST 4
/* Allow up to 4 regular action-list executions, and up to 3 recirculations */
#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)
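
/* With these weights MAX_STACK_COST is 4 * 4 + 1 * 3 = 19, so four
 * full-cost passes plus three recirculations fit exactly and the next
 * nested entry trips the limit. */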
struct loop_counter {
	u8 stack_cost;		/* loop stack cost. */
	bool looping;		/* Loop detected? */
};

static DEFINE_PER_CPU(struct loop_counter, loop_counters);
static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow loop detected, dropping\n",
			ovs_dp_name(dp));
	actions->actions_len = 0;
	return -ELOOP;
}
/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, bool recirc)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	loop->stack_cost += stack_cost;
	if (unlikely(loop->stack_cost > MAX_STACK_COST))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	OVS_CB(skb)->tun_info = NULL;
	error = do_execute_actions(dp, skb, acts->actions, acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop stack cost. */
	loop->stack_cost -= stack_cost;
	if (!loop->stack_cost)
		loop->looping = false;

	return error;
}