/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/sctp/checksum.h>

#include "datapath.h"
#include "vlan.h"
#include "vport.h"

static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len);

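/* Ensure that the first 'write_len' bytes of 'skb' are writable: a shared
 * clone must not be modified in place, so its data is copied first. */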
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/* Remove VLAN header from packet and update csum accordingly. */
static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
{
	struct vlan_hdr *vhdr;
	int err;

	err = make_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*current_tci = vhdr->h_vlan_TCI;

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);

	return 0;
}

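/* Pop the outermost VLAN tag: either clear the hardware-accelerated tag or,
 * failing that, strip an 802.1Q header from the packet data. If another tag
 * follows, it is promoted into the hw-accel slot. */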
static int pop_vlan(struct sk_buff *skb)
{
	__be16 tci;
	int err;

	if (likely(vlan_tx_tag_present(skb))) {
		vlan_set_tci(skb, 0);
	} else {
		if (unlikely(skb->protocol != htons(ETH_P_8021Q) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __pop_vlan_tci(skb, &tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	err = __pop_vlan_tci(skb, &tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci));
	return 0;
}

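/* Push a new outermost VLAN tag. A tag already held in the hw-accel slot is
 * first written back into the packet data so the new tag can replace it. */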
static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
{
	if (unlikely(vlan_tx_tag_present(skb))) {
		u16 current_tag;

		/* push down current VLAN tag */
		current_tag = vlan_tx_tag_get(skb);

		if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag))
			return -ENOMEM;

		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->csum = csum_add(skb->csum, csum_partial(skb->data
					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
	}
	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
	return 0;
}

static int set_eth_addr(struct sk_buff *skb,
			const struct ovs_key_ethernet *eth_key)
{
	int err;

	err = make_writable(skb, ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);

	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);

	return 0;
}

static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
			__be32 *addr, __be32 new_addr)
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (nh->protocol == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
						 *addr, new_addr, 1);
	} else if (nh->protocol == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace4(&uh->check, skb,
							 *addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}

	csum_replace4(&nh->check, *addr, new_addr);
	skb_clear_hash(skb);
	*addr = new_addr;
}

static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == NEXTHDR_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == NEXTHDR_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	} else if (l4_proto == NEXTHDR_ICMP) {
		if (likely(transport_len >= sizeof(struct icmp6hdr)))
			inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
						  skb, addr, new_addr, 1);
	}
}

static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
			  __be32 addr[4], const __be32 new_addr[4],
			  bool recalculate_csum)
{
	if (recalculate_csum)
		update_ipv6_checksum(skb, l4_proto, addr, new_addr);

	skb_clear_hash(skb);
	memcpy(addr, new_addr, sizeof(__be32[4]));
}

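/* In struct ipv6hdr the 8-bit traffic class straddles two fields: its high
 * nibble lives in 'priority' and its low nibble in the top four bits of
 * flow_lbl[0]; the remaining 20 bits of flow_lbl hold the flow label. */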
static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
{
	nh->priority = tc >> 4;
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
}

static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
{
	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | ((fl & 0x000F0000) >> 16);
	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
	nh->flow_lbl[2] = fl & 0x000000FF;
}

static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
{
	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
	nh->ttl = new_ttl;
}

static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
{
	struct iphdr *nh;
	int err;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct iphdr));
	if (unlikely(err))
		return err;

	nh = ip_hdr(skb);

	if (ipv4_key->ipv4_src != nh->saddr)
		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);

	if (ipv4_key->ipv4_dst != nh->daddr)
		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);

	if (ipv4_key->ipv4_tos != nh->tos)
		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);

	if (ipv4_key->ipv4_ttl != nh->ttl)
		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);

	return 0;
}

static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
{
	struct ipv6hdr *nh;
	int err;
	__be32 *saddr;
	__be32 *daddr;

	err = make_writable(skb, skb_network_offset(skb) +
				 sizeof(struct ipv6hdr));
	if (unlikely(err))
		return err;

	nh = ipv6_hdr(skb);
	saddr = (__be32 *)&nh->saddr;
	daddr = (__be32 *)&nh->daddr;

	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
			      ipv6_key->ipv6_src, true);

	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
		unsigned int offset = 0;
		int flags = OVS_IP6T_FH_F_SKIP_RH;
		bool recalc_csum = true;

		/* With a routing extension header present, the L4 checksum
		 * is computed over the final destination in the routing
		 * header, not this field, so it must not be recalculated. */
		if (ipv6_ext_hdr(nh->nexthdr))
			recalc_csum = ipv6_find_hdr(skb, &offset,
						    NEXTHDR_ROUTING, NULL,
						    &flags) != NEXTHDR_ROUTING;

		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
			      ipv6_key->ipv6_dst, recalc_csum);
	}

	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
	nh->hop_limit = ipv6_key->ipv6_hlimit;

	return 0;
}

/* Must follow make_writable() since that can move the skb data. */
static void set_tp_port(struct sk_buff *skb, __be16 *port,
			__be16 new_port, __sum16 *check)
{
	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
	*port = new_port;
	skb_clear_hash(skb);
}

static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		/* A computed UDP checksum of zero is transmitted as all ones. */
		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_hash(skb);
	}
}

static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}

static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
{
	struct tcphdr *th;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct tcphdr));
	if (unlikely(err))
		return err;

	th = tcp_hdr(skb);
	if (tcp_port_key->tcp_src != th->source)
		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);

	if (tcp_port_key->tcp_dst != th->dest)
		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);

	return 0;
}

static int set_sctp(struct sk_buff *skb,
		    const struct ovs_key_sctp *sctp_port_key)
{
	struct sctphdr *sh;
	int err;
	unsigned int sctphoff = skb_transport_offset(skb);

	err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
	if (unlikely(err))
		return err;

	sh = sctp_hdr(skb);
	if (sctp_port_key->sctp_src != sh->source ||
	    sctp_port_key->sctp_dst != sh->dest) {
		__le32 old_correct_csum, new_csum, old_csum;

		old_csum = sh->checksum;
		old_correct_csum = sctp_compute_cksum(skb, sctphoff);

		sh->source = sctp_port_key->sctp_src;
		sh->dest = sctp_port_key->sctp_dst;

		new_csum = sctp_compute_cksum(skb, sctphoff);

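		/* If the packet arrived with a correct checksum, old_csum
		 * equals old_correct_csum and the XOR below reduces to
		 * new_csum; if it arrived corrupted, the same error delta is
		 * folded into the new value, keeping the corruption
		 * detectable downstream. */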
		/* Carry any checksum errors through. */
		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;

		skb_clear_hash(skb);
	}

	return 0;
}

static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
	struct vport *vport;

	if (unlikely(!skb))
		return -ENOMEM;

	vport = ovs_vport_rcu(dp, out_port);
	if (unlikely(!vport)) {
		kfree_skb(skb);
		return -ENODEV;
	}

	ovs_vport_send(vport, skb);
	return 0;
}

static int output_userspace(struct datapath *dp, struct sk_buff *skb,
			    const struct nlattr *attr)
{
	struct dp_upcall_info upcall;
	const struct nlattr *a;
	int rem;

	BUG_ON(!OVS_CB(skb)->pkt_key);

	upcall.cmd = OVS_PACKET_CMD_ACTION;
	upcall.key = OVS_CB(skb)->pkt_key;
	upcall.userdata = NULL;
	upcall.portid = 0;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_USERSPACE_ATTR_USERDATA:
			upcall.userdata = a;
			break;

		case OVS_USERSPACE_ATTR_PID:
			upcall.portid = nla_get_u32(a);
			break;
		}
	}

	return ovs_dp_upcall(dp, skb, &upcall);
}

static bool last_action(const struct nlattr *a, int rem)
{
	return a->nla_len == rem;
}

static int sample(struct datapath *dp, struct sk_buff *skb,
		  const struct nlattr *attr)
{
	const struct nlattr *acts_list = NULL;
	const struct nlattr *a;
	struct sk_buff *sample_skb;
	int rem;

	for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
	     a = nla_next(a, &rem)) {
		switch (nla_type(a)) {
		case OVS_SAMPLE_ATTR_PROBABILITY:
			if (prandom_u32() >= nla_get_u32(a))
				return 0;
			break;

		case OVS_SAMPLE_ATTR_ACTIONS:
			acts_list = a;
			break;
		}
	}

	rem = nla_len(acts_list);
	a = nla_data(acts_list);

	/* The actions list is either empty or only contains a single
	 * user-space action, the latter being a special case as it is the
	 * only known usage of the sample action.
	 * In these special cases don't clone the skb as there are no
	 * side effects in the nested actions.
	 * Otherwise, clone in case the nested actions have side effects. */
	if (likely(rem == 0 ||
		   (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
		    last_action(a, rem)))) {
		sample_skb = skb;
		skb_get(skb);
	} else {
		sample_skb = skb_clone(skb, GFP_ATOMIC);
		if (!sample_skb)
			/* Skip the sample action when out of memory. */
			return 0;
	}

	/* Note that do_execute_actions() never consumes skb.
	 * In the case where skb has been cloned above it is the clone that
	 * is consumed.  Otherwise the skb_get(skb) call prevents
	 * consumption by do_execute_actions(). Thus, it is safe to simply
	 * return the error code and let the caller (also
	 * do_execute_actions()) free skb on error. */
	return do_execute_actions(dp, sample_skb, a, rem);
}

static void execute_hash(struct sk_buff *skb, const struct nlattr *attr)
{
	struct sw_flow_key *key = OVS_CB(skb)->pkt_key;
	struct ovs_action_hash *hash_act = nla_data(attr);
	u32 hash = 0;

	/* OVS_HASH_ALG_L4 is the only possible hash algorithm. */
	hash = skb_get_hash(skb);
	hash = jhash_1word(hash, hash_act->hash_basis);
	if (!hash)
		hash = 0x1;	/* Zero is reserved to mean "no hash". */

	key->ovs_flow_hash = hash;
}

static int execute_set_action(struct sk_buff *skb,
			      const struct nlattr *nested_attr)
{
	int err = 0;

	switch (nla_type(nested_attr)) {
	case OVS_KEY_ATTR_PRIORITY:
		skb->priority = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_SKB_MARK:
		skb->mark = nla_get_u32(nested_attr);
		break;

	case OVS_KEY_ATTR_IPV4_TUNNEL:
		OVS_CB(skb)->tun_key = nla_data(nested_attr);
		break;

	case OVS_KEY_ATTR_ETHERNET:
		err = set_eth_addr(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV4:
		err = set_ipv4(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_IPV6:
		err = set_ipv6(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_TCP:
		err = set_tcp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_UDP:
		err = set_udp(skb, nla_data(nested_attr));
		break;

	case OVS_KEY_ATTR_SCTP:
		err = set_sctp(skb, nla_data(nested_attr));
		break;
	}

	return err;
}

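/* Re-inject 'skb' at the start of the pipeline under a fresh flow key. On
 * error the skb is freed here; either way the caller no longer owns it. */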
static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
			  const struct nlattr *a)
{
	struct sw_flow_key recirc_key;
	int err;

	err = ovs_flow_key_extract_recirc(nla_get_u32(a), OVS_CB(skb)->pkt_key,
					  skb, &recirc_key);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	ovs_dp_process_packet_with_key(skb, &recirc_key, true);

	return 0;
}

/* Execute a list of actions against 'skb'. */
static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
			      const struct nlattr *attr, int len)
{
	/* Every output action needs a separate clone of 'skb', but the common
	 * case is just a single output action, so that doing a clone and
	 * then freeing the original skbuff is wasteful.  So the following code
	 * is slightly obscure just to avoid that. */
	int prev_port = -1;
	const struct nlattr *a;
	int rem;

	for (a = attr, rem = len; rem > 0;
	     a = nla_next(a, &rem)) {
		int err = 0;

		if (prev_port != -1) {
			do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port);
			prev_port = -1;
		}

		switch (nla_type(a)) {
		case OVS_ACTION_ATTR_OUTPUT:
			prev_port = nla_get_u32(a);
			break;

		case OVS_ACTION_ATTR_USERSPACE:
			output_userspace(dp, skb, a);
			break;

		case OVS_ACTION_ATTR_HASH:
			execute_hash(skb, a);
			break;

		case OVS_ACTION_ATTR_PUSH_VLAN:
			err = push_vlan(skb, nla_data(a));
			if (unlikely(err)) /* skb already freed. */
				return err;
			break;

		case OVS_ACTION_ATTR_POP_VLAN:
			err = pop_vlan(skb);
			break;

		case OVS_ACTION_ATTR_RECIRC: {
			struct sk_buff *recirc_skb;

			if (last_action(a, rem))
				return execute_recirc(dp, skb, a);

			/* Recirc action is not the last action
			 * of the action list. */
			recirc_skb = skb_clone(skb, GFP_ATOMIC);

			/* Skip the recirc action when out of memory, but
			 * continue on with the rest of the action list. */
			if (recirc_skb)
				err = execute_recirc(dp, recirc_skb, a);

			break;
		}

		case OVS_ACTION_ATTR_SET:
			err = execute_set_action(skb, nla_data(a));
			break;

		case OVS_ACTION_ATTR_SAMPLE:
			err = sample(dp, skb, a);
			break;
		}

		if (unlikely(err)) {
			kfree_skb(skb);
			return err;
		}
	}

	if (prev_port != -1)
		do_output(dp, skb, prev_port);
	else
		consume_skb(skb);

	return 0;
}

/* We limit the number of times that we pass into execute_actions()
 * to avoid blowing out the stack in the event that we have a loop.
 *
 * Each nested call adds some (estimated) cost to the kernel stack.
 * The loop terminates when the max cost is exceeded.
 */
#define RECIRC_STACK_COST 1
#define DEFAULT_STACK_COST 4
/* Allow up to 4 regular (non-recirculated) executions and up to 3
 * recirculations. */
#define MAX_STACK_COST (DEFAULT_STACK_COST * 4 + RECIRC_STACK_COST * 3)
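/* Worked out: MAX_STACK_COST = 4 * 4 + 1 * 3 = 19. A fourth nested
 * recirculation would raise the running cost to 20 and trip the limit. */
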
struct loop_counter {
	u8 stack_cost;		/* loop stack cost. */
	bool looping;		/* Loop detected? */
};

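/* One counter per CPU suffices: action execution runs to completion with
 * bottom halves disabled, so invocations on a CPU nest, never interleave. */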
static DEFINE_PER_CPU(struct loop_counter, loop_counters);

static int loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow loop detected, dropping\n",
			ovs_dp_name(dp));
	actions->actions_len = 0;
	return -ELOOP;
}

/* Execute a list of actions against 'skb'. */
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, bool recirc)
{
	struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
	const u8 stack_cost = recirc ? RECIRC_STACK_COST : DEFAULT_STACK_COST;
	struct loop_counter *loop;
	int error;

	/* Check whether we've looped too much. */
	loop = &__get_cpu_var(loop_counters);
	loop->stack_cost += stack_cost;
	if (unlikely(loop->stack_cost > MAX_STACK_COST))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		error = loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	OVS_CB(skb)->tun_key = NULL;
	error = do_execute_actions(dp, skb, acts->actions, acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		error = loop_suppress(dp, acts);

out_loop:
	/* Decrement loop stack cost. */
	loop->stack_cost -= stack_cost;
	if (!loop->stack_cost)
		loop->looping = false;

	return error;
}