2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
36 const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
37 const struct in6_addr in6addr_all_hosts = IN6ADDR_ALL_HOSTS_INIT;
/* Parses 's' as a 16-digit hexadecimal number representing a datapath ID.  On
 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
 * into '*dpidp' and returns false.
 *
 * Rejects an all-zeros dpid as invalid. */
bool
dpid_from_string(const char *s, uint64_t *dpidp)
{
    /* Exactly 16 hex digits are required; anything else parses as 0. */
    *dpidp = (strlen(s) == 16 && strspn(s, "0123456789abcdefABCDEF") == 16
              ? strtoull(s, NULL, 16)
              : 0);
    return *dpidp != 0;
}
53 /* Returns true if 'ea' is a reserved address, that a bridge must never
54 * forward, false otherwise.
56 * If you change this function's behavior, please update corresponding
57 * documentation in vswitch.xml at the same time. */
59 eth_addr_is_reserved(const struct eth_addr ea)
61 struct eth_addr_node {
62 struct hmap_node hmap_node;
66 static struct eth_addr_node nodes[] = {
67 /* STP, IEEE pause frames, and other reserved protocols. */
68 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000000ULL },
69 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000001ULL },
70 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000002ULL },
71 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000003ULL },
72 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000004ULL },
73 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000005ULL },
74 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000006ULL },
75 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000007ULL },
76 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000008ULL },
77 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000009ULL },
78 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000aULL },
79 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000bULL },
80 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000cULL },
81 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000dULL },
82 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000eULL },
83 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000fULL },
85 /* Extreme protocols. */
86 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000000ULL }, /* EDP. */
87 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000004ULL }, /* EAPS. */
88 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000006ULL }, /* EAPS. */
90 /* Cisco protocols. */
91 { HMAP_NODE_NULL_INITIALIZER, 0x01000c000000ULL }, /* ISL. */
92 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccccULL }, /* PAgP, UDLD, CDP,
94 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccccccdULL }, /* PVST+. */
95 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccdcdcdULL }, /* STP Uplink Fast,
99 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc0ULL },
100 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc1ULL },
101 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc2ULL },
102 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc3ULL },
103 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc4ULL },
104 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc5ULL },
105 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc6ULL },
106 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc7ULL },
109 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
110 struct eth_addr_node *node;
111 static struct hmap addrs;
114 if (ovsthread_once_start(&once)) {
116 for (node = nodes; node < &nodes[ARRAY_SIZE(nodes)]; node++) {
117 hmap_insert(&addrs, &node->hmap_node, hash_uint64(node->ea64));
119 ovsthread_once_done(&once);
122 ea64 = eth_addr_to_uint64(ea);
123 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_uint64(ea64), &addrs) {
124 if (node->ea64 == ea64) {
132 eth_addr_from_string(const char *s, struct eth_addr *ea)
134 if (ovs_scan(s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(*ea))) {
142 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
143 * This function is used by Open vSwitch to compose packets in cases where
144 * context is important but content doesn't (or shouldn't) matter.
146 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
149 compose_rarp(struct dp_packet *b, const struct eth_addr eth_src)
151 struct eth_header *eth;
152 struct arp_eth_header *arp;
155 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN
156 + ARP_ETH_HEADER_LEN);
157 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
158 eth = dp_packet_put_uninit(b, sizeof *eth);
159 eth->eth_dst = eth_addr_broadcast;
160 eth->eth_src = eth_src;
161 eth->eth_type = htons(ETH_TYPE_RARP);
163 arp = dp_packet_put_uninit(b, sizeof *arp);
164 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
165 arp->ar_pro = htons(ARP_PRO_IP);
166 arp->ar_hln = sizeof arp->ar_sha;
167 arp->ar_pln = sizeof arp->ar_spa;
168 arp->ar_op = htons(ARP_OP_RARP);
169 arp->ar_sha = eth_src;
170 put_16aligned_be32(&arp->ar_spa, htonl(0));
171 arp->ar_tha = eth_src;
172 put_16aligned_be32(&arp->ar_tpa, htonl(0));
174 dp_packet_reset_offsets(b);
175 dp_packet_set_l3(b, arp);
178 /* Insert VLAN header according to given TCI. Packet passed must be Ethernet
179 * packet. Ignores the CFI bit of 'tci' using 0 instead.
181 * Also adjusts the layer offsets accordingly. */
183 eth_push_vlan(struct dp_packet *packet, ovs_be16 tpid, ovs_be16 tci)
185 struct vlan_eth_header *veh;
187 /* Insert new 802.1Q header. */
188 veh = dp_packet_resize_l2(packet, VLAN_HEADER_LEN);
189 memmove(veh, (char *)veh + VLAN_HEADER_LEN, 2 * ETH_ADDR_LEN);
190 veh->veth_type = tpid;
191 veh->veth_tci = tci & htons(~VLAN_CFI);
194 /* Removes outermost VLAN header (if any is present) from 'packet'.
196 * 'packet->l2_5' should initially point to 'packet''s outer-most VLAN header
197 * or may be NULL if there are no VLAN headers. */
199 eth_pop_vlan(struct dp_packet *packet)
201 struct vlan_eth_header *veh = dp_packet_l2(packet);
203 if (veh && dp_packet_size(packet) >= sizeof *veh
204 && eth_type_vlan(veh->veth_type)) {
206 memmove((char *)veh + VLAN_HEADER_LEN, veh, 2 * ETH_ADDR_LEN);
207 dp_packet_resize_l2(packet, -VLAN_HEADER_LEN);
211 /* Set ethertype of the packet. */
213 set_ethertype(struct dp_packet *packet, ovs_be16 eth_type)
215 struct eth_header *eh = dp_packet_l2(packet);
221 if (eth_type_vlan(eh->eth_type)) {
223 char *l2_5 = dp_packet_l2_5(packet);
225 p = ALIGNED_CAST(ovs_be16 *,
226 (l2_5 ? l2_5 : (char *)dp_packet_l3(packet)) - 2);
229 eh->eth_type = eth_type;
233 static bool is_mpls(struct dp_packet *packet)
235 return packet->l2_5_ofs != UINT16_MAX;
238 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
240 set_mpls_lse_ttl(ovs_be32 *lse, uint8_t ttl)
242 *lse &= ~htonl(MPLS_TTL_MASK);
243 *lse |= htonl((ttl << MPLS_TTL_SHIFT) & MPLS_TTL_MASK);
246 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
248 set_mpls_lse_tc(ovs_be32 *lse, uint8_t tc)
250 *lse &= ~htonl(MPLS_TC_MASK);
251 *lse |= htonl((tc << MPLS_TC_SHIFT) & MPLS_TC_MASK);
254 /* Set label of an MPLS label stack entry (LSE). */
256 set_mpls_lse_label(ovs_be32 *lse, ovs_be32 label)
258 *lse &= ~htonl(MPLS_LABEL_MASK);
259 *lse |= htonl((ntohl(label) << MPLS_LABEL_SHIFT) & MPLS_LABEL_MASK);
262 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
264 set_mpls_lse_bos(ovs_be32 *lse, uint8_t bos)
266 *lse &= ~htonl(MPLS_BOS_MASK);
267 *lse |= htonl((bos << MPLS_BOS_SHIFT) & MPLS_BOS_MASK);
270 /* Compose an MPLS label stack entry (LSE) from its components:
271 * label, traffic class (TC), time to live (TTL) and
272 * bottom of stack (BoS) bit. */
274 set_mpls_lse_values(uint8_t ttl, uint8_t tc, uint8_t bos, ovs_be32 label)
276 ovs_be32 lse = htonl(0);
277 set_mpls_lse_ttl(&lse, ttl);
278 set_mpls_lse_tc(&lse, tc);
279 set_mpls_lse_bos(&lse, bos);
280 set_mpls_lse_label(&lse, label);
284 /* Set MPLS label stack entry to outermost MPLS header.*/
286 set_mpls_lse(struct dp_packet *packet, ovs_be32 mpls_lse)
288 /* Packet type should be MPLS to set label stack entry. */
289 if (is_mpls(packet)) {
290 struct mpls_hdr *mh = dp_packet_l2_5(packet);
292 /* Update mpls label stack entry. */
293 put_16aligned_be32(&mh->mpls_lse, mpls_lse);
297 /* Push MPLS label stack entry 'lse' onto 'packet' as the outermost MPLS
298 * header. If 'packet' does not already have any MPLS labels, then its
299 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
301 push_mpls(struct dp_packet *packet, ovs_be16 ethtype, ovs_be32 lse)
306 if (!eth_type_mpls(ethtype)) {
310 if (!is_mpls(packet)) {
311 /* Set MPLS label stack offset. */
312 packet->l2_5_ofs = packet->l3_ofs;
315 set_ethertype(packet, ethtype);
317 /* Push new MPLS shim header onto packet. */
318 len = packet->l2_5_ofs;
319 header = dp_packet_resize_l2_5(packet, MPLS_HLEN);
320 memmove(header, header + MPLS_HLEN, len);
321 memcpy(header + len, &lse, sizeof lse);
324 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
325 * If the label that was removed was the only MPLS label, changes 'packet''s
326 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
329 pop_mpls(struct dp_packet *packet, ovs_be16 ethtype)
331 if (is_mpls(packet)) {
332 struct mpls_hdr *mh = dp_packet_l2_5(packet);
333 size_t len = packet->l2_5_ofs;
335 set_ethertype(packet, ethtype);
336 if (get_16aligned_be32(&mh->mpls_lse) & htonl(MPLS_BOS_MASK)) {
337 dp_packet_set_l2_5(packet, NULL);
339 /* Shift the l2 header forward. */
340 memmove((char*)dp_packet_data(packet) + MPLS_HLEN, dp_packet_data(packet), len);
341 dp_packet_resize_l2_5(packet, -MPLS_HLEN);
345 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
346 * caller must free '*packetp'. On success, returns NULL. On failure, returns
347 * an error message and stores NULL in '*packetp'.
349 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
351 eth_from_hex(const char *hex, struct dp_packet **packetp)
353 struct dp_packet *packet;
355 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
356 packet = *packetp = dp_packet_new_with_headroom(strlen(hex) / 2, 2);
358 if (dp_packet_put_hex(packet, hex, NULL)[0] != '\0') {
359 dp_packet_delete(packet);
361 return "Trailing garbage in packet data";
364 if (dp_packet_size(packet) < ETH_HEADER_LEN) {
365 dp_packet_delete(packet);
367 return "Packet data too short for Ethernet";
374 eth_format_masked(const struct eth_addr eth,
375 const struct eth_addr *mask, struct ds *s)
377 ds_put_format(s, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth));
378 if (mask && !eth_mask_is_exact(*mask)) {
379 ds_put_format(s, "/"ETH_ADDR_FMT, ETH_ADDR_ARGS(*mask));
383 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
384 * that it specifies, that is, the number of 1-bits in 'netmask'.
386 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
387 * still be in the valid range but isn't otherwise meaningful. */
389 ip_count_cidr_bits(ovs_be32 netmask)
391 return 32 - ctz32(ntohl(netmask));
395 ip_format_masked(ovs_be32 ip, ovs_be32 mask, struct ds *s)
397 ds_put_format(s, IP_FMT, IP_ARGS(ip));
398 if (mask != OVS_BE32_MAX) {
399 if (ip_is_cidr(mask)) {
400 ds_put_format(s, "/%d", ip_count_cidr_bits(mask));
402 ds_put_format(s, "/"IP_FMT, IP_ARGS(mask));
407 /* Parses string 's', which must be an IP address with an optional netmask or
408 * CIDR prefix length. Stores the IP address into '*ip' and the netmask into
409 * '*mask'. (If 's' does not contain a netmask, 255.255.255.255 is
412 * Returns NULL if successful, otherwise an error message that the caller must
414 char * OVS_WARN_UNUSED_RESULT
415 ip_parse_masked(const char *s, ovs_be32 *ip, ovs_be32 *mask)
420 if (ovs_scan(s, IP_SCAN_FMT"/"IP_SCAN_FMT"%n",
421 IP_SCAN_ARGS(ip), IP_SCAN_ARGS(mask), &n) && !s[n]) {
423 } else if (ovs_scan(s, IP_SCAN_FMT"/%d%n", IP_SCAN_ARGS(ip), &prefix, &n)
425 if (prefix <= 0 || prefix > 32) {
426 return xasprintf("%s: network prefix bits not between 0 and "
429 *mask = be32_prefix_mask(prefix);
430 } else if (ovs_scan(s, IP_SCAN_FMT"%n", IP_SCAN_ARGS(ip), &n) && !s[n]) {
431 *mask = OVS_BE32_MAX;
433 return xasprintf("%s: invalid IP address", s);
439 ipv6_format_addr(const struct in6_addr *addr, struct ds *s)
443 ds_reserve(s, s->length + INET6_ADDRSTRLEN);
445 dst = s->string + s->length;
446 inet_ntop(AF_INET6, addr, dst, INET6_ADDRSTRLEN);
447 s->length += strlen(dst);
/* Same as ipv6_format_addr, but optionally encloses the address in square
 * brackets, as in "[::1]". */
void
ipv6_format_addr_bracket(const struct in6_addr *addr, struct ds *s,
                         bool bracket)
{
    if (bracket) {
        ds_put_char(s, '[');
    }
    ipv6_format_addr(addr, s);
    if (bracket) {
        ds_put_char(s, ']');
    }
}
466 ipv6_format_mapped(const struct in6_addr *addr, struct ds *s)
468 if (IN6_IS_ADDR_V4MAPPED(addr)) {
469 ds_put_format(s, IP_FMT, addr->s6_addr[12], addr->s6_addr[13],
470 addr->s6_addr[14], addr->s6_addr[15]);
472 ipv6_format_addr(addr, s);
/* Appends 'addr' to 's'.  Unless 'mask' is NULL or an exact match, also
 * appends "/<N>" for a CIDR mask or "/<mask>" otherwise. */
void
ipv6_format_masked(const struct in6_addr *addr, const struct in6_addr *mask,
                   struct ds *s)
{
    ipv6_format_addr(addr, s);
    if (mask && !ipv6_mask_is_exact(mask)) {
        if (ipv6_is_cidr(mask)) {
            int cidr_bits = ipv6_count_cidr_bits(mask);
            ds_put_format(s, "/%d", cidr_bits);
        } else {
            ds_put_char(s, '/');
            ipv6_format_addr(mask, s);
        }
    }
}
/* Returns the bitwise AND of IPv6 addresses 'a' and 'b'. */
struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
                                 const struct in6_addr *b)
{
    int i;
    struct in6_addr dst;

#ifdef s6_addr32
    /* Fast path: AND four 32-bit words at a time where the platform exposes
     * the s6_addr32 view of the address. */
    for (i = 0; i < 4; i++) {
        dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
    }
#else
    for (i = 0; i < 16; i++) {
        dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
    }
#endif

    return dst;
}
/* Returns an in6_addr consisting of 'mask' high-order 1-bits and 128-N
 * low-order 0-bits. */
struct in6_addr
ipv6_create_mask(int mask)
{
    struct in6_addr netmask;
    uint8_t *netmaskp = &netmask.s6_addr[0];

    memset(&netmask, 0, sizeof netmask);
    /* Fill whole 0xff bytes first... */
    while (mask > 8) {
        *netmaskp = 0xff;
        netmaskp++;
        mask -= 8;
    }

    /* ...then the final partial byte, if any. */
    if (mask) {
        *netmaskp = 0xff << (8 - mask);
    }

    return netmask;
}
/* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
 * address that it specifies, that is, the number of 1-bits in 'netmask'.
 *
 * If 'netmask' is not a CIDR netmask (see ipv6_is_cidr()), the return value
 * will still be in the valid range but isn't otherwise meaningful. */
int
ipv6_count_cidr_bits(const struct in6_addr *netmask)
{
    int i;
    int count = 0;
    const uint8_t *netmaskp = &netmask->s6_addr[0];

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] == 0xff) {
            count += 8;
        } else {
            uint8_t nm;

            /* Count leading 1-bits of the first non-0xff byte; a CIDR mask
             * has only zero bytes after this one. */
            for (nm = netmaskp[i]; nm; nm <<= 1) {
                count++;
            }
            break;
        }
    }

    return count;
}
/* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
 * high-order 1-bits and 128-N low-order 0-bits. */
bool
ipv6_is_cidr(const struct in6_addr *netmask)
{
    const uint8_t *netmaskp = &netmask->s6_addr[0];
    int i;

    for (i = 0; i < 16; i++) {
        if (netmaskp[i] != 0xff) {
            /* This byte must itself be a contiguous run of high 1-bits:
             * its complement plus one must carry all the way, i.e.
             * x & (x + 1) == 0. */
            uint8_t x = ~netmaskp[i];
            if (x & (x + 1)) {
                return false;
            }
            /* ...and every later byte must be all zeros. */
            while (++i < 16) {
                if (netmaskp[i] != 0) {
                    return false;
                }
            }
        }
    }

    return true;
}
588 /* Parses string 's', which must be an IPv6 address with an optional
589 * CIDR prefix length. Stores the IP address into '*ipv6' and the CIDR
590 * prefix in '*prefix'. (If 's' does not contain a CIDR length, all-ones
593 * Returns NULL if successful, otherwise an error message that the caller must
595 char * OVS_WARN_UNUSED_RESULT
596 ipv6_parse_masked(const char *s, struct in6_addr *ipv6, struct in6_addr *mask)
598 char ipv6_s[IPV6_SCAN_LEN + 1];
599 char mask_s[IPV6_SCAN_LEN + 1];
603 if (ovs_scan(s, IPV6_SCAN_FMT"/"IPV6_SCAN_FMT"%n", ipv6_s, mask_s, &n)
604 && inet_pton(AF_INET6, ipv6_s, ipv6) == 1
605 && inet_pton(AF_INET6, mask_s, mask) == 1
608 } else if (ovs_scan(s, IPV6_SCAN_FMT"/%d%n", ipv6_s, &prefix, &n)
609 && inet_pton(AF_INET6, ipv6_s, ipv6) == 1
611 if (prefix <= 0 || prefix > 128) {
612 return xasprintf("%s: prefix bits not between 0 and 128", s);
614 *mask = ipv6_create_mask(prefix);
615 } else if (ovs_scan(s, IPV6_SCAN_FMT"%n", ipv6_s, &n)
616 && inet_pton(AF_INET6, ipv6_s, ipv6) == 1
618 *mask = in6addr_exact;
620 return xasprintf("%s: invalid IP address", s);
625 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
626 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
627 * in 'b' and returned. This payload may be populated with appropriate
628 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
629 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
632 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
635 eth_compose(struct dp_packet *b, const struct eth_addr eth_dst,
636 const struct eth_addr eth_src, uint16_t eth_type,
640 struct eth_header *eth;
644 /* The magic 2 here ensures that the L3 header (when it is added later)
645 * will be 32-bit aligned. */
646 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + size);
647 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
648 eth = dp_packet_put_uninit(b, ETH_HEADER_LEN);
649 data = dp_packet_put_uninit(b, size);
651 eth->eth_dst = eth_dst;
652 eth->eth_src = eth_src;
653 eth->eth_type = htons(eth_type);
655 dp_packet_reset_offsets(b);
656 dp_packet_set_l3(b, data);
662 packet_set_ipv4_addr(struct dp_packet *packet,
663 ovs_16aligned_be32 *addr, ovs_be32 new_addr)
665 struct ip_header *nh = dp_packet_l3(packet);
666 ovs_be32 old_addr = get_16aligned_be32(addr);
667 size_t l4_size = dp_packet_l4_size(packet);
669 if (nh->ip_proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
670 struct tcp_header *th = dp_packet_l4(packet);
672 th->tcp_csum = recalc_csum32(th->tcp_csum, old_addr, new_addr);
673 } else if (nh->ip_proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN ) {
674 struct udp_header *uh = dp_packet_l4(packet);
677 uh->udp_csum = recalc_csum32(uh->udp_csum, old_addr, new_addr);
679 uh->udp_csum = htons(0xffff);
683 nh->ip_csum = recalc_csum32(nh->ip_csum, old_addr, new_addr);
684 put_16aligned_be32(addr, new_addr);
687 /* Returns true, if packet contains at least one routing header where
688 * segements_left > 0.
690 * This function assumes that L3 and L4 offsets are set in the packet. */
692 packet_rh_present(struct dp_packet *packet)
694 const struct ovs_16aligned_ip6_hdr *nh;
698 uint8_t *data = dp_packet_l3(packet);
700 remaining = packet->l4_ofs - packet->l3_ofs;
702 if (remaining < sizeof *nh) {
705 nh = ALIGNED_CAST(struct ovs_16aligned_ip6_hdr *, data);
707 remaining -= sizeof *nh;
708 nexthdr = nh->ip6_nxt;
711 if ((nexthdr != IPPROTO_HOPOPTS)
712 && (nexthdr != IPPROTO_ROUTING)
713 && (nexthdr != IPPROTO_DSTOPTS)
714 && (nexthdr != IPPROTO_AH)
715 && (nexthdr != IPPROTO_FRAGMENT)) {
716 /* It's either a terminal header (e.g., TCP, UDP) or one we
717 * don't understand. In either case, we're done with the
718 * packet, so use it to fill in 'nw_proto'. */
722 /* We only verify that at least 8 bytes of the next header are
723 * available, but many of these headers are longer. Ensure that
724 * accesses within the extension header are within those first 8
725 * bytes. All extension headers are required to be at least 8
731 if (nexthdr == IPPROTO_AH) {
732 /* A standard AH definition isn't available, but the fields
733 * we care about are in the same location as the generic
734 * option header--only the header length is calculated
736 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
738 nexthdr = ext_hdr->ip6e_nxt;
739 len = (ext_hdr->ip6e_len + 2) * 4;
740 } else if (nexthdr == IPPROTO_FRAGMENT) {
741 const struct ovs_16aligned_ip6_frag *frag_hdr
742 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag *, data);
744 nexthdr = frag_hdr->ip6f_nxt;
745 len = sizeof *frag_hdr;
746 } else if (nexthdr == IPPROTO_ROUTING) {
747 const struct ip6_rthdr *rh = (struct ip6_rthdr *)data;
749 if (rh->ip6r_segleft > 0) {
753 nexthdr = rh->ip6r_nxt;
754 len = (rh->ip6r_len + 1) * 8;
756 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
758 nexthdr = ext_hdr->ip6e_nxt;
759 len = (ext_hdr->ip6e_len + 1) * 8;
762 if (remaining < len) {
773 packet_update_csum128(struct dp_packet *packet, uint8_t proto,
774 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4])
776 size_t l4_size = dp_packet_l4_size(packet);
778 if (proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
779 struct tcp_header *th = dp_packet_l4(packet);
781 th->tcp_csum = recalc_csum128(th->tcp_csum, addr, new_addr);
782 } else if (proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN) {
783 struct udp_header *uh = dp_packet_l4(packet);
786 uh->udp_csum = recalc_csum128(uh->udp_csum, addr, new_addr);
788 uh->udp_csum = htons(0xffff);
791 } else if (proto == IPPROTO_ICMPV6 &&
792 l4_size >= sizeof(struct icmp6_header)) {
793 struct icmp6_header *icmp = dp_packet_l4(packet);
795 icmp->icmp6_cksum = recalc_csum128(icmp->icmp6_cksum, addr, new_addr);
800 packet_set_ipv6_addr(struct dp_packet *packet, uint8_t proto,
801 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4],
802 bool recalculate_csum)
804 if (recalculate_csum) {
805 packet_update_csum128(packet, proto, addr, new_addr);
807 memcpy(addr, new_addr, sizeof(ovs_be32[4]));
811 packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label, ovs_be32 flow_key)
813 ovs_be32 old_label = get_16aligned_be32(flow_label);
814 ovs_be32 new_label = (old_label & htonl(~IPV6_LABEL_MASK)) | flow_key;
815 put_16aligned_be32(flow_label, new_label);
819 packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc)
821 ovs_be32 old_label = get_16aligned_be32(flow_label);
822 ovs_be32 new_label = (old_label & htonl(0xF00FFFFF)) | htonl(tc << 20);
823 put_16aligned_be32(flow_label, new_label);
826 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
827 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
828 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
831 packet_set_ipv4(struct dp_packet *packet, ovs_be32 src, ovs_be32 dst,
832 uint8_t tos, uint8_t ttl)
834 struct ip_header *nh = dp_packet_l3(packet);
836 if (get_16aligned_be32(&nh->ip_src) != src) {
837 packet_set_ipv4_addr(packet, &nh->ip_src, src);
840 if (get_16aligned_be32(&nh->ip_dst) != dst) {
841 packet_set_ipv4_addr(packet, &nh->ip_dst, dst);
844 if (nh->ip_tos != tos) {
845 uint8_t *field = &nh->ip_tos;
847 nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t) *field),
848 htons((uint16_t) tos));
852 if (nh->ip_ttl != ttl) {
853 uint8_t *field = &nh->ip_ttl;
855 nh->ip_csum = recalc_csum16(nh->ip_csum, htons(*field << 8),
861 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
862 * 'dst', 'traffic class', and 'next hop'. Updates 'packet''s L4 checksums as
863 * appropriate. 'packet' must contain a valid IPv6 packet with correctly
864 * populated l[34] offsets. */
866 packet_set_ipv6(struct dp_packet *packet, uint8_t proto, const ovs_be32 src[4],
867 const ovs_be32 dst[4], uint8_t key_tc, ovs_be32 key_fl,
870 struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet);
872 if (memcmp(&nh->ip6_src, src, sizeof(ovs_be32[4]))) {
873 packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32, src, true);
876 if (memcmp(&nh->ip6_dst, dst, sizeof(ovs_be32[4]))) {
877 packet_set_ipv6_addr(packet, proto, nh->ip6_dst.be32, dst,
878 !packet_rh_present(packet));
881 packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
883 packet_set_ipv6_flow_label(&nh->ip6_flow, key_fl);
885 nh->ip6_hlim = key_hl;
889 packet_set_port(ovs_be16 *port, ovs_be16 new_port, ovs_be16 *csum)
891 if (*port != new_port) {
892 *csum = recalc_csum16(*csum, *port, new_port);
897 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
898 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
899 * with its l4 offset properly populated. */
901 packet_set_tcp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
903 struct tcp_header *th = dp_packet_l4(packet);
905 packet_set_port(&th->tcp_src, src, &th->tcp_csum);
906 packet_set_port(&th->tcp_dst, dst, &th->tcp_csum);
909 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
910 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
911 * with its l4 offset properly populated. */
913 packet_set_udp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
915 struct udp_header *uh = dp_packet_l4(packet);
918 packet_set_port(&uh->udp_src, src, &uh->udp_csum);
919 packet_set_port(&uh->udp_dst, dst, &uh->udp_csum);
922 uh->udp_csum = htons(0xffff);
930 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
931 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
932 * with its l4 offset properly populated. */
934 packet_set_sctp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
936 struct sctp_header *sh = dp_packet_l4(packet);
937 ovs_be32 old_csum, old_correct_csum, new_csum;
938 uint16_t tp_len = dp_packet_l4_size(packet);
940 old_csum = get_16aligned_be32(&sh->sctp_csum);
941 put_16aligned_be32(&sh->sctp_csum, 0);
942 old_correct_csum = crc32c((void *)sh, tp_len);
947 new_csum = crc32c((void *)sh, tp_len);
948 put_16aligned_be32(&sh->sctp_csum, old_csum ^ old_correct_csum ^ new_csum);
951 /* Sets the ICMP type and code of the ICMP header contained in 'packet'.
952 * 'packet' must be a valid ICMP packet with its l4 offset properly
955 packet_set_icmp(struct dp_packet *packet, uint8_t type, uint8_t code)
957 struct icmp_header *ih = dp_packet_l4(packet);
958 ovs_be16 orig_tc = htons(ih->icmp_type << 8 | ih->icmp_code);
959 ovs_be16 new_tc = htons(type << 8 | code);
961 if (orig_tc != new_tc) {
962 ih->icmp_type = type;
963 ih->icmp_code = code;
965 ih->icmp_csum = recalc_csum16(ih->icmp_csum, orig_tc, new_tc);
970 packet_set_nd(struct dp_packet *packet, const ovs_be32 target[4],
971 const struct eth_addr sll, const struct eth_addr tll) {
972 struct ovs_nd_msg *ns;
973 struct ovs_nd_opt *nd_opt;
974 int bytes_remain = dp_packet_l4_size(packet);
976 if (OVS_UNLIKELY(bytes_remain < sizeof(*ns))) {
980 ns = dp_packet_l4(packet);
981 nd_opt = &ns->options[0];
982 bytes_remain -= sizeof(*ns);
984 if (memcmp(&ns->target, target, sizeof(ovs_be32[4]))) {
985 packet_set_ipv6_addr(packet, IPPROTO_ICMPV6,
990 while (bytes_remain >= ND_OPT_LEN && nd_opt->nd_opt_len != 0) {
991 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
992 && nd_opt->nd_opt_len == 1) {
993 if (!eth_addr_equals(nd_opt->nd_opt_mac, sll)) {
994 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
996 *csum = recalc_csum48(*csum, nd_opt->nd_opt_mac, sll);
997 nd_opt->nd_opt_mac = sll;
1000 /* A packet can only contain one SLL or TLL option */
1002 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
1003 && nd_opt->nd_opt_len == 1) {
1004 if (!eth_addr_equals(nd_opt->nd_opt_mac, tll)) {
1005 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
1007 *csum = recalc_csum48(*csum, nd_opt->nd_opt_mac, tll);
1008 nd_opt->nd_opt_mac = tll;
1011 /* A packet can only contain one SLL or TLL option */
1015 nd_opt += nd_opt->nd_opt_len;
1016 bytes_remain -= nd_opt->nd_opt_len * ND_OPT_LEN;
1021 packet_tcp_flag_to_string(uint32_t flag)
1053 /* Appends a string representation of the TCP flags value 'tcp_flags'
1054 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
1055 * format used by tcpdump. */
1057 packet_format_tcp_flags(struct ds *s, uint16_t tcp_flags)
1060 ds_put_cstr(s, "none");
1064 if (tcp_flags & TCP_SYN) {
1065 ds_put_char(s, 'S');
1067 if (tcp_flags & TCP_FIN) {
1068 ds_put_char(s, 'F');
1070 if (tcp_flags & TCP_PSH) {
1071 ds_put_char(s, 'P');
1073 if (tcp_flags & TCP_RST) {
1074 ds_put_char(s, 'R');
1076 if (tcp_flags & TCP_URG) {
1077 ds_put_char(s, 'U');
1079 if (tcp_flags & TCP_ACK) {
1080 ds_put_char(s, '.');
1082 if (tcp_flags & TCP_ECE) {
1083 ds_put_cstr(s, "E");
1085 if (tcp_flags & TCP_CWR) {
1086 ds_put_cstr(s, "C");
1088 if (tcp_flags & TCP_NS) {
1089 ds_put_cstr(s, "N");
1091 if (tcp_flags & 0x200) {
1092 ds_put_cstr(s, "[200]");
1094 if (tcp_flags & 0x400) {
1095 ds_put_cstr(s, "[400]");
1097 if (tcp_flags & 0x800) {
1098 ds_put_cstr(s, "[800]");
1102 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1105 /* Clears 'b' and replaces its contents by an ARP frame with the specified
1106 * 'arp_op', 'arp_sha', 'arp_tha', 'arp_spa', and 'arp_tpa'. The outer
1107 * Ethernet frame is initialized with Ethernet source 'arp_sha' and destination
1108 * 'arp_tha', except that destination ff:ff:ff:ff:ff:ff is used instead if
1109 * 'broadcast' is true. */
1111 compose_arp(struct dp_packet *b, uint16_t arp_op,
1112 const struct eth_addr arp_sha, const struct eth_addr arp_tha,
1113 bool broadcast, ovs_be32 arp_spa, ovs_be32 arp_tpa)
1115 struct eth_header *eth;
1116 struct arp_eth_header *arp;
1119 dp_packet_prealloc_tailroom(b, ARP_PACKET_SIZE);
1120 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
1122 eth = dp_packet_put_uninit(b, sizeof *eth);
1123 eth->eth_dst = broadcast ? eth_addr_broadcast : arp_tha;
1124 eth->eth_src = arp_sha;
1125 eth->eth_type = htons(ETH_TYPE_ARP);
1127 arp = dp_packet_put_uninit(b, sizeof *arp);
1128 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
1129 arp->ar_pro = htons(ARP_PRO_IP);
1130 arp->ar_hln = sizeof arp->ar_sha;
1131 arp->ar_pln = sizeof arp->ar_spa;
1132 arp->ar_op = htons(arp_op);
1133 arp->ar_sha = arp_sha;
1134 arp->ar_tha = arp_tha;
1136 put_16aligned_be32(&arp->ar_spa, arp_spa);
1137 put_16aligned_be32(&arp->ar_tpa, arp_tpa);
1139 dp_packet_reset_offsets(b);
1140 dp_packet_set_l3(b, arp);
1144 packet_csum_pseudoheader(const struct ip_header *ip)
1146 uint32_t partial = 0;
1148 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_src));
1149 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_dst));
1150 partial = csum_add16(partial, htons(ip->ip_proto));
1151 partial = csum_add16(partial, htons(ntohs(ip->ip_tot_len) -
1152 IP_IHL(ip->ip_ihl_ver) * 4));