2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
19 #include <arpa/inet.h>
20 #include <sys/socket.h>
21 #include <netinet/in.h>
22 #include <netinet/ip6.h>
23 #include <netinet/icmp6.h>
25 #include "byte-order.h"
30 #include "dynamic-string.h"
31 #include "ovs-thread.h"
33 #include "dp-packet.h"
34 #include "unaligned.h"
/* IPv6 address used as an exact-match mask (IN6ADDR_EXACT_INIT — presumably
 * all 128 bits set; confirm against the header that defines the macro). */
const struct in6_addr in6addr_exact = IN6ADDR_EXACT_INIT;
38 /* Parses 's' as a 16-digit hexadecimal number representing a datapath ID. On
39 * success stores the dpid into '*dpidp' and returns true, on failure stores 0
40 * into '*dpidp' and returns false.
42 * Rejects an all-zeros dpid as invalid. */
44 dpid_from_string(const char *s, uint64_t *dpidp)
46 *dpidp = (strlen(s) == 16 && strspn(s, "0123456789abcdefABCDEF") == 16
47 ? strtoull(s, NULL, 16)
52 /* Returns true if 'ea' is a reserved address, that a bridge must never
53 * forward, false otherwise.
55 * If you change this function's behavior, please update corresponding
56 * documentation in vswitch.xml at the same time. */
58 eth_addr_is_reserved(const uint8_t ea[ETH_ADDR_LEN])
60 struct eth_addr_node {
61 struct hmap_node hmap_node;
65 static struct eth_addr_node nodes[] = {
66 /* STP, IEEE pause frames, and other reserved protocols. */
67 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000000ULL },
68 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000001ULL },
69 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000002ULL },
70 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000003ULL },
71 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000004ULL },
72 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000005ULL },
73 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000006ULL },
74 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000007ULL },
75 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000008ULL },
76 { HMAP_NODE_NULL_INITIALIZER, 0x0180c2000009ULL },
77 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000aULL },
78 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000bULL },
79 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000cULL },
80 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000dULL },
81 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000eULL },
82 { HMAP_NODE_NULL_INITIALIZER, 0x0180c200000fULL },
84 /* Extreme protocols. */
85 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000000ULL }, /* EDP. */
86 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000004ULL }, /* EAPS. */
87 { HMAP_NODE_NULL_INITIALIZER, 0x00e02b000006ULL }, /* EAPS. */
89 /* Cisco protocols. */
90 { HMAP_NODE_NULL_INITIALIZER, 0x01000c000000ULL }, /* ISL. */
91 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccccULL }, /* PAgP, UDLD, CDP,
93 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccccccdULL }, /* PVST+. */
94 { HMAP_NODE_NULL_INITIALIZER, 0x01000ccdcdcdULL }, /* STP Uplink Fast,
98 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc0ULL },
99 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc1ULL },
100 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc2ULL },
101 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc3ULL },
102 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc4ULL },
103 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc5ULL },
104 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc6ULL },
105 { HMAP_NODE_NULL_INITIALIZER, 0x01000cccccc7ULL },
108 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
109 struct eth_addr_node *node;
110 static struct hmap addrs;
113 if (ovsthread_once_start(&once)) {
115 for (node = nodes; node < &nodes[ARRAY_SIZE(nodes)]; node++) {
116 hmap_insert(&addrs, &node->hmap_node, hash_uint64(node->ea64));
118 ovsthread_once_done(&once);
121 ea64 = eth_addr_to_uint64(ea);
122 HMAP_FOR_EACH_IN_BUCKET (node, hmap_node, hash_uint64(ea64), &addrs) {
123 if (node->ea64 == ea64) {
131 eth_addr_from_string(const char *s, uint8_t ea[ETH_ADDR_LEN])
133 if (ovs_scan(s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(ea))) {
136 memset(ea, 0, ETH_ADDR_LEN);
141 /* Fills 'b' with a Reverse ARP packet with Ethernet source address 'eth_src'.
142 * This function is used by Open vSwitch to compose packets in cases where
143 * context is important but content doesn't (or shouldn't) matter.
145 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
148 compose_rarp(struct dp_packet *b, const uint8_t eth_src[ETH_ADDR_LEN])
150 struct eth_header *eth;
151 struct arp_eth_header *arp;
154 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN
155 + ARP_ETH_HEADER_LEN);
156 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
157 eth = dp_packet_put_uninit(b, sizeof *eth);
158 memcpy(eth->eth_dst, eth_addr_broadcast, ETH_ADDR_LEN);
159 memcpy(eth->eth_src, eth_src, ETH_ADDR_LEN);
160 eth->eth_type = htons(ETH_TYPE_RARP);
162 arp = dp_packet_put_uninit(b, sizeof *arp);
163 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
164 arp->ar_pro = htons(ARP_PRO_IP);
165 arp->ar_hln = sizeof arp->ar_sha;
166 arp->ar_pln = sizeof arp->ar_spa;
167 arp->ar_op = htons(ARP_OP_RARP);
168 memcpy(arp->ar_sha, eth_src, ETH_ADDR_LEN);
169 put_16aligned_be32(&arp->ar_spa, htonl(0));
170 memcpy(arp->ar_tha, eth_src, ETH_ADDR_LEN);
171 put_16aligned_be32(&arp->ar_tpa, htonl(0));
173 dp_packet_reset_offsets(b);
174 dp_packet_set_l3(b, arp);
177 /* Insert VLAN header according to given TCI. Packet passed must be Ethernet
178 * packet. Ignores the CFI bit of 'tci' using 0 instead.
180 * Also adjusts the layer offsets accordingly. */
182 eth_push_vlan(struct dp_packet *packet, ovs_be16 tpid, ovs_be16 tci)
184 struct vlan_eth_header *veh;
186 /* Insert new 802.1Q header. */
187 veh = dp_packet_resize_l2(packet, VLAN_HEADER_LEN);
188 memmove(veh, (char *)veh + VLAN_HEADER_LEN, 2 * ETH_ADDR_LEN);
189 veh->veth_type = tpid;
190 veh->veth_tci = tci & htons(~VLAN_CFI);
193 /* Removes outermost VLAN header (if any is present) from 'packet'.
195 * 'packet->l2_5' should initially point to 'packet''s outer-most VLAN header
196 * or may be NULL if there are no VLAN headers. */
198 eth_pop_vlan(struct dp_packet *packet)
200 struct vlan_eth_header *veh = dp_packet_l2(packet);
202 if (veh && dp_packet_size(packet) >= sizeof *veh
203 && eth_type_vlan(veh->veth_type)) {
205 memmove((char *)veh + VLAN_HEADER_LEN, veh, 2 * ETH_ADDR_LEN);
206 dp_packet_resize_l2(packet, -VLAN_HEADER_LEN);
210 /* Set ethertype of the packet. */
212 set_ethertype(struct dp_packet *packet, ovs_be16 eth_type)
214 struct eth_header *eh = dp_packet_l2(packet);
220 if (eth_type_vlan(eh->eth_type)) {
222 char *l2_5 = dp_packet_l2_5(packet);
224 p = ALIGNED_CAST(ovs_be16 *,
225 (l2_5 ? l2_5 : (char *)dp_packet_l3(packet)) - 2);
228 eh->eth_type = eth_type;
232 static bool is_mpls(struct dp_packet *packet)
234 return packet->l2_5_ofs != UINT16_MAX;
237 /* Set time to live (TTL) of an MPLS label stack entry (LSE). */
239 set_mpls_lse_ttl(ovs_be32 *lse, uint8_t ttl)
241 *lse &= ~htonl(MPLS_TTL_MASK);
242 *lse |= htonl((ttl << MPLS_TTL_SHIFT) & MPLS_TTL_MASK);
245 /* Set traffic class (TC) of an MPLS label stack entry (LSE). */
247 set_mpls_lse_tc(ovs_be32 *lse, uint8_t tc)
249 *lse &= ~htonl(MPLS_TC_MASK);
250 *lse |= htonl((tc << MPLS_TC_SHIFT) & MPLS_TC_MASK);
253 /* Set label of an MPLS label stack entry (LSE). */
255 set_mpls_lse_label(ovs_be32 *lse, ovs_be32 label)
257 *lse &= ~htonl(MPLS_LABEL_MASK);
258 *lse |= htonl((ntohl(label) << MPLS_LABEL_SHIFT) & MPLS_LABEL_MASK);
261 /* Set bottom of stack (BoS) bit of an MPLS label stack entry (LSE). */
263 set_mpls_lse_bos(ovs_be32 *lse, uint8_t bos)
265 *lse &= ~htonl(MPLS_BOS_MASK);
266 *lse |= htonl((bos << MPLS_BOS_SHIFT) & MPLS_BOS_MASK);
269 /* Compose an MPLS label stack entry (LSE) from its components:
270 * label, traffic class (TC), time to live (TTL) and
271 * bottom of stack (BoS) bit. */
273 set_mpls_lse_values(uint8_t ttl, uint8_t tc, uint8_t bos, ovs_be32 label)
275 ovs_be32 lse = htonl(0);
276 set_mpls_lse_ttl(&lse, ttl);
277 set_mpls_lse_tc(&lse, tc);
278 set_mpls_lse_bos(&lse, bos);
279 set_mpls_lse_label(&lse, label);
283 /* Set MPLS label stack entry to outermost MPLS header.*/
285 set_mpls_lse(struct dp_packet *packet, ovs_be32 mpls_lse)
287 /* Packet type should be MPLS to set label stack entry. */
288 if (is_mpls(packet)) {
289 struct mpls_hdr *mh = dp_packet_l2_5(packet);
291 /* Update mpls label stack entry. */
292 put_16aligned_be32(&mh->mpls_lse, mpls_lse);
296 /* Push MPLS label stack entry 'lse' onto 'packet' as the the outermost MPLS
297 * header. If 'packet' does not already have any MPLS labels, then its
298 * Ethertype is changed to 'ethtype' (which must be an MPLS Ethertype). */
300 push_mpls(struct dp_packet *packet, ovs_be16 ethtype, ovs_be32 lse)
305 if (!eth_type_mpls(ethtype)) {
309 if (!is_mpls(packet)) {
310 /* Set MPLS label stack offset. */
311 packet->l2_5_ofs = packet->l3_ofs;
314 set_ethertype(packet, ethtype);
316 /* Push new MPLS shim header onto packet. */
317 len = packet->l2_5_ofs;
318 header = dp_packet_resize_l2_5(packet, MPLS_HLEN);
319 memmove(header, header + MPLS_HLEN, len);
320 memcpy(header + len, &lse, sizeof lse);
323 /* If 'packet' is an MPLS packet, removes its outermost MPLS label stack entry.
324 * If the label that was removed was the only MPLS label, changes 'packet''s
325 * Ethertype to 'ethtype' (which ordinarily should not be an MPLS
328 pop_mpls(struct dp_packet *packet, ovs_be16 ethtype)
330 if (is_mpls(packet)) {
331 struct mpls_hdr *mh = dp_packet_l2_5(packet);
332 size_t len = packet->l2_5_ofs;
334 set_ethertype(packet, ethtype);
335 if (get_16aligned_be32(&mh->mpls_lse) & htonl(MPLS_BOS_MASK)) {
336 dp_packet_set_l2_5(packet, NULL);
338 /* Shift the l2 header forward. */
339 memmove((char*)dp_packet_data(packet) + MPLS_HLEN, dp_packet_data(packet), len);
340 dp_packet_resize_l2_5(packet, -MPLS_HLEN);
344 /* Converts hex digits in 'hex' to an Ethernet packet in '*packetp'. The
345 * caller must free '*packetp'. On success, returns NULL. On failure, returns
346 * an error message and stores NULL in '*packetp'.
348 * Aligns the L3 header of '*packetp' on a 32-bit boundary. */
350 eth_from_hex(const char *hex, struct dp_packet **packetp)
352 struct dp_packet *packet;
354 /* Use 2 bytes of headroom to 32-bit align the L3 header. */
355 packet = *packetp = dp_packet_new_with_headroom(strlen(hex) / 2, 2);
357 if (dp_packet_put_hex(packet, hex, NULL)[0] != '\0') {
358 dp_packet_delete(packet);
360 return "Trailing garbage in packet data";
363 if (dp_packet_size(packet) < ETH_HEADER_LEN) {
364 dp_packet_delete(packet);
366 return "Packet data too short for Ethernet";
373 eth_format_masked(const uint8_t eth[ETH_ADDR_LEN],
374 const uint8_t mask[ETH_ADDR_LEN], struct ds *s)
376 ds_put_format(s, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth));
377 if (mask && !eth_mask_is_exact(mask)) {
378 ds_put_format(s, "/"ETH_ADDR_FMT, ETH_ADDR_ARGS(mask));
383 eth_addr_bitand(const uint8_t src[ETH_ADDR_LEN],
384 const uint8_t mask[ETH_ADDR_LEN],
385 uint8_t dst[ETH_ADDR_LEN])
389 for (i = 0; i < ETH_ADDR_LEN; i++) {
390 dst[i] = src[i] & mask[i];
394 /* Given the IP netmask 'netmask', returns the number of bits of the IP address
395 * that it specifies, that is, the number of 1-bits in 'netmask'.
397 * If 'netmask' is not a CIDR netmask (see ip_is_cidr()), the return value will
398 * still be in the valid range but isn't otherwise meaningful. */
400 ip_count_cidr_bits(ovs_be32 netmask)
402 return 32 - ctz32(ntohl(netmask));
406 ip_format_masked(ovs_be32 ip, ovs_be32 mask, struct ds *s)
408 ds_put_format(s, IP_FMT, IP_ARGS(ip));
409 if (mask != OVS_BE32_MAX) {
410 if (ip_is_cidr(mask)) {
411 ds_put_format(s, "/%d", ip_count_cidr_bits(mask));
413 ds_put_format(s, "/"IP_FMT, IP_ARGS(mask));
/* Stores the string representation of the IPv6 address 'addr' into the
 * character array 'addr_str', which must be at least INET6_ADDRSTRLEN
 * bytes long. */
void
format_ipv6_addr(char *addr_str, const struct in6_addr *addr)
{
    inet_ntop(AF_INET6, addr, addr_str, INET6_ADDRSTRLEN);
}
429 print_ipv6_addr(struct ds *string, const struct in6_addr *addr)
433 ds_reserve(string, string->length + INET6_ADDRSTRLEN);
435 dst = string->string + string->length;
436 format_ipv6_addr(dst, addr);
437 string->length += strlen(dst);
441 print_ipv6_mapped(struct ds *s, const struct in6_addr *addr)
443 if (IN6_IS_ADDR_V4MAPPED(addr)) {
444 ds_put_format(s, IP_FMT, addr->s6_addr[12], addr->s6_addr[13],
445 addr->s6_addr[14], addr->s6_addr[15]);
447 print_ipv6_addr(s, addr);
/* Appends "ADDR", "ADDR/PREFIX" or "ADDR/MASK" to 's', omitting the mask when
 * it is absent or exact and using "/N" form for CIDR masks. */
void
print_ipv6_masked(struct ds *s, const struct in6_addr *addr,
                  const struct in6_addr *mask)
{
    print_ipv6_addr(s, addr);
    if (mask && !ipv6_mask_is_exact(mask)) {
        if (ipv6_is_cidr(mask)) {
            int cidr_bits = ipv6_count_cidr_bits(mask);
            ds_put_format(s, "/%d", cidr_bits);
        } else {
            ds_put_char(s, '/');
            print_ipv6_addr(s, mask);
        }
    }
}
/* Returns the bitwise AND of IPv6 addresses 'a' and 'b'. */
struct in6_addr ipv6_addr_bitand(const struct in6_addr *a,
                                 const struct in6_addr *b)
{
    int i;
    struct in6_addr dst;

#ifdef s6_addr32
    /* Platform exposes 32-bit access: AND four words at a time. */
    for (i=0; i<4; i++) {
        dst.s6_addr32[i] = a->s6_addr32[i] & b->s6_addr32[i];
    }
#else
    /* Portable fallback: AND byte by byte. */
    for (i=0; i<16; i++) {
        dst.s6_addr[i] = a->s6_addr[i] & b->s6_addr[i];
    }
#endif

    return dst;
}
/* Returns an in6_addr consisting of 'mask' high-order 1-bits and 128-N
 * low-order 0-bits. */
struct in6_addr
ipv6_create_mask(int mask)
{
    struct in6_addr netmask;
    uint8_t *netmaskp = &netmask.s6_addr[0];

    memset(&netmask, 0, sizeof netmask);
    /* Fill whole 0xff bytes first... */
    while (mask > 8) {
        *netmaskp = 0xff;
        netmaskp++;
        mask -= 8;
    }

    /* ...then the final partial byte, if any bits remain. */
    if (mask) {
        *netmaskp = 0xff << (8 - mask);
    }

    return netmask;
}
/* Given the IPv6 netmask 'netmask', returns the number of bits of the IPv6
 * address that it specifies, that is, the number of 1-bits in 'netmask'.
 * 'netmask' must be a CIDR netmask (see ipv6_is_cidr()).
 *
 * If 'netmask' is not a CIDR netmask (see ipv6_is_cidr()), the return value
 * will still be in the valid range but isn't otherwise meaningful. */
int
ipv6_count_cidr_bits(const struct in6_addr *netmask)
{
    int i;
    int count = 0;
    const uint8_t *netmaskp = &netmask->s6_addr[0];

    for (i=0; i<16; i++) {
        if (netmaskp[i] == 0xff) {
            count += 8;
        } else {
            uint8_t nm;

            /* Count leading 1-bits of the first non-0xff byte, then stop. */
            for (nm = netmaskp[i]; nm; nm <<= 1) {
                count++;
            }
            break;
        }
    }

    return count;
}
/* Returns true if 'netmask' is a CIDR netmask, that is, if it consists of N
 * high-order 1-bits and 128-N low-order 0-bits. */
bool
ipv6_is_cidr(const struct in6_addr *netmask)
{
    const uint8_t *netmaskp = &netmask->s6_addr[0];
    int i;

    for (i=0; i<16; i++) {
        if (netmaskp[i] != 0xff) {
            /* This byte must itself be a contiguous-1s prefix: its
             * complement plus one must carry cleanly (power-of-two check). */
            uint8_t x = ~netmaskp[i];
            if (x & (x + 1)) {
                return false;
            }
            /* Every following byte must be all zeros. */
            while (++i < 16) {
                if (netmaskp[i] != 0) {
                    return false;
                }
            }
        }
    }

    return true;
}
563 /* Populates 'b' with an Ethernet II packet headed with the given 'eth_dst',
564 * 'eth_src' and 'eth_type' parameters. A payload of 'size' bytes is allocated
565 * in 'b' and returned. This payload may be populated with appropriate
566 * information by the caller. Sets 'b''s 'frame' pointer and 'l3' offset to
567 * the Ethernet header and payload respectively. Aligns b->l3 on a 32-bit
570 * The returned packet has enough headroom to insert an 802.1Q VLAN header if
573 eth_compose(struct dp_packet *b, const uint8_t eth_dst[ETH_ADDR_LEN],
574 const uint8_t eth_src[ETH_ADDR_LEN], uint16_t eth_type,
578 struct eth_header *eth;
582 /* The magic 2 here ensures that the L3 header (when it is added later)
583 * will be 32-bit aligned. */
584 dp_packet_prealloc_tailroom(b, 2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + size);
585 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
586 eth = dp_packet_put_uninit(b, ETH_HEADER_LEN);
587 data = dp_packet_put_uninit(b, size);
589 memcpy(eth->eth_dst, eth_dst, ETH_ADDR_LEN);
590 memcpy(eth->eth_src, eth_src, ETH_ADDR_LEN);
591 eth->eth_type = htons(eth_type);
593 dp_packet_reset_offsets(b);
594 dp_packet_set_l3(b, data);
600 packet_set_ipv4_addr(struct dp_packet *packet,
601 ovs_16aligned_be32 *addr, ovs_be32 new_addr)
603 struct ip_header *nh = dp_packet_l3(packet);
604 ovs_be32 old_addr = get_16aligned_be32(addr);
605 size_t l4_size = dp_packet_l4_size(packet);
607 if (nh->ip_proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
608 struct tcp_header *th = dp_packet_l4(packet);
610 th->tcp_csum = recalc_csum32(th->tcp_csum, old_addr, new_addr);
611 } else if (nh->ip_proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN ) {
612 struct udp_header *uh = dp_packet_l4(packet);
615 uh->udp_csum = recalc_csum32(uh->udp_csum, old_addr, new_addr);
617 uh->udp_csum = htons(0xffff);
621 nh->ip_csum = recalc_csum32(nh->ip_csum, old_addr, new_addr);
622 put_16aligned_be32(addr, new_addr);
625 /* Returns true, if packet contains at least one routing header where
626 * segements_left > 0.
628 * This function assumes that L3 and L4 offsets are set in the packet. */
630 packet_rh_present(struct dp_packet *packet)
632 const struct ovs_16aligned_ip6_hdr *nh;
636 uint8_t *data = dp_packet_l3(packet);
638 remaining = packet->l4_ofs - packet->l3_ofs;
640 if (remaining < sizeof *nh) {
643 nh = ALIGNED_CAST(struct ovs_16aligned_ip6_hdr *, data);
645 remaining -= sizeof *nh;
646 nexthdr = nh->ip6_nxt;
649 if ((nexthdr != IPPROTO_HOPOPTS)
650 && (nexthdr != IPPROTO_ROUTING)
651 && (nexthdr != IPPROTO_DSTOPTS)
652 && (nexthdr != IPPROTO_AH)
653 && (nexthdr != IPPROTO_FRAGMENT)) {
654 /* It's either a terminal header (e.g., TCP, UDP) or one we
655 * don't understand. In either case, we're done with the
656 * packet, so use it to fill in 'nw_proto'. */
660 /* We only verify that at least 8 bytes of the next header are
661 * available, but many of these headers are longer. Ensure that
662 * accesses within the extension header are within those first 8
663 * bytes. All extension headers are required to be at least 8
669 if (nexthdr == IPPROTO_AH) {
670 /* A standard AH definition isn't available, but the fields
671 * we care about are in the same location as the generic
672 * option header--only the header length is calculated
674 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
676 nexthdr = ext_hdr->ip6e_nxt;
677 len = (ext_hdr->ip6e_len + 2) * 4;
678 } else if (nexthdr == IPPROTO_FRAGMENT) {
679 const struct ovs_16aligned_ip6_frag *frag_hdr
680 = ALIGNED_CAST(struct ovs_16aligned_ip6_frag *, data);
682 nexthdr = frag_hdr->ip6f_nxt;
683 len = sizeof *frag_hdr;
684 } else if (nexthdr == IPPROTO_ROUTING) {
685 const struct ip6_rthdr *rh = (struct ip6_rthdr *)data;
687 if (rh->ip6r_segleft > 0) {
691 nexthdr = rh->ip6r_nxt;
692 len = (rh->ip6r_len + 1) * 8;
694 const struct ip6_ext *ext_hdr = (struct ip6_ext *)data;
696 nexthdr = ext_hdr->ip6e_nxt;
697 len = (ext_hdr->ip6e_len + 1) * 8;
700 if (remaining < len) {
711 packet_update_csum128(struct dp_packet *packet, uint8_t proto,
712 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4])
714 size_t l4_size = dp_packet_l4_size(packet);
716 if (proto == IPPROTO_TCP && l4_size >= TCP_HEADER_LEN) {
717 struct tcp_header *th = dp_packet_l4(packet);
719 th->tcp_csum = recalc_csum128(th->tcp_csum, addr, new_addr);
720 } else if (proto == IPPROTO_UDP && l4_size >= UDP_HEADER_LEN) {
721 struct udp_header *uh = dp_packet_l4(packet);
724 uh->udp_csum = recalc_csum128(uh->udp_csum, addr, new_addr);
726 uh->udp_csum = htons(0xffff);
729 } else if (proto == IPPROTO_ICMPV6 &&
730 l4_size >= sizeof(struct icmp6_header)) {
731 struct icmp6_header *icmp = dp_packet_l4(packet);
733 icmp->icmp6_cksum = recalc_csum128(icmp->icmp6_cksum, addr, new_addr);
738 packet_set_ipv6_addr(struct dp_packet *packet, uint8_t proto,
739 ovs_16aligned_be32 addr[4], const ovs_be32 new_addr[4],
740 bool recalculate_csum)
742 if (recalculate_csum) {
743 packet_update_csum128(packet, proto, addr, new_addr);
745 memcpy(addr, new_addr, sizeof(ovs_be32[4]));
749 packet_set_ipv6_flow_label(ovs_16aligned_be32 *flow_label, ovs_be32 flow_key)
751 ovs_be32 old_label = get_16aligned_be32(flow_label);
752 ovs_be32 new_label = (old_label & htonl(~IPV6_LABEL_MASK)) | flow_key;
753 put_16aligned_be32(flow_label, new_label);
757 packet_set_ipv6_tc(ovs_16aligned_be32 *flow_label, uint8_t tc)
759 ovs_be32 old_label = get_16aligned_be32(flow_label);
760 ovs_be32 new_label = (old_label & htonl(0xF00FFFFF)) | htonl(tc << 20);
761 put_16aligned_be32(flow_label, new_label);
764 /* Modifies the IPv4 header fields of 'packet' to be consistent with 'src',
765 * 'dst', 'tos', and 'ttl'. Updates 'packet''s L4 checksums as appropriate.
766 * 'packet' must contain a valid IPv4 packet with correctly populated l[347]
769 packet_set_ipv4(struct dp_packet *packet, ovs_be32 src, ovs_be32 dst,
770 uint8_t tos, uint8_t ttl)
772 struct ip_header *nh = dp_packet_l3(packet);
774 if (get_16aligned_be32(&nh->ip_src) != src) {
775 packet_set_ipv4_addr(packet, &nh->ip_src, src);
778 if (get_16aligned_be32(&nh->ip_dst) != dst) {
779 packet_set_ipv4_addr(packet, &nh->ip_dst, dst);
782 if (nh->ip_tos != tos) {
783 uint8_t *field = &nh->ip_tos;
785 nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t) *field),
786 htons((uint16_t) tos));
790 if (nh->ip_ttl != ttl) {
791 uint8_t *field = &nh->ip_ttl;
793 nh->ip_csum = recalc_csum16(nh->ip_csum, htons(*field << 8),
799 /* Modifies the IPv6 header fields of 'packet' to be consistent with 'src',
800 * 'dst', 'traffic class', and 'next hop'. Updates 'packet''s L4 checksums as
801 * appropriate. 'packet' must contain a valid IPv6 packet with correctly
802 * populated l[34] offsets. */
804 packet_set_ipv6(struct dp_packet *packet, uint8_t proto, const ovs_be32 src[4],
805 const ovs_be32 dst[4], uint8_t key_tc, ovs_be32 key_fl,
808 struct ovs_16aligned_ip6_hdr *nh = dp_packet_l3(packet);
810 if (memcmp(&nh->ip6_src, src, sizeof(ovs_be32[4]))) {
811 packet_set_ipv6_addr(packet, proto, nh->ip6_src.be32, src, true);
814 if (memcmp(&nh->ip6_dst, dst, sizeof(ovs_be32[4]))) {
815 packet_set_ipv6_addr(packet, proto, nh->ip6_dst.be32, dst,
816 !packet_rh_present(packet));
819 packet_set_ipv6_tc(&nh->ip6_flow, key_tc);
821 packet_set_ipv6_flow_label(&nh->ip6_flow, key_fl);
823 nh->ip6_hlim = key_hl;
827 packet_set_port(ovs_be16 *port, ovs_be16 new_port, ovs_be16 *csum)
829 if (*port != new_port) {
830 *csum = recalc_csum16(*csum, *port, new_port);
835 /* Sets the TCP source and destination port ('src' and 'dst' respectively) of
836 * the TCP header contained in 'packet'. 'packet' must be a valid TCP packet
837 * with its l4 offset properly populated. */
839 packet_set_tcp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
841 struct tcp_header *th = dp_packet_l4(packet);
843 packet_set_port(&th->tcp_src, src, &th->tcp_csum);
844 packet_set_port(&th->tcp_dst, dst, &th->tcp_csum);
847 /* Sets the UDP source and destination port ('src' and 'dst' respectively) of
848 * the UDP header contained in 'packet'. 'packet' must be a valid UDP packet
849 * with its l4 offset properly populated. */
851 packet_set_udp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
853 struct udp_header *uh = dp_packet_l4(packet);
856 packet_set_port(&uh->udp_src, src, &uh->udp_csum);
857 packet_set_port(&uh->udp_dst, dst, &uh->udp_csum);
860 uh->udp_csum = htons(0xffff);
868 /* Sets the SCTP source and destination port ('src' and 'dst' respectively) of
869 * the SCTP header contained in 'packet'. 'packet' must be a valid SCTP packet
870 * with its l4 offset properly populated. */
872 packet_set_sctp_port(struct dp_packet *packet, ovs_be16 src, ovs_be16 dst)
874 struct sctp_header *sh = dp_packet_l4(packet);
875 ovs_be32 old_csum, old_correct_csum, new_csum;
876 uint16_t tp_len = dp_packet_l4_size(packet);
878 old_csum = get_16aligned_be32(&sh->sctp_csum);
879 put_16aligned_be32(&sh->sctp_csum, 0);
880 old_correct_csum = crc32c((void *)sh, tp_len);
885 new_csum = crc32c((void *)sh, tp_len);
886 put_16aligned_be32(&sh->sctp_csum, old_csum ^ old_correct_csum ^ new_csum);
890 packet_set_nd(struct dp_packet *packet, const ovs_be32 target[4],
891 const uint8_t sll[ETH_ADDR_LEN],
892 const uint8_t tll[ETH_ADDR_LEN]) {
893 struct ovs_nd_msg *ns;
894 struct ovs_nd_opt *nd_opt;
895 int bytes_remain = dp_packet_l4_size(packet);
897 if (OVS_UNLIKELY(bytes_remain < sizeof(*ns))) {
901 ns = dp_packet_l4(packet);
902 nd_opt = &ns->options[0];
903 bytes_remain -= sizeof(*ns);
905 if (memcmp(&ns->target, target, sizeof(ovs_be32[4]))) {
906 packet_set_ipv6_addr(packet, IPPROTO_ICMPV6,
911 while (bytes_remain >= ND_OPT_LEN && nd_opt->nd_opt_len != 0) {
912 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
913 && nd_opt->nd_opt_len == 1) {
914 if (memcmp(nd_opt->nd_opt_data, sll, ETH_ADDR_LEN)) {
915 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
917 *csum = recalc_csum48(*csum, nd_opt->nd_opt_data, sll);
918 memcpy(nd_opt->nd_opt_data, sll, ETH_ADDR_LEN);
921 /* A packet can only contain one SLL or TLL option */
923 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
924 && nd_opt->nd_opt_len == 1) {
925 if (memcmp(nd_opt->nd_opt_data, tll, ETH_ADDR_LEN)) {
926 ovs_be16 *csum = &(ns->icmph.icmp6_cksum);
928 *csum = recalc_csum48(*csum, nd_opt->nd_opt_data, tll);
929 memcpy(nd_opt->nd_opt_data, tll, ETH_ADDR_LEN);
932 /* A packet can only contain one SLL or TLL option */
936 nd_opt += nd_opt->nd_opt_len;
937 bytes_remain -= nd_opt->nd_opt_len * ND_OPT_LEN;
/* Maps a single TCP flag bit 'flag' to a short printable name.
 * NOTE(review): the body and return type of this function are not visible in
 * this chunk of the file — confirm against the full source. */
packet_tcp_flag_to_string(uint32_t flag)
974 /* Appends a string representation of the TCP flags value 'tcp_flags'
975 * (e.g. from struct flow.tcp_flags or obtained via TCP_FLAGS) to 's', in the
976 * format used by tcpdump. */
978 packet_format_tcp_flags(struct ds *s, uint16_t tcp_flags)
981 ds_put_cstr(s, "none");
985 if (tcp_flags & TCP_SYN) {
988 if (tcp_flags & TCP_FIN) {
991 if (tcp_flags & TCP_PSH) {
994 if (tcp_flags & TCP_RST) {
997 if (tcp_flags & TCP_URG) {
1000 if (tcp_flags & TCP_ACK) {
1001 ds_put_char(s, '.');
1003 if (tcp_flags & TCP_ECE) {
1004 ds_put_cstr(s, "E");
1006 if (tcp_flags & TCP_CWR) {
1007 ds_put_cstr(s, "C");
1009 if (tcp_flags & TCP_NS) {
1010 ds_put_cstr(s, "N");
1012 if (tcp_flags & 0x200) {
1013 ds_put_cstr(s, "[200]");
1015 if (tcp_flags & 0x400) {
1016 ds_put_cstr(s, "[400]");
1018 if (tcp_flags & 0x800) {
1019 ds_put_cstr(s, "[800]");
1023 #define ARP_PACKET_SIZE (2 + ETH_HEADER_LEN + VLAN_HEADER_LEN + \
1026 /* Clears 'b' and replaces its contents by an ARP frame with the specified
1027 * 'arp_op', 'arp_sha', 'arp_tha', 'arp_spa', and 'arp_tpa'. The outer
1028 * Ethernet frame is initialized with Ethernet source 'arp_sha' and destination
1029 * 'arp_tha', except that destination ff:ff:ff:ff:ff:ff is used instead if
1030 * 'broadcast' is true. */
1032 compose_arp(struct dp_packet *b, uint16_t arp_op,
1033 const uint8_t arp_sha[ETH_ADDR_LEN],
1034 const uint8_t arp_tha[ETH_ADDR_LEN], bool broadcast,
1035 ovs_be32 arp_spa, ovs_be32 arp_tpa)
1037 struct eth_header *eth;
1038 struct arp_eth_header *arp;
1041 dp_packet_prealloc_tailroom(b, ARP_PACKET_SIZE);
1042 dp_packet_reserve(b, 2 + VLAN_HEADER_LEN);
1044 eth = dp_packet_put_uninit(b, sizeof *eth);
1045 memcpy(eth->eth_dst, broadcast ? eth_addr_broadcast : arp_tha,
1047 memcpy(eth->eth_src, arp_sha, ETH_ADDR_LEN);
1048 eth->eth_type = htons(ETH_TYPE_ARP);
1050 arp = dp_packet_put_uninit(b, sizeof *arp);
1051 arp->ar_hrd = htons(ARP_HRD_ETHERNET);
1052 arp->ar_pro = htons(ARP_PRO_IP);
1053 arp->ar_hln = sizeof arp->ar_sha;
1054 arp->ar_pln = sizeof arp->ar_spa;
1055 arp->ar_op = htons(arp_op);
1056 memcpy(arp->ar_sha, arp_sha, ETH_ADDR_LEN);
1057 memcpy(arp->ar_tha, arp_tha, ETH_ADDR_LEN);
1059 put_16aligned_be32(&arp->ar_spa, arp_spa);
1060 put_16aligned_be32(&arp->ar_tpa, arp_tpa);
1062 dp_packet_reset_offsets(b);
1063 dp_packet_set_l3(b, arp);
1067 packet_csum_pseudoheader(const struct ip_header *ip)
1069 uint32_t partial = 0;
1071 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_src));
1072 partial = csum_add32(partial, get_16aligned_be32(&ip->ip_dst));
1073 partial = csum_add16(partial, htons(ip->ip_proto));
1074 partial = csum_add16(partial, htons(ntohs(ip->ip_tot_len) -
1075 IP_IHL(ip->ip_ihl_ver) * 4));