* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 33)
+#if (FLOW_WC_SEQ != 35)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
- (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_assert_in_map(MF, OFS) \
- MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
#define miniflow_push_uint64_(MF, OFS, VALUE) \
} \
}
+/* Pushes the 8-bit 'VALUE' into the miniflow data buffer at byte offset
+ * 'OFS' (a byte offset within struct flow).  Offsets within the current
+ * 64-bit unit are handled in three cases: offset 0 starts a new unit and
+ * marks it present in the map; offset 7 completes the unit and advances
+ * the data pointer; any other offset writes into a unit that must already
+ * have been started by an earlier push (checked by the in-map assertion).
+ * NOTE(review): bytes skipped between pushes within a unit are not zeroed
+ * here -- callers are expected to pad explicitly; confirm against the
+ * pad_from_64/pad_to_64 helpers. */
+#define miniflow_push_uint8_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint8_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 7) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint8_t *)MF.data + 7) = VALUE; \
+ MF.data++; \
+ } else { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint8_t *)MF.data + ((OFS) % 8)) = VALUE; \
+ } \
+}
+
#define miniflow_pad_to_64_(MF, OFS) \
{ \
MINIFLOW_ASSERT((OFS) % 8 != 0); \
MF.data++; \
}
+/* Zeroes the leading bytes of the current 64-bit unit up to (but not
+ * including) byte offset 'OFS', and marks the unit as present in the map.
+ * Used when the first field pushed into a unit does not begin on a 64-bit
+ * boundary.  'OFS' must not itself be 64-bit aligned (asserted below);
+ * the data pointer is intentionally not advanced, so subsequent pushes
+ * fill the remainder of the same unit. */
+#define miniflow_pad_from_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ miniflow_set_map(MF, OFS / 8); \
+ \
+ memset((uint8_t *)MF.data, 0, (OFS) % 8); \
+}
+
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
+/* Big-endian (ovs_be8) variant of miniflow_push_uint8_(); a single byte
+ * has no byte order, so this only strips the OVS_FORCE type marker. */
+#define miniflow_push_be8_(MF, OFS, VALUE) \
+ miniflow_push_uint8_(MF, OFS, (OVS_FORCE uint8_t)VALUE);
+
#define miniflow_set_maps(MF, OFS, N_WORDS) \
{ \
size_t ofs = (OFS); \
#define miniflow_push_be16(MF, FIELD, VALUE) \
miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
+/* Pushes the 8-bit 'VALUE' for struct flow member 'FIELD', computing the
+ * byte offset from the field name. */
+#define miniflow_push_uint8(MF, FIELD, VALUE) \
+ miniflow_push_uint8_(MF, offsetof(struct flow, FIELD), VALUE)
+
#define miniflow_pad_to_64(MF, FIELD) \
- miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))
+ miniflow_pad_to_64_(MF, OFFSETOFEND(struct flow, FIELD))
+
+#define miniflow_pad_from_64(MF, FIELD) \
+ miniflow_pad_from_64_(MF, offsetof(struct flow, FIELD))
#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
static inline void
parse_icmpv6(const void **datap, size_t *sizep, const struct icmp6_hdr *icmp,
const struct in6_addr **nd_target,
- uint8_t arp_buf[2][ETH_ADDR_LEN])
+ struct eth_addr arp_buf[2])
{
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
while (*sizep >= 8) {
/* The minimum size of an option is 8 bytes, which also is
* the size of Ethernet link-layer options. */
- const struct nd_opt_hdr *nd_opt = *datap;
- int opt_len = nd_opt->nd_opt_len * 8;
+ const struct ovs_nd_opt *nd_opt = *datap;
+ int opt_len = nd_opt->nd_opt_len * ND_OPT_LEN;
if (!opt_len || opt_len > *sizep) {
return;
* provided. It is considered an error if the same link
* layer option is specified twice. */
if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LINKADDR
- && opt_len == 8) {
+ && opt_len == 8) {
if (OVS_LIKELY(eth_addr_is_zero(arp_buf[0]))) {
- memcpy(arp_buf[0], nd_opt + 1, ETH_ADDR_LEN);
+ arp_buf[0] = nd_opt->nd_opt_mac;
} else {
goto invalid;
}
} else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LINKADDR
- && opt_len == 8) {
+ && opt_len == 8) {
if (OVS_LIKELY(eth_addr_is_zero(arp_buf[1]))) {
- memcpy(arp_buf[1], nd_opt + 1, ETH_ADDR_LEN);
+ arp_buf[1] = nd_opt->nd_opt_mac;
} else {
goto invalid;
}
invalid:
*nd_target = NULL;
- memset(arp_buf[0], 0, ETH_ADDR_LEN);
- memset(arp_buf[1], 0, ETH_ADDR_LEN);
- return;
+ arp_buf[0] = eth_addr_zero;
+ arp_buf[1] = eth_addr_zero;
}
/* Initializes 'flow' members from 'packet' and 'md'
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
/* Metadata. */
- if (md->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&md->tunnel)) {
miniflow_push_words(mf, tunnel, &md->tunnel,
offsetof(struct flow_tnl, metadata) /
sizeof(uint64_t));
}
miniflow_push_uint32(mf, dp_hash, md->dp_hash);
miniflow_push_uint32(mf, in_port, odp_to_u32(md->in_port.odp_port));
- if (md->recirc_id) {
+ if (md->recirc_id || md->ct_state) {
miniflow_push_uint32(mf, recirc_id, md->recirc_id);
- miniflow_pad_to_64(mf, conj_id);
+ miniflow_push_uint16(mf, ct_state, md->ct_state);
+ miniflow_push_uint16(mf, ct_zone, md->ct_zone);
+ }
+
+ if (md->ct_state) {
+ miniflow_push_uint32(mf, ct_mark, md->ct_mark);
+ miniflow_pad_to_64(mf, ct_mark);
+
+ if (!ovs_u128_is_zero(&md->ct_label)) {
+ miniflow_push_words(mf, ct_label, &md->ct_label,
+ sizeof md->ct_label / sizeof(uint64_t));
+ }
}
/* Initialize packet's layer pointer and offsets. */
} else {
if (dl_type == htons(ETH_TYPE_ARP) ||
dl_type == htons(ETH_TYPE_RARP)) {
- uint8_t arp_buf[2][ETH_ADDR_LEN];
+ struct eth_addr arp_buf[2];
const struct arp_eth_header *arp = (const struct arp_eth_header *)
data_try_pull(&data, &size, ARP_ETH_HEADER_LEN);
/* Must be adjacent. */
ASSERT_SEQUENTIAL(arp_sha, arp_tha);
- memcpy(arp_buf[0], arp->ar_sha, ETH_ADDR_LEN);
- memcpy(arp_buf[1], arp->ar_tha, ETH_ADDR_LEN);
+ arp_buf[0] = arp->ar_sha;
+ arp_buf[1] = arp->ar_tha;
miniflow_push_macs(mf, arp_sha, arp_buf);
- miniflow_pad_to_64(mf, tcp_flags);
+ miniflow_pad_to_64(mf, arp_tha);
}
}
goto out;
if (OVS_LIKELY(size >= TCP_HEADER_LEN)) {
const struct tcp_header *tcp = data;
- miniflow_push_be32(mf, arp_tha[2], 0);
+ miniflow_push_be32(mf, arp_tha.ea[2], 0);
miniflow_push_be32(mf, tcp_flags,
TCP_FLAGS_BE32(tcp->tcp_ctl));
miniflow_push_be16(mf, tp_src, tcp->tcp_src);
miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, udp->udp_src);
miniflow_push_be16(mf, tp_dst, udp->udp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, sctp->sctp_src);
miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
} else if (OVS_LIKELY(nw_proto == IPPROTO_ICMPV6)) {
if (OVS_LIKELY(size >= sizeof(struct icmp6_hdr))) {
const struct in6_addr *nd_target = NULL;
- uint8_t arp_buf[2][ETH_ADDR_LEN];
+ struct eth_addr arp_buf[2] = { { { { 0 } } } };
const struct icmp6_hdr *icmp = data_pull(&data, &size,
sizeof *icmp);
- memset(arp_buf, 0, sizeof arp_buf);
parse_icmpv6(&data, &size, icmp, &nd_target, arp_buf);
if (nd_target) {
miniflow_push_words(mf, nd_target, nd_target,
- sizeof *nd_target / 8);
+ sizeof *nd_target / sizeof(uint64_t));
}
miniflow_push_macs(mf, arp_sha, arp_buf);
- miniflow_pad_to_64(mf, tcp_flags);
+ miniflow_pad_to_64(mf, arp_tha);
miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
}
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
match_init_catchall(flow_metadata);
if (flow->tunnel.tun_id != htonll(0)) {
match_set_tun_flags(flow_metadata,
flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
}
- if (flow->tunnel.ip_src != htonl(0)) {
+ if (flow->tunnel.ip_src) {
match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
}
- if (flow->tunnel.ip_dst != htonl(0)) {
+ if (flow->tunnel.ip_dst) {
match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
}
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) {
+ match_set_tun_ipv6_src(flow_metadata, &flow->tunnel.ipv6_src);
+ }
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_dst)) {
+ match_set_tun_ipv6_dst(flow_metadata, &flow->tunnel.ipv6_dst);
+ }
if (flow->tunnel.gbp_id != htons(0)) {
match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
}
}
match_set_in_port(flow_metadata, flow->in_port.ofp_port);
+ if (flow->ct_state != 0) {
+ match_set_ct_state(flow_metadata, flow->ct_state);
+ }
+ if (flow->ct_zone != 0) {
+ match_set_ct_zone(flow_metadata, flow->ct_zone);
+ }
+ if (flow->ct_mark != 0) {
+ match_set_ct_mark(flow_metadata, flow->ct_mark);
+ }
+ if (!ovs_u128_is_zero(&flow->ct_label)) {
+ match_set_ct_label(flow_metadata, flow->ct_label);
+ }
+}
+
+/* Returns the abbreviated flag name for the connection-tracking state
+ * flag 'state' (e.g. "trk" for CS_TRACKED, "est" for CS_ESTABLISHED),
+ * or NULL if 'state' is not recognized.
+ *
+ * NOTE(review): 'state' is matched as a single CS_* flag value, not as a
+ * bitmask -- any combination of flags falls through to the default case
+ * and returns NULL.  Callers formatting a full ct_state bitmask must
+ * iterate over the individual bits. */
+const char *ct_state_to_string(uint32_t state)
+{
+ switch (state) {
+ case CS_REPLY_DIR:
+ return "rpl";
+ case CS_TRACKED:
+ return "trk";
+ case CS_NEW:
+ return "new";
+ case CS_ESTABLISHED:
+ return "est";
+ case CS_RELATED:
+ return "rel";
+ case CS_INVALID:
+ return "inv";
+ case CS_SRC_NAT:
+ return "snat";
+ case CS_DST_NAT:
+ return "dnat";
+ default:
+ return NULL;
+ }
}
char *
if (!flow->dp_hash) {
WC_UNMASK_FIELD(wc, dp_hash);
}
+ if (!flow->ct_state) {
+ WC_UNMASK_FIELD(wc, ct_state);
+ }
+ if (!flow->ct_zone) {
+ WC_UNMASK_FIELD(wc, ct_zone);
+ }
+ if (!flow->ct_mark) {
+ WC_UNMASK_FIELD(wc, ct_mark);
+ }
+ if (ovs_u128_is_zero(&flow->ct_label)) {
+ WC_UNMASK_FIELD(wc, ct_label);
+ }
for (int i = 0; i < FLOW_N_REGS; i++) {
if (!flow->regs[i]) {
WC_UNMASK_FIELD(wc, regs[i]);
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.tun_id);
}
WC_MASK_FIELD(wc, tunnel.ip_src);
WC_MASK_FIELD(wc, tunnel.ip_dst);
+ WC_MASK_FIELD(wc, tunnel.ipv6_src);
+ WC_MASK_FIELD(wc, tunnel.ipv6_dst);
WC_MASK_FIELD(wc, tunnel.flags);
WC_MASK_FIELD(wc, tunnel.ip_tos);
WC_MASK_FIELD(wc, tunnel.ip_ttl);
WC_MASK_FIELD(wc, skb_priority);
WC_MASK_FIELD(wc, pkt_mark);
+ WC_MASK_FIELD(wc, ct_state);
+ WC_MASK_FIELD(wc, ct_zone);
+ WC_MASK_FIELD(wc, ct_mark);
+ WC_MASK_FIELD(wc, ct_label);
WC_MASK_FIELD(wc, recirc_id);
WC_MASK_FIELD(wc, dp_hash);
WC_MASK_FIELD(wc, in_port);
flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
flowmap_init(map);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
if (flow->tunnel.metadata.present.map) {
FLOWMAP_SET(map, dl_src);
FLOWMAP_SET(map, dl_type);
FLOWMAP_SET(map, vlan_tci);
+ FLOWMAP_SET(map, ct_state);
+ FLOWMAP_SET(map, ct_zone);
+ FLOWMAP_SET(map, ct_mark);
+ FLOWMAP_SET(map, ct_label);
/* Ethertype-dependent fields. */
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
ovs_be16 eth_type;
ovs_be16 vlan_tci;
ovs_be16 tp_port;
- uint8_t eth_addr[ETH_ADDR_LEN];
+ struct eth_addr eth_addr;
uint8_t ip_proto;
} fields;
int i;
memset(&fields, 0, sizeof fields);
- for (i = 0; i < ETH_ADDR_LEN; i++) {
- fields.eth_addr[i] = flow->dl_src[i] ^ flow->dl_dst[i];
+ for (i = 0; i < ARRAY_SIZE(fields.eth_addr.be16); i++) {
+ fields.eth_addr.be16[i] = flow->dl_src.be16[i] ^ flow->dl_dst.be16[i];
}
fields.vlan_tci = flow->vlan_tci & htons(VLAN_VID_MASK);
fields.eth_type = flow->dl_type;
/* Initialize to all zeros. */
memset(flow, 0, sizeof *flow);
- eth_addr_random(flow->dl_src);
- eth_addr_random(flow->dl_dst);
+ eth_addr_random(&flow->dl_src);
+ eth_addr_random(&flow->dl_dst);
flow->vlan_tci = (OVS_FORCE ovs_be16) (random_uint16() & VLAN_VID_MASK);
switch (fields) {
case NX_HASH_FIELDS_ETH_SRC:
- return jhash_bytes(flow->dl_src, sizeof flow->dl_src, basis);
+ return jhash_bytes(&flow->dl_src, sizeof flow->dl_src, basis);
case NX_HASH_FIELDS_SYMMETRIC_L4:
return flow_hash_symmetric_l4(flow, basis);
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields and dp_hash. */
- BUILD_ASSERT(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT(FLOW_WC_SEQ == 35);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
flow->dp_hash = 0;
(icmp->icmp6_type == ND_NEIGHBOR_SOLICIT ||
icmp->icmp6_type == ND_NEIGHBOR_ADVERT)) {
struct in6_addr *nd_target;
- struct nd_opt_hdr *nd_opt;
+ struct ovs_nd_opt *nd_opt;
l4_len += sizeof *nd_target;
nd_target = dp_packet_put_zeros(p, sizeof *nd_target);
nd_opt = dp_packet_put_zeros(p, 8);
nd_opt->nd_opt_len = 1;
nd_opt->nd_opt_type = ND_OPT_SOURCE_LINKADDR;
- memcpy(nd_opt + 1, flow->arp_sha, ETH_ADDR_LEN);
+ nd_opt->nd_opt_mac = flow->arp_sha;
}
if (!eth_addr_is_zero(flow->arp_tha)) {
l4_len += 8;
nd_opt = dp_packet_put_zeros(p, 8);
nd_opt->nd_opt_len = 1;
nd_opt->nd_opt_type = ND_OPT_TARGET_LINKADDR;
- memcpy(nd_opt + 1, flow->arp_tha, ETH_ADDR_LEN);
+ nd_opt->nd_opt_mac = flow->arp_tha;
}
}
icmp->icmp6_cksum = (OVS_FORCE uint16_t)
flow->nw_proto == ARP_OP_REPLY) {
put_16aligned_be32(&arp->ar_spa, flow->nw_src);
put_16aligned_be32(&arp->ar_tpa, flow->nw_dst);
- memcpy(arp->ar_sha, flow->arp_sha, ETH_ADDR_LEN);
- memcpy(arp->ar_tha, flow->arp_tha, ETH_ADDR_LEN);
+ arp->ar_sha = flow->arp_sha;
+ arp->ar_tha = flow->arp_tha;
}
}