* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 34)
+#if (FLOW_WC_SEQ != 35)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
- (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_assert_in_map(MF, OFS) \
- MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
#define miniflow_push_uint64_(MF, OFS, VALUE) \
} \
}
+/* Pushes the byte 'VALUE' at byte offset 'OFS' of the miniflow being built
+ * through 'MF'.  Offsets must be pushed in increasing order, one 64-bit
+ * unit at a time:
+ *   - OFS % 8 == 0: first byte of a unit — claim the unit in the map via
+ *     miniflow_set_map(), then store at byte 0 of the current data word.
+ *   - OFS % 8 == 7: last byte of the unit — store it and advance MF.data
+ *     to the next 64-bit unit.
+ *   - otherwise: store into the middle of the already-claimed unit.
+ * NOTE(review): bytes of a claimed unit that are never explicitly pushed
+ * remain uninitialized unless a pad macro (miniflow_pad_to_64_ /
+ * miniflow_pad_from_64_) zeroes them — confirm all callers pad. */
+#define miniflow_push_uint8_(MF, OFS, VALUE) \
+{ \
+    MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+    if ((OFS) % 8 == 0) { \
+        miniflow_set_map(MF, OFS / 8); \
+        *(uint8_t *)MF.data = VALUE; \
+    } else if ((OFS) % 8 == 7) { \
+        miniflow_assert_in_map(MF, OFS / 8); \
+        *((uint8_t *)MF.data + 7) = VALUE; \
+        MF.data++; \
+    } else { \
+        miniflow_assert_in_map(MF, OFS / 8); \
+        *((uint8_t *)MF.data + ((OFS) % 8)) = VALUE; \
+    } \
+}
+
#define miniflow_pad_to_64_(MF, OFS) \
{ \
MINIFLOW_ASSERT((OFS) % 8 != 0); \
MF.data++; \
}
+/* Marks the 64-bit unit containing byte offset 'OFS' as present in the map
+ * and zeroes the bytes *before* 'OFS' within that unit, so that a field
+ * that does not start on a 64-bit boundary can be pushed next.  Counterpart
+ * of miniflow_pad_to_64_, which zero-fills *after* a field instead.
+ * 'OFS' must not itself be 64-bit aligned (there would be nothing to pad). */
+#define miniflow_pad_from_64_(MF, OFS) \
+{ \
+    MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+    MINIFLOW_ASSERT((OFS) % 8 != 0); \
+    miniflow_set_map(MF, OFS / 8); \
+ \
+    memset((uint8_t *)MF.data, 0, (OFS) % 8); \
+}
+
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
+/* Pushes network-byte-order byte 'VALUE' at byte offset 'OFS'.  A single
+ * byte has no byte order, so this only launders the ovs_be8 type to
+ * uint8_t (via OVS_FORCE) and delegates to miniflow_push_uint8_. */
+#define miniflow_push_be8_(MF, OFS, VALUE) \
+    miniflow_push_uint8_(MF, OFS, (OVS_FORCE uint8_t)VALUE);
+
#define miniflow_set_maps(MF, OFS, N_WORDS) \
{ \
size_t ofs = (OFS); \
#define miniflow_push_be16(MF, FIELD, VALUE) \
miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
+/* Pushes 'VALUE' into the uint8_t member 'FIELD' of struct flow, computing
+ * the byte offset with offsetof like the other field-name push macros. */
+#define miniflow_push_uint8(MF, FIELD, VALUE) \
+    miniflow_push_uint8_(MF, offsetof(struct flow, FIELD), VALUE)
+
#define miniflow_pad_to_64(MF, FIELD) \
- miniflow_pad_to_64_(MF, offsetof(struct flow, FIELD))
+ miniflow_pad_to_64_(MF, OFFSETOFEND(struct flow, FIELD))
+
+/* Zero-fills from the start of the current 64-bit unit up to the
+ * (non-64-bit-aligned) start of struct flow member 'FIELD'; see
+ * miniflow_pad_from_64_ for the underlying behavior. */
+#define miniflow_pad_from_64(MF, FIELD) \
+    miniflow_pad_from_64_(MF, offsetof(struct flow, FIELD))
#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
uint8_t nw_frag, nw_tos, nw_ttl, nw_proto;
/* Metadata. */
- if (md->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&md->tunnel)) {
miniflow_push_words(mf, tunnel, &md->tunnel,
offsetof(struct flow_tnl, metadata) /
sizeof(uint64_t));
if (md->ct_state) {
miniflow_push_uint32(mf, ct_mark, md->ct_mark);
- miniflow_pad_to_64(mf, pad1);
+ miniflow_pad_to_64(mf, ct_mark);
+
+ if (!ovs_u128_is_zero(&md->ct_label)) {
+ miniflow_push_words(mf, ct_label, &md->ct_label,
+ sizeof md->ct_label / sizeof(uint64_t));
+ }
}
/* Initialize packet's layer pointer and offsets. */
arp_buf[0] = arp->ar_sha;
arp_buf[1] = arp->ar_tha;
miniflow_push_macs(mf, arp_sha, arp_buf);
- miniflow_pad_to_64(mf, tcp_flags);
+ miniflow_pad_to_64(mf, arp_tha);
}
}
goto out;
TCP_FLAGS_BE32(tcp->tcp_ctl));
miniflow_push_be16(mf, tp_src, tcp->tcp_src);
miniflow_push_be16(mf, tp_dst, tcp->tcp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_UDP)) {
if (OVS_LIKELY(size >= UDP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, udp->udp_src);
miniflow_push_be16(mf, tp_dst, udp->udp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_SCTP)) {
if (OVS_LIKELY(size >= SCTP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, sctp->sctp_src);
miniflow_push_be16(mf, tp_dst, sctp->sctp_dst);
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_ICMP)) {
if (OVS_LIKELY(size >= ICMP_HEADER_LEN)) {
miniflow_push_be16(mf, tp_src, htons(icmp->icmp_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp_code));
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
} else if (OVS_LIKELY(nw_proto == IPPROTO_IGMP)) {
if (OVS_LIKELY(size >= IGMP_HEADER_LEN)) {
sizeof *nd_target / sizeof(uint64_t));
}
miniflow_push_macs(mf, arp_sha, arp_buf);
- miniflow_pad_to_64(mf, tcp_flags);
+ miniflow_pad_to_64(mf, arp_tha);
miniflow_push_be16(mf, tp_src, htons(icmp->icmp6_type));
miniflow_push_be16(mf, tp_dst, htons(icmp->icmp6_code));
- miniflow_pad_to_64(mf, igmp_group_ip4);
+ miniflow_pad_to_64(mf, tp_dst);
}
}
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
match_init_catchall(flow_metadata);
if (flow->tunnel.tun_id != htonll(0)) {
match_set_tun_flags(flow_metadata,
flow->tunnel.flags & FLOW_TNL_PUB_F_MASK);
}
- if (flow->tunnel.ip_src != htonl(0)) {
+ if (flow->tunnel.ip_src) {
match_set_tun_src(flow_metadata, flow->tunnel.ip_src);
}
- if (flow->tunnel.ip_dst != htonl(0)) {
+ if (flow->tunnel.ip_dst) {
match_set_tun_dst(flow_metadata, flow->tunnel.ip_dst);
}
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_src)) {
+ match_set_tun_ipv6_src(flow_metadata, &flow->tunnel.ipv6_src);
+ }
+ if (ipv6_addr_is_set(&flow->tunnel.ipv6_dst)) {
+ match_set_tun_ipv6_dst(flow_metadata, &flow->tunnel.ipv6_dst);
+ }
if (flow->tunnel.gbp_id != htons(0)) {
match_set_tun_gbp_id(flow_metadata, flow->tunnel.gbp_id);
}
if (flow->ct_mark != 0) {
match_set_ct_mark(flow_metadata, flow->ct_mark);
}
+ if (!ovs_u128_is_zero(&flow->ct_label)) {
+ match_set_ct_label(flow_metadata, flow->ct_label);
+ }
}
const char *ct_state_to_string(uint32_t state)
return "rel";
case CS_INVALID:
return "inv";
+ case CS_SRC_NAT:
+ return "snat";
+ case CS_DST_NAT:
+ return "dnat";
default:
return NULL;
}
if (!flow->ct_mark) {
WC_UNMASK_FIELD(wc, ct_mark);
}
+ if (ovs_u128_is_zero(&flow->ct_label)) {
+ WC_UNMASK_FIELD(wc, ct_label);
+ }
for (int i = 0; i < FLOW_N_REGS; i++) {
if (!flow->regs[i]) {
WC_UNMASK_FIELD(wc, regs[i]);
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
WC_MASK_FIELD(wc, tunnel.tun_id);
}
WC_MASK_FIELD(wc, tunnel.ip_src);
WC_MASK_FIELD(wc, tunnel.ip_dst);
+ WC_MASK_FIELD(wc, tunnel.ipv6_src);
+ WC_MASK_FIELD(wc, tunnel.ipv6_dst);
WC_MASK_FIELD(wc, tunnel.flags);
WC_MASK_FIELD(wc, tunnel.ip_tos);
WC_MASK_FIELD(wc, tunnel.ip_ttl);
WC_MASK_FIELD(wc, ct_state);
WC_MASK_FIELD(wc, ct_zone);
WC_MASK_FIELD(wc, ct_mark);
+ WC_MASK_FIELD(wc, ct_label);
WC_MASK_FIELD(wc, recirc_id);
WC_MASK_FIELD(wc, dp_hash);
WC_MASK_FIELD(wc, in_port);
flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
flowmap_init(map);
- if (flow->tunnel.ip_dst) {
+ if (flow_tnl_dst_is_set(&flow->tunnel)) {
FLOWMAP_SET__(map, tunnel, offsetof(struct flow_tnl, metadata));
if (!(flow->tunnel.flags & FLOW_TNL_F_UDPIF)) {
if (flow->tunnel.metadata.present.map) {
FLOWMAP_SET(map, ct_state);
FLOWMAP_SET(map, ct_zone);
FLOWMAP_SET(map, ct_mark);
+ FLOWMAP_SET(map, ct_label);
/* Ethertype-dependent fields. */
if (OVS_LIKELY(flow->dl_type == htons(ETH_TYPE_IP))) {
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields and dp_hash. */
- BUILD_ASSERT(FLOW_WC_SEQ == 34);
+ BUILD_ASSERT(FLOW_WC_SEQ == 35);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
flow->dp_hash = 0;