#include <stdlib.h>
#include <string.h>
#include "byte-order.h"
+#include "colors.h"
#include "coverage.h"
#include "csum.h"
-#include "dynamic-string.h"
+#include "openvswitch/dynamic-string.h"
#include "hash.h"
#include "jhash.h"
-#include "match.h"
+#include "openvswitch/match.h"
#include "dp-packet.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "odp-util.h"
#include "random.h"
#include "unaligned.h"
+#include "util.h"
COVERAGE_DEFINE(flow_extract);
COVERAGE_DEFINE(miniflow_malloc);
* away. Some GCC versions gave warnings on ALWAYS_INLINE, so these are
* defined as macros. */
-#if (FLOW_WC_SEQ != 35)
+#if (FLOW_WC_SEQ != 36)
#define MINIFLOW_ASSERT(X) ovs_assert(X)
BUILD_MESSAGE("FLOW_WC_SEQ changed: miniflow_extract() will have runtime "
"assertions enabled. Consider updating FLOW_WC_SEQ after "
#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
- (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_assert_in_map(MF, OFS) \
- MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
#define miniflow_push_uint64_(MF, OFS, VALUE) \
} \
}
+#define miniflow_push_uint8_(MF, OFS, VALUE) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ if ((OFS) % 8 == 0) { \
+ miniflow_set_map(MF, OFS / 8); \
+ *(uint8_t *)MF.data = VALUE; \
+ } else if ((OFS) % 8 == 7) { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint8_t *)MF.data + 7) = VALUE; \
+ MF.data++; \
+ } else { \
+ miniflow_assert_in_map(MF, OFS / 8); \
+ *((uint8_t *)MF.data + ((OFS) % 8)) = VALUE; \
+ } \
+}
+
#define miniflow_pad_to_64_(MF, OFS) \
{ \
MINIFLOW_ASSERT((OFS) % 8 != 0); \
MF.data++; \
}
+#define miniflow_pad_from_64_(MF, OFS) \
+{ \
+ MINIFLOW_ASSERT(MF.data < MF.end); \
+ \
+ MINIFLOW_ASSERT((OFS) % 8 != 0); \
+ miniflow_set_map(MF, OFS / 8); \
+ \
+ memset((uint8_t *)MF.data, 0, (OFS) % 8); \
+}
+
#define miniflow_push_be16_(MF, OFS, VALUE) \
miniflow_push_uint16_(MF, OFS, (OVS_FORCE uint16_t)VALUE);
+#define miniflow_push_be8_(MF, OFS, VALUE) \
+ miniflow_push_uint8_(MF, OFS, (OVS_FORCE uint8_t)VALUE);
+
#define miniflow_set_maps(MF, OFS, N_WORDS) \
{ \
size_t ofs = (OFS); \
#define miniflow_push_be16(MF, FIELD, VALUE) \
miniflow_push_be16_(MF, offsetof(struct flow, FIELD), VALUE)
+#define miniflow_push_uint8(MF, FIELD, VALUE) \
+ miniflow_push_uint8_(MF, offsetof(struct flow, FIELD), VALUE)
+
#define miniflow_pad_to_64(MF, FIELD) \
miniflow_pad_to_64_(MF, OFFSETOFEND(struct flow, FIELD))
+#define miniflow_pad_from_64(MF, FIELD) \
+ miniflow_pad_from_64_(MF, offsetof(struct flow, FIELD))
+
#define miniflow_push_words(MF, FIELD, VALUEP, N_WORDS) \
miniflow_push_words_(MF, offsetof(struct flow, FIELD), VALUEP, N_WORDS)
miniflow_push_uint32(mf, ct_mark, md->ct_mark);
miniflow_pad_to_64(mf, ct_mark);
- if (!ovs_u128_is_zero(&md->ct_label)) {
+ if (!ovs_u128_is_zero(md->ct_label)) {
miniflow_push_words(mf, ct_label, &md->ct_label,
sizeof md->ct_label / sizeof(uint64_t));
}
{
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
match_init_catchall(flow_metadata);
if (flow->tunnel.tun_id != htonll(0)) {
if (flow->ct_mark != 0) {
match_set_ct_mark(flow_metadata, flow->ct_mark);
}
- if (!ovs_u128_is_zero(&flow->ct_label)) {
+ if (!ovs_u128_is_zero(flow->ct_label)) {
match_set_ct_label(flow_metadata, flow->ct_label);
}
}
uint32_t mask, uint32_t max_mask)
{
if (name) {
- ds_put_format(ds, "%s=", name);
+ ds_put_format(ds, "%s%s=%s", colors.param, name, colors.end);
}
if (mask == max_mask) {
if (!flow->ct_mark) {
WC_UNMASK_FIELD(wc, ct_mark);
}
- if (ovs_u128_is_zero(&flow->ct_label)) {
+ if (ovs_u128_is_zero(flow->ct_label)) {
WC_UNMASK_FIELD(wc, ct_label);
}
for (int i = 0; i < FLOW_N_REGS; i++) {
memset(&wc->masks, 0x0, sizeof wc->masks);
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
if (flow_tnl_dst_is_set(&flow->tunnel)) {
if (flow->tunnel.flags & FLOW_TNL_F_KEY) {
flow_wc_map(const struct flow *flow, struct flowmap *map)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
flowmap_init(map);
FLOWMAP_SET(map, nw_frag);
FLOWMAP_SET(map, nw_tos);
FLOWMAP_SET(map, nw_ttl);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_IGMP)) {
FLOWMAP_SET(map, igmp_group_ip4);
} else {
FLOWMAP_SET(map, tcp_flags);
- FLOWMAP_SET(map, tp_src);
- FLOWMAP_SET(map, tp_dst);
}
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
FLOWMAP_SET(map, ipv6_src);
FLOWMAP_SET(map, nw_frag);
FLOWMAP_SET(map, nw_tos);
FLOWMAP_SET(map, nw_ttl);
+ FLOWMAP_SET(map, tp_src);
+ FLOWMAP_SET(map, tp_dst);
if (OVS_UNLIKELY(flow->nw_proto == IPPROTO_ICMPV6)) {
FLOWMAP_SET(map, nd_target);
FLOWMAP_SET(map, arp_tha);
} else {
FLOWMAP_SET(map, tcp_flags);
- FLOWMAP_SET(map, tp_src);
- FLOWMAP_SET(map, tp_dst);
}
} else if (eth_type_mpls(flow->dl_type)) {
FLOWMAP_SET(map, mpls_lse);
flow_wildcards_clear_non_packet_fields(struct flow_wildcards *wc)
{
/* Update this function whenever struct flow changes. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
memset(&wc->masks.metadata, 0, sizeof wc->masks.metadata);
memset(&wc->masks.regs, 0, sizeof wc->masks.regs);
flow_set_xreg(&wc->masks, idx, mask);
}
+/* Sets the wildcard mask for 128-bit extended register 'idx' in 'wc' to
+ * 'mask'.  (A 0-bit indicates a wildcard bit.) */
+void
+flow_wildcards_set_xxreg_mask(struct flow_wildcards *wc, int idx,
+ ovs_u128 mask)
+{
+ flow_set_xxreg(&wc->masks, idx, mask);
+}
+
/* Calculates the 5-tuple hash from the given miniflow.
* This returns the same value as flow_hash_5tuple for the corresponding
* flow. */
uint32_t
miniflow_hash_5tuple(const struct miniflow *flow, uint32_t basis)
{
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
uint32_t hash = basis;
if (flow) {
ovs_be16 dl_type = MINIFLOW_GET_BE16(flow, dl_type);
+ uint8_t nw_proto;
- hash = hash_add(hash, MINIFLOW_GET_U8(flow, nw_proto));
-
- /* Separate loops for better optimization. */
if (dl_type == htons(ETH_TYPE_IPV6)) {
struct flowmap map = FLOWMAP_EMPTY_INITIALIZER;
uint64_t value;
MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, map) {
hash = hash_add64(hash, value);
}
- } else {
+ } else if (dl_type == htons(ETH_TYPE_IP)
+ || dl_type == htons(ETH_TYPE_ARP)) {
hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_src));
hash = hash_add(hash, MINIFLOW_GET_U32(flow, nw_dst));
+ } else {
+ goto out;
}
+
+ nw_proto = MINIFLOW_GET_U8(flow, nw_proto);
+ hash = hash_add(hash, nw_proto);
+ if (nw_proto != IPPROTO_TCP && nw_proto != IPPROTO_UDP
+ && nw_proto != IPPROTO_SCTP && nw_proto != IPPROTO_ICMP
+ && nw_proto != IPPROTO_ICMPV6) {
+ goto out;
+ }
+
/* Add both ports at once. */
hash = hash_add(hash, MINIFLOW_GET_U32(flow, tp_src));
- hash = hash_finish(hash, 42); /* Arbitrary number. */
}
- return hash;
+out:
+    return hash_finish(hash, 42); /* Arbitrary number. */
}
ASSERT_SEQUENTIAL_SAME_WORD(tp_src, tp_dst);
uint32_t
flow_hash_5tuple(const struct flow *flow, uint32_t basis)
{
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 36);
uint32_t hash = basis;
if (flow) {
- hash = hash_add(hash, flow->nw_proto);
if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
const uint64_t *flow_u64 = (const uint64_t *)flow;
for (;ofs < end; ofs++) {
hash = hash_add64(hash, flow_u64[ofs]);
}
- } else {
+ } else if (flow->dl_type == htons(ETH_TYPE_IP)
+ || flow->dl_type == htons(ETH_TYPE_ARP)) {
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_src);
hash = hash_add(hash, (OVS_FORCE uint32_t) flow->nw_dst);
+ } else {
+ goto out;
+ }
+
+ hash = hash_add(hash, flow->nw_proto);
+ if (flow->nw_proto != IPPROTO_TCP && flow->nw_proto != IPPROTO_UDP
+ && flow->nw_proto != IPPROTO_SCTP && flow->nw_proto != IPPROTO_ICMP
+ && flow->nw_proto != IPPROTO_ICMPV6) {
+ goto out;
}
+
/* Add both ports at once. */
hash = hash_add(hash,
((const uint32_t *)flow)[offsetof(struct flow, tp_src)
/ sizeof(uint32_t)]);
- hash = hash_finish(hash, 42); /* Arbitrary number. */
}
- return hash;
+out:
+ return hash_finish(hash, 42); /* Arbitrary number. */
}
/* Hashes 'flow' based on its L2 through L4 protocol information. */
flow->mpls_lse[0] = set_mpls_lse_values(ttl, tc, 1, htonl(label));
/* Clear all L3 and L4 fields and dp_hash. */
- BUILD_ASSERT(FLOW_WC_SEQ == 35);
+ BUILD_ASSERT(FLOW_WC_SEQ == 36);
memset((char *) flow + FLOW_SEGMENT_2_ENDS_AT, 0,
sizeof(struct flow) - FLOW_SEGMENT_2_ENDS_AT);
flow->dp_hash = 0;
icmp = dp_packet_put_zeros(p, l4_len);
icmp->icmp_type = ntohs(flow->tp_src);
icmp->icmp_code = ntohs(flow->tp_dst);
+ /* Checksum has already been zeroed by put_zeros call. */
icmp->icmp_csum = csum(icmp, ICMP_HEADER_LEN);
} else if (flow->nw_proto == IPPROTO_IGMP) {
struct igmp_header *igmp;
igmp->igmp_type = ntohs(flow->tp_src);
igmp->igmp_code = ntohs(flow->tp_dst);
put_16aligned_be32(&igmp->group, flow->igmp_group_ip4);
+ /* Checksum has already been zeroed by put_zeros call. */
igmp->igmp_csum = csum(igmp, IGMP_HEADER_LEN);
} else if (flow->nw_proto == IPPROTO_ICMPV6) {
struct icmp6_hdr *icmp;
ip = dp_packet_l3(p);
ip->ip_tot_len = htons(p->l4_ofs - p->l3_ofs + l4_len);
+ /* Checksum has already been zeroed by put_zeros call. */
ip->ip_csum = csum(ip, sizeof *ip);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
struct ovs_16aligned_ip6_hdr *nh;