VLOG_DEFINE_THIS_MODULE(nx_match);
+/* OXM headers.
+ *
+ *
+ * Standard OXM/NXM
+ * ================
+ *
+ * The header is 32 bits long. It looks like this:
+ *
+ * |31                              16 15            9| 8 7                0
+ * +----------------------------------+---------------+--+------------------+
+ * |            oxm_class             |   oxm_field   |hm|    oxm_length    |
+ * +----------------------------------+---------------+--+------------------+
+ *
+ * where hm stands for oxm_hasmask. It is followed by oxm_length bytes of
+ * payload. When oxm_hasmask is 0, the payload is the value of the field
+ * identified by the header; when oxm_hasmask is 1, the payload is a value for
+ * the field followed by a mask of equal length.
+ *
+ * Internally, we represent a standard OXM header as a 64-bit integer with the
+ * above information in the most-significant bits.
+ *
+ *
+ * Experimenter OXM
+ * ================
+ *
+ * The header is 64 bits long. It looks like the diagram above except that a
+ * 32-bit experimenter ID, which we call oxm_vendor and which identifies a
+ * vendor, is inserted just before the payload. Experimenter OXMs are
+ * identified by an all-1-bits oxm_class (OFPXMC12_EXPERIMENTER). The
+ * oxm_length value *includes* the experimenter ID, so that the real payload is
+ * only oxm_length - 4 bytes long.
+ *
+ * Internally, we represent an experimenter OXM header as a 64-bit integer with
+ * the standard header in the upper 32 bits and the experimenter ID in the
+ * lower 32 bits. (It would be more convenient to swap the positions of the
+ * two 32-bit words, but this would be more error-prone because experimenter
+ * OXMs are very rarely used, so accidentally passing one through a 32-bit type
+ * somewhere in the OVS code would be hard to find.)
+ */
+
/*
* OXM Class IDs.
* The high order bit differentiate reserved classes from member classes.
OFPXMC12_EXPERIMENTER = 0xffff, /* Experimenter class */
};
-/* Functions for extracting fields from OXM/NXM headers. */
-static int nxm_vendor(uint32_t header) { return header >> 16; }
-static int nxm_field(uint32_t header) { return (header >> 9) & 0x7f; }
-static bool nxm_hasmask(uint32_t header) { return (header & 0x100) != 0; }
-static int nxm_length(uint32_t header) { return header & 0xff; }
+/* Functions for extracting raw field values from OXM/NXM headers.
+ *
+ * The standard 32-bit OXM header occupies the upper 32 bits of the internal
+ * 64-bit representation; nxm_vendor() truncates to the lower 32 bits, which
+ * hold the experimenter ID (0 for a standard OXM/NXM header). */
+static uint32_t nxm_vendor(uint64_t header) { return header; }
+static int nxm_class(uint64_t header) { return header >> 48; }
+static int nxm_field(uint64_t header) { return (header >> 41) & 0x7f; }
+static bool nxm_hasmask(uint64_t header) { return (header >> 40) & 1; }
+static int nxm_length(uint64_t header) { return (header >> 32) & 0xff; }
-/* Returns true if 'header' is a legacy NXM header, false if it is an OXM
- * header.*/
+/* Returns true if 'header' is an experimenter OXM header, that is, if its
+ * oxm_class is the all-1-bits OFPXMC12_EXPERIMENTER class and its low 32
+ * bits therefore hold a 32-bit experimenter ID. */
 static bool
-is_nxm_header(uint32_t header)
+is_experimenter_oxm(uint64_t header)
+{
+    return nxm_class(header) == OFPXMC12_EXPERIMENTER;
+}
+
+/* The OXM header "length" field is somewhat tricky:
+ *
+ *     - For a standard OXM header, the length is the number of bytes of the
+ *       payload, and the payload consists of just the value (and mask, if
+ *       present).
+ *
+ *     - For an experimenter OXM header, the length is the number of bytes in
+ *       the payload plus 4 (the length of the experimenter ID).  That is, the
+ *       experimenter ID is included in oxm_length.
+ *
+ * This function returns the length of the experimenter ID field in 'header'.
+ * That is, for an experimenter OXM (when an experimenter ID is present), it
+ * returns 4, and for a standard OXM (when no experimenter ID is present), it
+ * returns 0. */
+static int
+nxm_experimenter_len(uint64_t header)
 {
-    return nxm_vendor(header) <= 1;
+    return is_experimenter_oxm(header) ? 4 : 0;
 }
-#define NXM_HEADER(VENDOR, FIELD, HASMASK, LENGTH) \
-    (((VENDOR) << 16) | ((FIELD) << 9) | ((HASMASK) << 8) | (LENGTH))
+/* Returns the number of bytes that follow the header for an NXM/OXM entry
+ * with the given 'header'. */
+static int
+nxm_payload_len(uint64_t header)
+{
+    return nxm_length(header) - nxm_experimenter_len(header);
+}
+
+/* Returns the number of bytes in the header for an NXM/OXM entry with the
+ * given 'header'. */
+static int
+nxm_header_len(uint64_t header)
+{
+    return 4 + nxm_experimenter_len(header);
+}
+
+/* Composes the internal 64-bit form of an OXM/NXM header: the standard
+ * header fields in the upper 32 bits and the experimenter ID 'VENDOR'
+ * (0 for a standard OXM/NXM header) in the lower 32 bits. */
+#define NXM_HEADER(VENDOR, CLASS, FIELD, HASMASK, LENGTH)       \
+    (((uint64_t) (CLASS) << 48) |                               \
+     ((uint64_t) (FIELD) << 41) |                               \
+     ((uint64_t) (HASMASK) << 40) |                             \
+     ((uint64_t) (LENGTH) << 32) |                              \
+     (VENDOR))
-#define NXM_HEADER_FMT "%d:%d:%d:%d"
-#define NXM_HEADER_ARGS(HEADER) \
-    nxm_vendor(HEADER), nxm_field(HEADER), \
+/* Format string and matching argument list for logging a header as
+ * vendor:class:field:hasmask:length. */
+#define NXM_HEADER_FMT "%#"PRIx32":%d:%d:%d:%d"
+#define NXM_HEADER_ARGS(HEADER)                                 \
+    nxm_vendor(HEADER), nxm_class(HEADER), nxm_field(HEADER),   \
     nxm_hasmask(HEADER), nxm_length(HEADER)
 /* Functions for turning the "hasmask" bit on or off.  (This also requires
  * adjusting the length.) */
-static uint32_t
-nxm_make_exact_header(uint32_t header)
+static uint64_t
+nxm_make_exact_header(uint64_t header)
 {
-    return NXM_HEADER(nxm_vendor(header), nxm_field(header), 0,
-                      nxm_length(header) / 2);
+    /* Halve only the payload: a masked payload is a value plus an equal-size
+     * mask, while the experimenter ID (if any) keeps its place in
+     * oxm_length. */
+    int new_len = nxm_payload_len(header) / 2 + nxm_experimenter_len(header);
+    return NXM_HEADER(nxm_vendor(header), nxm_class(header),
+                      nxm_field(header), 0, new_len);
 }
-static uint32_t
-nxm_make_wild_header(uint32_t header)
+static uint64_t
+nxm_make_wild_header(uint64_t header)
 {
-    return NXM_HEADER(nxm_vendor(header), nxm_field(header), 1,
-                      nxm_length(header) * 2);
+    /* Double only the payload, leaving room for a mask of the same size as
+     * the value; the experimenter ID (if any) is unchanged. */
+    int new_len = nxm_payload_len(header) * 2 + nxm_experimenter_len(header);
+    return NXM_HEADER(nxm_vendor(header), nxm_class(header),
+                      nxm_field(header), 1, new_len);
 }
/* Flow cookie.
* with specific cookies. See the "nx_flow_mod" and "nx_flow_stats_request"
* structure definitions for more details. This match is otherwise not
* allowed. */
-#define NXM_NX_COOKIE     NXM_HEADER  (0x0001, 30, 0, 8)
+/* The leading 0 is the experimenter ID, absent because class 0x0001 is not
+ * the experimenter class. */
+#define NXM_NX_COOKIE     NXM_HEADER  (0, 0x0001, 30, 0, 8)
 #define NXM_NX_COOKIE_W   nxm_make_wild_header(NXM_NX_COOKIE)
 struct nxm_field {
-    uint32_t header;
+    /* Internal 64-bit header: standard OXM header in the upper 32 bits,
+     * experimenter ID (or 0) in the lower 32 bits. */
+    uint64_t header;
     enum ofp_version version;
     const char *name;           /* e.g. "NXM_OF_IN_PORT". */
     enum mf_field_id id;
 };
-static const struct nxm_field *nxm_field_by_header(uint32_t header);
+static const struct nxm_field *nxm_field_by_header(uint64_t header);
 static const struct nxm_field *nxm_field_by_name(const char *name, size_t len);
-static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id);
-static const struct nxm_field *oxm_field_by_mf_id(enum mf_field_id);
+static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id,
+                                                  enum ofp_version);
+
+/* Emits 'header' into 'b', as its masked variant if 'masked'; defined below
+ * alongside nx_put_header(). */
+static void nx_put_header__(struct ofpbuf *, uint64_t header, bool masked);
/* Rate limit for nx_match parse errors. These always indicate a bug in the
* peer and so there's not much point in showing a lot of them. */
static const struct nxm_field *
mf_parse_subfield_name(const char *name, int name_len, bool *wild);
-static const struct nxm_field *
-nxm_field_from_mf_field(enum mf_field_id id, enum ofp_version version)
-{
- const struct nxm_field *oxm = oxm_field_by_mf_id(id);
- const struct nxm_field *nxm = nxm_field_by_mf_id(id);
- return oxm && (version >= oxm->version || !nxm) ? oxm : nxm;
-}
-
 /* Returns the preferred OXM header to use for field 'id' in OpenFlow version
  * 'version'.  Specify 0 for 'version' if an NXM legacy header should be
  * preferred over any standardized OXM header.  Returns 0 if field 'id' cannot
  * be expressed in NXM or OXM. */
+/* (The value returned is the internal 64-bit header form described at the
+ * top of this file.) */
-uint32_t
+static uint64_t
 mf_oxm_header(enum mf_field_id id, enum ofp_version version)
 {
-    const struct nxm_field *f = nxm_field_from_mf_field(id, version);
+    const struct nxm_field *f = nxm_field_by_mf_id(id, version);
     return f ? f->header : 0;
 }
+/* Returns the 32-bit OXM or NXM header to use for field 'id', preferring an
+ * NXM legacy header over any standardized OXM header.  Returns 0 if field 'id'
+ * cannot be expressed with a 32-bit NXM or OXM header.
+ *
+ * Whenever possible, use nx_pull_header() instead of this function, because
+ * this function cannot support 64-bit experimenter OXM headers. */
+uint32_t
+mf_nxm_header(enum mf_field_id id)
+{
+    uint64_t oxm = mf_oxm_header(id, 0);
+    return is_experimenter_oxm(oxm) ? 0 : oxm >> 32;
+}
+
+/* Returns the "struct mf_field" that corresponds to the internal 64-bit
+ * OXM/NXM 'header', or NULL if 'header' doesn't correspond to any known
+ * field. */
+static const struct mf_field *
+mf_from_oxm_header(uint64_t header)
+{
+    const struct nxm_field *f = nxm_field_by_header(header);
+    return f ? mf_from_id(f->id) : NULL;
+}
+
+
 /* Returns the "struct mf_field" that corresponds to NXM or OXM header
  * 'header', or NULL if 'header' doesn't correspond to any known field. */
 const struct mf_field *
 mf_from_nxm_header(uint32_t header)
 {
-    const struct nxm_field *f = nxm_field_by_header(header);
-    return f ? mf_from_id(f->id) : NULL;
+    /* Widen the 32-bit wire header into the internal 64-bit form (standard
+     * header in the upper 32 bits, no experimenter ID). */
+    return mf_from_oxm_header((uint64_t) header << 32);
 }
 /* Returns the width of the data for a field with the given 'header', in
  * bytes. */
 static int
-nxm_field_bytes(uint32_t header)
+nxm_field_bytes(uint64_t header)
 {
-    unsigned int length = nxm_length(header);
+    unsigned int length = nxm_payload_len(header);
+    /* A masked payload holds the value followed by an equal-size mask. */
     return nxm_hasmask(header) ? length / 2 : length;
 }
-
-/* Returns the earliest version of OpenFlow that standardized an OXM header for
- * field 'id', or UINT8_MAX if no version of OpenFlow does. */
-static enum ofp_version
-mf_oxm_version(enum mf_field_id id)
-{
- const struct nxm_field *oxm = oxm_field_by_mf_id(id);
- return oxm ? oxm->version : UINT8_MAX;
-}
- \f
+\f
/* nx_pull_match() and helpers. */
/* Given NXM/OXM value 'value' and mask 'mask' associated with 'header', checks
* for any 1-bit in the value where there is a 0-bit in the mask. Returns 0 if
* none, otherwise an error code. */
static bool
-is_mask_consistent(uint32_t header, const uint8_t *value, const uint8_t *mask)
+is_mask_consistent(uint64_t header, const uint8_t *value, const uint8_t *mask)
{
unsigned int width = nxm_field_bytes(header);
unsigned int i;
}
+/* Returns true if 'header' is the cookie pseudoheader (NXM_NX_COOKIE or its
+ * masked variant NXM_NX_COOKIE_W). */
 static bool
-is_cookie_pseudoheader(uint32_t header)
+is_cookie_pseudoheader(uint64_t header)
 {
     return header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W;
 }
static enum ofperr
-nx_pull_header__(struct ofpbuf *b, bool allow_cookie, uint32_t *header,
+nx_pull_header__(struct ofpbuf *b, bool allow_cookie, uint64_t *header,
const struct mf_field **field)
{
if (ofpbuf_size(b) < 4) {
- VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
- ofpbuf_size(b));
- goto error;
+ goto bad_len;
+ }
+
+ *header = ((uint64_t) ntohl(get_unaligned_be32(ofpbuf_data(b)))) << 32;
+ if (is_experimenter_oxm(*header)) {
+ if (ofpbuf_size(b) < 8) {
+ goto bad_len;
+ }
+ *header = ntohll(get_unaligned_be64(ofpbuf_data(b)));
}
- *header = ntohl(get_unaligned_be32(ofpbuf_pull(b, 4)));
- if (nxm_length(*header) == 0) {
- VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has zero length",
- NXM_HEADER_ARGS(*header));
+ if (nxm_length(*header) <= nxm_experimenter_len(*header)) {
+ VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has invalid length %d "
+ "(minimum is %d)",
+ NXM_HEADER_ARGS(*header), nxm_length(*header),
+ nxm_header_len(*header) + 1);
goto error;
}
+ ofpbuf_pull(b, nxm_header_len(*header));
+
if (field) {
- *field = mf_from_nxm_header(*header);
+ *field = mf_from_oxm_header(*header);
if (!*field && !(allow_cookie && is_cookie_pseudoheader(*header))) {
VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" is unknown",
NXM_HEADER_ARGS(*header));
return 0;
+bad_len:
+ VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
+ ofpbuf_size(b));
error:
*header = 0;
*field = NULL;
}
static enum ofperr
-nx_pull_entry__(struct ofpbuf *b, bool allow_cookie, uint32_t *header,
+nx_pull_entry__(struct ofpbuf *b, bool allow_cookie, uint64_t *header,
const struct mf_field **field,
union mf_value *value, union mf_value *mask)
{
return header_error;
}
- payload_len = nxm_length(*header);
+ payload_len = nxm_payload_len(*header);
payload = ofpbuf_try_pull(b, payload_len);
if (!payload) {
VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" calls for %u-byte "
nx_pull_entry(struct ofpbuf *b, const struct mf_field **field,
union mf_value *value, union mf_value *mask)
{
- uint32_t header;
+ uint64_t header;
return nx_pull_entry__(b, false, &header, field, value, mask);
}
nx_pull_header(struct ofpbuf *b, const struct mf_field **field, bool *masked)
{
enum ofperr error;
- uint32_t header;
+ uint64_t header;
error = nx_pull_header__(b, false, &header, field);
if (masked) {
union mf_value *value, union mf_value *mask)
{
enum ofperr error;
- uint32_t header;
+ uint64_t header;
error = nx_pull_entry__(b, allow_cookie, &header, field, value, mask);
if (error) {
*/
+/* Appends to 'b' the header for 'field' in 'version', followed by the
+ * 'n_bytes' bytes of 'value', with no mask. */
 static void
-nxm_put_header(struct ofpbuf *b, uint32_t header)
+nxm_put_unmasked(struct ofpbuf *b, enum mf_field_id field,
+                 enum ofp_version version, const void *value, size_t n_bytes)
 {
-    ovs_be32 n_header = htonl(header);
-    ofpbuf_put(b, &n_header, sizeof n_header);
-}
-
-static void
-nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
-{
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
-}
-
-static void
-nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
-{
-    switch (mask) {
-    case 0:
-        break;
-
-    case UINT8_MAX:
-        nxm_put_8(b, header, value);
-        break;
-
-    default:
-        nxm_put_header(b, nxm_make_wild_header(header));
-        ofpbuf_put(b, &value, sizeof value);
-        ofpbuf_put(b, &mask, sizeof mask);
-    }
-}
-
-static void
-nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
-{
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
-}
-
-static void
-nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
-{
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
-    ofpbuf_put(b, &mask, sizeof mask);
+    nx_put_header(b, field, version, false);
+    ofpbuf_put(b, value, n_bytes);
 }
+/* Appends to 'b' an OXM/NXM entry for 'field' with the 'n_bytes' bytes of
+ * 'value' and 'mask': nothing at all if 'mask' is all zeros (the field is
+ * fully wildcarded), an exact-match entry without a mask if 'mask' is all
+ * ones, otherwise a masked entry carrying both the value and the mask. */
 static void
-nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
-{
-    switch (mask) {
-    case 0:
-        break;
-
-    case OVS_BE16_MAX:
-        nxm_put_16(b, header, value);
-        break;
-
-    default:
-        nxm_put_16w(b, nxm_make_wild_header(header), value, mask);
-        break;
+nxm_put(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+        const void *value, const void *mask, size_t n_bytes)
+{
+    if (!is_all_zeros(mask, n_bytes)) {
+        bool masked = !is_all_ones(mask, n_bytes);
+        nx_put_header(b, field, version, masked);
+        ofpbuf_put(b, value, n_bytes);
+        if (masked) {
+            ofpbuf_put(b, mask, n_bytes);
+        }
     }
 }
+/* Fixed-width convenience wrappers.  The "*m" variants hand 'mask' to
+ * nxm_put(), which omits a fully-wildcarded field and drops an all-ones
+ * mask; the others emit an exact match via nxm_put_unmasked(). */
 static void
-nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
+nxm_put_8m(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+           uint8_t value, uint8_t mask)
 {
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
+    nxm_put(b, field, version, &value, &mask, sizeof value);
 }
 static void
-nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
+nxm_put_8(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+          uint8_t value)
 {
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
-    ofpbuf_put(b, &mask, sizeof mask);
+    nxm_put_unmasked(b, field, version, &value, sizeof value);
 }
 static void
-nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
+nxm_put_16m(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+            ovs_be16 value, ovs_be16 mask)
 {
-    switch (mask) {
-    case 0:
-        break;
-
-    case OVS_BE32_MAX:
-        nxm_put_32(b, header, value);
-        break;
-
-    default:
-        nxm_put_32w(b, nxm_make_wild_header(header), value, mask);
-        break;
-    }
+    nxm_put(b, field, version, &value, &mask, sizeof value);
 }
 static void
-nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
+nxm_put_16(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+           ovs_be16 value)
 {
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
+    nxm_put_unmasked(b, field, version, &value, sizeof value);
 }
 static void
-nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
+nxm_put_32m(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+            ovs_be32 value, ovs_be32 mask)
 {
-    nxm_put_header(b, header);
-    ofpbuf_put(b, &value, sizeof value);
-    ofpbuf_put(b, &mask, sizeof mask);
+    nxm_put(b, field, version, &value, &mask, sizeof value);
 }
 static void
-nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
+nxm_put_32(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+           ovs_be32 value)
 {
-    switch (mask) {
-    case 0:
-        break;
-
-    case OVS_BE64_MAX:
-        nxm_put_64(b, header, value);
-        break;
-
-    default:
-        nxm_put_64w(b, nxm_make_wild_header(header), value, mask);
-        break;
-    }
+    nxm_put_unmasked(b, field, version, &value, sizeof value);
 }
 static void
-nxm_put_eth(struct ofpbuf *b, uint32_t header,
-            const uint8_t value[ETH_ADDR_LEN])
+nxm_put_64m(struct ofpbuf *b, enum mf_field_id field, enum ofp_version version,
+            ovs_be64 value, ovs_be64 mask)
 {
-    nxm_put_header(b, header);
-    ofpbuf_put(b, value, ETH_ADDR_LEN);
+    nxm_put(b, field, version, &value, &mask, sizeof value);
 }
+/* Appends to 'b' an Ethernet-address match for 'field', following nxm_put()'s
+ * all-zeros/all-ones mask conventions. */
 static void
-nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
+nxm_put_eth_masked(struct ofpbuf *b,
+                   enum mf_field_id field, enum ofp_version version,
                    const uint8_t value[ETH_ADDR_LEN],
                    const uint8_t mask[ETH_ADDR_LEN])
 {
-    if (!eth_addr_is_zero(mask)) {
-        if (eth_mask_is_exact(mask)) {
-            nxm_put_eth(b, header, value);
-        } else {
-            nxm_put_header(b, nxm_make_wild_header(header));
-            ofpbuf_put(b, value, ETH_ADDR_LEN);
-            ofpbuf_put(b, mask, ETH_ADDR_LEN);
-        }
-    }
+    /* nxm_put() reproduces the zero/exact/masked special cases that the
+     * removed branches used to handle explicitly. */
+    nxm_put(b, field, version, value, mask, ETH_ADDR_LEN);
 }
+/* Appends to 'b' an IPv6-address match for 'field', following nxm_put()'s
+ * all-zeros/all-ones mask conventions. */
 static void
-nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
+nxm_put_ipv6(struct ofpbuf *b,
+             enum mf_field_id field, enum ofp_version version,
              const struct in6_addr *value, const struct in6_addr *mask)
 {
-    if (ipv6_mask_is_any(mask)) {
-        return;
-    } else if (ipv6_mask_is_exact(mask)) {
-        nxm_put_header(b, header);
-        ofpbuf_put(b, value, sizeof *value);
-    } else {
-        nxm_put_header(b, nxm_make_wild_header(header));
-        ofpbuf_put(b, value, sizeof *value);
-        ofpbuf_put(b, mask, sizeof *mask);
-    }
+    /* nxm_put() handles the any/exact/partial mask cases that the removed
+     * branches used to special-case. */
+    nxm_put(b, field, version, value->s6_addr, mask->s6_addr,
+            sizeof value->s6_addr);
 }
 static void
-nxm_put_frag(struct ofpbuf *b, const struct match *match, enum ofp_version oxm)
+nxm_put_frag(struct ofpbuf *b, const struct match *match,
+             enum ofp_version version)
 {
-    uint32_t header = mf_oxm_header(MFF_IP_FRAG, oxm);
-    uint8_t nw_frag = match->flow.nw_frag;
-    uint8_t nw_frag_mask = match->wc.masks.nw_frag;
+    uint8_t nw_frag = match->flow.nw_frag & FLOW_NW_FRAG_MASK;
+    uint8_t nw_frag_mask = match->wc.masks.nw_frag & FLOW_NW_FRAG_MASK;
-    switch (nw_frag_mask) {
-    case 0:
-        break;
-
-    case FLOW_NW_FRAG_MASK:
-        nxm_put_8(b, header, nw_frag);
-        break;
-
-    default:
-        nxm_put_8m(b, header, nw_frag, nw_frag_mask & FLOW_NW_FRAG_MASK);
-        break;
-    }
+    /* A mask covering all of FLOW_NW_FRAG_MASK is an exact match on the frag
+     * bits, so present it to nxm_put_8m() as all-ones to get the unmasked
+     * encoding (as the removed FLOW_NW_FRAG_MASK case did). */
+    nxm_put_8m(b, MFF_IP_FRAG, version, nw_frag,
+               nw_frag_mask == FLOW_NW_FRAG_MASK ? UINT8_MAX : nw_frag_mask);
 }
/* Appends to 'b' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
const struct flow *flow = &match->flow;
if (flow->dl_type == htons(ETH_TYPE_IP)) {
- nxm_put_32m(b, mf_oxm_header(MFF_IPV4_SRC, oxm),
+ nxm_put_32m(b, MFF_IPV4_SRC, oxm,
flow->nw_src, match->wc.masks.nw_src);
- nxm_put_32m(b, mf_oxm_header(MFF_IPV4_DST, oxm),
+ nxm_put_32m(b, MFF_IPV4_DST, oxm,
flow->nw_dst, match->wc.masks.nw_dst);
} else {
- nxm_put_ipv6(b, mf_oxm_header(MFF_IPV6_SRC, oxm),
+ nxm_put_ipv6(b, MFF_IPV6_SRC, oxm,
&flow->ipv6_src, &match->wc.masks.ipv6_src);
- nxm_put_ipv6(b, mf_oxm_header(MFF_IPV6_DST, oxm),
+ nxm_put_ipv6(b, MFF_IPV6_DST, oxm,
&flow->ipv6_dst, &match->wc.masks.ipv6_dst);
}
if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
if (oxm) {
- nxm_put_8(b, mf_oxm_header(MFF_IP_DSCP_SHIFTED, oxm),
+ nxm_put_8(b, MFF_IP_DSCP_SHIFTED, oxm,
flow->nw_tos >> 2);
} else {
- nxm_put_8(b, mf_oxm_header(MFF_IP_DSCP, oxm),
+ nxm_put_8(b, MFF_IP_DSCP, oxm,
flow->nw_tos & IP_DSCP_MASK);
}
}
if (match->wc.masks.nw_tos & IP_ECN_MASK) {
- nxm_put_8(b, mf_oxm_header(MFF_IP_ECN, oxm),
+ nxm_put_8(b, MFF_IP_ECN, oxm,
flow->nw_tos & IP_ECN_MASK);
}
if (!oxm && match->wc.masks.nw_ttl) {
- nxm_put_8(b, mf_oxm_header(MFF_IP_TTL, oxm), flow->nw_ttl);
+ nxm_put_8(b, MFF_IP_TTL, oxm, flow->nw_ttl);
}
- nxm_put_32m(b, mf_oxm_header(MFF_IPV6_LABEL, oxm),
+ nxm_put_32m(b, MFF_IPV6_LABEL, oxm,
flow->ipv6_label, match->wc.masks.ipv6_label);
if (match->wc.masks.nw_proto) {
- nxm_put_8(b, mf_oxm_header(MFF_IP_PROTO, oxm), flow->nw_proto);
+ nxm_put_8(b, MFF_IP_PROTO, oxm, flow->nw_proto);
if (flow->nw_proto == IPPROTO_TCP) {
- nxm_put_16m(b, mf_oxm_header(MFF_TCP_SRC, oxm),
+ nxm_put_16m(b, MFF_TCP_SRC, oxm,
flow->tp_src, match->wc.masks.tp_src);
- nxm_put_16m(b, mf_oxm_header(MFF_TCP_DST, oxm),
+ nxm_put_16m(b, MFF_TCP_DST, oxm,
flow->tp_dst, match->wc.masks.tp_dst);
- nxm_put_16m(b, mf_oxm_header(MFF_TCP_FLAGS, oxm),
+ nxm_put_16m(b, MFF_TCP_FLAGS, oxm,
flow->tcp_flags, match->wc.masks.tcp_flags);
} else if (flow->nw_proto == IPPROTO_UDP) {
- nxm_put_16m(b, mf_oxm_header(MFF_UDP_SRC, oxm),
+ nxm_put_16m(b, MFF_UDP_SRC, oxm,
flow->tp_src, match->wc.masks.tp_src);
- nxm_put_16m(b, mf_oxm_header(MFF_UDP_DST, oxm),
+ nxm_put_16m(b, MFF_UDP_DST, oxm,
flow->tp_dst, match->wc.masks.tp_dst);
} else if (flow->nw_proto == IPPROTO_SCTP) {
- nxm_put_16m(b, mf_oxm_header(MFF_SCTP_SRC, oxm), flow->tp_src,
+ nxm_put_16m(b, MFF_SCTP_SRC, oxm, flow->tp_src,
match->wc.masks.tp_src);
- nxm_put_16m(b, mf_oxm_header(MFF_SCTP_DST, oxm), flow->tp_dst,
+ nxm_put_16m(b, MFF_SCTP_DST, oxm, flow->tp_dst,
match->wc.masks.tp_dst);
} else if (is_icmpv4(flow)) {
if (match->wc.masks.tp_src) {
- nxm_put_8(b, mf_oxm_header(MFF_ICMPV4_TYPE, oxm),
+ nxm_put_8(b, MFF_ICMPV4_TYPE, oxm,
ntohs(flow->tp_src));
}
if (match->wc.masks.tp_dst) {
- nxm_put_8(b, mf_oxm_header(MFF_ICMPV4_CODE, oxm),
+ nxm_put_8(b, MFF_ICMPV4_CODE, oxm,
ntohs(flow->tp_dst));
}
} else if (is_icmpv6(flow)) {
if (match->wc.masks.tp_src) {
- nxm_put_8(b, mf_oxm_header(MFF_ICMPV6_TYPE, oxm),
+ nxm_put_8(b, MFF_ICMPV6_TYPE, oxm,
ntohs(flow->tp_src));
}
if (match->wc.masks.tp_dst) {
- nxm_put_8(b, mf_oxm_header(MFF_ICMPV6_CODE, oxm),
+ nxm_put_8(b, MFF_ICMPV6_CODE, oxm,
ntohs(flow->tp_dst));
}
if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
- nxm_put_ipv6(b, mf_oxm_header(MFF_ND_TARGET, oxm),
+ nxm_put_ipv6(b, MFF_ND_TARGET, oxm,
&flow->nd_target, &match->wc.masks.nd_target);
if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ND_SLL, oxm),
+ nxm_put_eth_masked(b, MFF_ND_SLL, oxm,
flow->arp_sha, match->wc.masks.arp_sha);
}
if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ND_TLL, oxm),
+ nxm_put_eth_masked(b, MFF_ND_TLL, oxm,
flow->arp_tha, match->wc.masks.arp_tha);
}
}
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 28);
/* Metadata. */
if (match->wc.masks.dp_hash) {
- nxm_put_32m(b, mf_oxm_header(MFF_DP_HASH, oxm),
+ nxm_put_32m(b, MFF_DP_HASH, oxm,
htonl(flow->dp_hash), htonl(match->wc.masks.dp_hash));
}
if (match->wc.masks.recirc_id) {
- nxm_put_32(b, mf_oxm_header(MFF_RECIRC_ID, oxm),
- htonl(flow->recirc_id));
+ nxm_put_32(b, MFF_RECIRC_ID, oxm, htonl(flow->recirc_id));
}
if (match->wc.masks.in_port.ofp_port) {
ofp_port_t in_port = flow->in_port.ofp_port;
if (oxm) {
- nxm_put_32(b, mf_oxm_header(MFF_IN_PORT_OXM, oxm),
+ nxm_put_32(b, MFF_IN_PORT_OXM, oxm,
ofputil_port_to_ofp11(in_port));
} else {
- nxm_put_16(b, mf_oxm_header(MFF_IN_PORT, oxm),
+ nxm_put_16(b, MFF_IN_PORT, oxm,
htons(ofp_to_u16(in_port)));
}
}
+ if (match->wc.masks.actset_output) {
+ nxm_put_32(b, MFF_ACTSET_OUTPUT, oxm,
+ ofputil_port_to_ofp11(flow->actset_output));
+ }
/* Ethernet. */
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ETH_SRC, oxm),
+ nxm_put_eth_masked(b, MFF_ETH_SRC, oxm,
flow->dl_src, match->wc.masks.dl_src);
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ETH_DST, oxm),
+ nxm_put_eth_masked(b, MFF_ETH_DST, oxm,
flow->dl_dst, match->wc.masks.dl_dst);
- nxm_put_16m(b, mf_oxm_header(MFF_ETH_TYPE, oxm),
+ nxm_put_16m(b, MFF_ETH_TYPE, oxm,
ofputil_dl_type_to_openflow(flow->dl_type),
match->wc.masks.dl_type);
ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
- nxm_put_16(b, mf_oxm_header(MFF_VLAN_VID, oxm), vid);
+ nxm_put_16(b, MFF_VLAN_VID, oxm, vid);
} else if (mask) {
- nxm_put_16m(b, mf_oxm_header(MFF_VLAN_VID, oxm), vid, mask);
+ nxm_put_16m(b, MFF_VLAN_VID, oxm, vid, mask);
}
if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
- nxm_put_8(b, mf_oxm_header(MFF_VLAN_PCP, oxm),
+ nxm_put_8(b, MFF_VLAN_PCP, oxm,
vlan_tci_to_pcp(flow->vlan_tci));
}
} else {
- nxm_put_16m(b, mf_oxm_header(MFF_VLAN_TCI, oxm), flow->vlan_tci,
+ nxm_put_16m(b, MFF_VLAN_TCI, oxm, flow->vlan_tci,
match->wc.masks.vlan_tci);
}
/* MPLS. */
if (eth_type_mpls(flow->dl_type)) {
if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
- nxm_put_8(b, mf_oxm_header(MFF_MPLS_TC, oxm),
+ nxm_put_8(b, MFF_MPLS_TC, oxm,
mpls_lse_to_tc(flow->mpls_lse[0]));
}
if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
- nxm_put_8(b, mf_oxm_header(MFF_MPLS_BOS, oxm),
+ nxm_put_8(b, MFF_MPLS_BOS, oxm,
mpls_lse_to_bos(flow->mpls_lse[0]));
}
if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
- nxm_put_32(b, mf_oxm_header(MFF_MPLS_LABEL, oxm),
+ nxm_put_32(b, MFF_MPLS_LABEL, oxm,
htonl(mpls_lse_to_label(flow->mpls_lse[0])));
}
}
flow->dl_type == htons(ETH_TYPE_RARP)) {
/* ARP. */
if (match->wc.masks.nw_proto) {
- nxm_put_16(b, mf_oxm_header(MFF_ARP_OP, oxm),
+ nxm_put_16(b, MFF_ARP_OP, oxm,
htons(flow->nw_proto));
}
- nxm_put_32m(b, mf_oxm_header(MFF_ARP_SPA, oxm),
+ nxm_put_32m(b, MFF_ARP_SPA, oxm,
flow->nw_src, match->wc.masks.nw_src);
- nxm_put_32m(b, mf_oxm_header(MFF_ARP_TPA, oxm),
+ nxm_put_32m(b, MFF_ARP_TPA, oxm,
flow->nw_dst, match->wc.masks.nw_dst);
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ARP_SHA, oxm),
+ nxm_put_eth_masked(b, MFF_ARP_SHA, oxm,
flow->arp_sha, match->wc.masks.arp_sha);
- nxm_put_eth_masked(b, mf_oxm_header(MFF_ARP_THA, oxm),
+ nxm_put_eth_masked(b, MFF_ARP_THA, oxm,
flow->arp_tha, match->wc.masks.arp_tha);
}
/* Tunnel ID. */
- nxm_put_64m(b, mf_oxm_header(MFF_TUN_ID, oxm),
+ nxm_put_64m(b, MFF_TUN_ID, oxm,
flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
/* Other tunnel metadata. */
- nxm_put_32m(b, mf_oxm_header(MFF_TUN_SRC, oxm),
+ nxm_put_32m(b, MFF_TUN_SRC, oxm,
flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
- nxm_put_32m(b, mf_oxm_header(MFF_TUN_DST, oxm),
+ nxm_put_32m(b, MFF_TUN_DST, oxm,
flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
/* Registers. */
if (oxm < OFP15_VERSION) {
for (i = 0; i < FLOW_N_REGS; i++) {
- nxm_put_32m(b, mf_oxm_header(MFF_REG0 + i, oxm),
+ nxm_put_32m(b, MFF_REG0 + i, oxm,
htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
}
} else {
for (i = 0; i < FLOW_N_XREGS; i++) {
- nxm_put_64m(b, mf_oxm_header(MFF_XREG0 + i, oxm),
+ nxm_put_64m(b, MFF_XREG0 + i, oxm,
htonll(flow_get_xreg(flow, i)),
htonll(flow_get_xreg(&match->wc.masks, i)));
}
}
/* Mark. */
- nxm_put_32m(b, mf_oxm_header(MFF_PKT_MARK, oxm), htonl(flow->pkt_mark),
+ nxm_put_32m(b, MFF_PKT_MARK, oxm, htonl(flow->pkt_mark),
htonl(match->wc.masks.pkt_mark));
/* OpenFlow 1.1+ Metadata. */
- nxm_put_64m(b, mf_oxm_header(MFF_METADATA, oxm),
+ nxm_put_64m(b, MFF_METADATA, oxm,
flow->metadata, match->wc.masks.metadata);
/* Cookie. */
- nxm_put_64m(b, NXM_NX_COOKIE, cookie & cookie_mask, cookie_mask);
+ if (cookie_mask) {
+ bool masked = cookie_mask != OVS_BE64_MAX;
+
+ cookie &= cookie_mask;
+ nx_put_header__(b, NXM_NX_COOKIE, masked);
+ ofpbuf_put(b, &cookie, sizeof cookie);
+ if (masked) {
+ ofpbuf_put(b, &cookie_mask, sizeof cookie_mask);
+ }
+ }
match_len = ofpbuf_size(b) - start_len;
return match_len;
return match_len;
}
+/* Appends to 'b' the wire encoding of 'header' (4 bytes, or 8 for an
+ * experimenter OXM), first converting it to its masked ("wild") variant if
+ * 'masked' is true. */
+static void
+nx_put_header__(struct ofpbuf *b, uint64_t header, bool masked)
+{
+    uint64_t masked_header = masked ? nxm_make_wild_header(header) : header;
+    ovs_be64 network_header = htonll(masked_header);
+
+    /* The header occupies the most-significant bytes of the big-endian
+     * value, so emitting the first nxm_header_len() bytes writes exactly the
+     * 32- or 64-bit header. */
+    ofpbuf_put(b, &network_header, nxm_header_len(header));
+}
+
 void
 nx_put_header(struct ofpbuf *b, enum mf_field_id field,
               enum ofp_version version, bool masked)
 {
-    uint32_t header = mf_oxm_header(field, version);
-    nxm_put_header(b, masked ? nxm_make_wild_header(header) : header);
+    /* Look up the preferred 64-bit internal header for 'field' in 'version'
+     * and emit its wire form. */
+    nx_put_header__(b, mf_oxm_header(field, version), masked);
 }
void
\f
/* nx_match_to_string() and helpers. */
-static void format_nxm_field_name(struct ds *, uint32_t header);
+static void format_nxm_field_name(struct ds *, uint64_t header);
char *
nx_match_to_string(const uint8_t *p, unsigned int match_len)
union mf_value value;
union mf_value mask;
enum ofperr error;
- uint32_t header;
+ uint64_t header;
int value_len;
error = nx_pull_entry__(&b, true, &header, NULL, &value, &mask);
}
static void
-format_nxm_field_name(struct ds *s, uint32_t header)
+format_nxm_field_name(struct ds *s, uint64_t header)
{
const struct nxm_field *f = nxm_field_by_header(header);
if (f) {
} else if (header == NXM_NX_COOKIE_W) {
ds_put_cstr(s, "NXM_NX_COOKIE_W");
} else {
- ds_put_format(s, "%d:%d", nxm_vendor(header), nxm_field(header));
+ ds_put_format(s, "%d:%d", nxm_class(header), nxm_field(header));
}
}
return strlen(b) == a_len && !memcmp(a, b, a_len);
}
-static uint32_t
+static uint64_t
parse_nxm_field_name(const char *name, int name_len)
{
const struct nxm_field *f;
return NXM_NX_COOKIE_W;
}
- /* Check whether it's a 32-bit field header value as hex.
+ /* Check whether it's a field header value as hex.
* (This isn't ordinarily useful except for testing error behavior.) */
if (name_len == 8) {
- uint32_t header = hexits_value(name, name_len, NULL);
- if (header != UINT_MAX) {
+ uint64_t header;
+ bool ok;
+
+ header = hexits_value(name, name_len, &ok) << 32;
+ if (ok) {
+ return header;
+ }
+ } else if (name_len == 16) {
+ uint64_t header;
+ bool ok;
+
+ header = hexits_value(name, name_len, &ok);
+ if (ok && is_experimenter_oxm(header)) {
return header;
}
}
for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
const char *name;
- uint32_t header;
+ uint64_t header;
int name_len;
size_t n;
s += name_len + 1;
- nxm_put_header(b, header);
+ nx_put_header__(b, header, false);
s = ofpbuf_put_hex(b, s, &n);
if (n != nxm_field_bytes(header)) {
ovs_fatal(0, "%.2s: hex digits expected", s);
*
* Returns NULL if successful, otherwise a malloc()'d string describing the
* error. The caller is responsible for freeing the returned string. */
-char * WARN_UNUSED_RESULT
+char * OVS_WARN_UNUSED_RESULT
nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
{
const char *full_s = s;
}
return NULL;
}
-
-/* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
- * '*load'.
- *
- * Returns NULL if successful, otherwise a malloc()'d string describing the
- * error. The caller is responsible for freeing the returned string. */
-char * WARN_UNUSED_RESULT
-nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
-{
- const char *full_s = s;
- uint64_t value = strtoull(s, (char **) &s, 0);
- char *error;
-
- if (strncmp(s, "->", 2)) {
- return xasprintf("%s: missing `->' following value", full_s);
- }
- s += 2;
- error = mf_parse_subfield(&load->dst, s);
- if (error) {
- return error;
- }
-
- if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
- return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
- full_s, value, load->dst.n_bits);
- }
-
- load->subvalue.be64[0] = htonll(0);
- load->subvalue.be64[1] = htonll(value);
- return NULL;
-}
\f
-/* nxm_format_reg_move(), nxm_format_reg_load(). */
+/* nxm_format_reg_move(). */
void
nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
mf_format_subfield(&move->dst, s);
}
-void
-nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
-{
- ds_put_cstr(s, "load:");
- mf_format_subvalue(&load->subvalue, s);
- ds_put_cstr(s, "->");
- mf_format_subfield(&load->dst, s);
-}
\f
enum ofperr
nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
return error;
}
- return mf_check_dst(&move->dst, NULL);
+ return mf_check_dst(&move->dst, flow);
}
-
-enum ofperr
-nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
-{
- return mf_check_dst(&load->dst, flow);
-}
-\f
\f
-/* nxm_execute_reg_move(), nxm_execute_reg_load(). */
+/* nxm_execute_reg_move(). */
void
nxm_execute_reg_move(const struct ofpact_reg_move *move,
mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
mf_mask_field_and_prereqs(move->src.field, &wc->masks);
- mf_get_value(move->dst.field, flow, &dst_value);
- mf_get_value(move->src.field, flow, &src_value);
- bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
- &dst_value, move->dst.field->n_bytes, move->dst.ofs,
- move->src.n_bits);
- mf_set_flow_value(move->dst.field, &dst_value, flow);
-}
+ /* A flow may wildcard nw_frag. Do nothing if setting a transport
+ * header field on a packet that does not have them. */
+ if (mf_are_prereqs_ok(move->dst.field, flow)
+ && mf_are_prereqs_ok(move->src.field, flow)) {
-void
-nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
- struct flow_wildcards *wc)
-{
- /* Since at the datapath interface we do not have set actions for
- * individual fields, but larger sets of fields for a given protocol
- * layer, the set action will in practice only ever apply to exactly
- * matched flows for the given protocol layer. For example, if the
- * reg_load changes the IP TTL, the corresponding datapath action will
- * rewrite also the IP addresses and TOS byte. Since these other field
- * values may not be explicitly set, they depend on the incoming flow field
- * values, and are hence all of them are set in the wildcards masks, when
- * the action is committed to the datapath. For the rare case, where the
- * reg_load action does not actually change the value, and no other flow
- * field values are set (or loaded), the datapath action is skipped, and
- * no mask bits are set. Such a datapath flow should, however, be
- * dependent on the specific field value, so the corresponding wildcard
- * mask bits must be set, lest the datapath flow be applied to packets
- * containing some other value in the field and the field value remain
- * unchanged regardless of the incoming value.
- *
- * We set the masks here for the whole fields, and their prerequisities.
- * Even if only the lower byte of a TCP destination port is set,
- * we set the mask for the whole field, and also the ip_proto in the IP
- * header, so that the kernel flow would not be applied on, e.g., a UDP
- * packet, or any other IP protocol in addition to TCP packets.
- */
- mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
- mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
+ mf_get_value(move->dst.field, flow, &dst_value);
+ mf_get_value(move->src.field, flow, &src_value);
+ bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
+ &dst_value, move->dst.field->n_bytes, move->dst.ofs,
+ move->src.n_bits);
+ mf_set_flow_value(move->dst.field, &dst_value, flow);
+ }
}
void
*
* Returns NULL if successful, otherwise a malloc()'d string describing the
* error. The caller is responsible for freeing the returned string. */
-char * WARN_UNUSED_RESULT
+char * OVS_WARN_UNUSED_RESULT
nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
{
char *error;
if (!sf->field) {
ds_put_cstr(s, "<unknown>");
} else {
- const struct nxm_field *f = nxm_field_from_mf_field(sf->field->id, 0);
+ const struct nxm_field *f = nxm_field_by_mf_id(sf->field->id, 0);
ds_put_cstr(s, f ? f->name : sf->field->name);
}
* bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
* may both be omitted (the [] are still required) to indicate an entire
* field. */
-char * WARN_UNUSED_RESULT
+char * OVS_WARN_UNUSED_RESULT
mf_parse_subfield__(struct mf_subfield *sf, const char **sp)
{
const struct mf_field *field;
* bit indexes. "..end" may be omitted to indicate a single bit. "start..end"
* may both be omitted (the [] are still required) to indicate an entire
* field. */
-char * WARN_UNUSED_RESULT
+char * OVS_WARN_UNUSED_RESULT
mf_parse_subfield(struct mf_subfield *sf, const char *s)
{
char *error = mf_parse_subfield__(sf, &s);
int i;
BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->bm) {
- uint32_t oxm = mf_oxm_header(i, version);
- uint32_t vendor = nxm_vendor(oxm);
+ uint64_t oxm = mf_oxm_header(i, version);
+ uint32_t class = nxm_class(oxm);
int field = nxm_field(oxm);
- if (vendor == OFPXMC12_OPENFLOW_BASIC && field < 64) {
+ if (class == OFPXMC12_OPENFLOW_BASIC && field < 64) {
oxm_bitmap |= UINT64_C(1) << field;
}
}
struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
for (enum mf_field_id id = 0; id < MFF_N_IDS; id++) {
- if (version >= mf_oxm_version(id)) {
- uint32_t oxm = mf_oxm_header(id, version);
- uint32_t vendor = nxm_vendor(oxm);
+ uint64_t oxm = mf_oxm_header(id, version);
+ if (oxm && version >= nxm_field_by_header(oxm)->version) {
+ uint32_t class = nxm_class(oxm);
int field = nxm_field(oxm);
- if (vendor == OFPXMC12_OPENFLOW_BASIC
+ if (class == OFPXMC12_OPENFLOW_BASIC
&& field < 64
&& oxm_bitmap & htonll(UINT64_C(1) << field)) {
bitmap_set1(fields.bm, id);
}
\f
struct nxm_field_index {
- struct hmap_node header_node;
- struct hmap_node name_node;
- struct nxm_field nf;
+ struct hmap_node header_node; /* In nxm_header_map. */
+ struct hmap_node name_node; /* In nxm_name_map. */
+ struct ovs_list mf_node; /* In mf_mf_map[nf.id]. */
+ const struct nxm_field nf;
};
#include "nx-match.inc"
static struct hmap nxm_header_map;
static struct hmap nxm_name_map;
-static struct nxm_field *nxm_fields[MFF_N_IDS];
-static struct nxm_field *oxm_fields[MFF_N_IDS];
+static struct ovs_list nxm_mf_map[MFF_N_IDS];
static void
nxm_init(void)
if (ovsthread_once_start(&once)) {
hmap_init(&nxm_header_map);
hmap_init(&nxm_name_map);
+ for (int i = 0; i < MFF_N_IDS; i++) {
+ list_init(&nxm_mf_map[i]);
+ }
for (struct nxm_field_index *nfi = all_nxm_fields;
nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) {
hmap_insert(&nxm_header_map, &nfi->header_node,
hash_int(nfi->nf.header, 0));
hmap_insert(&nxm_name_map, &nfi->name_node,
hash_string(nfi->nf.name, 0));
- if (is_nxm_header(nfi->nf.header)) {
- nxm_fields[nfi->nf.id] = &nfi->nf;
- } else {
- oxm_fields[nfi->nf.id] = &nfi->nf;
- }
+ list_push_back(&nxm_mf_map[nfi->nf.id], &nfi->mf_node);
}
ovsthread_once_done(&once);
}
}
static const struct nxm_field *
-nxm_field_by_header(uint32_t header)
+nxm_field_by_header(uint64_t header)
{
const struct nxm_field_index *nfi;
}
static const struct nxm_field *
-nxm_field_by_mf_id(enum mf_field_id id)
+nxm_field_by_mf_id(enum mf_field_id id, enum ofp_version version)
{
- nxm_init();
- return nxm_fields[id];
-}
+ const struct nxm_field_index *nfi;
+ const struct nxm_field *f;
-static const struct nxm_field *
-oxm_field_by_mf_id(enum mf_field_id id)
-{
nxm_init();
- return oxm_fields[id];
-}
+ f = NULL;
+ LIST_FOR_EACH (nfi, mf_node, &nxm_mf_map[id]) {
+ if (!f || version >= nfi->nf.version) {
+ f = &nfi->nf;
+ }
+ }
+ return f;
+}