2 * Copyright (c) 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
21 #include <netinet/icmp6.h>
23 #include "classifier.h"
24 #include "dynamic-string.h"
26 #include "meta-flow.h"
27 #include "ofp-actions.h"
28 #include "ofp-errors.h"
31 #include "openflow/nicira-ext.h"
34 #include "unaligned.h"
38 VLOG_DEFINE_THIS_MODULE(nx_match);
42 * The high order bit differentiate reserved classes from member classes.
43 * Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
44 * Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
46 enum ofp12_oxm_class {
47 OFPXMC12_NXM_0 = 0x0000, /* Backward compatibility with NXM */
48 OFPXMC12_NXM_1 = 0x0001, /* Backward compatibility with NXM */
49 OFPXMC12_OPENFLOW_BASIC = 0x8000, /* Basic class for OpenFlow */
50 OFPXMC15_PACKET_REGS = 0x8001, /* Packet registers (pipeline fields). */
51 OFPXMC12_EXPERIMENTER = 0xffff, /* Experimenter class */
54 /* Functions for extracting fields from OXM/NXM headers. */
/* Returns the vendor (OXM class) field, carried in the upper 16 bits of an
 * NXM/OXM 'header'. */
static int
nxm_vendor(uint32_t header)
{
    return header >> 16;
}
/* Returns the 7-bit field number from NXM/OXM 'header' (bits 9 through 15). */
static int
nxm_field(uint32_t header)
{
    return (header >> 9) & 0x7f;
}
/* Returns true if NXM/OXM 'header' has its "hasmask" bit (bit 8) set, that
 * is, if the payload carries a mask in addition to a value. */
static bool
nxm_hasmask(uint32_t header)
{
    return (header >> 8) & 1;
}
/* Returns the encoded payload length, in bytes, from the low 8 bits of
 * NXM/OXM 'header'.  (For masked headers this counts value plus mask.) */
static int
nxm_length(uint32_t header)
{
    return header & 0xff;
}
/* Returns true if 'header' is a legacy NXM header, false if it is an OXM
 * header.  NXM compatibility classes are 0x0000 and 0x0001; any other vendor
 * (class) value indicates OXM. */
static bool
is_nxm_header(uint32_t header)
{
    return nxm_vendor(header) <= 1;
}
/* Composes a 32-bit NXM/OXM header from its four component fields:
 * 16-bit vendor (class), 7-bit field number, 1-bit hasmask flag, and
 * 8-bit payload length. */
#define NXM_HEADER(VENDOR, FIELD, HASMASK, LENGTH) \
    (((VENDOR) << 16) | ((FIELD) << 9) | ((HASMASK) << 8) | (LENGTH))

/* printf()-style format string and matching argument list for logging a
 * decoded NXM/OXM header as "vendor:field:hasmask:length". */
#define NXM_HEADER_FMT "%d:%d:%d:%d"
#define NXM_HEADER_ARGS(HEADER)                     \
    nxm_vendor(HEADER), nxm_field(HEADER),          \
    nxm_hasmask(HEADER), nxm_length(HEADER)
/* Functions for turning the "hasmask" bit on or off.  (This also requires
 * adjusting the length, because a masked payload carries both a value and a
 * mask and is therefore twice as long.) */

/* Returns 'header' with its hasmask bit cleared and its length halved. */
static uint32_t
nxm_make_exact_header(uint32_t header)
{
    return NXM_HEADER(nxm_vendor(header), nxm_field(header), 0,
                      nxm_length(header) / 2);
}
/* Returns 'header' with its hasmask bit set and its length doubled. */
static uint32_t
nxm_make_wild_header(uint32_t header)
{
    return NXM_HEADER(nxm_vendor(header), nxm_field(header), 1,
                      nxm_length(header) * 2);
}
/* Flow cookie pseudo-header.
 *
 * This may be used to gain the OpenFlow 1.1-like ability to restrict
 * certain NXM-based Flow Mod and Flow Stats Request messages to flows
 * with specific cookies.  See the "nx_flow_mod" and "nx_flow_stats_request"
 * structure definitions for more details.  This match is otherwise not
 * a real field. */
#define NXM_NX_COOKIE     NXM_HEADER (0x0001, 30, 0, 8)
#define NXM_NX_COOKIE_W   nxm_make_wild_header(NXM_NX_COOKIE)
103 enum ofp_version version;
104 const char *name; /* e.g. "NXM_OF_IN_PORT". */
109 static const struct nxm_field *nxm_field_by_header(uint32_t header);
110 static const struct nxm_field *nxm_field_by_name(const char *name, size_t len);
111 static const struct nxm_field *nxm_field_by_mf_id(enum mf_field_id);
112 static const struct nxm_field *oxm_field_by_mf_id(enum mf_field_id);
114 /* Rate limit for nx_match parse errors. These always indicate a bug in the
115 * peer and so there's not much point in showing a lot of them. */
116 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
118 static const struct nxm_field *
119 mf_parse_subfield_name(const char *name, int name_len, bool *wild);
121 static const struct nxm_field *
122 nxm_field_from_mf_field(enum mf_field_id id, enum ofp_version version)
124 const struct nxm_field *oxm = oxm_field_by_mf_id(id);
125 const struct nxm_field *nxm = nxm_field_by_mf_id(id);
126 return oxm && (version >= oxm->version || !nxm) ? oxm : nxm;
129 /* Returns the preferred OXM header to use for field 'id' in OpenFlow version
130 * 'version'. Specify 0 for 'version' if an NXM legacy header should be
131 * preferred over any standardized OXM header. Returns 0 if field 'id' cannot
132 * be expressed in NXM or OXM. */
134 mf_oxm_header(enum mf_field_id id, enum ofp_version version)
136 const struct nxm_field *f = nxm_field_from_mf_field(id, version);
137 return f ? f->header : 0;
140 /* Returns the "struct mf_field" that corresponds to NXM or OXM header
141 * 'header', or NULL if 'header' doesn't correspond to any known field. */
142 const struct mf_field *
143 mf_from_nxm_header(uint32_t header)
145 const struct nxm_field *f = nxm_field_by_header(header);
146 return f ? mf_from_id(f->id) : NULL;
/* Returns the width of the data for a field with the given 'header', in
 * bytes.  A masked header's encoded length covers both value and mask, so
 * the field itself occupies only half of it. */
static unsigned int
nxm_field_bytes(uint32_t header)
{
    unsigned int length = nxm_length(header);
    return nxm_hasmask(header) ? length / 2 : length;
}
158 /* Returns the earliest version of OpenFlow that standardized an OXM header for
159 * field 'id', or UINT8_MAX if no version of OpenFlow does. */
160 static enum ofp_version
161 mf_oxm_version(enum mf_field_id id)
163 const struct nxm_field *oxm = oxm_field_by_mf_id(id);
164 return oxm ? oxm->version : UINT8_MAX;
167 /* nx_pull_match() and helpers. */
169 /* Given NXM/OXM value 'value' and mask 'mask' associated with 'header', checks
170 * for any 1-bit in the value where there is a 0-bit in the mask. Returns 0 if
171 * none, otherwise an error code. */
173 is_mask_consistent(uint32_t header, const uint8_t *value, const uint8_t *mask)
175 unsigned int width = nxm_field_bytes(header);
178 for (i = 0; i < width; i++) {
179 if (value[i] & ~mask[i]) {
180 if (!VLOG_DROP_WARN(&rl)) {
181 VLOG_WARN_RL(&rl, "Rejecting NXM/OXM entry "NXM_HEADER_FMT " "
182 "with 1-bits in value for bits wildcarded by the "
183 "mask.", NXM_HEADER_ARGS(header));
192 is_cookie_pseudoheader(uint32_t header)
194 return header == NXM_NX_COOKIE || header == NXM_NX_COOKIE_W;
198 nx_pull_header__(struct ofpbuf *b, bool allow_cookie, uint32_t *header,
199 const struct mf_field **field)
201 if (ofpbuf_size(b) < 4) {
202 VLOG_DBG_RL(&rl, "encountered partial (%"PRIu32"-byte) OXM entry",
206 *header = ntohl(get_unaligned_be32(ofpbuf_pull(b, 4)));
207 if (nxm_length(*header) == 0) {
208 VLOG_WARN_RL(&rl, "OXM header "NXM_HEADER_FMT" has zero length",
209 NXM_HEADER_ARGS(*header));
213 *field = mf_from_nxm_header(*header);
214 if (!*field && !(allow_cookie && is_cookie_pseudoheader(*header))) {
215 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" is unknown",
216 NXM_HEADER_ARGS(*header));
217 return OFPERR_OFPBMC_BAD_FIELD;
226 return OFPERR_OFPBMC_BAD_LEN;
230 nx_pull_entry__(struct ofpbuf *b, bool allow_cookie, uint32_t *header,
231 const struct mf_field **field,
232 union mf_value *value, union mf_value *mask)
234 enum ofperr header_error;
235 unsigned int payload_len;
236 const uint8_t *payload;
239 header_error = nx_pull_header__(b, allow_cookie, header, field);
240 if (header_error && header_error != OFPERR_OFPBMC_BAD_FIELD) {
244 payload_len = nxm_length(*header);
245 payload = ofpbuf_try_pull(b, payload_len);
247 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" calls for %u-byte "
248 "payload but only %"PRIu32" bytes follow OXM header",
249 NXM_HEADER_ARGS(*header), payload_len, ofpbuf_size(b));
250 return OFPERR_OFPBMC_BAD_LEN;
253 width = nxm_field_bytes(*header);
254 if (nxm_hasmask(*header)
255 && !is_mask_consistent(*header, payload, payload + width)) {
256 return OFPERR_OFPBMC_BAD_WILDCARDS;
259 memcpy(value, payload, MIN(width, sizeof *value));
261 if (nxm_hasmask(*header)) {
262 memcpy(mask, payload + width, MIN(width, sizeof *mask));
264 memset(mask, 0xff, MIN(width, sizeof *mask));
266 } else if (nxm_hasmask(*header)) {
267 VLOG_DBG_RL(&rl, "OXM header "NXM_HEADER_FMT" includes mask but "
268 "masked OXMs are not allowed here",
269 NXM_HEADER_ARGS(*header));
270 return OFPERR_OFPBMC_BAD_MASK;
276 /* Attempts to pull an NXM or OXM header, value, and mask (if present) from the
277 * beginning of 'b'. If successful, stores a pointer to the "struct mf_field"
278 * corresponding to the pulled header in '*field', the value into '*value',
279 * and the mask into '*mask', and returns 0. On error, returns an OpenFlow
280 * error; in this case, some bytes might have been pulled off 'b' anyhow, and
281 * the output parameters might have been modified.
283 * If a NULL 'mask' is supplied, masked OXM or NXM entries are treated as
284 * errors (with OFPERR_OFPBMC_BAD_MASK).
287 nx_pull_entry(struct ofpbuf *b, const struct mf_field **field,
288 union mf_value *value, union mf_value *mask)
292 return nx_pull_entry__(b, false, &header, field, value, mask);
295 /* Attempts to pull an NXM or OXM header from the beginning of 'b'. If
296 * successful, stores a pointer to the "struct mf_field" corresponding to the
297 * pulled header in '*field', stores the header's hasmask bit in '*masked'
298 * (true if hasmask=1, false if hasmask=0), and returns 0. On error, returns
299 * an OpenFlow error; in this case, some bytes might have been pulled off 'b'
300 * anyhow, and the output parameters might have been modified.
302 * If NULL 'masked' is supplied, masked OXM or NXM headers are treated as
303 * errors (with OFPERR_OFPBMC_BAD_MASK).
306 nx_pull_header(struct ofpbuf *b, const struct mf_field **field, bool *masked)
311 error = nx_pull_header__(b, false, &header, field);
313 *masked = !error && nxm_hasmask(header);
314 } else if (!error && nxm_hasmask(header)) {
315 error = OFPERR_OFPBMC_BAD_MASK;
321 nx_pull_match_entry(struct ofpbuf *b, bool allow_cookie,
322 const struct mf_field **field,
323 union mf_value *value, union mf_value *mask)
328 error = nx_pull_entry__(b, allow_cookie, &header, field, value, mask);
332 if (field && *field) {
333 if (!mf_is_mask_valid(*field, mask)) {
334 VLOG_DBG_RL(&rl, "bad mask for field %s", (*field)->name);
335 return OFPERR_OFPBMC_BAD_MASK;
337 if (!mf_is_value_valid(*field, value)) {
338 VLOG_DBG_RL(&rl, "bad value for field %s", (*field)->name);
339 return OFPERR_OFPBMC_BAD_VALUE;
346 nx_pull_raw(const uint8_t *p, unsigned int match_len, bool strict,
347 struct match *match, ovs_be64 *cookie, ovs_be64 *cookie_mask)
351 ovs_assert((cookie != NULL) == (cookie_mask != NULL));
353 match_init_catchall(match);
355 *cookie = *cookie_mask = htonll(0);
358 ofpbuf_use_const(&b, p, match_len);
359 while (ofpbuf_size(&b)) {
360 const uint8_t *pos = ofpbuf_data(&b);
361 const struct mf_field *field;
362 union mf_value value;
366 error = nx_pull_match_entry(&b, cookie != NULL, &field, &value, &mask);
368 if (error == OFPERR_OFPBMC_BAD_FIELD && !strict) {
373 error = OFPERR_OFPBMC_BAD_FIELD;
374 } else if (*cookie_mask) {
375 error = OFPERR_OFPBMC_DUP_FIELD;
377 *cookie = value.be64;
378 *cookie_mask = mask.be64;
380 } else if (!mf_are_prereqs_ok(field, &match->flow)) {
381 error = OFPERR_OFPBMC_BAD_PREREQ;
382 } else if (!mf_is_all_wild(field, &match->wc)) {
383 error = OFPERR_OFPBMC_DUP_FIELD;
385 mf_set(field, &value, &mask, match);
389 VLOG_DBG_RL(&rl, "error parsing OXM at offset %"PRIdPTR" "
390 "within match (%s)", pos -
391 p, ofperr_to_string(error));
400 nx_pull_match__(struct ofpbuf *b, unsigned int match_len, bool strict,
402 ovs_be64 *cookie, ovs_be64 *cookie_mask)
407 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
409 VLOG_DBG_RL(&rl, "nx_match length %u, rounded up to a "
410 "multiple of 8, is longer than space in message (max "
411 "length %"PRIu32")", match_len, ofpbuf_size(b));
412 return OFPERR_OFPBMC_BAD_LEN;
416 return nx_pull_raw(p, match_len, strict, match, cookie, cookie_mask);
419 /* Parses the nx_match formatted match description in 'b' with length
420 * 'match_len'. Stores the results in 'match'. If 'cookie' and 'cookie_mask'
421 * are valid pointers, then stores the cookie and mask in them if 'b' contains
422 * a "NXM_NX_COOKIE*" match. Otherwise, stores 0 in both.
424 * Fails with an error upon encountering an unknown NXM header.
426 * Returns 0 if successful, otherwise an OpenFlow error code. */
428 nx_pull_match(struct ofpbuf *b, unsigned int match_len, struct match *match,
429 ovs_be64 *cookie, ovs_be64 *cookie_mask)
431 return nx_pull_match__(b, match_len, true, match, cookie, cookie_mask);
434 /* Behaves the same as nx_pull_match(), but skips over unknown NXM headers,
435 * instead of failing with an error. */
437 nx_pull_match_loose(struct ofpbuf *b, unsigned int match_len,
439 ovs_be64 *cookie, ovs_be64 *cookie_mask)
441 return nx_pull_match__(b, match_len, false, match, cookie, cookie_mask);
445 oxm_pull_match__(struct ofpbuf *b, bool strict, struct match *match)
447 struct ofp11_match_header *omh = ofpbuf_data(b);
451 if (ofpbuf_size(b) < sizeof *omh) {
452 return OFPERR_OFPBMC_BAD_LEN;
455 match_len = ntohs(omh->length);
456 if (match_len < sizeof *omh) {
457 return OFPERR_OFPBMC_BAD_LEN;
460 if (omh->type != htons(OFPMT_OXM)) {
461 return OFPERR_OFPBMC_BAD_TYPE;
464 p = ofpbuf_try_pull(b, ROUND_UP(match_len, 8));
466 VLOG_DBG_RL(&rl, "oxm length %u, rounded up to a "
467 "multiple of 8, is longer than space in message (max "
468 "length %"PRIu32")", match_len, ofpbuf_size(b));
469 return OFPERR_OFPBMC_BAD_LEN;
472 return nx_pull_raw(p + sizeof *omh, match_len - sizeof *omh,
473 strict, match, NULL, NULL);
476 /* Parses the oxm formatted match description preceded by a struct
477 * ofp11_match_header in 'b'. Stores the result in 'match'.
479 * Fails with an error when encountering unknown OXM headers.
481 * Returns 0 if successful, otherwise an OpenFlow error code. */
483 oxm_pull_match(struct ofpbuf *b, struct match *match)
485 return oxm_pull_match__(b, true, match);
488 /* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
489 * OXM headers instead of failing with an error when they are encountered. */
491 oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
493 return oxm_pull_match__(b, false, match);
496 /* nx_put_match() and helpers.
498 * 'put' functions whose names end in 'w' add a wildcarded field.
499 * 'put' functions whose names end in 'm' add a field that might be wildcarded.
500 * Other 'put' functions add exact-match fields.
504 nxm_put_header(struct ofpbuf *b, uint32_t header)
506 ovs_be32 n_header = htonl(header);
507 ofpbuf_put(b, &n_header, sizeof n_header);
511 nxm_put_8(struct ofpbuf *b, uint32_t header, uint8_t value)
513 nxm_put_header(b, header);
514 ofpbuf_put(b, &value, sizeof value);
518 nxm_put_8m(struct ofpbuf *b, uint32_t header, uint8_t value, uint8_t mask)
525 nxm_put_8(b, header, value);
529 nxm_put_header(b, nxm_make_wild_header(header));
530 ofpbuf_put(b, &value, sizeof value);
531 ofpbuf_put(b, &mask, sizeof mask);
536 nxm_put_16(struct ofpbuf *b, uint32_t header, ovs_be16 value)
538 nxm_put_header(b, header);
539 ofpbuf_put(b, &value, sizeof value);
543 nxm_put_16w(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
545 nxm_put_header(b, header);
546 ofpbuf_put(b, &value, sizeof value);
547 ofpbuf_put(b, &mask, sizeof mask);
551 nxm_put_16m(struct ofpbuf *b, uint32_t header, ovs_be16 value, ovs_be16 mask)
558 nxm_put_16(b, header, value);
562 nxm_put_16w(b, nxm_make_wild_header(header), value, mask);
568 nxm_put_32(struct ofpbuf *b, uint32_t header, ovs_be32 value)
570 nxm_put_header(b, header);
571 ofpbuf_put(b, &value, sizeof value);
575 nxm_put_32w(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
577 nxm_put_header(b, header);
578 ofpbuf_put(b, &value, sizeof value);
579 ofpbuf_put(b, &mask, sizeof mask);
583 nxm_put_32m(struct ofpbuf *b, uint32_t header, ovs_be32 value, ovs_be32 mask)
590 nxm_put_32(b, header, value);
594 nxm_put_32w(b, nxm_make_wild_header(header), value, mask);
600 nxm_put_64(struct ofpbuf *b, uint32_t header, ovs_be64 value)
602 nxm_put_header(b, header);
603 ofpbuf_put(b, &value, sizeof value);
607 nxm_put_64w(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
609 nxm_put_header(b, header);
610 ofpbuf_put(b, &value, sizeof value);
611 ofpbuf_put(b, &mask, sizeof mask);
615 nxm_put_64m(struct ofpbuf *b, uint32_t header, ovs_be64 value, ovs_be64 mask)
622 nxm_put_64(b, header, value);
626 nxm_put_64w(b, nxm_make_wild_header(header), value, mask);
632 nxm_put_eth(struct ofpbuf *b, uint32_t header,
633 const uint8_t value[ETH_ADDR_LEN])
635 nxm_put_header(b, header);
636 ofpbuf_put(b, value, ETH_ADDR_LEN);
640 nxm_put_eth_masked(struct ofpbuf *b, uint32_t header,
641 const uint8_t value[ETH_ADDR_LEN],
642 const uint8_t mask[ETH_ADDR_LEN])
644 if (!eth_addr_is_zero(mask)) {
645 if (eth_mask_is_exact(mask)) {
646 nxm_put_eth(b, header, value);
648 nxm_put_header(b, nxm_make_wild_header(header));
649 ofpbuf_put(b, value, ETH_ADDR_LEN);
650 ofpbuf_put(b, mask, ETH_ADDR_LEN);
656 nxm_put_ipv6(struct ofpbuf *b, uint32_t header,
657 const struct in6_addr *value, const struct in6_addr *mask)
659 if (ipv6_mask_is_any(mask)) {
661 } else if (ipv6_mask_is_exact(mask)) {
662 nxm_put_header(b, header);
663 ofpbuf_put(b, value, sizeof *value);
665 nxm_put_header(b, nxm_make_wild_header(header));
666 ofpbuf_put(b, value, sizeof *value);
667 ofpbuf_put(b, mask, sizeof *mask);
672 nxm_put_frag(struct ofpbuf *b, const struct match *match, enum ofp_version oxm)
674 uint32_t header = mf_oxm_header(MFF_IP_FRAG, oxm);
675 uint8_t nw_frag = match->flow.nw_frag;
676 uint8_t nw_frag_mask = match->wc.masks.nw_frag;
678 switch (nw_frag_mask) {
682 case FLOW_NW_FRAG_MASK:
683 nxm_put_8(b, header, nw_frag);
687 nxm_put_8m(b, header, nw_frag, nw_frag_mask & FLOW_NW_FRAG_MASK);
692 /* Appends to 'b' a set of OXM or NXM matches for the IPv4 or IPv6 fields in
695 nxm_put_ip(struct ofpbuf *b, const struct match *match, enum ofp_version oxm)
697 const struct flow *flow = &match->flow;
699 if (flow->dl_type == htons(ETH_TYPE_IP)) {
700 nxm_put_32m(b, mf_oxm_header(MFF_IPV4_SRC, oxm),
701 flow->nw_src, match->wc.masks.nw_src);
702 nxm_put_32m(b, mf_oxm_header(MFF_IPV4_DST, oxm),
703 flow->nw_dst, match->wc.masks.nw_dst);
705 nxm_put_ipv6(b, mf_oxm_header(MFF_IPV6_SRC, oxm),
706 &flow->ipv6_src, &match->wc.masks.ipv6_src);
707 nxm_put_ipv6(b, mf_oxm_header(MFF_IPV6_DST, oxm),
708 &flow->ipv6_dst, &match->wc.masks.ipv6_dst);
711 nxm_put_frag(b, match, oxm);
713 if (match->wc.masks.nw_tos & IP_DSCP_MASK) {
715 nxm_put_8(b, mf_oxm_header(MFF_IP_DSCP_SHIFTED, oxm),
718 nxm_put_8(b, mf_oxm_header(MFF_IP_DSCP, oxm),
719 flow->nw_tos & IP_DSCP_MASK);
723 if (match->wc.masks.nw_tos & IP_ECN_MASK) {
724 nxm_put_8(b, mf_oxm_header(MFF_IP_ECN, oxm),
725 flow->nw_tos & IP_ECN_MASK);
728 if (!oxm && match->wc.masks.nw_ttl) {
729 nxm_put_8(b, mf_oxm_header(MFF_IP_TTL, oxm), flow->nw_ttl);
732 nxm_put_32m(b, mf_oxm_header(MFF_IPV6_LABEL, oxm),
733 flow->ipv6_label, match->wc.masks.ipv6_label);
735 if (match->wc.masks.nw_proto) {
736 nxm_put_8(b, mf_oxm_header(MFF_IP_PROTO, oxm), flow->nw_proto);
738 if (flow->nw_proto == IPPROTO_TCP) {
739 nxm_put_16m(b, mf_oxm_header(MFF_TCP_SRC, oxm),
740 flow->tp_src, match->wc.masks.tp_src);
741 nxm_put_16m(b, mf_oxm_header(MFF_TCP_DST, oxm),
742 flow->tp_dst, match->wc.masks.tp_dst);
743 nxm_put_16m(b, mf_oxm_header(MFF_TCP_FLAGS, oxm),
744 flow->tcp_flags, match->wc.masks.tcp_flags);
745 } else if (flow->nw_proto == IPPROTO_UDP) {
746 nxm_put_16m(b, mf_oxm_header(MFF_UDP_SRC, oxm),
747 flow->tp_src, match->wc.masks.tp_src);
748 nxm_put_16m(b, mf_oxm_header(MFF_UDP_DST, oxm),
749 flow->tp_dst, match->wc.masks.tp_dst);
750 } else if (flow->nw_proto == IPPROTO_SCTP) {
751 nxm_put_16m(b, mf_oxm_header(MFF_SCTP_SRC, oxm), flow->tp_src,
752 match->wc.masks.tp_src);
753 nxm_put_16m(b, mf_oxm_header(MFF_SCTP_DST, oxm), flow->tp_dst,
754 match->wc.masks.tp_dst);
755 } else if (is_icmpv4(flow)) {
756 if (match->wc.masks.tp_src) {
757 nxm_put_8(b, mf_oxm_header(MFF_ICMPV4_TYPE, oxm),
758 ntohs(flow->tp_src));
760 if (match->wc.masks.tp_dst) {
761 nxm_put_8(b, mf_oxm_header(MFF_ICMPV4_CODE, oxm),
762 ntohs(flow->tp_dst));
764 } else if (is_icmpv6(flow)) {
765 if (match->wc.masks.tp_src) {
766 nxm_put_8(b, mf_oxm_header(MFF_ICMPV6_TYPE, oxm),
767 ntohs(flow->tp_src));
769 if (match->wc.masks.tp_dst) {
770 nxm_put_8(b, mf_oxm_header(MFF_ICMPV6_CODE, oxm),
771 ntohs(flow->tp_dst));
773 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT) ||
774 flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
775 nxm_put_ipv6(b, mf_oxm_header(MFF_ND_TARGET, oxm),
776 &flow->nd_target, &match->wc.masks.nd_target);
777 if (flow->tp_src == htons(ND_NEIGHBOR_SOLICIT)) {
778 nxm_put_eth_masked(b, mf_oxm_header(MFF_ND_SLL, oxm),
779 flow->arp_sha, match->wc.masks.arp_sha);
781 if (flow->tp_src == htons(ND_NEIGHBOR_ADVERT)) {
782 nxm_put_eth_masked(b, mf_oxm_header(MFF_ND_TLL, oxm),
783 flow->arp_tha, match->wc.masks.arp_tha);
790 /* Appends to 'b' the nx_match format that expresses 'match'. For Flow Mod and
791 * Flow Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
792 * Otherwise, 'cookie_mask' should be zero.
794 * Specify 'oxm' as 0 to express the match in NXM format; otherwise, specify
795 * 'oxm' as the OpenFlow version number for the OXM format to use.
797 * This function can cause 'b''s data to be reallocated.
799 * Returns the number of bytes appended to 'b', excluding padding.
801 * If 'match' is a catch-all rule that matches every packet, then this function
802 * appends nothing to 'b' and returns 0. */
804 nx_put_raw(struct ofpbuf *b, enum ofp_version oxm, const struct match *match,
805 ovs_be64 cookie, ovs_be64 cookie_mask)
807 const struct flow *flow = &match->flow;
808 const size_t start_len = ofpbuf_size(b);
812 BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
815 if (match->wc.masks.dp_hash) {
816 nxm_put_32m(b, mf_oxm_header(MFF_DP_HASH, oxm),
817 htonl(flow->dp_hash), htonl(match->wc.masks.dp_hash));
820 if (match->wc.masks.recirc_id) {
821 nxm_put_32(b, mf_oxm_header(MFF_RECIRC_ID, oxm),
822 htonl(flow->recirc_id));
825 if (match->wc.masks.in_port.ofp_port) {
826 ofp_port_t in_port = flow->in_port.ofp_port;
828 nxm_put_32(b, mf_oxm_header(MFF_IN_PORT_OXM, oxm),
829 ofputil_port_to_ofp11(in_port));
831 nxm_put_16(b, mf_oxm_header(MFF_IN_PORT, oxm),
832 htons(ofp_to_u16(in_port)));
837 nxm_put_eth_masked(b, mf_oxm_header(MFF_ETH_SRC, oxm),
838 flow->dl_src, match->wc.masks.dl_src);
839 nxm_put_eth_masked(b, mf_oxm_header(MFF_ETH_DST, oxm),
840 flow->dl_dst, match->wc.masks.dl_dst);
841 nxm_put_16m(b, mf_oxm_header(MFF_ETH_TYPE, oxm),
842 ofputil_dl_type_to_openflow(flow->dl_type),
843 match->wc.masks.dl_type);
847 ovs_be16 VID_CFI_MASK = htons(VLAN_VID_MASK | VLAN_CFI);
848 ovs_be16 vid = flow->vlan_tci & VID_CFI_MASK;
849 ovs_be16 mask = match->wc.masks.vlan_tci & VID_CFI_MASK;
851 if (mask == htons(VLAN_VID_MASK | VLAN_CFI)) {
852 nxm_put_16(b, mf_oxm_header(MFF_VLAN_VID, oxm), vid);
854 nxm_put_16m(b, mf_oxm_header(MFF_VLAN_VID, oxm), vid, mask);
857 if (vid && vlan_tci_to_pcp(match->wc.masks.vlan_tci)) {
858 nxm_put_8(b, mf_oxm_header(MFF_VLAN_PCP, oxm),
859 vlan_tci_to_pcp(flow->vlan_tci));
863 nxm_put_16m(b, mf_oxm_header(MFF_VLAN_TCI, oxm), flow->vlan_tci,
864 match->wc.masks.vlan_tci);
868 if (eth_type_mpls(flow->dl_type)) {
869 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_TC_MASK)) {
870 nxm_put_8(b, mf_oxm_header(MFF_MPLS_TC, oxm),
871 mpls_lse_to_tc(flow->mpls_lse[0]));
874 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_BOS_MASK)) {
875 nxm_put_8(b, mf_oxm_header(MFF_MPLS_BOS, oxm),
876 mpls_lse_to_bos(flow->mpls_lse[0]));
879 if (match->wc.masks.mpls_lse[0] & htonl(MPLS_LABEL_MASK)) {
880 nxm_put_32(b, mf_oxm_header(MFF_MPLS_LABEL, oxm),
881 htonl(mpls_lse_to_label(flow->mpls_lse[0])));
886 if (is_ip_any(flow)) {
887 nxm_put_ip(b, match, oxm);
888 } else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
889 flow->dl_type == htons(ETH_TYPE_RARP)) {
891 if (match->wc.masks.nw_proto) {
892 nxm_put_16(b, mf_oxm_header(MFF_ARP_OP, oxm),
893 htons(flow->nw_proto));
895 nxm_put_32m(b, mf_oxm_header(MFF_ARP_SPA, oxm),
896 flow->nw_src, match->wc.masks.nw_src);
897 nxm_put_32m(b, mf_oxm_header(MFF_ARP_TPA, oxm),
898 flow->nw_dst, match->wc.masks.nw_dst);
899 nxm_put_eth_masked(b, mf_oxm_header(MFF_ARP_SHA, oxm),
900 flow->arp_sha, match->wc.masks.arp_sha);
901 nxm_put_eth_masked(b, mf_oxm_header(MFF_ARP_THA, oxm),
902 flow->arp_tha, match->wc.masks.arp_tha);
906 nxm_put_64m(b, mf_oxm_header(MFF_TUN_ID, oxm),
907 flow->tunnel.tun_id, match->wc.masks.tunnel.tun_id);
909 /* Other tunnel metadata. */
910 nxm_put_32m(b, mf_oxm_header(MFF_TUN_SRC, oxm),
911 flow->tunnel.ip_src, match->wc.masks.tunnel.ip_src);
912 nxm_put_32m(b, mf_oxm_header(MFF_TUN_DST, oxm),
913 flow->tunnel.ip_dst, match->wc.masks.tunnel.ip_dst);
916 if (oxm < OFP15_VERSION) {
917 for (i = 0; i < FLOW_N_REGS; i++) {
918 nxm_put_32m(b, mf_oxm_header(MFF_REG0 + i, oxm),
919 htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
922 for (i = 0; i < FLOW_N_XREGS; i++) {
923 nxm_put_64m(b, mf_oxm_header(MFF_XREG0 + i, oxm),
924 htonll(flow_get_xreg(flow, i)),
925 htonll(flow_get_xreg(&match->wc.masks, i)));
930 nxm_put_32m(b, mf_oxm_header(MFF_PKT_MARK, oxm), htonl(flow->pkt_mark),
931 htonl(match->wc.masks.pkt_mark));
933 /* OpenFlow 1.1+ Metadata. */
934 nxm_put_64m(b, mf_oxm_header(MFF_METADATA, oxm),
935 flow->metadata, match->wc.masks.metadata);
938 nxm_put_64m(b, NXM_NX_COOKIE, cookie & cookie_mask, cookie_mask);
940 match_len = ofpbuf_size(b) - start_len;
944 /* Appends to 'b' the nx_match format that expresses 'match', plus enough zero
945 * bytes to pad the nx_match out to a multiple of 8. For Flow Mod and Flow
946 * Stats Requests messages, a 'cookie' and 'cookie_mask' may be supplied.
947 * Otherwise, 'cookie_mask' should be zero.
949 * This function can cause 'b''s data to be reallocated.
951 * Returns the number of bytes appended to 'b', excluding padding. The return
952 * value can be zero if it appended nothing at all to 'b' (which happens if
953 * 'cr' is a catch-all rule that matches every packet). */
955 nx_put_match(struct ofpbuf *b, const struct match *match,
956 ovs_be64 cookie, ovs_be64 cookie_mask)
958 int match_len = nx_put_raw(b, 0, match, cookie, cookie_mask);
960 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
964 /* Appends to 'b' an struct ofp11_match_header followed by the OXM format that
965 * expresses 'cr', plus enough zero bytes to pad the data appended out to a
968 * OXM differs slightly among versions of OpenFlow. Specify the OpenFlow
969 * version in use as 'version'.
971 * This function can cause 'b''s data to be reallocated.
973 * Returns the number of bytes appended to 'b', excluding the padding. Never
976 oxm_put_match(struct ofpbuf *b, const struct match *match,
977 enum ofp_version version)
980 struct ofp11_match_header *omh;
981 size_t start_len = ofpbuf_size(b);
982 ovs_be64 cookie = htonll(0), cookie_mask = htonll(0);
984 ofpbuf_put_uninit(b, sizeof *omh);
985 match_len = (nx_put_raw(b, version, match, cookie, cookie_mask)
987 ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
989 omh = ofpbuf_at(b, start_len, sizeof *omh);
990 omh->type = htons(OFPMT_OXM);
991 omh->length = htons(match_len);
997 nx_put_header(struct ofpbuf *b, enum mf_field_id field,
998 enum ofp_version version, bool masked)
1000 uint32_t header = mf_oxm_header(field, version);
1001 nxm_put_header(b, masked ? nxm_make_wild_header(header) : header);
1005 nx_put_entry(struct ofpbuf *b,
1006 enum mf_field_id field, enum ofp_version version,
1007 const union mf_value *value, const union mf_value *mask)
1009 int n_bytes = mf_from_id(field)->n_bytes;
1010 bool masked = mask && !is_all_ones(mask, n_bytes);
1012 nx_put_header(b, field, version, masked);
1013 ofpbuf_put(b, value, n_bytes);
1015 ofpbuf_put(b, mask, n_bytes);
1019 /* nx_match_to_string() and helpers. */
1021 static void format_nxm_field_name(struct ds *, uint32_t header);
1024 nx_match_to_string(const uint8_t *p, unsigned int match_len)
1030 return xstrdup("<any>");
1033 ofpbuf_use_const(&b, p, match_len);
1035 while (ofpbuf_size(&b)) {
1036 union mf_value value;
1037 union mf_value mask;
1042 error = nx_pull_entry__(&b, true, &header, NULL, &value, &mask);
1046 value_len = MIN(sizeof value, nxm_field_bytes(header));
1049 ds_put_cstr(&s, ", ");
1052 format_nxm_field_name(&s, header);
1053 ds_put_char(&s, '(');
1055 for (int i = 0; i < value_len; i++) {
1056 ds_put_format(&s, "%02x", ((const uint8_t *) &value)[i]);
1058 if (nxm_hasmask(header)) {
1059 ds_put_char(&s, '/');
1060 for (int i = 0; i < value_len; i++) {
1061 ds_put_format(&s, "%02x", ((const uint8_t *) &mask)[i]);
1064 ds_put_char(&s, ')');
1067 if (ofpbuf_size(&b)) {
1069 ds_put_cstr(&s, ", ");
1072 ds_put_format(&s, "<%u invalid bytes>", ofpbuf_size(&b));
1075 return ds_steal_cstr(&s);
1079 oxm_match_to_string(const struct ofpbuf *p, unsigned int match_len)
1081 const struct ofp11_match_header *omh = ofpbuf_data(p);
1082 uint16_t match_len_;
1087 if (match_len < sizeof *omh) {
1088 ds_put_format(&s, "<match too short: %u>", match_len);
1092 if (omh->type != htons(OFPMT_OXM)) {
1093 ds_put_format(&s, "<bad match type field: %u>", ntohs(omh->type));
1097 match_len_ = ntohs(omh->length);
1098 if (match_len_ < sizeof *omh) {
1099 ds_put_format(&s, "<match length field too short: %u>", match_len_);
1103 if (match_len_ != match_len) {
1104 ds_put_format(&s, "<match length field incorrect: %u != %u>",
1105 match_len_, match_len);
1109 return nx_match_to_string(ofpbuf_at(p, sizeof *omh, 0),
1110 match_len - sizeof *omh);
1113 return ds_steal_cstr(&s);
1117 nx_format_field_name(enum mf_field_id id, enum ofp_version version,
1120 format_nxm_field_name(s, mf_oxm_header(id, version));
1124 format_nxm_field_name(struct ds *s, uint32_t header)
1126 const struct nxm_field *f = nxm_field_by_header(header);
1128 ds_put_cstr(s, f->name);
1129 if (nxm_hasmask(header)) {
1130 ds_put_cstr(s, "_W");
1132 } else if (header == NXM_NX_COOKIE) {
1133 ds_put_cstr(s, "NXM_NX_COOKIE");
1134 } else if (header == NXM_NX_COOKIE_W) {
1135 ds_put_cstr(s, "NXM_NX_COOKIE_W");
1137 ds_put_format(s, "%d:%d", nxm_vendor(header), nxm_field(header));
/* Returns true if the 'a_len' bytes starting at 'a' are exactly equal to the
 * null-terminated string 'b' (same length, same contents). */
static bool
streq_len(const char *a, size_t a_len, const char *b)
{
    return strlen(b) == a_len && !memcmp(a, b, a_len);
}
1148 parse_nxm_field_name(const char *name, int name_len)
1150 const struct nxm_field *f;
1153 f = mf_parse_subfield_name(name, name_len, &wild);
1157 } else if (mf_from_id(f->id)->maskable != MFM_NONE) {
1158 return nxm_make_wild_header(f->header);
1162 if (streq_len(name, name_len, "NXM_NX_COOKIE")) {
1163 return NXM_NX_COOKIE;
1164 } else if (streq_len(name, name_len, "NXM_NX_COOKIE_W")) {
1165 return NXM_NX_COOKIE_W;
1168 /* Check whether it's a 32-bit field header value as hex.
1169 * (This isn't ordinarily useful except for testing error behavior.) */
1170 if (name_len == 8) {
1171 uint32_t header = hexits_value(name, name_len, NULL);
1172 if (header != UINT_MAX) {
1180 /* nx_match_from_string(). */
/* Parses string 's' as a series of "FIELD(VALUE)" or "FIELD(VALUE/MASK)"
 * NXM matches, appending their binary encoding to 'b'.  The special string
 * "<any>" produces an empty (match-everything) nx_match.
 *
 * Returns the number of bytes appended to 'b' (the unpadded match length).
 * On any syntax error this calls ovs_fatal(), i.e. it is intended for
 * command-line tools, not for untrusted input.
 *
 * NOTE(review): several lines are elided in this view (local declarations,
 * some closing braces, and a couple of condition lines); comments describe
 * only what is visible. */
1183 nx_match_from_string_raw(const char *s, struct ofpbuf *b)
1185     const char *full_s = s;
1186     const size_t start_len = ofpbuf_size(b);
1188     if (!strcmp(s, "<any>")) {
1189         /* Ensure that 'ofpbuf_data(b)' isn't actually null. */
1190         ofpbuf_prealloc_tailroom(b, 1);
         /* One iteration per "FIELD(...)" element; commas/spaces separate. */
1194     for (s += strspn(s, ", "); *s; s += strspn(s, ", ")) {
1201         name_len = strcspn(s, "(");
1202         if (s[name_len] != '(') {
1203             ovs_fatal(0, "%s: missing ( at end of nx_match", full_s);
1206         header = parse_nxm_field_name(name, name_len);
1208             ovs_fatal(0, "%s: unknown field `%.*s'", full_s, name_len, s);
         /* Emit the 4-byte header, then the value (and mask) as raw hex. */
1213         nxm_put_header(b, header);
1214         s = ofpbuf_put_hex(b, s, &n);
1215         if (n != nxm_field_bytes(header)) {
1216             ovs_fatal(0, "%.2s: hex digits expected", s);
1218         if (nxm_hasmask(header)) {
1219             s += strspn(s, " ");
1221                 ovs_fatal(0, "%s: missing / in masked field %.*s",
1222                           full_s, name_len, name);
1224             s = ofpbuf_put_hex(b, s + 1, &n);
1225             if (n != nxm_field_bytes(header)) {
1226                 ovs_fatal(0, "%.2s: hex digits expected", s);
1230         s += strspn(s, " ");
1232             ovs_fatal(0, "%s: missing ) following field %.*s",
1233                       full_s, name_len, name);
1238     return ofpbuf_size(b) - start_len;
/* Like nx_match_from_string_raw(), but additionally pads the encoded match
 * in 'b' with zeros to a multiple of 8 bytes, as the NXM wire format
 * requires.  Returns the unpadded match length (the elided return statement
 * presumably returns 'match_len' -- TODO confirm against full source). */
1242 nx_match_from_string(const char *s, struct ofpbuf *b)
1244     int match_len = nx_match_from_string_raw(s, b);
1245     ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
/* Like nx_match_from_string(), but encodes an OpenFlow 1.2+ OXM match:
 * reserves room for a struct ofp11_match_header in 'b' first, parses the
 * match body after it, pads to a multiple of 8 bytes, and then fills in the
 * header (type OFPMT_OXM, length covering header + match, excluding pad).
 *
 * The header pointer is re-fetched with ofpbuf_at() *after* parsing because
 * appending to 'b' may reallocate its memory. */
1250 oxm_match_from_string(const char *s, struct ofpbuf *b)
1253     struct ofp11_match_header *omh;
1254     size_t start_len = ofpbuf_size(b);
1256     ofpbuf_put_uninit(b, sizeof *omh);
1257     match_len = nx_match_from_string_raw(s, b) + sizeof *omh;
1258     ofpbuf_put_zeros(b, PAD_SIZE(match_len, 8));
1260     omh = ofpbuf_at(b, start_len, sizeof *omh);
1261     omh->type = htons(OFPMT_OXM);
1262     omh->length = htons(match_len);
1267 /* Parses 's' as a "move" action, in the form described in ovs-ofctl(8), into
 * '*move', i.e. "SRC[BITS]->DST[BITS]".
1270  * Returns NULL if successful, otherwise a malloc()'d string describing the
1271  * error.  The caller is responsible for freeing the returned string. */
1272 char * WARN_UNUSED_RESULT
1273 nxm_parse_reg_move(struct ofpact_reg_move *move, const char *s)
1275     const char *full_s = s;
     /* mf_parse_subfield__() advances 's' past the source subfield. */
1278     error = mf_parse_subfield__(&move->src, &s);
1282     if (strncmp(s, "->", 2)) {
1283         return xasprintf("%s: missing `->' following source", full_s);
     /* The destination must consume the rest of the string. */
1286     error = mf_parse_subfield(&move->dst, s);
     /* A move copies bits verbatim, so widths must match exactly. */
1291     if (move->src.n_bits != move->dst.n_bits) {
1292         return xasprintf("%s: source field is %d bits wide but destination is "
1293                          "%d bits wide", full_s,
1294                          move->src.n_bits, move->dst.n_bits);
1299 /* Parses 's' as a "load" action, in the form described in ovs-ofctl(8), into
 * '*load', i.e. "VALUE->DST[BITS]".
1302  * Returns NULL if successful, otherwise a malloc()'d string describing the
1303  * error.  The caller is responsible for freeing the returned string. */
1304 char * WARN_UNUSED_RESULT
1305 nxm_parse_reg_load(struct ofpact_reg_load *load, const char *s)
1307     const char *full_s = s;
     /* strtoull() advances 's' past the literal value (base auto-detected). */
1308     uint64_t value = strtoull(s, (char **) &s, 0);
1311     if (strncmp(s, "->", 2)) {
1312         return xasprintf("%s: missing `->' following value", full_s);
1315     error = mf_parse_subfield(&load->dst, s);
     /* Reject values that cannot fit in the destination bit range.  The
      * 'n_bits < 64' guard avoids an undefined 64-bit shift. */
1320     if (load->dst.n_bits < 64 && (value >> load->dst.n_bits) != 0) {
1321         return xasprintf("%s: value %"PRIu64" does not fit into %d bits",
1322                          full_s, value, load->dst.n_bits);
     /* Store the value big-endian in the low 64 bits of the subvalue. */
1325     load->subvalue.be64[0] = htonll(0);
1326     load->subvalue.be64[1] = htonll(value);
1330 /* nxm_format_reg_move(), nxm_format_reg_load(). */
/* Appends a string form of 'move' ("move:SRC[BITS]->DST[BITS]") to 's', in
 * the format parsed by nxm_parse_reg_move().
 * (NOTE(review): ds_put_format() with a constant string could simply be
 * ds_put_cstr(), as the sibling formatters use; left as-is.) */
1333 nxm_format_reg_move(const struct ofpact_reg_move *move, struct ds *s)
1335     ds_put_format(s, "move:");
1336     mf_format_subfield(&move->src, s);
1337     ds_put_cstr(s, "->");
1338     mf_format_subfield(&move->dst, s);
/* Appends a string form of 'load' ("load:VALUE->DST[BITS]") to 's', in the
 * format parsed by nxm_parse_reg_load(). */
1342 nxm_format_reg_load(const struct ofpact_reg_load *load, struct ds *s)
1344     ds_put_cstr(s, "load:");
1345     mf_format_subvalue(&load->subvalue, s);
1346     ds_put_cstr(s, "->");
1347     mf_format_subfield(&load->dst, s);
/* Checks that 'move' is valid for 'flow': the source subfield must be
 * readable given 'flow' and the destination subfield writable.  Returns 0 if
 * OK, otherwise an OFPERR_* value.  The destination is checked with a NULL
 * flow -- presumably so that its prerequisites are not enforced against
 * 'flow' here; TODO confirm against mf_check_dst() semantics. */
1351 nxm_reg_move_check(const struct ofpact_reg_move *move, const struct flow *flow)
1355     error = mf_check_src(&move->src, flow);
1360     return mf_check_dst(&move->dst, NULL);
/* Checks that 'load''s destination subfield is valid and writable given
 * 'flow'.  Returns 0 if OK, otherwise an OFPERR_* value. */
1364 nxm_reg_load_check(const struct ofpact_reg_load *load, const struct flow *flow)
1366     return mf_check_dst(&load->dst, flow);
1370 /* nxm_execute_reg_move(), nxm_execute_reg_load(). */
/* Executes 'move' on 'flow': copies move->src.n_bits bits from the source
 * subfield into the destination subfield.  Both fields (and their
 * prerequisites) are un-wildcarded in 'wc' first, since the result depends
 * on their current values.  (The bit-count argument to bitwise_copy() falls
 * on an elided line.) */
1373 nxm_execute_reg_move(const struct ofpact_reg_move *move,
1374                      struct flow *flow, struct flow_wildcards *wc)
1376     union mf_value src_value;
1377     union mf_value dst_value;
1379     mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
1380     mf_mask_field_and_prereqs(move->src.field, &wc->masks);
     /* Read both whole fields, splice the source bits into the destination
      * value, then write the whole destination field back. */
1382     mf_get_value(move->dst.field, flow, &dst_value);
1383     mf_get_value(move->src.field, flow, &src_value);
1384     bitwise_copy(&src_value, move->src.field->n_bytes, move->src.ofs,
1385                  &dst_value, move->dst.field->n_bytes, move->dst.ofs,
1387     mf_set_flow_value(move->dst.field, &dst_value, flow);
/* Executes 'load' on 'flow': writes load->subvalue into the destination
 * subfield, un-wildcarding the whole destination field and its
 * prerequisites in 'wc' (rationale below). */
1391 nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
1392                      struct flow_wildcards *wc)
1394     /* Since at the datapath interface we do not have set actions for
1395      * individual fields, but larger sets of fields for a given protocol
1396      * layer, the set action will in practice only ever apply to exactly
1397      * matched flows for the given protocol layer.  For example, if the
1398      * reg_load changes the IP TTL, the corresponding datapath action will
1399      * rewrite also the IP addresses and TOS byte.  Since these other field
1400      * values may not be explicitly set, they depend on the incoming flow field
1401      * values, and are hence all of them are set in the wildcards masks, when
1402      * the action is committed to the datapath.  For the rare case, where the
1403      * reg_load action does not actually change the value, and no other flow
1404      * field values are set (or loaded), the datapath action is skipped, and
1405      * no mask bits are set.  Such a datapath flow should, however, be
1406      * dependent on the specific field value, so the corresponding wildcard
1407      * mask bits must be set, lest the datapath flow be applied to packets
1408      * containing some other value in the field and the field value remain
1409      * unchanged regardless of the incoming value.
1411      * We set the masks here for the whole fields, and their prerequisities.
1412      * Even if only the lower byte of a TCP destination port is set,
1413      * we set the mask for the whole field, and also the ip_proto in the IP
1414      * header, so that the kernel flow would not be applied on, e.g., a UDP
1415      * packet, or any other IP protocol in addition to TCP packets.
1417     mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
1418     mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
/* Loads 'src_data' into subfield 'dst' of 'flow', fully un-wildcarding the
 * destination subfield bits in 'wc'.
 *
 * 'src_data' is converted to network byte order and bit-copied into the low
 * 64 bits of a subvalue.  NOTE(review): only the low 64 bits of
 * 'src_subvalue' are initialized by bitwise_copy(); this assumes
 * dst->n_bits <= 64 so mf_write_subfield_flow() never reads the
 * uninitialized upper bytes -- TODO confirm. */
1422 nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
1423              struct flow *flow, struct flow_wildcards *wc)
1425     union mf_subvalue src_subvalue;
1426     union mf_subvalue mask_value;
1427     ovs_be64 src_data_be = htonll(src_data);
     /* Mark every bit of the destination subfield as exact-matched. */
1429     memset(&mask_value, 0xff, sizeof mask_value);
1430     mf_write_subfield_flow(dst, &mask_value, &wc->masks);
1432     bitwise_copy(&src_data_be, sizeof src_data_be, 0,
1433                  &src_subvalue, sizeof src_subvalue, 0,
1434                  sizeof src_data_be * 8);
1435     mf_write_subfield_flow(dst, &src_subvalue, flow);
1438 /* nxm_parse_stack_action, works for both push() and pop(). */
1440 /* Parses 's' as a "push" or "pop" action, in the form described in
1441  * ovs-ofctl(8), into '*stack_action'.
1443  * Returns NULL if successful, otherwise a malloc()'d string describing the
1444  * error.  The caller is responsible for freeing the returned string. */
1445 char * WARN_UNUSED_RESULT
1446 nxm_parse_stack_action(struct ofpact_stack *stack_action, const char *s)
     /* mf_parse_subfield__() advances 's'; anything left over is an error.
      * (The error message deliberately prints the trailing text, not the
      * full original string.) */
1450     error = mf_parse_subfield__(&stack_action->subfield, &s);
1456         return xasprintf("%s: trailing garbage following push or pop", s);
/* Appends a string form of 'push' ("push:FIELD[BITS]") to 's'. */
1463 nxm_format_stack_push(const struct ofpact_stack *push, struct ds *s)
1465     ds_put_cstr(s, "push:");
1466     mf_format_subfield(&push->subfield, s);
/* Appends a string form of 'pop' ("pop:FIELD[BITS]") to 's'. */
1470 nxm_format_stack_pop(const struct ofpact_stack *pop, struct ds *s)
1472     ds_put_cstr(s, "pop:");
1473     mf_format_subfield(&pop->subfield, s);
/* Checks that 'push''s subfield is readable given 'flow' (a push reads the
 * field).  Returns 0 if OK, otherwise an OFPERR_* value. */
1477 nxm_stack_push_check(const struct ofpact_stack *push,
1478                      const struct flow *flow)
1480     return mf_check_src(&push->subfield, flow);
/* Checks that 'pop''s subfield is writable given 'flow' (a pop writes the
 * field).  Returns 0 if OK, otherwise an OFPERR_* value. */
1484 nxm_stack_pop_check(const struct ofpact_stack *pop,
1485                     const struct flow *flow)
1487     return mf_check_dst(&pop->subfield, flow);
1490 /* nxm_execute_stack_push(), nxm_execute_stack_pop(). */
/* Pushes a copy of subvalue 'v' onto the tail of 'stack' (an ofpbuf used as
 * a stack of fixed-size union mf_subvalue elements). */
1492 nx_stack_push(struct ofpbuf *stack, union mf_subvalue *v)
1494     ofpbuf_put(stack, v, sizeof *v);
/* Pops the top subvalue off 'stack' and returns a pointer to it, or NULL if
 * the stack is empty.
 *
 * The size is shrunk *before* taking ofpbuf_tail(), so the returned pointer
 * addresses the just-popped element.  It points into 'stack''s own storage
 * and is only valid until the next push. */
1497 static union mf_subvalue *
1498 nx_stack_pop(struct ofpbuf *stack)
1500     union mf_subvalue *v = NULL;
1502     if (ofpbuf_size(stack)) {
1504         ofpbuf_set_size(stack, ofpbuf_size(stack) - sizeof *v);
1505         v = (union mf_subvalue *) ofpbuf_tail(stack);
/* Executes 'push': reads push->subfield from 'flow' and pushes its value
 * onto 'stack'.  The source subfield bits are un-wildcarded in 'wc' because
 * the pushed value depends on them. */
1512 nxm_execute_stack_push(const struct ofpact_stack *push,
1513                        const struct flow *flow, struct flow_wildcards *wc,
1514                        struct ofpbuf *stack)
1516     union mf_subvalue mask_value;
1517     union mf_subvalue dst_value;
1519     memset(&mask_value, 0xff, sizeof mask_value);
1520     mf_write_subfield_flow(&push->subfield, &mask_value, &wc->masks);
1522     mf_read_subfield(&push->subfield, flow, &dst_value);
1523     nx_stack_push(stack, &dst_value);
/* Executes 'pop': pops the top value off 'stack' and writes it into
 * pop->subfield of 'flow', un-wildcarding the destination bits in 'wc'.
 * If the stack is empty, the flow is left unchanged and a rate-limited
 * warning is logged instead.  (NOTE(review): the 'rl' rate limiter and part
 * of the warning path fall on elided lines in this view.) */
1527 nxm_execute_stack_pop(const struct ofpact_stack *pop,
1528                       struct flow *flow, struct flow_wildcards *wc,
1529                       struct ofpbuf *stack)
1531     union mf_subvalue *src_value;
1533     src_value = nx_stack_pop(stack);
1535     /* Only pop if stack is not empty. Otherwise, give warning. */
1537         union mf_subvalue mask_value;
1539         memset(&mask_value, 0xff, sizeof mask_value);
1540         mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
1541         mf_write_subfield_flow(&pop->subfield, src_value, flow);
1543         if (!VLOG_DROP_WARN(&rl)) {
1544             char *flow_str = flow_to_string(flow);
1545             VLOG_WARN_RL(&rl, "Failed to pop from an empty stack. On flow \n"
1552 /* Formats 'sf' into 's' in a format normally acceptable to
1553  * mf_parse_subfield().  (It won't be acceptable if sf->field is NULL or if
1554  * sf->field has no NXM name.) */
1556 mf_format_subfield(const struct mf_subfield *sf, struct ds *s)
1559         ds_put_cstr(s, "<unknown>");
     /* Prefer the NXM name (exact-match form); fall back to the meta-flow
      * field name if the field has no NXM encoding. */
1561         const struct nxm_field *f = nxm_field_from_mf_field(sf->field->id, 0);
1562         ds_put_cstr(s, f ? f->name : sf->field->name);
     /* "[]" for the whole field, "[N]" for one bit, "[A..B]" otherwise. */
1565     if (sf->field && sf->ofs == 0 && sf->n_bits == sf->field->n_bits) {
1566         ds_put_cstr(s, "[]");
1567     } else if (sf->n_bits == 1) {
1568         ds_put_format(s, "[%d]", sf->ofs);
1570         ds_put_format(s, "[%d..%d]", sf->ofs, sf->ofs + sf->n_bits - 1);
/* Looks up the NXM field named by the 'name_len' bytes at 'name'.  Sets
 * '*wild' to true if the name carries a "_W" (wildcard/masked) suffix; the
 * elided lines presumably strip that suffix before the lookup -- TODO
 * confirm.  Returns NULL if no such field exists. */
1574 static const struct nxm_field *
1575 mf_parse_subfield_name(const char *name, int name_len, bool *wild)
1577     *wild = name_len > 2 && !memcmp(&name[name_len - 2], "_W", 2);
1582     return nxm_field_by_name(name, name_len);
1585 /* Parses a subfield from the beginning of '*sp' into 'sf'.  If successful,
1586  * returns NULL and advances '*sp' to the first byte following the parsed
1587  * string.  On failure, returns a malloc()'d error message, does not modify
1588  * '*sp', and does not properly initialize 'sf'.
1590  * The syntax parsed from '*sp' takes the form "header[start..end]" where
1591  * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
1592  * bit indexes.  "..end" may be omitted to indicate a single bit.  "start..end"
1593  * may both be omitted (the [] are still required) to indicate an entire
 * field. */
1595 char * WARN_UNUSED_RESULT
1596 mf_parse_subfield__(struct mf_subfield *sf, const char **sp)
1598     const struct mf_field *field;
1599     const struct nxm_field *f;
     /* The field name runs up to the opening '['. */
1608     name_len = strcspn(s, "[");
1609     if (s[name_len] != '[') {
1610         return xasprintf("%s: missing [ looking for field name", *sp);
1613     f = mf_parse_subfield_name(name, name_len, &wild);
1615         return xasprintf("%s: unknown field `%.*s'", *sp, name_len, s);
1617     field = mf_from_id(f->id);
     /* Accept "[start..end]", "[bit]" (start == end, on an elided line), or
      * "[]" meaning the entire field. */
1620     if (ovs_scan(s, "[%d..%d]", &start, &end)) {
1621         /* Nothing to do. */
1622     } else if (ovs_scan(s, "[%d]", &start)) {
1624     } else if (!strncmp(s, "[]", 2)) {
1626         end = field->n_bits - 1;
1628         return xasprintf("%s: syntax error expecting [] or [<bit>] or "
1629                          "[<start>..<end>]", *sp);
1631     s = strchr(s, ']') + 1;
     /* Validate the inclusive bit range against the field's width. */
1634         return xasprintf("%s: starting bit %d is after ending bit %d",
1636     } else if (start >= field->n_bits) {
1637         return xasprintf("%s: starting bit %d is not valid because field is "
1638                          "only %d bits wide", *sp, start, field->n_bits);
1639     } else if (end >= field->n_bits){
1640         return xasprintf("%s: ending bit %d is not valid because field is "
1641                          "only %d bits wide", *sp, end, field->n_bits);
1646     sf->n_bits = end - start + 1;
1652 /* Parses a subfield from the entirety of 's' into 'sf'.  Returns NULL if
1653  * successful, otherwise a malloc()'d string describing the error.  The caller
1654  * is responsible for freeing the returned string.
1656  * The syntax parsed from 's' takes the form "header[start..end]" where
1657  * 'header' is the name of an NXM field and 'start' and 'end' are (inclusive)
1658  * bit indexes.  "..end" may be omitted to indicate a single bit.  "start..end"
1659  * may both be omitted (the [] are still required) to indicate an entire
 * field. */
1661 char * WARN_UNUSED_RESULT
1662 mf_parse_subfield(struct mf_subfield *sf, const char *s)
1664     char *error = mf_parse_subfield__(sf, &s);
     /* Unlike mf_parse_subfield__(), reject any trailing text. */
1665     if (!error && s[0]) {
1666         error = xstrdup("unexpected input following field syntax");
1671 /* Returns a bitmap in which each bit corresponds to the like-numbered field
1672  * in the OFPXMC12_OPENFLOW_BASIC OXM class, in which the bit values are taken
1673  * from the 'fields' bitmap.  Only fields defined in OpenFlow 'version' are
 * considered.
1676  * This is useful for encoding OpenFlow 1.2 table stats messages. */
1678 oxm_bitmap_from_mf_bitmap(const struct mf_bitmap *fields,
1679                           enum ofp_version version)
1681     uint64_t oxm_bitmap = 0;
1684     BITMAP_FOR_EACH_1 (i, MFF_N_IDS, fields->bm) {
1685         uint32_t oxm = mf_oxm_header(i, version);
1686         uint32_t vendor = nxm_vendor(oxm);
1687         int field = nxm_field(oxm);
         /* Only OPENFLOW_BASIC field numbers 0..63 fit in the 64-bit map. */
1689         if (vendor == OFPXMC12_OPENFLOW_BASIC && field < 64) {
1690             oxm_bitmap |= UINT64_C(1) << field;
     /* Result is in network byte order for direct use on the wire. */
1693     return htonll(oxm_bitmap);
1696 /* Opposite conversion from oxm_bitmap_from_mf_bitmap(): expands a
 * network-byte-order OXM field bitmap into a meta-flow field bitmap,
 * considering only fields that exist in OpenFlow 'version'.
1698  * This is useful for decoding OpenFlow 1.2 table stats messages. */
1700 oxm_bitmap_to_mf_bitmap(ovs_be64 oxm_bitmap, enum ofp_version version)
1702     struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
1704     for (enum mf_field_id id = 0; id < MFF_N_IDS; id++) {
1705         if (version >= mf_oxm_version(id)) {
1706             uint32_t oxm = mf_oxm_header(id, version);
1707             uint32_t vendor = nxm_vendor(oxm);
1708             int field = nxm_field(oxm);
             /* The bit test is done in network byte order to match the
              * encoder; a 'field < 64' guard presumably sits on the elided
              * line 1711 -- TODO confirm. */
1710             if (vendor == OFPXMC12_OPENFLOW_BASIC
1712                 && oxm_bitmap & htonll(UINT64_C(1) << field)) {
1713                 bitmap_set1(fields.bm, id);
1720 /* Returns a bitmap of fields that can be encoded in OXM and that can be
1721  * modified with a "set_field" action. */
1723 oxm_writable_fields(void)
1725     struct mf_bitmap b = MF_BITMAP_INITIALIZER;
1728     for (i = 0; i < MFF_N_IDS; i++) {
         /* mf_oxm_header(i, 0) is nonzero iff field 'i' has an OXM header. */
1729         if (mf_oxm_header(i, 0) && mf_from_id(i)->writable) {
1730             bitmap_set1(b.bm, i);
1736 /* Returns a bitmap of fields that can be encoded in OXM and that can be
1737  * matched in a flow table. */
1739 oxm_matchable_fields(void)
1741     struct mf_bitmap b = MF_BITMAP_INITIALIZER;
1744     for (i = 0; i < MFF_N_IDS; i++) {
         /* Any field with an OXM header is matchable. */
1745         if (mf_oxm_header(i, 0)) {
1746             bitmap_set1(b.bm, i);
1752 /* Returns a bitmap of fields that can be encoded in OXM and that can be
1753  * matched in a flow table with an arbitrary bitmask. */
1755 oxm_maskable_fields(void)
1757     struct mf_bitmap b = MF_BITMAP_INITIALIZER;
1760     for (i = 0; i < MFF_N_IDS; i++) {
         /* MFM_FULLY means any mask is accepted (not just exact/all-wild). */
1761         if (mf_oxm_header(i, 0) && mf_from_id(i)->maskable == MFM_FULLY) {
1762             bitmap_set1(b.bm, i);
/* One entry in the generated table of all NXM/OXM fields.  Each entry is
 * indexed twice: by 32-bit header (header_node in nxm_header_map) and by
 * name (name_node in nxm_name_map). */
1768 struct nxm_field_index {
1769     struct hmap_node header_node;   /* In nxm_header_map, by nf.header. */
1770     struct hmap_node name_node;     /* In nxm_name_map, by nf.name. */
1771     struct nxm_field nf;
/* Generated at build time; defines the all_nxm_fields[] table. */
1774 #include "nx-match.inc"
/* Lookup structures over all_nxm_fields, populated lazily by the init
 * function below (nxm_init -- its signature falls on an elided line). */
1776 static struct hmap nxm_header_map;   /* nxm_field_index, by header. */
1777 static struct hmap nxm_name_map;     /* nxm_field_index, by name. */
/* Per-mf_field_id shortcuts: the NXM-class and OXM-class variant of each
 * field, or NULL if the field has no such encoding. */
1778 static struct nxm_field *nxm_fields[MFF_N_IDS];
1779 static struct nxm_field *oxm_fields[MFF_N_IDS];
/* One-time initializer body (the enclosing function's signature --
 * presumably "static void nxm_init(void)" -- falls on an elided line).
 * Builds the header and name hash maps and the per-mf_field_id shortcut
 * arrays from the generated all_nxm_fields table.  Thread-safe via
 * ovsthread_once. */
1784     static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
1785     if (ovsthread_once_start(&once)) {
1786         hmap_init(&nxm_header_map);
1787         hmap_init(&nxm_name_map);
1788         for (struct nxm_field_index *nfi = all_nxm_fields;
1789              nfi < &all_nxm_fields[ARRAY_SIZE(all_nxm_fields)]; nfi++) {
1790             hmap_insert(&nxm_header_map, &nfi->header_node,
1791                         hash_int(nfi->nf.header, 0));
1792             hmap_insert(&nxm_name_map, &nfi->name_node,
1793                         hash_string(nfi->nf.name, 0));
             /* Vendor 0/1 headers are legacy NXM; everything else is OXM. */
1794             if (is_nxm_header(nfi->nf.header)) {
1795                 nxm_fields[nfi->nf.id] = &nfi->nf;
1797                 oxm_fields[nfi->nf.id] = &nfi->nf;
1800         ovsthread_once_done(&once);
/* Returns the nxm_field with the given 'header', or NULL if none (the
 * failure return falls on an elided line).  Masked headers are normalized
 * to their exact-match form first, since the table stores only exact
 * headers. */
1804 static const struct nxm_field *
1805 nxm_field_by_header(uint32_t header)
1807     const struct nxm_field_index *nfi;
1810     if (nxm_hasmask(header)) {
1811         header = nxm_make_exact_header(header);
1814     HMAP_FOR_EACH_IN_BUCKET (nfi, header_node, hash_int(header, 0),
1816         if (header == nfi->nf.header) {
/* Returns the nxm_field whose name equals the 'len' bytes at 'name'
 * ('name' need not be null-terminated), or NULL if none (the failure
 * return falls on an elided line). */
1823 static const struct nxm_field *
1824 nxm_field_by_name(const char *name, size_t len)
1826     const struct nxm_field_index *nfi;
1829     HMAP_FOR_EACH_WITH_HASH (nfi, name_node, hash_bytes(name, len, 0),
1831         if (strlen(nfi->nf.name) == len && !memcmp(nfi->nf.name, name, len)) {
/* Returns the legacy-NXM-class nxm_field for meta-flow field 'id', or NULL
 * if the field has no NXM encoding.  (A call to the init function
 * presumably sits on the elided line before the return.) */
1838 static const struct nxm_field *
1839 nxm_field_by_mf_id(enum mf_field_id id)
1842     return nxm_fields[id];
1845 static const struct nxm_field *
1846 oxm_field_by_mf_id(enum mf_field_id id)
1849 return oxm_fields[id];