2 * Copyright (c) 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
26 #include "odp-netlink.h"
28 #include "ovs-thread.h"
31 #include "tun-metadata.h"
/* NOTE(review): this copy is a degraded extraction — stray line-number
 * prefixes remain on each line and some interior lines are elided. */
/* One mapping slot: ties a Geneve option (class, type) to an allocated
 * location in a struct tun_metadata. */
33 struct tun_meta_entry {
34 struct hmap_node node; /* In struct tun_table's key_hmap. */
35 uint32_t key; /* (class << 16) | type. */
36 struct tun_metadata_loc loc;
37 bool valid; /* True if allocated to a class and type. */
40 /* Maps from Geneve option class+type to positions in a struct tun_metadata's
43 /* TUN_METADATA<i> is stored in element <i>. */
44 struct tun_meta_entry entries[TUN_METADATA_NUM_OPTS];
46 /* Each bit represents 4 bytes of space, 0-bits are free space. */
47 unsigned long alloc_map[BITMAP_N_LONGS(TUN_METADATA_TOT_OPT_SIZE / 4)];
49 /* The valid elements in entries[], indexed by class+type. */
52 BUILD_ASSERT_DECL(TUN_METADATA_TOT_OPT_SIZE % 4 == 0);
/* Guards all writers of the global table; readers go through RCU. */
54 static struct ovs_mutex tab_mutex = OVS_MUTEX_INITIALIZER;
55 static OVSRCU_TYPE(struct tun_table *) metadata_tab;
/* Forward declarations for mutators that require 'tab_mutex'. */
57 static enum ofperr tun_metadata_add_entry(struct tun_table *map, uint8_t idx,
58 uint16_t opt_class, uint8_t type,
59 uint8_t len) OVS_REQUIRES(tab_mutex);
60 static void tun_metadata_del_entry(struct tun_table *map, uint8_t idx)
61 OVS_REQUIRES(tab_mutex);
62 static void memcpy_to_metadata(struct tun_metadata *dst, const void *src,
63 const struct tun_metadata_loc *,
65 static void memcpy_from_metadata(void *dst, const struct tun_metadata *src,
66 const struct tun_metadata_loc *);
/* Builds the 32-bit hmap key for a Geneve option: (class << 16) | type,
 * matching the layout documented on struct tun_meta_entry's 'key' field.
 * BUG FIX: this copy shifted by 8 and truncated through uint16_t, which
 * would drop the option class's high byte and collide distinct classes. */
69 tun_meta_key(ovs_be16 class, uint8_t type)
71 return (OVS_FORCE uint32_t)class << 16 | type;
/* Extracts the (network byte order) option class from a 32-bit key. */
75 tun_key_class(uint32_t key)
77 return (OVS_FORCE ovs_be16)(key >> 16);
/* Extracts the option type (low byte) from a 32-bit key. */
81 tun_key_type(uint32_t key)
86 /* Returns a newly allocated tun_table. If 'old_map' is nonnull then the new
87 * tun_table is a deep copy of the old one. */
/* NOTE(review): interior lines (the if (old_map) branch structure and loop
 * advance) appear elided in this copy — the chain-duplication loop below
 * presumably also advances 'chain'; verify against the upstream source. */
88 static struct tun_table *
89 table_alloc(const struct tun_table *old_map) OVS_REQUIRES(tab_mutex)
91 struct tun_table *new_map;
93 new_map = xzalloc(sizeof *new_map);
96 struct tun_meta_entry *entry;
99 hmap_init(&new_map->key_hmap);
101 HMAP_FOR_EACH (entry, node, &old_map->key_hmap) {
102 struct tun_meta_entry *new_entry;
103 struct tun_metadata_loc_chain *chain;
/* Same array slot in the copy as in the original. */
105 new_entry = &new_map->entries[entry - old_map->entries];
106 hmap_insert(&new_map->key_hmap, &new_entry->node, entry->node.hash);
/* Deep-copy the allocation chain so the copy owns its own links. */
108 chain = &new_entry->loc.c;
109 while (chain->next) {
110 chain->next = xmemdup(chain->next, sizeof *chain->next);
115 hmap_init(&new_map->key_hmap);
121 /* Frees 'map' and all the memory it owns. */
123 table_free(struct tun_table *map) OVS_REQUIRES(tab_mutex)
125 struct tun_meta_entry *entry;
/* Deleting each entry releases its allocation chain and bitmap space. */
131 HMAP_FOR_EACH (entry, node, &map->key_hmap) {
132 tun_metadata_del_entry(map, entry - map->entries);
138 /* Creates a global tunnel metadata mapping table, if none already exists. */
140 tun_metadata_init(void)
142 ovs_mutex_lock(&tab_mutex);
144 if (!ovsrcu_get_protected(struct tun_table *, &metadata_tab)) {
145 ovsrcu_set(&metadata_tab, table_alloc(NULL));
148 ovs_mutex_unlock(&tab_mutex);
/* Applies the table modification in 'gtm' (add/delete/clear mappings) to a
 * copy of the current global table, then publishes the copy via RCU and
 * defers freeing of the old table.
 * BUG FIX: '&gtm->mappings' had been garbled to '>m->mappings' by HTML-entity
 * decoding ('&gt' -> '>'); restored in both list iterations below.
 * NOTE(review): the switch-case labels and error-handling lines appear
 * elided in this copy. */
152 tun_metadata_table_mod(struct ofputil_geneve_table_mod *gtm)
154 struct tun_table *old_map, *new_map;
155 struct ofputil_geneve_map *ofp_map;
158 ovs_mutex_lock(&tab_mutex);
160 old_map = ovsrcu_get_protected(struct tun_table *, &metadata_tab);
162 switch (gtm->command) {
164 new_map = table_alloc(old_map);
166 LIST_FOR_EACH (ofp_map, list_node, &gtm->mappings) {
167 err = tun_metadata_add_entry(new_map, ofp_map->index,
168 ofp_map->option_class,
169 ofp_map->option_type,
170 ofp_map->option_len);
179 new_map = table_alloc(old_map);
181 LIST_FOR_EACH (ofp_map, list_node, &gtm->mappings) {
182 tun_metadata_del_entry(new_map, ofp_map->index);
187 new_map = table_alloc(NULL);
/* Publish the new table; old readers may still hold the old one, so its
 * destruction is postponed until after an RCU grace period. */
194 ovsrcu_set(&metadata_tab, new_map);
195 ovsrcu_postpone(table_free, old_map);
198 ovs_mutex_unlock(&tab_mutex);
/* Fills 'gtr' with the current contents of the global mapping table: limits
 * plus one ofputil_geneve_map per valid entry.
 * BUG FIX: '&gtr->mappings' had been garbled to '>r->mappings' by HTML-entity
 * decoding ('&gt' -> '>'); restored in list_init() and list_push_back(). */
203 tun_metadata_table_request(struct ofputil_geneve_table_reply *gtr)
205 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
208 gtr->max_option_space = TUN_METADATA_TOT_OPT_SIZE;
209 gtr->max_fields = TUN_METADATA_NUM_OPTS;
210 list_init(&gtr->mappings);
212 for (i = 0; i < TUN_METADATA_NUM_OPTS; i++) {
213 struct tun_meta_entry *entry = &map->entries[i];
/* Deliberately shadows the outer 'map' (tun_table) with the reply row. */
214 struct ofputil_geneve_map *map;
220 map = xmalloc(sizeof *map);
221 map->option_class = ntohs(tun_key_class(entry->key));
222 map->option_type = tun_key_type(entry->key);
223 map->option_len = entry->loc.len;
226 list_push_back(&gtr->mappings, &map->list_node);
230 /* Copies the value of field 'mf' from 'tnl' (which must be in non-UDPIF
 * format) into 'value'.
232 * 'mf' must be an MFF_TUN_METADATA* field.
234 * This uses the global tunnel metadata mapping table created by
235 * tun_metadata_init(). If no such table has been created or if 'mf' hasn't
236 * been allocated in it yet, this just zeros 'value'. */
238 tun_metadata_read(const struct flow_tnl *tnl,
239 const struct mf_field *mf, union mf_value *value)
241 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
242 unsigned int idx = mf->id - MFF_TUN_METADATA0;
243 struct tun_metadata_loc *loc;
246 memset(value->tun_metadata, 0, mf->n_bytes);
250 loc = &map->entries[idx].loc;
/* The value is right-justified in the mf_value buffer: zero the leading
 * bytes, then copy the allocated 'loc->len' bytes into the tail. */
252 memset(value->tun_metadata, 0, mf->n_bytes - loc->len);
253 memcpy_from_metadata(value->tun_metadata + mf->n_bytes - loc->len,
254 &tnl->metadata, loc);
257 /* Copies 'value' into field 'mf' in 'tnl' (in non-UDPIF format).
259 * 'mf' must be an MFF_TUN_METADATA* field.
261 * This uses the global tunnel metadata mapping table created by
262 * tun_metadata_init(). If no such table has been created or if 'mf' hasn't
263 * been allocated in it yet, this function does nothing. */
265 tun_metadata_write(struct flow_tnl *tnl,
266 const struct mf_field *mf, const union mf_value *value)
268 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
269 unsigned int idx = mf->id - MFF_TUN_METADATA0;
270 struct tun_metadata_loc *loc;
272 if (!map || !map->entries[idx].valid) {
276 loc = &map->entries[idx].loc;
/* Only the trailing 'loc->len' bytes of the right-justified value are
 * written into the metadata area. */
277 memcpy_to_metadata(&tnl->metadata,
278 value->tun_metadata + mf->n_bytes - loc->len, loc, idx);
/* Returns the location for option index 'idx': the global table's slot when
 * one is mapped, otherwise allocates a new slot in 'match''s own per-match
 * mapping table. On failure fills '*err_str' (when non-NULL) and,
 * presumably, returns NULL — the error-return lines are elided in this
 * copy; verify against upstream. */
281 static const struct tun_metadata_loc *
282 metadata_loc_from_match(struct tun_table *map, struct match *match,
283 const char *name, unsigned int idx,
284 unsigned int field_len, bool masked, char **err_str)
286 ovs_assert(idx < TUN_METADATA_NUM_OPTS);
293 if (map->entries[idx].valid) {
294 return &map->entries[idx].loc;
/* Would the new field overflow the fixed per-match option space? */
300 if (match->tun_md.alloc_offset + field_len > TUN_METADATA_TOT_OPT_SIZE) {
302 *err_str = xasprintf("field %s exceeds maximum size for tunnel "
303 "metadata (used %d, max %d)", name,
304 match->tun_md.alloc_offset + field_len,
305 TUN_METADATA_TOT_OPT_SIZE);
/* A mask bit already set means the same field was specified twice. */
311 if (ULLONG_GET(match->wc.masks.tunnel.metadata.present.map, idx)) {
313 *err_str = xasprintf("field %s set multiple times", name);
/* Carve out a single contiguous chunk at the current allocation offset. */
319 match->tun_md.entry[idx].loc.len = field_len;
320 match->tun_md.entry[idx].loc.c.offset = match->tun_md.alloc_offset;
321 match->tun_md.entry[idx].loc.c.len = field_len;
322 match->tun_md.entry[idx].loc.c.next = NULL;
323 match->tun_md.entry[idx].masked = masked;
324 match->tun_md.alloc_offset += field_len;
325 match->tun_md.valid = true;
327 return &match->tun_md.entry[idx].loc;
330 /* Makes 'match' match 'value'/'mask' on field 'mf'.
332 * 'mf' must be an MFF_TUN_METADATA* field. 'match' must be in non-UDPIF format.
334 * If there is global tunnel metadata matching table, this function is
335 * effective only if there is already a mapping for 'mf'. Otherwise, the
336 * metadata mapping table integrated into 'match' is used, adding 'mf' to its
337 * mapping table if it isn't already mapped (and if there is room). If 'mf'
338 * isn't or can't be mapped, this function returns without modifying 'match'.
340 * 'value' may be NULL; if so, then 'mf' is made to match on an all-zeros
343 * 'mask' may be NULL; if so, then 'mf' is made exact-match.
345 * If non-NULL, 'err_str' returns a malloc'ed string describing any errors
346 * with the request or NULL if there is no error. The caller is responsible
347 * for freeing the string.
350 tun_metadata_set_match(const struct mf_field *mf, const union mf_value *value,
351 const union mf_value *mask, struct match *match,
354 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
355 const struct tun_metadata_loc *loc;
356 unsigned int idx = mf->id - MFF_TUN_METADATA0;
357 unsigned int field_len;
359 unsigned int data_offset;
362 ovs_assert(!(match->flow.tunnel.flags & FLOW_TNL_F_UDPIF));
364 field_len = mf_field_len(mf, value, mask, &is_masked);
365 loc = metadata_loc_from_match(map, match, mf->name, idx, field_len,
/* The value is right-justified: only the trailing loc->len bytes count. */
371 data_offset = mf->n_bytes - loc->len;
374 memset(data.tun_metadata, 0, loc->len);
376 memcpy(data.tun_metadata, value->tun_metadata + data_offset, loc->len);
/* Masked case: store value & mask so the flow holds the canonical form. */
379 for (i = 0; i < loc->len; i++) {
380 data.tun_metadata[i] = value->tun_metadata[data_offset + i] &
381 mask->tun_metadata[data_offset + i];
384 memcpy_to_metadata(&match->flow.tunnel.metadata, data.tun_metadata,
/* Build the mask side: zeros, all-ones (exact match), or the given mask.
 * NOTE(review): the branch headers between these memsets are elided. */
388 memset(data.tun_metadata, 0, loc->len);
390 memset(data.tun_metadata, 0xff, loc->len);
392 memcpy(data.tun_metadata, mask->tun_metadata + data_offset, loc->len);
394 memcpy_to_metadata(&match->wc.masks.tunnel.metadata, data.tun_metadata,
/* Normalizes 'flow' (and optional 'mask') into parsed, non-UDPIF form in
 * 'flow_xlate'/'mask_xlate', translating from raw Geneve bytes when the
 * FLOW_TNL_F_UDPIF flag is set and copying otherwise. Return-value lines
 * are elided in this copy — presumably nonzero/true on success. */
399 udpif_to_parsed(const struct flow_tnl *flow, const struct flow_tnl *mask,
400 struct flow_tnl *flow_xlate, struct flow_tnl *mask_xlate)
402 if (flow->flags & FLOW_TNL_F_UDPIF) {
405 err = tun_metadata_from_geneve_udpif(flow, flow, flow_xlate);
/* Mask translation is keyed off the already-translated flow. */
411 tun_metadata_from_geneve_udpif(flow, mask, mask_xlate);
417 if (flow->metadata.present.map == 0) {
418 /* There is no tunnel metadata, don't bother copying. */
422 memcpy(flow_xlate, flow, sizeof *flow_xlate);
424 memcpy(mask_xlate, mask, sizeof *mask_xlate);
/* Ensure the result references some mapping table; fall back to global. */
427 if (!flow_xlate->metadata.tab) {
428 flow_xlate->metadata.tab = ovsrcu_get(struct tun_table *,
436 /* Copies all MFF_TUN_METADATA* fields from 'tnl' to 'flow_metadata'. */
438 tun_metadata_get_fmd(const struct flow_tnl *tnl, struct match *flow_metadata)
440 struct flow_tnl flow;
443 if (!udpif_to_parsed(tnl, NULL, &flow, NULL)) {
/* Re-home each present option into flow_metadata's per-match table. */
447 ULLONG_FOR_EACH_1 (i, flow.metadata.present.map) {
449 const struct tun_metadata_loc *old_loc = &flow.metadata.tab->entries[i].loc;
450 const struct tun_metadata_loc *new_loc;
452 new_loc = metadata_loc_from_match(NULL, flow_metadata, NULL, i,
453 old_loc->len, false, NULL);
455 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, old_loc);
456 memcpy_to_metadata(&flow_metadata->flow.tunnel.metadata,
457 opts.tun_metadata, new_loc, i);
/* Exact-match mask for the copied option bytes. */
459 memset(opts.tun_metadata, 0xff, old_loc->len);
460 memcpy_to_metadata(&flow_metadata->wc.masks.tunnel.metadata,
461 opts.tun_metadata, new_loc, i);
/* Hash for the (class << 16 | type) key used by key_hmap. */
466 tun_meta_hash(uint32_t key)
468 return hash_int(key, 0);
/* Looks up the entry for 'key' in 'hmap'; the elided tail presumably
 * returns the entry or NULL when no bucket member matches. */
471 static struct tun_meta_entry *
472 tun_meta_find_key(const struct hmap *hmap, uint32_t key)
474 struct tun_meta_entry *entry;
476 HMAP_FOR_EACH_IN_BUCKET (entry, node, tun_meta_hash(key), hmap) {
477 if (entry->key == key) {
/* Copies 'src' bytes into 'dst' at the chained locations in 'loc' and marks
 * option 'idx' present. The chain-walk lines are elided in this copy. */
485 memcpy_to_metadata(struct tun_metadata *dst, const void *src,
486 const struct tun_metadata_loc *loc, unsigned int idx)
488 const struct tun_metadata_loc_chain *chain = &loc->c;
492 memcpy(dst->opts.u8 + loc->c.offset + addr, (uint8_t *)src + addr,
498 ULLONG_SET1(dst->present.map, idx);
/* Inverse of memcpy_to_metadata: gathers bytes from the chained locations
 * in 'src' into the flat buffer 'dst'. */
502 memcpy_from_metadata(void *dst, const struct tun_metadata *src,
503 const struct tun_metadata_loc *loc)
505 const struct tun_metadata_loc_chain *chain = &loc->c;
509 memcpy((uint8_t *)dst + addr, src->opts.u8 + loc->c.offset + addr,
/* Allocates up to 'len' bytes of option space from 'map''s 4-byte-granular
 * bitmap into 'loc'. Scans for the first free run that fits; otherwise
 * falls back to the best (longest) run found, so a single request may be
 * satisfied by a chain of smaller pieces. Error-return lines are elided. */
517 tun_metadata_alloc_chain(struct tun_table *map, uint8_t len,
518 struct tun_metadata_loc_chain *loc)
519 OVS_REQUIRES(tab_mutex)
521 int alloc_len = len / 4;
523 int scan_end = TUN_METADATA_TOT_OPT_SIZE / 4;
524 int pos_start, pos_end, pos_len;
525 int best_start = 0, best_len = 0;
/* Find the next run of 0-bits (free 4-byte cells). */
528 pos_start = bitmap_scan(map->alloc_map, 0, scan_start, scan_end);
529 if (pos_start == scan_end) {
533 pos_end = bitmap_scan(map->alloc_map, 1, pos_start,
534 MIN(pos_start + alloc_len, scan_end));
535 pos_len = pos_end - pos_start;
536 if (pos_len == alloc_len) {
/* Remember the longest partial run in case nothing fits exactly. */
540 if (pos_len > best_len) {
541 best_start = pos_start;
544 scan_start = pos_end + 1;
551 pos_start = best_start;
/* Mark the chosen cells allocated and record byte offset/length. */
555 bitmap_set_multiple(map->alloc_map, pos_start, pos_len, 1);
556 loc->offset = pos_start * 4;
557 loc->len = pos_len * 4;
/* Maps option (opt_class, type) of 'len' bytes to table slot 'idx'.
 * Fails if the slot is taken, the key is already mapped elsewhere, or the
 * option space is exhausted. NOTE(review): the allocation loop structure
 * and some branch headers are elided in this copy. */
563 tun_metadata_add_entry(struct tun_table *map, uint8_t idx, uint16_t opt_class,
564 uint8_t type, uint8_t len) OVS_REQUIRES(tab_mutex)
566 struct tun_meta_entry *entry;
567 struct tun_metadata_loc_chain *cur_chain, *prev_chain;
569 ovs_assert(idx < TUN_METADATA_NUM_OPTS);
571 entry = &map->entries[idx];
573 return OFPERR_NXGTMFC_ALREADY_MAPPED;
/* 'opt_class' arrives host-order from OpenFlow; the key stores it
 * network-order, hence htons() here. */
576 entry->key = tun_meta_key(htons(opt_class), type);
577 if (tun_meta_find_key(&map->key_hmap, entry->key)) {
578 return OFPERR_NXGTMFC_DUP_ENTRY;
582 hmap_insert(&map->key_hmap, &entry->node,
583 tun_meta_hash(entry->key));
585 entry->loc.len = len;
586 cur_chain = &entry->loc.c;
587 memset(cur_chain, 0, sizeof *cur_chain);
/* Later links are heap-allocated; the first lives inside the entry. */
594 cur_chain = xzalloc(sizeof *cur_chain);
597 err = tun_metadata_alloc_chain(map, len, cur_chain);
/* On allocation failure, roll back whatever was claimed so far. */
599 tun_metadata_del_entry(map, idx);
600 return OFPERR_NXGTMFC_TABLE_FULL;
603 len -= cur_chain->len;
606 prev_chain->next = cur_chain;
608 prev_chain = cur_chain;
/* Unmaps slot 'idx', returning its bitmap cells and freeing heap-allocated
 * chain links (the first link is embedded and is not freed). */
616 tun_metadata_del_entry(struct tun_table *map, uint8_t idx)
617 OVS_REQUIRES(tab_mutex)
619 struct tun_meta_entry *entry;
620 struct tun_metadata_loc_chain *chain;
622 if (idx >= TUN_METADATA_NUM_OPTS) {
626 entry = &map->entries[idx];
631 chain = &entry->loc.c;
633 struct tun_metadata_loc_chain *next = chain->next;
635 bitmap_set_multiple(map->alloc_map, chain->offset / 4,
637 if (chain != &entry->loc.c) {
643 entry->valid = false;
644 hmap_remove(&map->key_hmap, &entry->node);
645 memset(&entry->loc, 0, sizeof entry->loc);
/* Walks the raw Geneve option TLVs in 'flow_opt' (with data taken from
 * 'opt', which differs from 'flow_opt' only when translating a mask) and
 * stores each mapped option into 'metadata'. Error returns for malformed
 * or unmappable-critical options are on elided lines in this copy. */
649 tun_metadata_from_geneve__(const struct tun_metadata *flow_metadata,
650 const struct geneve_opt *opt,
651 const struct geneve_opt *flow_opt, int opts_len,
652 struct tun_metadata *metadata)
654 struct tun_table *map;
655 bool is_mask = flow_opt != opt;
658 map = ovsrcu_get(struct tun_table *, &metadata_tab);
/* Masks use the table already referenced by the flow's metadata. */
661 map = flow_metadata->tab;
668 while (opts_len > 0) {
670 struct tun_meta_entry *entry;
/* Truncated header: option smaller than its fixed header. */
672 if (opts_len < sizeof(*opt)) {
676 len = sizeof(*opt) + flow_opt->length * 4;
677 if (len > opts_len) {
681 entry = tun_meta_find_key(&map->key_hmap,
682 tun_meta_key(flow_opt->opt_class,
685 if (entry->loc.len == flow_opt->length * 4) {
686 memcpy_to_metadata(metadata, opt + 1, &entry->loc,
687 entry - map->entries);
/* Unmapped critical option: cannot be silently dropped. */
691 } else if (flow_opt->type & GENEVE_CRIT_OPT_TYPE) {
695 opt = opt + len / sizeof(*opt);
696 flow_opt = flow_opt + len / sizeof(*opt);
/* Finds the nested Geneve-options attribute inside a netlink key blob,
 * or (on elided lines) presumably NULL when no tunnel key is present. */
703 static const struct nlattr *
704 tun_metadata_find_geneve_key(const struct nlattr *key, uint32_t key_len)
706 const struct nlattr *tnl_key;
708 tnl_key = nl_attr_find__(key, key_len, OVS_KEY_ATTR_TUNNEL);
713 return nl_attr_find_nested(tnl_key, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
716 /* Converts from Geneve netlink attributes in 'attr' to tunnel metadata
717 * in 'tun'. The result may either be in UDPIF format or not, as determined
720 * In the event that a mask is being converted, it is also necessary to
721 * pass in flow information. This includes the full set of netlink attributes
722 * (i.e. not just the Geneve attribute) in 'flow_attrs'/'flow_attr_len' and
723 * the previously converted tunnel metadata 'flow_tun'.
725 * If a flow rather than mask is being converted, 'flow_attrs' must be NULL. */
727 tun_metadata_from_geneve_nlattr(const struct nlattr *attr,
728 const struct nlattr *flow_attrs,
729 size_t flow_attr_len,
730 const struct flow_tnl *flow_tun, bool udpif,
731 struct flow_tnl *tun)
733 bool is_mask = !!flow_attrs;
734 int attr_len = nl_attr_get_size(attr);
735 const struct nlattr *flow;
737 /* No need for real translation, just copy things over. */
739 memcpy(tun->metadata.opts.gnv, nl_attr_get(attr), attr_len);
742 tun->metadata.present.len = attr_len;
743 tun->flags |= FLOW_TNL_F_UDPIF;
745 /* We need to exact match on the length so we don't
746 * accidentally match on sets of options that are the same
747 * at the beginning but with additional options after. */
748 tun->metadata.present.len = 0xff;
/* Mask path: locate the flow's Geneve attribute to pair with the mask. */
755 flow = tun_metadata_find_geneve_key(flow_attrs, flow_attr_len);
757 return attr_len ? EINVAL : 0;
760 if (attr_len != nl_attr_get_size(flow)) {
767 return tun_metadata_from_geneve__(&flow_tun->metadata, nl_attr_get(attr),
768 nl_attr_get(flow), nl_attr_get_size(flow),
772 /* Converts from the flat Geneve options representation extracted directly
773 * from the tunnel header to the representation that maps options to
774 * pre-allocated locations. The original version (in UDPIF form) is passed
775 * in 'src' and the translated form is stored in 'dst'. To handle masks, the
776 * flow must also be passed in through 'flow' (in the original, raw form). */
778 tun_metadata_from_geneve_udpif(const struct flow_tnl *flow,
779 const struct flow_tnl *src,
780 struct flow_tnl *dst)
782 ovs_assert(flow->flags & FLOW_TNL_F_UDPIF);
/* Result is parsed form: clear the UDPIF flag and start empty. */
785 dst->flags = flow->flags & ~FLOW_TNL_F_UDPIF;
787 dst->metadata.tab = NULL;
789 dst->metadata.present.map = 0;
790 return tun_metadata_from_geneve__(&flow->metadata, src->metadata.opts.gnv,
791 flow->metadata.opts.gnv,
792 flow->metadata.present.len,
/* Serializes every present option in 'flow' as Geneve TLVs appended to 'b',
 * setting '*crit_opt' if any emitted option has the critical bit. */
797 tun_metadata_to_geneve__(const struct tun_metadata *flow, struct ofpbuf *b,
800 struct tun_table *map;
805 map = ovsrcu_get(struct tun_table *, &metadata_tab);
810 ULLONG_FOR_EACH_1 (i, flow->present.map) {
811 struct tun_meta_entry *entry = &map->entries[i];
812 struct geneve_opt *opt;
/* Header plus payload in one contiguous reservation. */
814 opt = ofpbuf_put_uninit(b, sizeof *opt + entry->loc.len);
816 opt->opt_class = tun_key_class(entry->key);
817 opt->type = tun_key_type(entry->key);
818 opt->length = entry->loc.len / 4;
823 memcpy_from_metadata(opt + 1, flow, &entry->loc);
824 *crit_opt |= !!(opt->type & GENEVE_CRIT_OPT_TYPE);
/* Emits the flow's options as a nested netlink attribute in 'b'; skips
 * emitting anything when no options are present. */
829 tun_metadata_to_geneve_nlattr_flow(const struct flow_tnl *flow,
832 size_t nlattr_offset;
835 if (!flow->metadata.present.map) {
839 /* For all intents and purposes, the Geneve options are nested
840 * attributes even if this doesn't show up directly to netlink. It's
841 * similar enough that we can use the same mechanism. */
842 nlattr_offset = nl_msg_start_nested(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
844 tun_metadata_to_geneve__(&flow->metadata, b, &crit_opt);
846 nl_msg_end_nested(b, nlattr_offset);
849 /* Converts from processed tunnel metadata information (in non-udpif
850 * format) in 'flow' to a stream of Geneve options suitable for
851 * transmission in 'opts'. Additionally returns whether there were
852 * any critical options in 'crit_opt' as well as the total length of
855 tun_metadata_to_geneve_header(const struct flow_tnl *flow,
856 struct geneve_opt *opts, bool *crit_opt)
860 ovs_assert(!(flow->flags & FLOW_TNL_F_UDPIF));
/* Serialize through a stack-backed ofpbuf over the caller's buffer. */
862 ofpbuf_use_stack(&b, opts, GENEVE_TOT_OPT_SIZE);
863 tun_metadata_to_geneve__(&flow->metadata, &b, crit_opt);
/* Rewrites the option payloads in 'opt' (a copy of the flow's options) with
 * the corresponding mask bytes from 'mask'; unmapped options get a
 * zero payload. Header fields are forced to exact-match values. */
869 tun_metadata_to_geneve_mask__(const struct tun_metadata *flow,
870 const struct tun_metadata *mask,
871 struct geneve_opt *opt, int opts_len)
873 struct tun_table *map = flow->tab;
879 /* All of these options have already been validated, so no need
880 * for sanity checking. */
881 while (opts_len > 0) {
882 struct tun_meta_entry *entry;
883 int len = sizeof(*opt) + opt->length * 4;
885 entry = tun_meta_find_key(&map->key_hmap,
886 tun_meta_key(opt->opt_class, opt->type));
888 memcpy_from_metadata(opt + 1, mask, &entry->loc);
890 memset(opt + 1, 0, opt->length * 4);
/* Exact-match the option header (class; elided lines presumably cover
 * type and length as well — verify against upstream). */
893 opt->opt_class = htons(0xffff);
900 opt = opt + len / sizeof(*opt);
/* Emits the mask counterpart of the flow's Geneve attribute into 'b' by
 * duplicating the flow's attribute from 'key' and overwriting payloads. */
906 tun_metadata_to_geneve_nlattr_mask(const struct ofpbuf *key,
907 const struct flow_tnl *mask,
908 const struct flow_tnl *flow,
911 const struct nlattr *geneve_key;
912 struct nlattr *geneve_mask;
913 struct geneve_opt *opt;
920 geneve_key = tun_metadata_find_geneve_key(key->data, key->size);
/* Copy the flow attribute verbatim, then mutate it in place in 'b'. */
925 geneve_mask = ofpbuf_tail(b);
926 nl_msg_put(b, geneve_key, geneve_key->nla_len);
928 opt = CONST_CAST(struct geneve_opt *, nl_attr_get(geneve_mask));
929 opts_len = nl_attr_get_size(geneve_mask);
931 tun_metadata_to_geneve_mask__(&flow->metadata, &mask->metadata,
935 /* Convert from the tunnel metadata in 'tun' to netlink attributes stored
936 * in 'b'. Either UDPIF or non-UDPIF input forms are accepted.
938 * To assist with parsing, it is necessary to also pass in the tunnel metadata
939 * from the flow in 'flow' as well in the original netlink form of the flow in
942 tun_metadata_to_geneve_nlattr(const struct flow_tnl *tun,
943 const struct flow_tnl *flow,
944 const struct ofpbuf *key,
/* 'tun' == 'flow' means we are serializing a flow, not a mask. */
947 bool is_mask = tun != flow;
949 if (!(flow->flags & FLOW_TNL_F_UDPIF)) {
951 tun_metadata_to_geneve_nlattr_flow(tun, b);
953 tun_metadata_to_geneve_nlattr_mask(key, tun, flow, b);
/* UDPIF form: the raw option bytes can be emitted directly. */
955 } else if (flow->metadata.present.len || is_mask) {
956 nl_msg_put_unspec(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
957 tun->metadata.opts.gnv,
958 flow->metadata.present.len);
962 /* Converts 'mask_src' (in non-UDPIF format) to a series of masked options in
963 * 'dst'. 'flow_src' (also in non-UDPIF format) and the original set of
964 * options 'flow_src_opt'/'opts_len' are needed as a guide to interpret the
967 tun_metadata_to_geneve_udpif_mask(const struct flow_tnl *flow_src,
968 const struct flow_tnl *mask_src,
969 const struct geneve_opt *flow_src_opt,
970 int opts_len, struct geneve_opt *dst)
972 ovs_assert(!(flow_src->flags & FLOW_TNL_F_UDPIF));
/* Start from the flow's raw options, then overwrite payloads with mask. */
974 memcpy(dst, flow_src_opt, opts_len);
975 tun_metadata_to_geneve_mask__(&flow_src->metadata,
976 &mask_src->metadata, dst, opts_len);
/* Returns the location for option 'idx' when reading a match back out:
 * prefers the match's own per-match table; otherwise consults the global
 * table 'map' and derives '*is_masked' from the stored mask bytes. */
979 static const struct tun_metadata_loc *
980 metadata_loc_from_match_read(struct tun_table *map, const struct match *match,
981 unsigned int idx, struct flow_tnl *mask,
984 union mf_value mask_opts;
986 if (match->tun_md.valid) {
987 *is_masked = match->tun_md.entry[idx].masked;
988 return &match->tun_md.entry[idx].loc;
991 memcpy_from_metadata(mask_opts.tun_metadata, &mask->metadata,
992 &map->entries[idx].loc);
/* Masked unless the stored mask is all-ones over the full length. */
994 *is_masked = map->entries[idx].loc.len == 0 ||
995 !is_all_ones(mask_opts.tun_metadata,
996 map->entries[idx].loc.len);
997 return &map->entries[idx].loc;
/* Serializes every matched tun_metadata field in 'match' as NXM/OXM
 * entries appended to 'b'. */
1001 tun_metadata_to_nx_match(struct ofpbuf *b, enum ofp_version oxm,
1002 const struct match *match)
1004 struct flow_tnl flow, mask;
1007 if (!udpif_to_parsed(&match->flow.tunnel, &match->wc.masks.tunnel,
1012 ULLONG_FOR_EACH_1 (i, mask.metadata.present.map) {
1013 const struct tun_metadata_loc *loc;
1015 union mf_value opts;
1016 union mf_value mask_opts;
1018 loc = metadata_loc_from_match_read(flow.metadata.tab, match, i,
1020 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, loc);
1021 memcpy_from_metadata(mask_opts.tun_metadata, &mask.metadata, loc);
/* NULL mask requests exact-match encoding in nxm_put__(). */
1022 nxm_put__(b, MFF_TUN_METADATA0 + i, oxm, opts.tun_metadata,
1023 is_masked ? mask_opts.tun_metadata : NULL, loc->len);
/* Appends a human-readable "tun_metadataN=..." clause to 's' for each
 * matched tunnel metadata field in 'match'. */
1028 tun_metadata_match_format(struct ds *s, const struct match *match)
1030 struct flow_tnl flow, mask;
1033 if (!udpif_to_parsed(&match->flow.tunnel, &match->wc.masks.tunnel,
1038 ULLONG_FOR_EACH_1 (i, mask.metadata.present.map) {
1039 const struct tun_metadata_loc *loc;
1041 union mf_value opts, mask_opts;
1043 loc = metadata_loc_from_match_read(flow.metadata.tab, match, i,
1046 ds_put_format(s, "tun_metadata%u", i);
1047 memcpy_from_metadata(mask_opts.tun_metadata, &mask.metadata, loc);
1049 if (!ULLONG_GET(flow.metadata.present.map, i)) {
1050 /* Indicate that we are matching on the field being not present. */
1051 ds_put_cstr(s, "=NP");
1052 } else if (!(is_masked &&
1053 is_all_zeros(mask_opts.tun_metadata, loc->len))) {
1054 ds_put_char(s, '=');
1056 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, loc);
1057 ds_put_hex(s, opts.tun_metadata, loc->len);
/* Only print "/mask" when the match is not exact. */
1059 if (!is_all_ones(mask_opts.tun_metadata, loc->len)) {
1060 ds_put_char(s, '/');
1061 ds_put_hex(s, mask_opts.tun_metadata, loc->len);
1064 ds_put_char(s, ',');