2 * Copyright (c) 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
23 #include "openvswitch/hmap.h"
24 #include "openvswitch/match.h"
26 #include "odp-netlink.h"
27 #include "openvswitch/ofp-util.h"
28 #include "ovs-thread.h"
31 #include "tun-metadata.h"
/* One entry in the global tunnel-metadata mapping table: binds a TLV option
 * (class, type) pair to a location ('loc') within struct tun_metadata. */
34 struct tun_meta_entry {
35 struct hmap_node node; /* In struct tun_table's key_hmap. */
36 uint32_t key; /* (class << 8) | type; built by tun_meta_key(). */
37 struct tun_metadata_loc loc;
38 bool valid; /* True if allocated to a class and type. */
41 /* Maps from TLV option class+type to positions in a struct tun_metadata's
44 /* TUN_METADATA<i> is stored in element <i>. */
45 struct tun_meta_entry entries[TUN_METADATA_NUM_OPTS];
47 /* Each bit represents 4 bytes of space, 0-bits are free space. */
48 unsigned long alloc_map[BITMAP_N_LONGS(TUN_METADATA_TOT_OPT_SIZE / 4)];
50 /* The valid elements in entries[], indexed by class+type. */
/* The 4-byte allocation granularity of alloc_map[] above requires the total
 * option space to be a multiple of 4; enforce that at compile time. */
53 BUILD_ASSERT_DECL(TUN_METADATA_TOT_OPT_SIZE % 4 == 0);
/* Serializes all modifications to the global mapping table ('metadata_tab');
 * readers access it via RCU without taking this mutex. */
55 static struct ovs_mutex tab_mutex = OVS_MUTEX_INITIALIZER;
/* RCU-protected pointer to the current global tun_table.  Updated by
 * copy-then-swap in tun_metadata_table_mod(); old copies are freed via
 * ovsrcu_postpone(). */
56 static OVSRCU_TYPE(struct tun_table *) metadata_tab;
/* Forward declarations for helpers defined later in this file.  The
 * OVS_REQUIRES(tab_mutex) annotations mean these may only be called with
 * tab_mutex held. */
58 static enum ofperr tun_metadata_add_entry(struct tun_table *map, uint8_t idx,
59 uint16_t opt_class, uint8_t type,
60 uint8_t len) OVS_REQUIRES(tab_mutex);
61 static void tun_metadata_del_entry(struct tun_table *map, uint8_t idx)
62 OVS_REQUIRES(tab_mutex);
63 static void memcpy_to_metadata(struct tun_metadata *dst, const void *src,
64 const struct tun_metadata_loc *,
66 static void memcpy_from_metadata(void *dst, const struct tun_metadata *src,
67 const struct tun_metadata_loc *);
/* Packs a TLV option's (class, type) pair into a single integer hash-table
 * key: the 16-bit class occupies the high bits, the 8-bit type the low 8
 * bits.  The OVS_FORCE cast strips the big-endian annotation so the network-
 * order class bytes are used verbatim (consistently with tun_key_class()). */
70 tun_meta_key(ovs_be16 class, uint8_t type)
72 return (OVS_FORCE uint16_t)class << 8 | type;
/* Extracts the option class from a key built by tun_meta_key(), restoring
 * the ovs_be16 annotation that the cast above removed. */
76 tun_key_class(uint32_t key)
78 return (OVS_FORCE ovs_be16)(key >> 8);
/* Extracts the 8-bit option type from a key built by tun_meta_key().
 * (Body elided in this view; presumably returns key & 0xff.) */
82 tun_key_type(uint32_t key)
87 /* Returns a newly allocated tun_table. If 'old_map' is nonnull then the new
88 * tun_table is a deep copy of the old one. */
89 static struct tun_table *
90 table_alloc(const struct tun_table *old_map) OVS_REQUIRES(tab_mutex)
92 struct tun_table *new_map;
94 new_map = xzalloc(sizeof *new_map);
97 struct tun_meta_entry *entry;
100 hmap_init(&new_map->key_hmap);
102 HMAP_FOR_EACH (entry, node, &old_map->key_hmap) {
103 struct tun_meta_entry *new_entry;
104 struct tun_metadata_loc_chain *chain;
/* The entry array was copied wholesale; recompute this entry's
 * position in the new array from its offset in the old one, then
 * re-insert it into the new hmap reusing the cached hash. */
106 new_entry = &new_map->entries[entry - old_map->entries];
107 hmap_insert(&new_map->key_hmap, &new_entry->node, entry->node.hash);
/* Deep-copy the location chain: the first element is embedded in
 * the entry, so only the heap-allocated tail nodes are duplicated. */
109 chain = &new_entry->loc.c;
110 while (chain->next) {
111 chain->next = xmemdup(chain->next, sizeof *chain->next);
/* NOTE(review): this second hmap_init appears to be the no-old-map branch
 * (elided else arm) initializing an empty table -- confirm against the
 * full source. */
116 hmap_init(&new_map->key_hmap);
122 /* Frees 'map' and all the memory it owns. */
124 table_free(struct tun_table *map) OVS_REQUIRES(tab_mutex)
126 struct tun_meta_entry *entry;
/* Deleting each entry releases its allocation-bitmap space and any
 * heap-allocated location-chain tail nodes. */
132 HMAP_FOR_EACH (entry, node, &map->key_hmap) {
133 tun_metadata_del_entry(map, entry - map->entries);
136 hmap_destroy(&map->key_hmap);
140 /* Creates a global tunnel metadata mapping table, if none already exists. */
142 tun_metadata_init(void)
144 ovs_mutex_lock(&tab_mutex);
/* Check-then-create under tab_mutex so concurrent callers cannot both
 * install a table; publication is via RCU set. */
146 if (!ovsrcu_get_protected(struct tun_table *, &metadata_tab)) {
147 ovsrcu_set(&metadata_tab, table_alloc(NULL));
150 ovs_mutex_unlock(&tab_mutex);
/* Applies the TLV table modification described by 'ttm' (add/delete/clear
 * mappings; case labels elided in this view) to the global mapping table.
 * Uses copy-on-write: a new table is built, swapped in via RCU, and the old
 * table is freed after a grace period. */
154 tun_metadata_table_mod(struct ofputil_tlv_table_mod *ttm)
156 struct tun_table *old_map, *new_map;
157 struct ofputil_tlv_map *ofp_map;
160 ovs_mutex_lock(&tab_mutex);
162 old_map = ovsrcu_get_protected(struct tun_table *, &metadata_tab);
164 switch (ttm->command) {
/* Add mappings: start from a deep copy of the current table. */
166 new_map = table_alloc(old_map);
168 LIST_FOR_EACH (ofp_map, list_node, &ttm->mappings) {
169 err = tun_metadata_add_entry(new_map, ofp_map->index,
170 ofp_map->option_class,
171 ofp_map->option_type,
172 ofp_map->option_len);
/* Delete mappings: also works on a deep copy. */
181 new_map = table_alloc(old_map);
183 LIST_FOR_EACH (ofp_map, list_node, &ttm->mappings) {
184 tun_metadata_del_entry(new_map, ofp_map->index);
/* Clear: replace with a fresh, empty table. */
189 new_map = table_alloc(NULL);
/* Publish the new table; readers holding the old pointer remain valid
 * until the RCU grace period ends, then table_free() runs. */
196 ovsrcu_set(&metadata_tab, new_map);
197 ovsrcu_postpone(table_free, old_map);
200 ovs_mutex_unlock(&tab_mutex);
/* Fills 'ttr' with a description of the current global mapping table:
 * capacity limits plus one ofputil_tlv_map per allocated entry. */
205 tun_metadata_table_request(struct ofputil_tlv_table_reply *ttr)
207 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
210 ttr->max_option_space = TUN_METADATA_TOT_OPT_SIZE;
211 ttr->max_fields = TUN_METADATA_NUM_OPTS;
212 ovs_list_init(&ttr->mappings);
214 for (i = 0; i < TUN_METADATA_NUM_OPTS; i++) {
215 struct tun_meta_entry *entry = &map->entries[i];
/* NOTE(review): this inner 'map' shadows the outer tun_table 'map'
 * above; entry was already taken, so it is harmless but confusing. */
216 struct ofputil_tlv_map *map;
/* Caller owns the malloc'ed list elements pushed onto ttr->mappings. */
222 map = xmalloc(sizeof *map);
223 map->option_class = ntohs(tun_key_class(entry->key));
224 map->option_type = tun_key_type(entry->key);
225 map->option_len = entry->loc.len;
228 ovs_list_push_back(&ttr->mappings, &map->list_node);
232 /* Copies the value of field 'mf' from 'tnl' (which must be in non-UDPIF
 * format) into 'value'.
234 * 'mf' must be an MFF_TUN_METADATA* field.
236 * This uses the global tunnel metadata mapping table created by
237 * tun_metadata_init(). If no such table has been created or if 'mf' hasn't
238 * been allocated in it yet, this just zeros 'value'. */
240 tun_metadata_read(const struct flow_tnl *tnl,
241 const struct mf_field *mf, union mf_value *value)
243 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
244 unsigned int idx = mf->id - MFF_TUN_METADATA0;
245 struct tun_metadata_loc *loc;
/* No table or unallocated field: report all-zeros (elided guard above). */
248 memset(value->tun_metadata, 0, mf->n_bytes);
252 loc = &map->entries[idx].loc;
/* The option may be shorter than the OpenFlow field; zero-pad the front
 * and right-align the actual data within 'value'. */
254 memset(value->tun_metadata, 0, mf->n_bytes - loc->len);
255 memcpy_from_metadata(value->tun_metadata + mf->n_bytes - loc->len,
256 &tnl->metadata, loc);
259 /* Copies 'value' into field 'mf' in 'tnl' (in non-UDPIF format).
261 * 'mf' must be an MFF_TUN_METADATA* field.
263 * This uses the global tunnel metadata mapping table created by
264 * tun_metadata_init(). If no such table has been created or if 'mf' hasn't
265 * been allocated in it yet, this function does nothing. */
267 tun_metadata_write(struct flow_tnl *tnl,
268 const struct mf_field *mf, const union mf_value *value)
270 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
271 unsigned int idx = mf->id - MFF_TUN_METADATA0;
272 struct tun_metadata_loc *loc;
/* Silently ignore writes to fields without an allocated mapping. */
274 if (!map || !map->entries[idx].valid) {
278 loc = &map->entries[idx].loc;
/* Mirror of tun_metadata_read(): the value is right-aligned within the
 * OpenFlow field, so skip the leading padding bytes when copying out. */
279 memcpy_to_metadata(&tnl->metadata,
280 value->tun_metadata + mf->n_bytes - loc->len, loc, idx);
/* Returns the location for tun_metadata field 'idx' to use in 'match'.
 * Prefers an existing allocation in the global table 'map'; otherwise
 * allocates space in the match-embedded mapping table (linear allocation at
 * match->tun_md.alloc_offset).  On failure returns NULL (elided) and, if
 * 'err_str' is nonnull, stores a malloc'ed error message the caller frees. */
283 static const struct tun_metadata_loc *
284 metadata_loc_from_match(struct tun_table *map, struct match *match,
285 const char *name, unsigned int idx,
286 unsigned int field_len, bool masked, char **err_str)
288 ovs_assert(idx < TUN_METADATA_NUM_OPTS);
/* Global table already maps this index: reuse its location. */
295 if (map->entries[idx].valid) {
296 return &map->entries[idx].loc;
/* Match-local allocation would overflow the total option space. */
302 if (match->tun_md.alloc_offset + field_len > TUN_METADATA_TOT_OPT_SIZE) {
304 *err_str = xasprintf("field %s exceeds maximum size for tunnel "
305 "metadata (used %d, max %d)", name,
306 match->tun_md.alloc_offset + field_len,
307 TUN_METADATA_TOT_OPT_SIZE);
/* The present-map mask bit doubles as "already set" for this field. */
313 if (ULLONG_GET(match->wc.masks.tunnel.metadata.present.map, idx)) {
315 *err_str = xasprintf("field %s set multiple times", name);
/* Record a fresh single-segment location in the match-local table. */
321 match->tun_md.entry[idx].loc.len = field_len;
322 match->tun_md.entry[idx].loc.c.offset = match->tun_md.alloc_offset;
323 match->tun_md.entry[idx].loc.c.len = field_len;
324 match->tun_md.entry[idx].loc.c.next = NULL;
325 match->tun_md.entry[idx].masked = masked;
326 match->tun_md.alloc_offset += field_len;
327 match->tun_md.valid = true;
329 return &match->tun_md.entry[idx].loc;
332 /* Makes 'match' match 'value'/'mask' on field 'mf'.
334 * 'mf' must be an MFF_TUN_METADATA* field. 'match' must be in non-UDPIF
 * format.
336 * If there is global tunnel metadata matching table, this function is
337 * effective only if there is already a mapping for 'mf'. Otherwise, the
338 * metadata mapping table integrated into 'match' is used, adding 'mf' to its
339 * mapping table if it isn't already mapped (and if there is room). If 'mf'
340 * isn't or can't be mapped, this function returns without modifying 'match'.
342 * 'value' may be NULL; if so, then 'mf' is made to match on an all-zeros
345 * 'mask' may be NULL; if so, then 'mf' is made exact-match.
347 * If non-NULL, 'err_str' returns a malloc'ed string describing any errors
348 * with the request or NULL if there is no error. The caller is reponsible
349 * for freeing the string.
352 tun_metadata_set_match(const struct mf_field *mf, const union mf_value *value,
353 const union mf_value *mask, struct match *match,
356 struct tun_table *map = ovsrcu_get(struct tun_table *, &metadata_tab);
357 const struct tun_metadata_loc *loc;
358 unsigned int idx = mf->id - MFF_TUN_METADATA0;
359 unsigned int field_len;
361 unsigned int data_offset;
364 ovs_assert(!(match->flow.tunnel.flags & FLOW_TNL_F_UDPIF));
366 field_len = mf_field_len(mf, value, mask, &is_masked);
367 loc = metadata_loc_from_match(map, match, mf->name, idx, field_len,
/* Values are right-aligned in the OpenFlow field; skip leading padding. */
373 data_offset = mf->n_bytes - loc->len;
/* 'value' == NULL: match on all-zeros (elided branch structure). */
376 memset(data.tun_metadata, 0, loc->len);
378 memcpy(data.tun_metadata, value->tun_metadata + data_offset, loc->len);
/* Pre-mask the flow value so flow & mask invariants hold in 'match'. */
381 for (i = 0; i < loc->len; i++) {
382 data.tun_metadata[i] = value->tun_metadata[data_offset + i] &
383 mask->tun_metadata[data_offset + i];
386 memcpy_to_metadata(&match->flow.tunnel.metadata, data.tun_metadata,
/* Build the mask: all-zeros / all-ones / caller-supplied (elided arms). */
390 memset(data.tun_metadata, 0, loc->len);
392 memset(data.tun_metadata, 0xff, loc->len);
394 memcpy(data.tun_metadata, mask->tun_metadata + data_offset, loc->len);
396 memcpy_to_metadata(&match->wc.masks.tunnel.metadata, data.tun_metadata,
/* Normalizes 'flow' (and optionally 'mask') into parsed, non-UDPIF form in
 * 'flow_xlate'/'mask_xlate'.  UDPIF-format input is translated from raw
 * Geneve options; parsed input is copied through.  Returns false (elided)
 * when there is nothing to translate. */
401 udpif_to_parsed(const struct flow_tnl *flow, const struct flow_tnl *mask,
402 struct flow_tnl *flow_xlate, struct flow_tnl *mask_xlate)
404 if (flow->flags & FLOW_TNL_F_UDPIF) {
407 err = tun_metadata_from_geneve_udpif(flow, flow, flow_xlate);
/* Mask translation is guided by the already-translated flow. */
413 tun_metadata_from_geneve_udpif(flow, mask, mask_xlate);
419 if (flow->metadata.present.map == 0) {
420 /* There is no tunnel metadata, don't bother copying. */
424 memcpy(flow_xlate, flow, sizeof *flow_xlate);
426 memcpy(mask_xlate, mask, sizeof *mask_xlate);
/* Ensure the output references a mapping table, defaulting to the
 * current global table. */
429 if (!flow_xlate->metadata.tab) {
430 flow_xlate->metadata.tab = ovsrcu_get(struct tun_table *,
438 /* Copies all MFF_TUN_METADATA* fields from 'tnl' to 'flow_metadata'. */
440 tun_metadata_get_fmd(const struct flow_tnl *tnl, struct match *flow_metadata)
442 struct flow_tnl flow;
445 if (!udpif_to_parsed(tnl, NULL, &flow, NULL)) {
/* Walk each present option and re-home it into the match-embedded
 * mapping table of 'flow_metadata'. */
449 ULLONG_FOR_EACH_1 (i, flow.metadata.present.map) {
451 const struct tun_metadata_loc *old_loc = &flow.metadata.tab->entries[i].loc;
452 const struct tun_metadata_loc *new_loc;
/* NULL global map forces allocation in the match-local table. */
454 new_loc = metadata_loc_from_match(NULL, flow_metadata, NULL, i,
455 old_loc->len, false, NULL);
457 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, old_loc);
458 memcpy_to_metadata(&flow_metadata->flow.tunnel.metadata,
459 opts.tun_metadata, new_loc, i);
/* Exact-match: the mask for each copied option is all-ones. */
461 memset(opts.tun_metadata, 0xff, old_loc->len);
462 memcpy_to_metadata(&flow_metadata->wc.masks.tunnel.metadata,
463 opts.tun_metadata, new_loc, i);
/* Hashes a (class << 8 | type) key for key_hmap lookups. */
468 tun_meta_hash(uint32_t key)
470 return hash_int(key, 0);
/* Looks up the entry for 'key' in 'hmap', or NULL (elided) if absent. */
473 static struct tun_meta_entry *
474 tun_meta_find_key(const struct hmap *hmap, uint32_t key)
476 struct tun_meta_entry *entry;
478 HMAP_FOR_EACH_IN_BUCKET (entry, node, tun_meta_hash(key), hmap) {
479 if (entry->key == key) {
/* Copies option data from flat buffer 'src' into 'dst' at the (possibly
 * multi-segment) location 'loc', then marks option 'idx' present. */
487 memcpy_to_metadata(struct tun_metadata *dst, const void *src,
488 const struct tun_metadata_loc *loc, unsigned int idx)
490 const struct tun_metadata_loc_chain *chain = &loc->c;
/* Each chain segment names an offset/len inside dst->opts.u8; 'addr'
 * (elided accumulator) tracks progress through 'src'. */
494 memcpy(dst->opts.u8 + chain->offset, (uint8_t *)src + addr,
500 ULLONG_SET1(dst->present.map, idx);
/* Inverse of memcpy_to_metadata(): gathers the segments of 'loc' out of
 * 'src' into the flat buffer 'dst'. */
504 memcpy_from_metadata(void *dst, const struct tun_metadata *src,
505 const struct tun_metadata_loc *loc)
507 const struct tun_metadata_loc_chain *chain = &loc->c;
511 memcpy((uint8_t *)dst + addr, src->opts.u8 + chain->offset,
/* Allocates up to 'len' bytes of option storage from 'map''s 4-byte-granular
 * allocation bitmap, recording the result (offset and length actually
 * obtained) in 'loc'.  When no contiguous run of 'len' bytes exists, the
 * largest free run is taken instead, so callers may need to chain multiple
 * allocations. */
519 tun_metadata_alloc_chain(struct tun_table *map, uint8_t len,
520 struct tun_metadata_loc_chain *loc)
521 OVS_REQUIRES(tab_mutex)
523 int alloc_len = len / 4; /* Bitmap units are 4 bytes each. */
525 int scan_end = TUN_METADATA_TOT_OPT_SIZE / 4;
526 int pos_start, pos_end, pos_len;
527 int best_start = 0, best_len = 0;
/* Find the next free (0) run starting at scan_start. */
530 pos_start = bitmap_scan(map->alloc_map, 0, scan_start, scan_end);
531 if (pos_start == scan_end) {
/* Measure the run, capped at the requested length. */
535 pos_end = bitmap_scan(map->alloc_map, 1, pos_start,
536 MIN(pos_start + alloc_len, scan_end));
537 pos_len = pos_end - pos_start;
538 if (pos_len == alloc_len) {
/* Exact fit found; otherwise remember the best run so far. */
542 if (pos_len > best_len) {
543 best_start = pos_start;
546 scan_start = pos_end + 1;
553 pos_start = best_start;
/* Claim the chosen run and convert bitmap units back to bytes. */
557 bitmap_set_multiple(map->alloc_map, pos_start, pos_len, 1);
558 loc->offset = pos_start * 4;
559 loc->len = pos_len * 4;
/* Maps TUN_METADATA<idx> in 'map' to the TLV option ('opt_class', 'type') of
 * 'len' bytes.  Returns 0 on success or an OFPERR_NXTTMFC_* error:
 * ALREADY_MAPPED if 'idx' is taken, DUP_ENTRY if the class/type pair is
 * already mapped, TABLE_FULL if option space is exhausted. */
565 tun_metadata_add_entry(struct tun_table *map, uint8_t idx, uint16_t opt_class,
566 uint8_t type, uint8_t len) OVS_REQUIRES(tab_mutex)
568 struct tun_meta_entry *entry;
569 struct tun_metadata_loc_chain *cur_chain, *prev_chain;
571 ovs_assert(idx < TUN_METADATA_NUM_OPTS);
573 entry = &map->entries[idx];
575 return OFPERR_NXTTMFC_ALREADY_MAPPED;
578 entry->key = tun_meta_key(htons(opt_class), type);
579 if (tun_meta_find_key(&map->key_hmap, entry->key)) {
580 return OFPERR_NXTTMFC_DUP_ENTRY;
584 hmap_insert(&map->key_hmap, &entry->node,
585 tun_meta_hash(entry->key));
/* First chain segment is embedded in the entry; extra segments are
 * heap-allocated below when space is fragmented. */
587 entry->loc.len = len;
588 cur_chain = &entry->loc.c;
589 memset(cur_chain, 0, sizeof *cur_chain);
596 cur_chain = xzalloc(sizeof *cur_chain);
597 prev_chain->next = cur_chain;
/* Allocate as much as possible per segment; on failure undo the
 * partially-built entry before reporting the table full. */
600 err = tun_metadata_alloc_chain(map, len, cur_chain);
602 tun_metadata_del_entry(map, idx);
603 return OFPERR_NXTTMFC_TABLE_FULL;
606 len -= cur_chain->len;
608 prev_chain = cur_chain;
/* Removes the mapping for TUN_METADATA<idx> from 'map': frees its bitmap
 * space and heap-allocated chain segments, unlinks it from key_hmap, and
 * clears the entry.  A no-op for out-of-range 'idx'. */
616 tun_metadata_del_entry(struct tun_table *map, uint8_t idx)
617 OVS_REQUIRES(tab_mutex)
619 struct tun_meta_entry *entry;
620 struct tun_metadata_loc_chain *chain;
622 if (idx >= TUN_METADATA_NUM_OPTS) {
626 entry = &map->entries[idx];
631 chain = &entry->loc.c;
633 struct tun_metadata_loc_chain *next = chain->next;
/* Release this segment's bitmap space (bitmap units are 4 bytes). */
635 bitmap_set_multiple(map->alloc_map, chain->offset / 4,
/* Only tail segments were heap-allocated; the head is embedded. */
637 if (chain != &entry->loc.c) {
643 entry->valid = false;
644 hmap_remove(&map->key_hmap, &entry->node);
645 memset(&entry->loc, 0, sizeof entry->loc);
/* Translates a stream of raw Geneve options ('opt', guided by the flow's
 * options 'flow_opt' of total length 'opts_len') into mapped form in
 * 'metadata'.  For masks ('opt' != 'flow_opt') the flow's own table is used;
 * for flows the global table is consulted. */
649 tun_metadata_from_geneve__(const struct tun_metadata *flow_metadata,
650 const struct geneve_opt *opt,
651 const struct geneve_opt *flow_opt, int opts_len,
652 struct tun_metadata *metadata)
654 struct tun_table *map;
655 bool is_mask = flow_opt != opt;
658 map = ovsrcu_get(struct tun_table *, &metadata_tab);
661 map = flow_metadata->tab;
668 while (opts_len > 0) {
670 struct tun_meta_entry *entry;
/* Truncated header: remaining bytes cannot hold a geneve_opt. */
672 if (opts_len < sizeof(*opt)) {
676 len = sizeof(*opt) + flow_opt->length * 4;
677 if (len > opts_len) {
/* Option class/type always comes from the flow copy, so masks are
 * interpreted against the same option layout as the flow. */
681 entry = tun_meta_find_key(&map->key_hmap,
682 tun_meta_key(flow_opt->opt_class,
685 if (entry->loc.len == flow_opt->length * 4) {
686 memcpy_to_metadata(metadata, opt + 1, &entry->loc,
687 entry - map->entries);
/* Unknown critical options cannot be ignored (elided error path). */
691 } else if (flow_opt->type & GENEVE_CRIT_OPT_TYPE) {
/* Advance both cursors by the whole option (header + payload). */
695 opt = opt + len / sizeof(*opt);
696 flow_opt = flow_opt + len / sizeof(*opt);
/* Returns the OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS attribute nested inside the
 * OVS_KEY_ATTR_TUNNEL attribute of the netlink key 'key'/'key_len', or NULL
 * (elided) if either attribute is absent. */
703 static const struct nlattr *
704 tun_metadata_find_geneve_key(const struct nlattr *key, uint32_t key_len)
706 const struct nlattr *tnl_key;
708 tnl_key = nl_attr_find__(key, key_len, OVS_KEY_ATTR_TUNNEL);
713 return nl_attr_find_nested(tnl_key, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
716 /* Converts from Geneve netlink attributes in 'attr' to tunnel metadata
717 * in 'tun'. The result may either in be UDPIF format or not, as determined
720 * In the event that a mask is being converted, it is also necessary to
721 * pass in flow information. This includes the full set of netlink attributes
722 * (i.e. not just the Geneve attribute) in 'flow_attrs'/'flow_attr_len' and
723 * the previously converted tunnel metadata 'flow_tun'.
725 * If a flow rather than mask is being converted, 'flow_attrs' must be NULL. */
727 tun_metadata_from_geneve_nlattr(const struct nlattr *attr,
728 const struct nlattr *flow_attrs,
729 size_t flow_attr_len,
730 const struct flow_tnl *flow_tun, bool udpif,
731 struct flow_tnl *tun)
733 bool is_mask = !!flow_attrs;
734 int attr_len = nl_attr_get_size(attr);
735 const struct nlattr *flow;
737 /* No need for real translation, just copy things over. */
739 memcpy(tun->metadata.opts.gnv, nl_attr_get(attr), attr_len);
742 tun->metadata.present.len = attr_len;
743 tun->flags |= FLOW_TNL_F_UDPIF;
745 /* We need to exact match on the length so we don't
746 * accidentally match on sets of options that are the same
747 * at the beginning but with additional options after. */
748 tun->metadata.present.len = 0xff;
/* Mask path: locate the flow's Geneve attribute to interpret against. */
755 flow = tun_metadata_find_geneve_key(flow_attrs, flow_attr_len);
/* A mask with options but no flow options is invalid. */
757 return attr_len ? EINVAL : 0;
/* Flow and mask option streams must have identical length. */
760 if (attr_len != nl_attr_get_size(flow)) {
767 return tun_metadata_from_geneve__(&flow_tun->metadata, nl_attr_get(attr),
768 nl_attr_get(flow), nl_attr_get_size(flow),
772 /* Converts from the flat Geneve options representation extracted directly
773 * from the tunnel header to the representation that maps options to
774 * pre-allocated locations. The original version (in UDPIF form) is passed
775 * in 'src' and the translated form in stored in 'dst'. To handle masks, the
776 * flow must also be passed in through 'flow' (in the original, raw form). */
778 tun_metadata_from_geneve_udpif(const struct flow_tnl *flow,
779 const struct flow_tnl *src,
780 struct flow_tnl *dst)
782 ovs_assert(flow->flags & FLOW_TNL_F_UDPIF);
/* Output is parsed form: clear the UDPIF flag and start with an empty,
 * table-less metadata block. */
785 dst->flags = flow->flags & ~FLOW_TNL_F_UDPIF;
787 dst->metadata.tab = NULL;
789 dst->metadata.present.map = 0;
790 return tun_metadata_from_geneve__(&flow->metadata, src->metadata.opts.gnv,
791 flow->metadata.opts.gnv,
792 flow->metadata.present.len,
/* Serializes every present option in 'flow' (mapped form) as raw Geneve
 * option TLVs appended to 'b', setting '*crit_opt' if any emitted option has
 * the critical bit. */
797 tun_metadata_to_geneve__(const struct tun_metadata *flow, struct ofpbuf *b,
800 struct tun_table *map;
805 map = ovsrcu_get(struct tun_table *, &metadata_tab);
810 ULLONG_FOR_EACH_1 (i, flow->present.map) {
811 struct tun_meta_entry *entry = &map->entries[i];
812 struct geneve_opt *opt;
/* Emit header + payload; length field is in 4-byte units. */
814 opt = ofpbuf_put_uninit(b, sizeof *opt + entry->loc.len);
816 opt->opt_class = tun_key_class(entry->key);
817 opt->type = tun_key_type(entry->key);
818 opt->length = entry->loc.len / 4;
823 memcpy_from_metadata(opt + 1, flow, &entry->loc);
824 *crit_opt |= !!(opt->type & GENEVE_CRIT_OPT_TYPE);
/* Appends the flow's tunnel metadata to 'b' as a nested
 * OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS netlink attribute; emits nothing when no
 * options are present. */
829 tun_metadata_to_geneve_nlattr_flow(const struct flow_tnl *flow,
832 size_t nlattr_offset;
835 if (!flow->metadata.present.map) {
839 /* For all intents and purposes, the Geneve options are nested
840 * attributes even if this doesn't show up directly to netlink. It's
841 * similar enough that we can use the same mechanism. */
842 nlattr_offset = nl_msg_start_nested(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS);
844 tun_metadata_to_geneve__(&flow->metadata, b, &crit_opt);
846 nl_msg_end_nested(b, nlattr_offset);
849 /* Converts from processed tunnel metadata information (in non-udpif
850 * format) in 'flow' to a stream of Geneve options suitable for
851 * transmission in 'opts'. Additionally returns whether there were
852 * any critical options in 'crit_opt' as well as the total length of
855 tun_metadata_to_geneve_header(const struct flow_tnl *flow,
856 struct geneve_opt *opts, bool *crit_opt)
860 ovs_assert(!(flow->flags & FLOW_TNL_F_UDPIF));
/* Serialize directly into the caller's buffer via a stack ofpbuf;
 * 'opts' must therefore hold at least TLV_TOT_OPT_SIZE bytes. */
862 ofpbuf_use_stack(&b, opts, TLV_TOT_OPT_SIZE);
863 tun_metadata_to_geneve__(&flow->metadata, &b, crit_opt);
/* Rewrites the raw option stream 'opt'/'opts_len' (a copy of the flow's
 * options) in place so each option's payload holds the corresponding mask
 * bytes from 'mask', using the flow's mapping table to locate them. */
869 tun_metadata_to_geneve_mask__(const struct tun_metadata *flow,
870 const struct tun_metadata *mask,
871 struct geneve_opt *opt, int opts_len)
873 struct tun_table *map = flow->tab;
879 /* All of these options have already been validated, so no need
880 * for sanity checking. */
881 while (opts_len > 0) {
882 struct tun_meta_entry *entry;
883 int len = sizeof(*opt) + opt->length * 4;
885 entry = tun_meta_find_key(&map->key_hmap,
886 tun_meta_key(opt->opt_class, opt->type));
/* Mapped option: emit its mask bytes; unmapped: wildcard payload. */
888 memcpy_from_metadata(opt + 1, mask, &entry->loc);
890 memset(opt + 1, 0, opt->length * 4);
/* Option headers are always exact-matched. */
893 opt->opt_class = htons(0xffff);
900 opt = opt + len / sizeof(*opt);
/* Appends to 'b' a Geneve-options mask attribute that mirrors the flow's
 * attribute layout (found in the netlink key 'key'), with each payload
 * rewritten to the mask bytes from 'mask'. */
906 tun_metadata_to_geneve_nlattr_mask(const struct ofpbuf *key,
907 const struct flow_tnl *mask,
908 const struct flow_tnl *flow,
911 const struct nlattr *geneve_key;
912 struct nlattr *geneve_mask;
913 struct geneve_opt *opt;
920 geneve_key = tun_metadata_find_geneve_key(key->data, key->size);
/* Copy the flow's attribute verbatim, then edit its payloads in place. */
925 geneve_mask = ofpbuf_tail(b);
926 nl_msg_put(b, geneve_key, geneve_key->nla_len);
928 opt = CONST_CAST(struct geneve_opt *, nl_attr_get(geneve_mask));
929 opts_len = nl_attr_get_size(geneve_mask);
931 tun_metadata_to_geneve_mask__(&flow->metadata, &mask->metadata,
935 /* Convert from the tunnel metadata in 'tun' to netlink attributes stored
936 * in 'b'. Either UDPIF or non-UDPIF input forms are accepted.
938 * To assist with parsing, it is necessary to also pass in the tunnel metadata
939 * from the flow in 'flow' as well in the original netlink form of the flow in
942 tun_metadata_to_geneve_nlattr(const struct flow_tnl *tun,
943 const struct flow_tnl *flow,
944 const struct ofpbuf *key,
/* 'tun' != 'flow' means we are serializing a mask. */
947 bool is_mask = tun != flow;
949 if (!(flow->flags & FLOW_TNL_F_UDPIF)) {
951 tun_metadata_to_geneve_nlattr_flow(tun, b);
953 tun_metadata_to_geneve_nlattr_mask(key, tun, flow, b);
/* UDPIF form: raw option bytes are copied straight through.  Masks are
 * always emitted so zero-length flows still get an explicit mask. */
955 } else if (flow->metadata.present.len || is_mask) {
956 nl_msg_put_unspec(b, OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS,
957 tun->metadata.opts.gnv,
958 flow->metadata.present.len);
962 /* Converts 'mask_src' (in non-UDPIF format) to a series of masked options in
963 * 'dst'. 'flow_src' (also in non-UDPIF format) and the original set of
964 * options 'flow_src_opt'/'opts_len' are needed as a guide to interpret the
967 tun_metadata_to_geneve_udpif_mask(const struct flow_tnl *flow_src,
968 const struct flow_tnl *mask_src,
969 const struct geneve_opt *flow_src_opt,
970 int opts_len, struct geneve_opt *dst)
972 ovs_assert(!(flow_src->flags & FLOW_TNL_F_UDPIF));
/* Start from the flow's raw options, then overwrite payloads with mask
 * bytes in place. */
974 memcpy(dst, flow_src_opt, opts_len);
975 tun_metadata_to_geneve_mask__(&flow_src->metadata,
976 &mask_src->metadata, dst, opts_len);
/* Returns the location of tun_metadata field 'idx' for reading from 'match',
 * preferring the match-embedded table when valid, else the table 'map'.
 * Sets '*is_masked' to whether the field is matched with a non-exact mask. */
979 static const struct tun_metadata_loc *
980 metadata_loc_from_match_read(struct tun_table *map, const struct match *match,
981 unsigned int idx, struct flow_tnl *mask,
984 union mf_value mask_opts;
986 if (match->tun_md.valid) {
987 *is_masked = match->tun_md.entry[idx].masked;
988 return &match->tun_md.entry[idx].loc;
/* Global-table path: inspect the stored mask bytes to decide whether
 * the field is exact-match (all-ones) or partially masked. */
991 memcpy_from_metadata(mask_opts.tun_metadata, &mask->metadata,
992 &map->entries[idx].loc);
994 *is_masked = map->entries[idx].loc.len == 0 ||
995 !is_all_ones(mask_opts.tun_metadata,
996 map->entries[idx].loc.len);
997 return &map->entries[idx].loc;
/* Appends NXM/OXM encodings of every tun_metadata field matched by 'match'
 * to 'b', for OpenFlow version 'oxm'. */
1001 tun_metadata_to_nx_match(struct ofpbuf *b, enum ofp_version oxm,
1002 const struct match *match)
1004 struct flow_tnl flow, mask;
/* Normalize to parsed (non-UDPIF) form before reading fields. */
1007 if (!udpif_to_parsed(&match->flow.tunnel, &match->wc.masks.tunnel,
/* Iterate the mask's present map so not-present-but-masked fields are
 * also encoded. */
1012 ULLONG_FOR_EACH_1 (i, mask.metadata.present.map) {
1013 const struct tun_metadata_loc *loc;
1015 union mf_value opts;
1016 union mf_value mask_opts;
1018 loc = metadata_loc_from_match_read(flow.metadata.tab, match, i,
1020 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, loc);
1021 memcpy_from_metadata(mask_opts.tun_metadata, &mask.metadata, loc);
/* NULL mask pointer requests exact-match encoding in nxm_put__(). */
1022 nxm_put__(b, MFF_TUN_METADATA0 + i, oxm, opts.tun_metadata,
1023 is_masked ? mask_opts.tun_metadata : NULL, loc->len);
1028 tun_metadata_match_format(struct ds *s, const struct match *match)
1030 struct flow_tnl flow, mask;
1033 if (!udpif_to_parsed(&match->flow.tunnel, &match->wc.masks.tunnel,
1038 ULLONG_FOR_EACH_1 (i, mask.metadata.present.map) {
1039 const struct tun_metadata_loc *loc;
1041 union mf_value opts, mask_opts;
1043 loc = metadata_loc_from_match_read(flow.metadata.tab, match, i,
1046 ds_put_format(s, "tun_metadata%u", i);
1047 memcpy_from_metadata(mask_opts.tun_metadata, &mask.metadata, loc);
1049 if (!ULLONG_GET(flow.metadata.present.map, i)) {
1050 /* Indicate that we are matching on the field being not present. */
1051 ds_put_cstr(s, "=NP");
1052 } else if (!(is_masked &&
1053 is_all_zeros(mask_opts.tun_metadata, loc->len))) {
1054 ds_put_char(s, '=');
1056 memcpy_from_metadata(opts.tun_metadata, &flow.metadata, loc);
1057 ds_put_hex(s, opts.tun_metadata, loc->len);
1059 if (!is_all_ones(mask_opts.tun_metadata, loc->len)) {
1060 ds_put_char(s, '/');
1061 ds_put_hex(s, mask_opts.tun_metadata, loc->len);
1064 ds_put_char(s, ',');