2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "classifier.h"
19 #include "classifier-private.h"
21 #include <netinet/in.h>
22 #include "byte-order.h"
23 #include "dynamic-string.h"
30 VLOG_DEFINE_THIS_MODULE(classifier);
34 /* Ports trie depends on both ports sharing the same ovs_be32. */
35 #define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
36 BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
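/* Because tp_src and tp_dst are adjacent 16-bit fields packed into a single
 * 32-bit word of struct flow, both ports can be read and masked with one
 * ovs_be32 access at this offset, as minimatch_get_ports() and find_match_wc()
 * do below.  Illustrative sketch (assumes a 'flow' pointer in scope):
 *
 *     ovs_be32 both_ports = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32];
 */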
38 static struct cls_match *
39 cls_match_alloc(struct cls_rule *rule)
41 int count = count_1bits(rule->match.flow.map);
43 struct cls_match *cls_match
44 = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
45 + MINIFLOW_VALUES_SIZE(count));
47 rculist_init(&cls_match->list);
48 *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
49 *CONST_CAST(int *, &cls_match->priority) = rule->priority;
50 miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
51 &rule->match.flow, count);
56 static struct cls_subtable *find_subtable(const struct classifier *cls,
57 const struct minimask *);
58 static struct cls_subtable *insert_subtable(struct classifier *cls,
59 const struct minimask *)
60 OVS_REQUIRES(cls->mutex);
61 static void destroy_subtable(struct classifier *cls, struct cls_subtable *)
62 OVS_REQUIRES(cls->mutex);
64 static const struct cls_match *find_match_wc(const struct cls_subtable *,
68 struct flow_wildcards *);
69 static struct cls_match *find_equal(const struct cls_subtable *,
70 const struct miniflow *, uint32_t hash);
72 static inline const struct cls_match *
73 next_rule_in_list__(const struct cls_match *rule)
75 const struct cls_match *next = NULL;
76 next = OBJECT_CONTAINING(rculist_next(&rule->list), next, list);
80 static inline const struct cls_match *
81 next_rule_in_list(const struct cls_match *rule)
83 const struct cls_match *next = next_rule_in_list__(rule);
84 return next->priority < rule->priority ? next : NULL;
87 static inline struct cls_match *
88 next_rule_in_list_protected__(struct cls_match *rule)
90 struct cls_match *next = NULL;
91 next = OBJECT_CONTAINING(rculist_next_protected(&rule->list), next, list);
95 static inline struct cls_match *
96 next_rule_in_list_protected(struct cls_match *rule)
98 struct cls_match *next = next_rule_in_list_protected__(rule);
99 return next->priority < rule->priority ? next : NULL;
102 /* Iterates RULE over HEAD and all of the cls_rules on HEAD->list.
103 * Classifier's mutex must be held while iterating, as the list is
104 * protected by it. */
105 #define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
106 for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
107 #define FOR_EACH_RULE_IN_LIST_PROTECTED(RULE, HEAD) \
108 for ((RULE) = (HEAD); (RULE) != NULL; \
109 (RULE) = next_rule_in_list_protected(RULE))
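/* Illustrative sketch (not from the original source) of walking a list of
 * rules that are identical except for priority.  Assumes the classifier mutex
 * is held, 'head' is a head rule taken from a subtable's 'rules' cmap, and
 * 'min_priority' is a hypothetical cutoff:
 *
 *     struct cls_match *rule;
 *
 *     FOR_EACH_RULE_IN_LIST_PROTECTED (rule, head) {
 *         if (rule->priority < min_priority) {
 *             break;        // The list is in descending priority order.
 *         }
 *         // ... examine 'rule' ...
 *     }
 */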
111 static unsigned int minimask_get_prefix_len(const struct minimask *,
112 const struct mf_field *);
113 static void trie_init(struct classifier *cls, int trie_idx,
114 const struct mf_field *)
115 OVS_REQUIRES(cls->mutex);
116 static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
117 union mf_value *plens);
118 static unsigned int trie_lookup_value(const rcu_trie_ptr *,
119 const ovs_be32 value[], ovs_be32 plens[],
120 unsigned int value_bits);
121 static void trie_destroy(rcu_trie_ptr *);
122 static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
123 static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
125 static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
126 static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
128 static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
129 unsigned int n_bits);
130 static bool mask_prefix_bits_set(const struct flow_wildcards *,
131 uint8_t be32ofs, unsigned int n_bits);
135 /* Initializes 'rule' to match packets specified by 'match' at the given
136 * 'priority'. 'match' must satisfy the invariant described in the comment at
137 * the definition of struct match.
139 * The caller must eventually destroy 'rule' with cls_rule_destroy().
141 * Clients should not use priority INT_MIN. (OpenFlow uses priorities between
142 * 0 and UINT16_MAX, inclusive.) */
144 cls_rule_init(struct cls_rule *rule, const struct match *match, int priority)
146 minimatch_init(&rule->match, match);
147 rule->priority = priority;
148 rule->cls_match = NULL;
151 /* Same as cls_rule_init() for initialization from a "struct minimatch". */
153 cls_rule_init_from_minimatch(struct cls_rule *rule,
154 const struct minimatch *match, int priority)
156 minimatch_clone(&rule->match, match);
157 rule->priority = priority;
158 rule->cls_match = NULL;
161 /* Initializes 'dst' as a copy of 'src'.
163 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
165 cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
167 minimatch_clone(&dst->match, &src->match);
168 dst->priority = src->priority;
169 dst->cls_match = NULL;
172 /* Initializes 'dst' with the data in 'src', destroying 'src'.
174 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
176 cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
178 minimatch_move(&dst->match, &src->match);
179 dst->priority = src->priority;
180 dst->cls_match = NULL;
183 /* Frees memory referenced by 'rule'. Doesn't free 'rule' itself (it's
184 * normally embedded into a larger structure).
186 * ('rule' must not currently be in a classifier.) */
188 cls_rule_destroy(struct cls_rule *rule)
190 ovs_assert(!rule->cls_match);
191 minimatch_destroy(&rule->match);
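/* Illustrative sketch (hypothetical caller) of a cls_rule's life cycle: the
 * rule must stay in memory while it is in the classifier and may only be
 * destroyed once it has been removed and no RCU reader can still reference it:
 *
 *     struct cls_rule rule;
 *
 *     cls_rule_init(&rule, &match, priority);   // 'match', 'priority' supplied
 *     classifier_insert(cls, &rule);            // by the caller
 *     ...
 *     classifier_remove(cls, &rule);
 *     ovsrcu_synchronize();                     // or defer via ovsrcu_postpone()
 *     cls_rule_destroy(&rule);
 */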
194 /* Returns true if 'a' and 'b' match the same packets at the same priority,
195 * false if they differ in some way. */
197 cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
199 return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
202 /* Returns a hash value for 'rule', folding in 'basis'. */
204 cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
206 return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
209 /* Appends a string describing 'rule' to 's'. */
211 cls_rule_format(const struct cls_rule *rule, struct ds *s)
213 minimatch_format(&rule->match, s, rule->priority);
216 /* Returns true if 'rule' matches every packet, false otherwise. */
218 cls_rule_is_catchall(const struct cls_rule *rule)
220 return minimask_is_catchall(&rule->match.mask);
223 /* Initializes 'cls' as a classifier that initially contains no classification
226 classifier_init(struct classifier *cls, const uint8_t *flow_segments)
227 OVS_EXCLUDED(cls->mutex)
229 ovs_mutex_init(&cls->mutex);
230 ovs_mutex_lock(&cls->mutex);
232 cmap_init(&cls->subtables_map);
233 pvector_init(&cls->subtables);
234 cmap_init(&cls->partitions);
235 cls->n_flow_segments = 0;
237 while (cls->n_flow_segments < CLS_MAX_INDICES
238 && *flow_segments < FLOW_U32S) {
239 cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
243 for (int i = 0; i < CLS_MAX_TRIES; i++) {
244 trie_init(cls, i, NULL);
246 ovs_mutex_unlock(&cls->mutex);
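/* Illustrative sketch (hypothetical caller).  'flow_segments' may be NULL for
 * no segmented lookup indices; otherwise it lists u32 offsets into struct flow
 * in increasing order, with a value >= FLOW_U32S ending the list:
 *
 *     struct classifier cls;
 *
 *     classifier_init(&cls, NULL);
 *     ...
 *     classifier_destroy(&cls);
 */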
249 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
250 * caller's responsibility.
251 * May only be called after all the readers have been terminated. */
253 classifier_destroy(struct classifier *cls)
254 OVS_EXCLUDED(cls->mutex)
257 struct cls_partition *partition;
258 struct cls_subtable *subtable;
261 ovs_mutex_lock(&cls->mutex);
262 for (i = 0; i < cls->n_tries; i++) {
263 trie_destroy(&cls->tries[i].root);
266 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
267 destroy_subtable(cls, subtable);
269 cmap_destroy(&cls->subtables_map);
271 CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
272 ovsrcu_postpone(free, partition);
274 cmap_destroy(&cls->partitions);
276 pvector_destroy(&cls->subtables);
277 ovs_mutex_unlock(&cls->mutex);
278 ovs_mutex_destroy(&cls->mutex);
282 /* Set the fields for which prefix lookup should be performed. */
284 classifier_set_prefix_fields(struct classifier *cls,
285 const enum mf_field_id *trie_fields,
286 unsigned int n_fields)
287 OVS_EXCLUDED(cls->mutex)
289 const struct mf_field * new_fields[CLS_MAX_TRIES];
290 struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
292 bool changed = false;
294 ovs_mutex_lock(&cls->mutex);
295 for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
296 const struct mf_field *field = mf_from_id(trie_fields[i]);
297 if (field->flow_be32ofs < 0 || field->n_bits % 32) {
298 /* Incompatible field. This is the only place where we
299 * enforce these requirements, but the rest of the trie code
300 * depends on the flow_be32ofs to be non-negative and the
301 * field length to be a multiple of 32 bits. */
305 if (bitmap_is_set(fields.bm, trie_fields[i])) {
306 /* Duplicate field, there is no need to build more than
307 * one index for any one field. */
310 bitmap_set1(fields.bm, trie_fields[i]);
312 new_fields[n_tries] = NULL;
313 if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
314 new_fields[n_tries] = field;
320 if (changed || n_tries < cls->n_tries) {
321 struct cls_subtable *subtable;
323 /* Trie configuration needs to change. Disable trie lookups
324 * for the tries that are changing and wait for all the current readers
325 * using the old configuration to be done. */
327 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
328 for (i = 0; i < cls->n_tries; i++) {
329 if ((i < n_tries && new_fields[i]) || i >= n_tries) {
330 if (subtable->trie_plen[i]) {
331 subtable->trie_plen[i] = 0;
337 /* Synchronize if any readers were using tries. The readers may
338 * temporarily function without the trie lookup based optimizations. */
340 /* ovsrcu_synchronize() functions as a memory barrier, so it does
341 * not matter that subtable->trie_plen is not atomic. */
342 ovsrcu_synchronize();
345 /* Now set up the tries. */
346 for (i = 0; i < n_tries; i++) {
348 trie_init(cls, i, new_fields[i]);
351 /* Destroy the rest, if any. */
352 for (; i < cls->n_tries; i++) {
353 trie_init(cls, i, NULL);
356 cls->n_tries = n_tries;
357 ovs_mutex_unlock(&cls->mutex);
361 ovs_mutex_unlock(&cls->mutex);
362 return false; /* No change. */
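/* Illustrative sketch (hypothetical caller) of enabling prefix tries for the
 * IPv4 destination and source address fields.  This is typically done at flow
 * table configuration time, since reconfiguring tries may call
 * ovsrcu_synchronize():
 *
 *     const enum mf_field_id trie_fields[] = { MFF_IPV4_DST, MFF_IPV4_SRC };
 *
 *     classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
 */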
366 trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
367 OVS_REQUIRES(cls->mutex)
369 struct cls_trie *trie = &cls->tries[trie_idx];
370 struct cls_subtable *subtable;
372 if (trie_idx < cls->n_tries) {
373 trie_destroy(&trie->root);
375 ovsrcu_set_hidden(&trie->root, NULL);
379 /* Add existing rules to the new trie. */
380 CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
383 plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
385 struct cls_match *head;
387 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
388 trie_insert(trie, head->cls_rule, plen);
391 /* Initialize subtable's prefix length on this field. This will
392 * allow readers to use the trie. */
393 atomic_thread_fence(memory_order_release);
394 subtable->trie_plen[trie_idx] = plen;
398 /* Returns true if 'cls' contains no classification rules, false otherwise.
399 * Checking the cmap requires no locking. */
401 classifier_is_empty(const struct classifier *cls)
403 return cmap_is_empty(&cls->subtables_map);
406 /* Returns the number of rules in 'cls'. */
408 classifier_count(const struct classifier *cls)
409 OVS_NO_THREAD_SAFETY_ANALYSIS
411 /* n_rules is an int, so in the presence of concurrent writers this will
412 * return either the old or a new value. */
417 hash_metadata(ovs_be64 metadata_)
419 uint64_t metadata = (OVS_FORCE uint64_t) metadata_;
420 return hash_uint64(metadata);
423 static struct cls_partition *
424 find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
426 struct cls_partition *partition;
428 CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
429 if (partition->metadata == metadata) {
437 static struct cls_partition *
438 create_partition(struct classifier *cls, struct cls_subtable *subtable,
440 OVS_REQUIRES(cls->mutex)
442 uint32_t hash = hash_metadata(metadata);
443 struct cls_partition *partition = find_partition(cls, metadata, hash);
445 partition = xmalloc(sizeof *partition);
446 partition->metadata = metadata;
448 tag_tracker_init(&partition->tracker);
449 cmap_insert(&cls->partitions, &partition->cmap_node, hash);
451 tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
455 static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
457 /* Could optimize to use the same map if needed for fast path. */
458 return MINIFLOW_GET_BE32(&match->flow, tp_src)
459 & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
463 subtable_replace_head_rule(struct classifier *cls OVS_UNUSED,
464 struct cls_subtable *subtable,
465 struct cls_match *head, struct cls_match *new,
466 uint32_t hash, uint32_t ihash[CLS_MAX_INDICES])
467 OVS_REQUIRES(cls->mutex)
469 /* Rule's data is already in the tries. */
471 new->partition = head->partition; /* Steal partition, if any. */
472 head->partition = NULL;
474 for (int i = 0; i < subtable->n_indices; i++) {
475 cmap_replace(&subtable->indices[i], &head->index_nodes[i],
476 &new->index_nodes[i], ihash[i]);
478 cmap_replace(&subtable->rules, &head->cmap_node, &new->cmap_node, hash);
481 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
482 * must not modify or free it.
484 * If 'cls' already contains an identical rule (including wildcards, values of
485 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
486 * rule that was replaced. The caller takes ownership of the returned rule and
487 * is thus responsible for destroying it with cls_rule_destroy(), after RCU
488 * grace period has passed (see ovsrcu_postpone()).
490 * Returns NULL if 'cls' does not contain a rule with an identical key, after
491 * inserting the new rule. In this case, no rules are displaced by the new
492 * rule, even rules that cannot have any effect because the new rule matches a
493 * superset of their flows and has higher priority.
495 const struct cls_rule *
496 classifier_replace(struct classifier *cls, struct cls_rule *rule)
497 OVS_EXCLUDED(cls->mutex)
499 struct cls_match *new = cls_match_alloc(rule);
500 struct cls_subtable *subtable;
501 uint32_t ihash[CLS_MAX_INDICES];
502 uint8_t prev_be32ofs = 0;
503 struct cls_match *head;
509 ovs_mutex_lock(&cls->mutex);
510 rule->cls_match = new;
512 subtable = find_subtable(cls, &rule->match.mask);
514 subtable = insert_subtable(cls, &rule->match.mask);
517 /* Compute hashes in segments. */
519 for (i = 0; i < subtable->n_indices; i++) {
520 ihash[i] = minimatch_hash_range(&rule->match, prev_be32ofs,
521 subtable->index_ofs[i], &basis);
522 prev_be32ofs = subtable->index_ofs[i];
524 hash = minimatch_hash_range(&rule->match, prev_be32ofs, FLOW_U32S, &basis);
526 head = find_equal(subtable, &rule->match.flow, hash);
528 /* Add rule to tries.
530 * Concurrent readers might miss seeing the rule until this update,
531 * which might require being fixed up by revalidation later. */
532 for (i = 0; i < cls->n_tries; i++) {
533 if (subtable->trie_plen[i]) {
534 trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
538 /* Add rule to ports trie. */
539 if (subtable->ports_mask_len) {
540 /* We mask the value to be inserted to always have the wildcarded
541 * bits in known (zero) state, so we can include them in comparison
542 * and they will always match (== their original value does not
544 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
546 trie_insert_prefix(&subtable->ports_trie, &masked_ports,
547 subtable->ports_mask_len);
550 /* Add rule to partitions.
552 * Concurrent readers might miss seeing the rule until this update,
553 * which might require being fixed up by revalidation later. */
554 new->partition = NULL;
555 if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
556 ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
558 new->partition = create_partition(cls, subtable, metadata);
561 /* Make rule visible to lookups. */
563 /* Add new node to segment indices.
565 * Readers may find the rule in the indices before the rule is visible
566 * in the subtable's 'rules' map. This may result in us losing the
567 * opportunity to quit lookups earlier, resulting in sub-optimal
568 * wildcarding. This will be fixed later by revalidation (always
569 * scheduled after flow table changes). */
570 for (i = 0; i < subtable->n_indices; i++) {
571 cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
573 n_rules = cmap_insert(&subtable->rules, &new->cmap_node, hash);
574 } else { /* Equal rules exist in the classifier already. */
575 struct cls_match *iter;
577 /* Scan the list for the insertion point that will keep the list in
578 * order of decreasing priority. */
579 FOR_EACH_RULE_IN_LIST_PROTECTED (iter, head) {
580 if (rule->priority >= iter->priority) {
585 /* 'iter' now at the insertion point, or NULL if at the end. */
587 struct cls_rule *old;
589 if (rule->priority == iter->priority) {
590 rculist_replace(&new->list, &iter->list);
591 old = CONST_CAST(struct cls_rule *, iter->cls_rule);
593 rculist_insert(&iter->list, &new->list);
597 /* Replace the existing head in data structures, if rule is the new
600 subtable_replace_head_rule(cls, subtable, head, new, hash,
605 ovsrcu_postpone(free, iter);
606 old->cls_match = NULL;
608 /* No change in subtable's max priority or max count. */
610 /* Return displaced rule. Caller is responsible for keeping it
611 * around until all threads quiesce. */
612 ovs_mutex_unlock(&cls->mutex);
616 rculist_push_back(&head->list, &new->list);
620 /* Rule was added, not replaced. Update 'subtable's 'max_priority' and
621 * 'max_count', if necessary.
623 * The rule was already inserted, but concurrent readers may not see the
624 * rule yet as the subtables vector is not updated yet. This will have to
625 * be fixed by revalidation later. */
627 subtable->max_priority = rule->priority;
628 subtable->max_count = 1;
629 pvector_insert(&cls->subtables, subtable, rule->priority);
630 } else if (rule->priority == subtable->max_priority) {
631 ++subtable->max_count;
632 } else if (rule->priority > subtable->max_priority) {
633 subtable->max_priority = rule->priority;
634 subtable->max_count = 1;
635 pvector_change_priority(&cls->subtables, subtable, rule->priority);
638 /* Nothing was replaced. */
640 ovs_mutex_unlock(&cls->mutex);
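/* Illustrative sketch (hypothetical caller) of handling the rule that
 * classifier_replace() displaces.  'rule_destroy_cb' is a hypothetical
 * callback that calls cls_rule_destroy() and frees the enclosing structure:
 *
 *     const struct cls_rule *displaced = classifier_replace(cls, rule);
 *
 *     if (displaced) {
 *         // Concurrent readers may still hold a reference, so defer.
 *         ovsrcu_postpone(rule_destroy_cb,
 *                         CONST_CAST(struct cls_rule *, displaced));
 *     }
 */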
644 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
645 * must not modify or free it.
647 * 'cls' must not contain an identical rule (including wildcards, values of
648 * fixed fields, and priority). Use classifier_find_rule_exactly() to find
651 classifier_insert(struct classifier *cls, struct cls_rule *rule)
653 const struct cls_rule *displaced_rule = classifier_replace(cls, rule);
654 ovs_assert(!displaced_rule);
657 /* Removes 'rule' from 'cls'. It is the caller's responsibility to destroy
658 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
659 * resides, etc., as necessary.
661 * Does nothing if 'rule' has already been removed, or was never inserted.
663 * Returns the removed rule, or NULL, if it was already removed.
665 const struct cls_rule *
666 classifier_remove(struct classifier *cls, const struct cls_rule *rule)
667 OVS_EXCLUDED(cls->mutex)
669 struct cls_partition *partition;
670 struct cls_match *cls_match;
671 struct cls_subtable *subtable;
672 struct cls_match *prev;
673 struct cls_match *next;
675 uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
676 uint8_t prev_be32ofs = 0;
679 ovs_mutex_lock(&cls->mutex);
680 cls_match = rule->cls_match;
683 goto unlock; /* Already removed. */
685 /* Mark as removed. */
686 CONST_CAST(struct cls_rule *, rule)->cls_match = NULL;
688 INIT_CONTAINER(prev, rculist_back_protected(&cls_match->list), list);
689 INIT_CONTAINER(next, rculist_next(&cls_match->list), list);
691 /* Remove from the list of equal rules. */
692 rculist_remove(&cls_match->list);
694 /* Check if this is NOT a head rule. */
695 if (prev->priority > rule->priority) {
696 /* Not the highest priority rule, no need to check subtable's
701 subtable = find_subtable(cls, &rule->match.mask);
702 ovs_assert(subtable);
704 for (i = 0; i < subtable->n_indices; i++) {
705 ihash[i] = minimatch_hash_range(&rule->match, prev_be32ofs,
706 subtable->index_ofs[i], &basis);
707 prev_be32ofs = subtable->index_ofs[i];
709 hash = minimatch_hash_range(&rule->match, prev_be32ofs, FLOW_U32S, &basis);
711 /* Head rule. Check if 'next' is an identical, lower-priority rule that
712 * will replace 'rule' in the data structures. */
713 if (next->priority < rule->priority) {
714 subtable_replace_head_rule(cls, subtable, cls_match, next, hash,
719 /* 'rule' is the last of its kind in the classifier; remove it from all the
720 * data structures. */
722 if (subtable->ports_mask_len) {
723 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
725 trie_remove_prefix(&subtable->ports_trie,
726 &masked_ports, subtable->ports_mask_len);
728 for (i = 0; i < cls->n_tries; i++) {
729 if (subtable->trie_plen[i]) {
730 trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
734 /* Remove rule node from indices. */
735 for (i = 0; i < subtable->n_indices; i++) {
736 cmap_remove(&subtable->indices[i], &cls_match->index_nodes[i],
739 n_rules = cmap_remove(&subtable->rules, &cls_match->cmap_node, hash);
741 partition = cls_match->partition;
743 tag_tracker_subtract(&partition->tracker, &partition->tags,
745 if (!partition->tags) {
746 cmap_remove(&cls->partitions, &partition->cmap_node,
747 hash_metadata(partition->metadata));
748 ovsrcu_postpone(free, partition);
753 destroy_subtable(cls, subtable);
756 if (subtable->max_priority == rule->priority
757 && --subtable->max_count == 0) {
758 /* Find the new 'max_priority' and 'max_count'. */
759 struct cls_match *head;
760 int max_priority = INT_MIN;
762 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
763 if (head->priority > max_priority) {
764 max_priority = head->priority;
765 subtable->max_count = 1;
766 } else if (head->priority == max_priority) {
767 ++subtable->max_count;
770 subtable->max_priority = max_priority;
771 pvector_change_priority(&cls->subtables, subtable, max_priority);
775 ovsrcu_postpone(free, cls_match);
778 ovs_mutex_unlock(&cls->mutex);
783 /* Prefix tree context. Valid when 'lookup_done' is true. Can skip all
784 * subtables which have a prefix match on the trie field, but whose prefix
785 * length is not indicated in 'match_plens'. For example, a subtable that
786 * has an 8-bit trie field prefix match can be skipped if
787 * !be_get_bit_at(&match_plens, 8 - 1). If skipped, 'maskbits' prefix bits
788 * must be unwildcarded to make datapath flow only match packets it should. */
790 const struct cls_trie *trie;
791 bool lookup_done; /* Status of the lookup. */
792 uint8_t be32ofs; /* U32 offset of the field in question. */
793 unsigned int maskbits; /* Prefix length needed to avoid false matches. */
794 union mf_value match_plens; /* Bitmask of prefix lengths with possible
799 trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
802 ctx->be32ofs = trie->field->flow_be32ofs;
803 ctx->lookup_done = false;
806 /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
807 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
808 * of equal priority match 'flow', returns one arbitrarily.
810 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
811 * set of bits that were significant in the lookup. At some point
812 * earlier, 'wc' should have been initialized (e.g., by
813 * flow_wildcards_init_catchall()). */
814 const struct cls_rule *
815 classifier_lookup(const struct classifier *cls, const struct flow *flow,
816 struct flow_wildcards *wc)
818 const struct cls_partition *partition;
820 int best_priority = INT_MIN;
821 const struct cls_match *best;
822 struct trie_ctx trie_ctx[CLS_MAX_TRIES];
823 struct cls_subtable *subtable;
825 /* Synchronize for cls->n_tries and subtable->trie_plen. They can change
826 * when table configuration changes, which happens typically only on
828 atomic_thread_fence(memory_order_acquire);
830 /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
831 * then 'flow' cannot possibly match in 'subtable':
833 * - If flow->metadata maps to a given 'partition', then we can use
834 * 'tags' for 'partition->tags'.
836 * - If flow->metadata has no partition, then no rule in 'cls' has an
837 * exact-match for flow->metadata. That means that we don't need to
838 * search any subtable that includes flow->metadata in its mask.
840 * In either case, we always need to search any cls_subtables that do not
841 * include flow->metadata in its mask. One way to do that would be to
842 * check the "cls_subtable"s explicitly for that, but that would require an
843 * extra branch per subtable. Instead, we mark such a cls_subtable's
844 * 'tags' as TAG_ALL and make sure that 'tags' is never empty. This means
845 * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
846 * need a special case.
848 partition = (cmap_is_empty(&cls->partitions)
850 : find_partition(cls, flow->metadata,
851 hash_metadata(flow->metadata)));
852 tags = partition ? partition->tags : TAG_ARBITRARY;
854 /* Initialize trie contexts for find_match_wc(). */
855 for (int i = 0; i < cls->n_tries; i++) {
856 trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
860 PVECTOR_FOR_EACH_PRIORITY(subtable, best_priority, 2,
861 sizeof(struct cls_subtable), &cls->subtables) {
862 const struct cls_match *rule;
864 if (!tag_intersects(tags, subtable->tag)) {
868 rule = find_match_wc(subtable, flow, trie_ctx, cls->n_tries, wc);
869 if (rule && rule->priority > best_priority) {
870 best_priority = rule->priority;
875 return best ? best->cls_rule : NULL;
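/* Illustrative sketch (hypothetical caller) of a lookup that also collects the
 * wildcards needed for a megaflow-style cache entry:
 *
 *     struct flow_wildcards wc;
 *     const struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(cls, flow, &wc);
 *     if (rule) {
 *         // 'wc.masks' now has a bit set for every flow bit that was
 *         // significant in choosing 'rule'.
 *     }
 */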
878 /* Finds and returns a rule in 'cls' with exactly the same priority and
879 * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
880 * contain an exact match. */
881 const struct cls_rule *
882 classifier_find_rule_exactly(const struct classifier *cls,
883 const struct cls_rule *target)
885 const struct cls_match *head, *rule;
886 const struct cls_subtable *subtable;
888 subtable = find_subtable(cls, &target->match.mask);
893 head = find_equal(subtable, &target->match.flow,
894 miniflow_hash_in_minimask(&target->match.flow,
895 &target->match.mask, 0));
899 FOR_EACH_RULE_IN_LIST (rule, head) {
900 if (target->priority >= rule->priority) {
901 return target->priority == rule->priority ? rule->cls_rule : NULL;
907 /* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
908 * same matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
909 * contain an exact match. */
910 const struct cls_rule *
911 classifier_find_match_exactly(const struct classifier *cls,
912 const struct match *target, int priority)
914 const struct cls_rule *retval;
917 cls_rule_init(&cr, target, priority);
918 retval = classifier_find_rule_exactly(cls, &cr);
919 cls_rule_destroy(&cr);
924 /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
925 * considered to overlap if both rules have the same priority and a packet
926 * could match both. */
928 classifier_rule_overlaps(const struct classifier *cls,
929 const struct cls_rule *target)
930 OVS_EXCLUDED(cls->mutex)
932 struct cls_subtable *subtable;
934 ovs_mutex_lock(&cls->mutex);
935 /* Iterate subtables in the descending max priority order. */
936 PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
937 sizeof(struct cls_subtable), &cls->subtables) {
938 uint32_t storage[FLOW_U32S];
939 struct minimask mask;
940 struct cls_match *head;
942 minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
943 CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
944 struct cls_match *rule;
946 FOR_EACH_RULE_IN_LIST_PROTECTED (rule, head) {
947 if (rule->priority < target->priority) {
948 break; /* Rules in descending priority order. */
950 if (rule->priority == target->priority
951 && miniflow_equal_in_minimask(&target->match.flow,
952 &rule->flow, &mask)) {
953 ovs_mutex_unlock(&cls->mutex);
960 ovs_mutex_unlock(&cls->mutex);
964 /* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
965 * specific than 'criteria'. That is, 'rule' matches 'criteria' and this
966 * function returns true if, for every field:
968 * - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
971 * - 'criteria' wildcards the field,
973 * Conversely, 'rule' does not match 'criteria' and this function returns false
974 * if, for at least one field:
976 * - 'criteria' and 'rule' specify different values for the field, or
978 * - 'criteria' specifies a value for the field but 'rule' wildcards it.
980 * Equivalently, the truth table for whether a field matches is:
985 *                               rule
986 *                       wildcard         exact
988 *                      +---------+-----------------+
989 *   criteria  wildcard |   yes   |       yes       |
991 *   criteria  exact    |   no    | if values match |
993 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
994 * commands and by OpenFlow 1.0 aggregate and flow stats.
996 * Ignores rule->priority. */
998 cls_rule_is_loose_match(const struct cls_rule *rule,
999 const struct minimatch *criteria)
1001 return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
1002 && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
1009 rule_matches(const struct cls_match *rule, const struct cls_rule *target)
1012 || miniflow_equal_in_minimask(&rule->flow,
1013 &target->match.flow,
1014 &target->match.mask));
1017 static const struct cls_match *
1018 search_subtable(const struct cls_subtable *subtable,
1019 struct cls_cursor *cursor)
1022 || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
1023 const struct cls_match *rule;
1025 CMAP_CURSOR_FOR_EACH (rule, cmap_node, &cursor->rules,
1027 if (rule_matches(rule, cursor->target)) {
1035 /* Initializes 'cursor' for iterating through rules in 'cls', and returns the
1036 * first matching cls_rule via '*pnode', or NULL if there are no matches.
1038 * - If 'target' is null, the cursor will visit every rule in 'cls'.
1040 * - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
1041 * such that cls_rule_is_loose_match(rule, target) returns true.
1043 * Ignores target->priority. */
1044 struct cls_cursor cls_cursor_start(const struct classifier *cls,
1045 const struct cls_rule *target,
1047 OVS_NO_THREAD_SAFETY_ANALYSIS
1049 struct cls_cursor cursor;
1050 struct cls_subtable *subtable;
1054 cursor.target = target && !cls_rule_is_catchall(target) ? target : NULL;
1057 /* Find first rule. */
1058 ovs_mutex_lock(&cursor.cls->mutex);
1059 CMAP_CURSOR_FOR_EACH (subtable, cmap_node, &cursor.subtables,
1060 &cursor.cls->subtables_map) {
1061 const struct cls_match *rule = search_subtable(subtable, &cursor);
1064 cursor.subtable = subtable;
1065 cursor.rule = rule->cls_rule;
1070 /* Keep the mutex locked only if 'safe' was not requested and a rule was found. */
1071 if (safe || !cursor.rule) {
1072 ovs_mutex_unlock(&cursor.cls->mutex);
1077 static const struct cls_rule *
1078 cls_cursor_next(struct cls_cursor *cursor)
1079 OVS_NO_THREAD_SAFETY_ANALYSIS
1081 const struct cls_match *rule = cursor->rule->cls_match;
1082 const struct cls_subtable *subtable;
1083 const struct cls_match *next;
1085 next = next_rule_in_list__(rule);
1086 if (next->priority < rule->priority) {
1087 return next->cls_rule;
1090 /* 'next' is the head of the list, that is, the rule that is included in
1091 * the subtable's map. (This is important when the classifier contains
1092 * rules that differ only in priority.) */
1094 CMAP_CURSOR_FOR_EACH_CONTINUE (rule, cmap_node, &cursor->rules) {
1095 if (rule_matches(rule, cursor->target)) {
1096 return rule->cls_rule;
1100 subtable = cursor->subtable;
1101 CMAP_CURSOR_FOR_EACH_CONTINUE (subtable, cmap_node, &cursor->subtables) {
1102 rule = search_subtable(subtable, cursor);
1104 cursor->subtable = subtable;
1105 return rule->cls_rule;
1112 /* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
1113 * or to null if all matching rules have been visited. */
1115 cls_cursor_advance(struct cls_cursor *cursor)
1116 OVS_NO_THREAD_SAFETY_ANALYSIS
1119 ovs_mutex_lock(&cursor->cls->mutex);
1121 cursor->rule = cls_cursor_next(cursor);
1122 if (cursor->safe || !cursor->rule) {
1123 ovs_mutex_unlock(&cursor->cls->mutex);
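/* Illustrative sketch (hypothetical caller) of iterating over every rule with
 * the cursor API; the last argument of cls_cursor_start() (elided above)
 * selects 'safe' iteration, which drops the mutex between steps:
 *
 *     struct cls_cursor cursor = cls_cursor_start(cls, NULL, true);
 *
 *     for (; cursor.rule; cls_cursor_advance(&cursor)) {
 *         // ... examine 'cursor.rule' ...
 *     }
 */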
1127 static struct cls_subtable *
1128 find_subtable(const struct classifier *cls, const struct minimask *mask)
1130 struct cls_subtable *subtable;
1132 CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
1133 &cls->subtables_map) {
1134 if (minimask_equal(mask, &subtable->mask)) {
1141 /* The new subtable will be visible to the readers only after this. */
1142 static struct cls_subtable *
1143 insert_subtable(struct classifier *cls, const struct minimask *mask)
1144 OVS_REQUIRES(cls->mutex)
1146 uint32_t hash = minimask_hash(mask, 0);
1147 struct cls_subtable *subtable;
1149 struct flow_wildcards old, new;
1151 int count = count_1bits(mask->masks.map);
1153 subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
1154 + MINIFLOW_VALUES_SIZE(count));
1155 cmap_init(&subtable->rules);
1156 miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
1157 &mask->masks, count);
1159 /* Init indices for segmented lookup, if any. */
1160 flow_wildcards_init_catchall(&new);
1163 for (i = 0; i < cls->n_flow_segments; i++) {
1164 flow_wildcards_fold_minimask_range(&new, mask, prev,
1165 cls->flow_segments[i]);
1166 /* Add an index if it adds mask bits. */
1167 if (!flow_wildcards_equal(&new, &old)) {
1168 cmap_init(&subtable->indices[index]);
1169 *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
1170 = cls->flow_segments[i];
1174 prev = cls->flow_segments[i];
1176 /* Check if the rest of the subtable's mask adds any bits,
1177 * and remove the last index if it doesn't. */
1179 flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
1180 if (flow_wildcards_equal(&new, &old)) {
1182 *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
1183 cmap_destroy(&subtable->indices[index]);
1186 *CONST_CAST(uint8_t *, &subtable->n_indices) = index;
1188 *CONST_CAST(tag_type *, &subtable->tag) =
1189 (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
1190 ? tag_create_deterministic(hash)
1193 for (i = 0; i < cls->n_tries; i++) {
1194 subtable->trie_plen[i] = minimask_get_prefix_len(mask,
1195 cls->tries[i].field);
1199 ovsrcu_set_hidden(&subtable->ports_trie, NULL);
1200 *CONST_CAST(int *, &subtable->ports_mask_len)
1201 = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
1203 cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);
1208 /* RCU readers may still access the subtable before it is actually freed. */
1210 destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
1211 OVS_REQUIRES(cls->mutex)
1215 pvector_remove(&cls->subtables, subtable);
1216 cmap_remove(&cls->subtables_map, &subtable->cmap_node,
1217 minimask_hash(&subtable->mask, 0));
1219 ovs_assert(ovsrcu_get_protected(struct trie_node *, &subtable->ports_trie)
1221 ovs_assert(cmap_is_empty(&subtable->rules));
1223 for (i = 0; i < subtable->n_indices; i++) {
1224 cmap_destroy(&subtable->indices[i]);
1226 cmap_destroy(&subtable->rules);
1227 ovsrcu_postpone(free, subtable);
1235 static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);
1237 /* Return 'true' if the rest of the subtable can be skipped based on the
1238 * prefix trie lookup results. */
1240 check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1241 const unsigned int field_plen[CLS_MAX_TRIES],
1242 const struct range ofs, const struct flow *flow,
1243 struct flow_wildcards *wc)
1247 /* Check if we could avoid fully unwildcarding the next level of
1248 * fields using the prefix tries. The trie checks are done only as
1249 * needed to avoid folding in additional bits to the wildcards mask. */
1250 for (j = 0; j < n_tries; j++) {
1251 /* Is the trie field relevant for this subtable? */
1252 if (field_plen[j]) {
1253 struct trie_ctx *ctx = &trie_ctx[j];
1254 uint8_t be32ofs = ctx->be32ofs;
1256 /* Is the trie field within the current range of fields? */
1257 if (be32ofs >= ofs.start && be32ofs < ofs.end) {
1258 /* On-demand trie lookup. */
1259 if (!ctx->lookup_done) {
1260 memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
1261 ctx->maskbits = trie_lookup(ctx->trie, flow,
1263 ctx->lookup_done = true;
1265 /* Possible to skip the rest of the subtable if subtable's
1266 * prefix on the field is not included in the lookup result. */
1267 if (!be_get_bit_at(&ctx->match_plens.be32, field_plen[j] - 1)) {
1268 /* We want the trie lookup to never result in unwildcarding
1269 * any bits that would not be unwildcarded otherwise.
1270 * Since the trie is shared by the whole classifier, it is
1271 * possible that the 'maskbits' contain bits that are
1272 * irrelevant for the partition relevant for the current
1273 * packet. Hence the checks below. */
1275 /* Check that the trie result will not unwildcard more bits
1276 * than this subtable would otherwise. */
1277 if (ctx->maskbits <= field_plen[j]) {
1278 /* Unwildcard the bits and skip the rest. */
1279 mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
1280 /* Note: Prerequisite already unwildcarded, as the only
1281 * prerequisite of the supported trie lookup fields is
1282 * the ethertype, which is always unwildcarded. */
1285 /* Can skip if the field is already unwildcarded. */
1286 if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
1296 /* Returns true if 'target' satisfies 'flow'/'mask', that is, if for every
1297 * bit that is set in 'mask', 'target' has the same value in that bit as
1298 * 'flow' does.
1300 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
1301 * target, mask) but this is faster because of the invariant that
1302 * flow->map and mask->masks.map are the same, and that this version
1303 * takes the 'wc'. */
1305 miniflow_and_mask_matches_flow(const struct miniflow *flow,
1306 const struct minimask *mask,
1307 const struct flow *target)
1309 const uint32_t *flowp = miniflow_get_u32_values(flow);
1310 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
1313 MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
1314 uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & *maskp++;
1324 static inline const struct cls_match *
1325 find_match(const struct cls_subtable *subtable, const struct flow *flow,
1328 const struct cls_match *rule;
1330 CMAP_FOR_EACH_WITH_HASH (rule, cmap_node, hash, &subtable->rules) {
1331 if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
1340 /* Returns true if 'target' satisfies 'flow'/'mask', that is, if for every
1341 * bit that is set in 'mask', 'target' has the same value in that bit as
1342 * 'flow' does.
1344 * This function is equivalent to miniflow_and_mask_matches_flow() but this
1345 * version fills in the mask bits in 'wc'. */
1347 miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
1348 const struct minimask *mask,
1349 const struct flow *target,
1350 struct flow_wildcards *wc)
1352 const uint32_t *flowp = miniflow_get_u32_values(flow);
1353 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
1356 MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
1357 uint32_t mask = *maskp++;
1358 uint32_t diff = (*flowp++ ^ flow_u32_value(target, idx)) & mask;
1361 /* Only unwildcard if none of the differing bits is already
1363 if (!(flow_u32_value(&wc->masks, idx) & diff)) {
1364 /* Keep one bit of the difference. */
1365 *flow_u32_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
1369 /* Fill in the bits that were looked at. */
1370 *flow_u32_lvalue(&wc->masks, idx) |= mask;
1376 /* Unwildcard the fields looked up so far, if any. */
1378 fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
1382 flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
1386 static const struct cls_match *
1387 find_match_wc(const struct cls_subtable *subtable, const struct flow *flow,
1388 struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1389 struct flow_wildcards *wc)
1391 uint32_t basis = 0, hash;
1392 const struct cls_match *rule = NULL;
1396 if (OVS_UNLIKELY(!wc)) {
1397 return find_match(subtable, flow,
1398 flow_hash_in_minimask(flow, &subtable->mask, 0));
1402 /* Try to finish early by checking fields in segments. */
1403 for (i = 0; i < subtable->n_indices; i++) {
1404 const struct cmap_node *inode;
1406 ofs.end = subtable->index_ofs[i];
1408 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
1410 /* 'wc' bits for the trie field set, now unwildcard the preceding
1411 * bits used so far. */
1412 fill_range_wc(subtable, wc, ofs.start);
1415 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1417 inode = cmap_find(&subtable->indices[i], hash);
1419 /* No match, can stop immediately, but must fold in the bits
1420 * used in lookup so far. */
1421 fill_range_wc(subtable, wc, ofs.end);
1425 /* If we have narrowed down to a single rule already, check whether
1426 * that rule matches. Either way, we're done.
1428 * (Rare) hash collisions may cause us to miss the opportunity for this
1430 if (!cmap_node_next(inode)) {
1431 ASSIGN_CONTAINER(rule, inode - i, index_nodes);
1432 if (miniflow_and_mask_matches_flow_wc(&rule->flow, &subtable->mask,
1438 ofs.start = ofs.end;
1440 ofs.end = FLOW_U32S;
1441 /* Trie check for the final range. */
1442 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
1443 fill_range_wc(subtable, wc, ofs.start);
1446 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1448 rule = find_match(subtable, flow, hash);
1449 if (!rule && subtable->ports_mask_len) {
1450 /* Ports are always part of the final range, if any.
1451 * No match was found for the ports. Use the ports trie to figure out
1452 * which ports bits to unwildcard. */
1454 ovs_be32 value, plens, mask;
1456 mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
1457 value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
1458 mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);
1460 ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
1461 mask & be32_prefix_mask(mbits);
1463 /* Unwildcard all bits in the mask up to the ports, as they were used
1464 * to determine there is no match. */
1465 fill_range_wc(subtable, wc, TP_PORTS_OFS32);
1469 /* Must unwildcard all the fields, as they were looked at. */
1470 flow_wildcards_fold_minimask(wc, &subtable->mask);
1474 static struct cls_match *
1475 find_equal(const struct cls_subtable *subtable, const struct miniflow *flow,
1478 struct cls_match *head;
1480 CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
1481 if (miniflow_equal(&head->flow, flow)) {
1488 /* A longest-prefix match tree. */
1490 /* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
1491 * Prefixes are in the network byte order, and the offset 0 corresponds to
1492 * the most significant bit of the first byte. The offset can be read as
1493 * "how many bits to skip from the start of the prefix starting at 'pr'". */
1495 raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
1499 pr += ofs / 32; /* Where to start. */
1500 ofs %= 32; /* How many bits to skip at 'pr'. */
1502 prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
1503 if (plen > 32 - ofs) { /* Need more than we have already? */
1504 prefix |= ntohl(*++pr) >> (32 - ofs);
1506 /* Return with possible unwanted bits at the end. */
1510 /* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
1511 * offset 'ofs'. Prefixes are in the network byte order, and the offset 0
1512 * corresponds to the most significant bit of the first byte. The offset can
1513 * be read as "how many bits to skip from the start of the prefix starting at
1516 trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
1521 if (plen > TRIE_PREFIX_BITS) {
1522 plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
1524 /* Return with unwanted bits cleared. */
1525 return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
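/* Worked example (illustrative): if pr[0] == htonl(0xC0A80105), i.e. the
 * address 192.168.1.5 in network byte order, then
 *
 *     trie_get_prefix(pr, 8, 8) == 0xA8000000
 *
 * that is, the 8 bits starting at bit offset 8 (the 168 octet), left-aligned
 * and with the unwanted low-order bits cleared. */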
1528 /* Return the number of leading bits (at most 'n_bits') in which the MSBs of
1529 * 'prefix' equal the bits of 'value' starting at "MSB 0"-based offset 'ofs'. */
1531 prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
1534 uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
1535 /* Set the bit after the relevant bits to limit the result. */
1536 return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
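/* Worked example (illustrative): with prefix == 0xC0A80000 (192.168/16,
 * left-aligned), n_bits == 16 and value[0] == htonl(0xC0A90001) (192.169.0.1),
 * the first differing bit is bit 15 ("MSB 0"-based), so
 *
 *     prefix_equal_bits(prefix, 16, value, 0) == 15
 *
 * whereas any value within 192.168.0.0/16 would yield the full 16. */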
1539 /* Return the number of equal bits in 'node' prefix and a 'prefix' of length
1540 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
1542 trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
1543 unsigned int ofs, unsigned int plen)
1545 return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
1549 /* Return the bit at ("MSB 0"-based) offset 'ofs' as an int. 'ofs' can
1550 * be greater than 31. */
1552 be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
1554 return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
1557 /* Return the bit at ("MSB 0"-based) offset 'ofs' as an int. 'ofs' must
1558 * be between 0 and 31, inclusive. */
1560 get_bit_at(const uint32_t prefix, unsigned int ofs)
1562 return (prefix >> (31 - ofs)) & 1u;
1565 /* Create new branch. */
1566 static struct trie_node *
1567 trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
1568 unsigned int n_rules)
1570 struct trie_node *node = xmalloc(sizeof *node);
1572 node->prefix = trie_get_prefix(prefix, ofs, plen);
1574 if (plen <= TRIE_PREFIX_BITS) {
1575 node->n_bits = plen;
1576 ovsrcu_set_hidden(&node->edges[0], NULL);
1577 ovsrcu_set_hidden(&node->edges[1], NULL);
1578 node->n_rules = n_rules;
1579 } else { /* Need intermediate nodes. */
1580 struct trie_node *subnode = trie_branch_create(prefix,
1581 ofs + TRIE_PREFIX_BITS,
1582 plen - TRIE_PREFIX_BITS,
1584 int bit = get_bit_at(subnode->prefix, 0);
1585 node->n_bits = TRIE_PREFIX_BITS;
1586 ovsrcu_set_hidden(&node->edges[bit], subnode);
1587 ovsrcu_set_hidden(&node->edges[!bit], NULL);
1594 trie_node_destroy(const struct trie_node *node)
1596 ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
1599 /* Copy a trie node for modification and postpone deleting the old one. */
1600 static struct trie_node *
1601 trie_node_rcu_realloc(const struct trie_node *node)
1603 struct trie_node *new_node = xmalloc(sizeof *node);
1606 trie_node_destroy(node);
1611 /* May only be called while holding the classifier mutex. */
1613 trie_destroy(rcu_trie_ptr *trie)
1615 struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);
1618 ovsrcu_set_hidden(trie, NULL);
1619 trie_destroy(&node->edges[0]);
1620 trie_destroy(&node->edges[1]);
1621 trie_node_destroy(node);
1626 trie_is_leaf(const struct trie_node *trie)
1629 return !ovsrcu_get(struct trie_node *, &trie->edges[0])
1630 && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
1634 mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
1635 unsigned int n_bits)
1637 ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
1640 for (i = 0; i < n_bits / 32; i++) {
1641 mask[i] = OVS_BE32_MAX;
1644 mask[i] |= htonl(~0u << (32 - n_bits % 32));
1649 mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
1650 unsigned int n_bits)
1652 ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
1654 ovs_be32 zeroes = 0;
1656 for (i = 0; i < n_bits / 32; i++) {
1660 zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
1663 return !zeroes; /* All 'n_bits' bits set. */
1666 static rcu_trie_ptr *
1667 trie_next_edge(struct trie_node *node, const ovs_be32 value[],
1670 return node->edges + be_get_bit_at(value, ofs);
1673 static const struct trie_node *
1674 trie_next_node(const struct trie_node *node, const ovs_be32 value[],
1677 return ovsrcu_get(struct trie_node *,
1678 &node->edges[be_get_bit_at(value, ofs)]);
1681 /* Set the bit at ("MSB 0"-based) offset 'ofs'. 'ofs' can be greater than 31.
1684 be_set_bit_at(ovs_be32 value[], unsigned int ofs)
1686 ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
1689 /* Returns the number of bits in the prefix mask necessary to determine a
1690 * mismatch, in case there are longer prefixes in the tree below the one that
1692 * '*plens' will have a bit set for each prefix length that may have matching
1693 * rules. The caller is responsible for clearing the '*plens' prior to
1697 trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
1698 ovs_be32 plens[], unsigned int n_bits)
1700 const struct trie_node *prev = NULL;
1701 const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
1702 unsigned int match_len = 0; /* Number of matching bits. */
1704 for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
1705 unsigned int eqbits;
1706 /* Check if this edge can be followed. */
1707 eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
1709 match_len += eqbits;
1710 if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
1711 /* Bit at offset 'match_len' differed. */
1712 return match_len + 1; /* Includes the first mismatching bit. */
1714 /* Full match, check if rules exist at this prefix length. */
1715 if (node->n_rules > 0) {
1716 be_set_bit_at(plens, match_len - 1);
1718 if (match_len >= n_bits) {
1719 return n_bits; /* Full prefix. */
1722 /* node == NULL. Full match so far, but we tried to follow a
1723 * non-existing branch. Need to exclude the other branch if it exists
1724 * (it does not if we were called on an empty trie or 'prev' is a leaf
1726 return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
1730 trie_lookup(const struct cls_trie *trie, const struct flow *flow,
1731 union mf_value *plens)
1733 const struct mf_field *mf = trie->field;
1735 /* Check that current flow matches the prerequisites for the trie
1736 * field. Some match fields are used for multiple purposes, so we
1737 * must check that the trie is relevant for this flow. */
1738 if (mf_are_prereqs_ok(mf, flow)) {
1739 return trie_lookup_value(&trie->root,
1740 &((ovs_be32 *)flow)[mf->flow_be32ofs],
1741 &plens->be32, mf->n_bits);
1743 memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
1744 return 0; /* Value not used in this case. */
1747 /* Returns the length of a prefix match mask for the field 'mf' in 'minimask'.
1748 * Returns 0 if the mask is not a contiguous (CIDR-style) prefix or has any
1749 * bits set past the end of the prefix. */
1751 minimask_get_prefix_len(const struct minimask *minimask,
1752 const struct mf_field *mf)
1754 unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
1755 uint8_t u32_ofs = mf->flow_be32ofs;
1756 uint8_t u32_end = u32_ofs + mf->n_bytes / 4;
1758 for (; u32_ofs < u32_end; ++u32_ofs) {
1760 mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs));
1762 /* Validate mask, count the mask length. */
1765 return 0; /* No bits allowed after mask ended. */
1768 if (~mask & (~mask + 1)) {
1769 return 0; /* Mask not contiguous. */
1771 mask_tz = ctz32(mask);
1772 n_bits += 32 - mask_tz;
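/* Worked example (illustrative), for MFF_IPV4_DST:
 *
 *     nw_dst mask 255.255.255.0  ->  24
 *     nw_dst mask 255.255.0.255  ->   0   (not a contiguous prefix)
 *     nw_dst mask 0.0.0.0        ->   0
 */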
1780 * This is called only when mask prefix is known to be CIDR and non-zero.
1781 * Relies on the fact that the flow and mask have the same map, and since
1782 * the mask is CIDR, the storage for the flow field exists even if it
1783 * happened to be zeros.
1785 static const ovs_be32 *
1786 minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
1788 return miniflow_get_be32_values(&match->flow) +
1789 count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
1792 /* Insert rule into the prefix tree.
1793 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
1796 trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
1798 trie_insert_prefix(&trie->root,
1799 minimatch_get_prefix(&rule->match, trie->field), mlen);
1803 trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
1805 struct trie_node *node;
1808 /* Walk the tree. */
1809 for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
1810 edge = trie_next_edge(node, prefix, ofs)) {
1811 unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
1813 if (eqbits < node->n_bits) {
1814 /* Mismatch, new node needs to be inserted above. */
1815 int old_branch = get_bit_at(node->prefix, eqbits);
1816 struct trie_node *new_parent;
1818 new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
1819 ofs == mlen ? 1 : 0);
1820 /* Copy the node to modify it. */
1821 node = trie_node_rcu_realloc(node);
1822 /* Adjust the new node for its new position in the tree. */
1823 node->prefix <<= eqbits;
1824 node->n_bits -= eqbits;
1825 ovsrcu_set_hidden(&new_parent->edges[old_branch], node);
1827 /* Check if we need a new branch for the new rule. */
1829 ovsrcu_set_hidden(&new_parent->edges[!old_branch],
1830 trie_branch_create(prefix, ofs, mlen - ofs,
1833 ovsrcu_set(edge, new_parent); /* Publish changes. */
1836 /* Full match so far. */
1839 /* Full match at the current node, rule needs to be added here. */
1844 /* Must insert a new tree branch for the new rule. */
1845 ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
1848 /* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
1851 trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
1853 trie_remove_prefix(&trie->root,
1854 minimatch_get_prefix(&rule->match, trie->field), mlen);
1857 /* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
1860 trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
1862 struct trie_node *node;
1863 rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
1864 int depth = 0, ofs = 0;
1866 /* Walk the tree. */
1867 for (edges[0] = root;
1868 (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
1869 edges[++depth] = trie_next_edge(node, prefix, ofs)) {
1870 unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
1872 if (eqbits < node->n_bits) {
1873 /* Mismatch, nothing to be removed. This should never happen, as
1874 * only rules in the classifier are ever removed. */
1875 break; /* Log a warning. */
1877 /* Full match so far. */
1881 /* Full prefix match at the current node, remove rule here. */
1882 if (!node->n_rules) {
1883 break; /* Log a warning. */
1887 /* Check if we can prune the tree. */
1888 while (!node->n_rules) {
1889 struct trie_node *next,
1890 *edge0 = ovsrcu_get_protected(struct trie_node *,
1892 *edge1 = ovsrcu_get_protected(struct trie_node *,
1895 if (edge0 && edge1) {
1896 break; /* A branching point, cannot prune. */
1899 /* Else have at most one child node, remove this node. */
1900 next = edge0 ? edge0 : edge1;
1903 if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
1904 break; /* Cannot combine. */
1906 next = trie_node_rcu_realloc(next); /* Modify. */
1908 /* Combine node with next. */
1909 next->prefix = node->prefix | next->prefix >> node->n_bits;
1910 next->n_bits += node->n_bits;
1912 /* Update the parent's edge. */
1913 ovsrcu_set(edges[depth], next); /* Publish changes. */
1914 trie_node_destroy(node);
1916 if (next || !depth) {
1917 /* Branch not pruned or at root, nothing more to do. */
1920 node = ovsrcu_get_protected(struct trie_node *,
1926 /* Cannot go deeper. This should never happen, since only rules
1927 * that actually exist in the classifier are ever removed. */
1928 VLOG_WARN("Trying to remove non-existing rule from a prefix trie.");