/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "classifier.h"
#include "classifier-private.h"
#include <errno.h>
#include <netinet/in.h>
#include "byte-order.h"
#include "dynamic-string.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "packets.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(classifier);

struct trie_ctx;
/* A collection of "struct cls_conjunction"s currently embedded into a
 * "struct cls_match". */
struct cls_conjunction_set {
    /* Link back to the cls_match.
     *
     * cls_conjunction_set is mostly used during classifier lookup, and, in
     * turn, during classifier lookup the most used member of
     * cls_conjunction_set is the rule's priority, so we cache it here for fast
     * access. */
    struct cls_match *match;
    int priority;               /* Cached copy of match->priority. */

    /* Conjunction information.
     *
     * 'min_n_clauses' allows some optimization during classifier lookup. */
    unsigned int n;             /* Number of elements in 'conj'. */
    unsigned int min_n_clauses; /* Smallest 'n' among elements of 'conj'. */
    struct cls_conjunction conj[];
};
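/* Illustrative aside (not from the original source): with OpenFlow
 * "conjunction" actions such as conjunction(1234, 1/2) and
 * conjunction(1234, 2/2), each participating flow carries an entry roughly
 * like:
 *
 *     struct cls_conjunction conj = { .id = 1234, .clause = 0,
 *                                     .n_clauses = 2 };
 *
 * (clause numbering assumed 0-based internally).  A cls_conjunction_set
 * collects all such entries embedded into a single cls_match. */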
/* Ports trie depends on both ports sharing the same ovs_be32. */
#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
BUILD_ASSERT_DECL(TP_PORTS_OFS32 % 2 == 0);
#define TP_PORTS_OFS64 (TP_PORTS_OFS32 / 2)
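/* Sharing one aligned ovs_be32 means both L4 ports can be read with a single
 * 32-bit load, as the lookup code below does, e.g.:
 *
 *     ovs_be32 ports = ((OVS_FORCE ovs_be32 *) flow)[TP_PORTS_OFS32];
 *
 * (see minimatch_get_ports() and the ports trie handling in find_match_wc()).
 */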
static size_t
cls_conjunction_set_size(size_t n)
{
    return (sizeof(struct cls_conjunction_set)
            + n * sizeof(struct cls_conjunction));
}
static struct cls_conjunction_set *
cls_conjunction_set_alloc(struct cls_match *match,
                          const struct cls_conjunction conj[], size_t n)
{
    if (n) {
        size_t min_n_clauses = conj[0].n_clauses;
        for (size_t i = 1; i < n; i++) {
            min_n_clauses = MIN(min_n_clauses, conj[i].n_clauses);
        }

        struct cls_conjunction_set *set = xmalloc(cls_conjunction_set_size(n));
        set->match = match;
        set->priority = match->priority;
        set->n = n;
        set->min_n_clauses = min_n_clauses;
        memcpy(set->conj, conj, n * sizeof *conj);
        return set;
    }
    return NULL;
}
static struct cls_match *
cls_match_alloc(const struct cls_rule *rule,
                const struct cls_conjunction conj[], size_t n)
{
    int count = count_1bits(rule->match.flow.map);

    struct cls_match *cls_match
        = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
                  + MINIFLOW_VALUES_SIZE(count));

    ovsrcu_init(&cls_match->next, NULL);
    *CONST_CAST(const struct cls_rule **, &cls_match->cls_rule) = rule;
    *CONST_CAST(int *, &cls_match->priority) = rule->priority;
    atomic_init(&cls_match->visibility, 0);   /* Initially invisible. */
    miniflow_clone_inline(CONST_CAST(struct miniflow *, &cls_match->flow),
                          &rule->match.flow, count);
    ovsrcu_set_hidden(&cls_match->conj_set,
                      cls_conjunction_set_alloc(cls_match, conj, n));

    return cls_match;
}
static struct cls_subtable *find_subtable(const struct classifier *cls,
                                          const struct minimask *);
static struct cls_subtable *insert_subtable(struct classifier *cls,
                                            const struct minimask *);
static void destroy_subtable(struct classifier *cls, struct cls_subtable *);

static const struct cls_match *find_match_wc(const struct cls_subtable *,
                                             long long version,
                                             const struct flow *,
                                             struct trie_ctx *,
                                             unsigned int n_tries,
                                             struct flow_wildcards *);
static struct cls_match *find_equal(const struct cls_subtable *,
                                    const struct miniflow *, uint32_t hash);
/* Return the next visible (lower-priority) rule in the list.  Multiple
 * identical rules with the same priority may exist transitionally, but when
 * versioning is used at most one of them is ever visible for lookups on any
 * given 'version'. */
static inline const struct cls_match *
next_visible_rule_in_list(const struct cls_match *rule, long long version)
{
    do {
        rule = cls_match_next(rule);
        if (!rule) {
            /* We have reached the head of the list, stop. */
            break;
        }
    } while (!cls_match_visible_in_version(rule, version));

    return rule;
}
static unsigned int minimask_get_prefix_len(const struct minimask *,
                                            const struct mf_field *);
static void trie_init(struct classifier *cls, int trie_idx,
                      const struct mf_field *);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
                                union mf_value *plens);
static unsigned int trie_lookup_value(const rcu_trie_ptr *,
                                      const ovs_be32 value[], ovs_be32 plens[],
                                      unsigned int value_bits);
static void trie_destroy(rcu_trie_ptr *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_insert_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
static void trie_remove_prefix(rcu_trie_ptr *, const ovs_be32 *prefix,
                               int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
                                 unsigned int n_bits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
                                 uint8_t be32ofs, unsigned int n_bits);
static inline void
cls_rule_init__(struct cls_rule *rule, unsigned int priority,
                long long version)
{
    ovs_assert(version > 0);

    rculist_init(&rule->node);
    *CONST_CAST(int *, &rule->priority) = priority;
    *CONST_CAST(long long *, &rule->version) = version;
    rule->cls_match = NULL;
}
/* Initializes 'rule' to match packets specified by 'match' at the given
 * 'priority'.  'match' must satisfy the invariant described in the comment at
 * the definition of struct match.
 *
 * The caller must eventually destroy 'rule' with cls_rule_destroy().
 *
 * Clients should not use priority INT_MIN.  (OpenFlow uses priorities between
 * 0 and UINT16_MAX, inclusive.) */
void
cls_rule_init(struct cls_rule *rule, const struct match *match, int priority,
              long long version)
{
    cls_rule_init__(rule, priority, version);
    minimatch_init(CONST_CAST(struct minimatch *, &rule->match), match);
}
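/* Minimal usage sketch (illustrative only; the match helpers are assumed to
 * be the ones from lib/match.h): build a rule matching all TCP packets at
 * priority 50 in version 1:
 *
 *     struct match match;
 *     struct cls_rule rule;
 *
 *     match_init_catchall(&match);
 *     match_set_dl_type(&match, htons(ETH_TYPE_IP));
 *     match_set_nw_proto(&match, IPPROTO_TCP);
 *     cls_rule_init(&rule, &match, 50, 1);
 *     ...
 *     cls_rule_destroy(&rule);
 */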
/* Same as cls_rule_init() for initialization from a "struct minimatch". */
void
cls_rule_init_from_minimatch(struct cls_rule *rule,
                             const struct minimatch *match, int priority,
                             long long version)
{
    cls_rule_init__(rule, priority, version);
    minimatch_clone(CONST_CAST(struct minimatch *, &rule->match), match);
}
/* Initializes 'dst' as a copy of 'src', but with 'version'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone_in_version(struct cls_rule *dst, const struct cls_rule *src,
                          long long version)
{
    cls_rule_init__(dst, src->priority, version);
    minimatch_clone(CONST_CAST(struct minimatch *, &dst->match), &src->match);
}
/* Initializes 'dst' as a copy of 'src'.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
{
    cls_rule_clone_in_version(dst, src, src->version);
}
/* Initializes 'dst' with the data in 'src', destroying 'src'.
 *
 * 'src' must be a cls_rule NOT in a classifier.
 *
 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
void
cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
{
    cls_rule_init__(dst, src->priority, src->version);
    minimatch_move(CONST_CAST(struct minimatch *, &dst->match),
                   CONST_CAST(struct minimatch *, &src->match));
}
/* Frees memory referenced by 'rule'.  Doesn't free 'rule' itself (it's
 * normally embedded into a larger structure).
 *
 * ('rule' must not currently be in a classifier.) */
void
cls_rule_destroy(struct cls_rule *rule)
{
    ovs_assert(!rule->cls_match);   /* Must not be in a classifier. */

    /* Check that the rule has been properly removed from the classifier and
     * that the destruction only happens after the RCU grace period, or that
     * the rule was never inserted to the classifier in the first place. */
    ovs_assert(rculist_next_protected(&rule->node) == RCULIST_POISON
               || rculist_is_empty(&rule->node));

    minimatch_destroy(CONST_CAST(struct minimatch *, &rule->match));
}
/* Replaces 'cr''s conjunction set with 'conj', unless the new set is
 * identical to the current one. */
void
cls_rule_set_conjunctions(struct cls_rule *cr,
                          const struct cls_conjunction *conj, size_t n)
{
    struct cls_match *match = cr->cls_match;
    struct cls_conjunction_set *old
        = ovsrcu_get_protected(struct cls_conjunction_set *, &match->conj_set);
    struct cls_conjunction *old_conj = old ? old->conj : NULL;
    unsigned int old_n = old ? old->n : 0;

    if (old_n != n || (n && memcmp(old_conj, conj, n * sizeof *conj))) {
        if (old) {
            ovsrcu_postpone(free, old);
        }
        ovsrcu_set(&match->conj_set,
                   cls_conjunction_set_alloc(match, conj, n));
    }
}
/* Returns true if 'a' and 'b' match the same packets at the same priority,
 * false if they differ in some way. */
bool
cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
{
    return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
}
/* Returns a hash value for 'rule', folding in 'basis'. */
uint32_t
cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
{
    return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
}
/* Appends a string describing 'rule' to 's'. */
void
cls_rule_format(const struct cls_rule *rule, struct ds *s)
{
    minimatch_format(&rule->match, s, rule->priority);
}
/* Returns true if 'rule' matches every packet, false otherwise. */
bool
cls_rule_is_catchall(const struct cls_rule *rule)
{
    return minimask_is_catchall(&rule->match.mask);
}
/* Makes the rule invisible after 'version'.  Once that version is made
 * invisible (by changing the version parameter used in lookups), the rule
 * should be actually removed via ovsrcu_postpone().
 *
 * 'rule_' must be in a classifier. */
void
cls_rule_make_invisible_in_version(const struct cls_rule *rule_,
                                   long long version, long long lookup_version)
{
    struct cls_match *rule = rule_->cls_match;

    /* XXX: Adjust when versioning is actually used. */
    ovs_assert(version >= rule_->version && version >= lookup_version);

    /* Normally, we call this when deleting a rule that is already visible to
     * lookups.  However, sometimes a bundle transaction will add a rule and
     * then delete it before the rule has ever become visible.  If we set such
     * a rule to become invisible in a future 'version', it would become
     * visible to all prior versions.  So, in this case we must set the rule
     * visibility to 0 (== never visible). */
    if (cls_match_visible_in_version(rule, lookup_version)) {
        /* Make invisible starting at 'version'. */
        atomic_store_relaxed(&rule->visibility, -version);
    } else {
        /* Rule has not yet been visible to lookups, make invisible in all
         * versions. */
        atomic_store_relaxed(&rule->visibility, 0);
    }
}
/* This undoes the change made by cls_rule_make_invisible_in_version().
 *
 * 'rule' must be in a classifier. */
void
cls_rule_restore_visibility(const struct cls_rule *rule)
{
    atomic_store_relaxed(&rule->cls_match->visibility, rule->version);
}
/* Return true if 'rule' is visible in 'version'.
 *
 * 'rule' must be in a classifier. */
bool
cls_rule_visible_in_version(const struct cls_rule *rule, long long version)
{
    return cls_match_visible_in_version(rule->cls_match, version);
}
/* Initializes 'cls' as a classifier that initially contains no classification
 * rules. */
void
classifier_init(struct classifier *cls, const uint8_t *flow_segments)
{
    cls->n_rules = 0;
    cmap_init(&cls->subtables_map);
    pvector_init(&cls->subtables);
    cmap_init(&cls->partitions);
    cls->n_flow_segments = 0;
    if (flow_segments) {
        while (cls->n_flow_segments < CLS_MAX_INDICES
               && *flow_segments < FLOW_U64S) {
            cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
        }
    }
    cls->n_tries = 0;
    for (int i = 0; i < CLS_MAX_TRIES; i++) {
        trie_init(cls, i, NULL);
    }
    cls->publish = true;
}
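/* Illustrative setup (the segment boundaries below are made up; in practice
 * ofproto passes its own flow_segment_u64s array, terminated by a value that
 * is >= FLOW_U64S):
 *
 *     static const uint8_t segments[] = { 16, 24, 28, FLOW_U64S };
 *     struct classifier cls;
 *
 *     classifier_init(&cls, segments);
 *
 * Passing NULL for 'flow_segments' disables the staged lookup indices. */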
/* Destroys 'cls'.  Rules within 'cls', if any, are not freed; this is the
 * caller's responsibility.
 * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls)
{
    if (cls) {
        struct cls_partition *partition;
        struct cls_subtable *subtable;
        int i;

        for (i = 0; i < cls->n_tries; i++) {
            trie_destroy(&cls->tries[i].root);
        }

        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            destroy_subtable(cls, subtable);
        }
        cmap_destroy(&cls->subtables_map);

        CMAP_FOR_EACH (partition, cmap_node, &cls->partitions) {
            ovsrcu_postpone(free, partition);
        }
        cmap_destroy(&cls->partitions);

        pvector_destroy(&cls->subtables);
    }
}
/* Set the fields for which prefix lookup should be performed.  Returns true
 * if the trie configuration changed, false otherwise. */
bool
classifier_set_prefix_fields(struct classifier *cls,
                             const enum mf_field_id *trie_fields,
                             unsigned int n_fields)
{
    const struct mf_field *new_fields[CLS_MAX_TRIES];
    struct mf_bitmap fields = MF_BITMAP_INITIALIZER;
    int i, n_tries = 0;
    bool changed = false;

    for (i = 0; i < n_fields && n_tries < CLS_MAX_TRIES; i++) {
        const struct mf_field *field = mf_from_id(trie_fields[i]);
        if (field->flow_be32ofs < 0 || field->n_bits % 32) {
            /* Incompatible field.  This is the only place where we
             * enforce these requirements, but the rest of the trie code
             * depends on the flow_be32ofs to be non-negative and the
             * field length to be a multiple of 32 bits. */
            continue;
        }

        if (bitmap_is_set(fields.bm, trie_fields[i])) {
            /* Duplicate field, there is no need to build more than
             * one index for any one field. */
            continue;
        }
        bitmap_set1(fields.bm, trie_fields[i]);

        new_fields[n_tries] = NULL;
        if (n_tries >= cls->n_tries || field != cls->tries[n_tries].field) {
            new_fields[n_tries] = field;
            changed = true;
        }
        n_tries++;
    }

    if (changed || n_tries < cls->n_tries) {
        struct cls_subtable *subtable;

        /* Trie configuration needs to change.  Disable trie lookups
         * for the tries that are changing and wait for all the current
         * readers with the old configuration to be done. */
        changed = false;
        CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
            for (i = 0; i < cls->n_tries; i++) {
                if ((i < n_tries && new_fields[i]) || i >= n_tries) {
                    if (subtable->trie_plen[i]) {
                        subtable->trie_plen[i] = 0;
                        changed = true;
                    }
                }
            }
        }
        /* Synchronize if any readers were using tries.  The readers may
         * temporarily function without the trie lookup based optimizations. */
        if (changed) {
            /* ovsrcu_synchronize() functions as a memory barrier, so it does
             * not matter that subtable->trie_plen is not atomic. */
            ovsrcu_synchronize();
        }

        /* Now set up the tries. */
        for (i = 0; i < n_tries; i++) {
            if (new_fields[i]) {
                trie_init(cls, i, new_fields[i]);
            }
        }
        /* Destroy the rest, if any. */
        for (; i < cls->n_tries; i++) {
            trie_init(cls, i, NULL);
        }

        cls->n_tries = n_tries;
        return true;
    }

    return false; /* No change. */
}
static void
trie_init(struct classifier *cls, int trie_idx, const struct mf_field *field)
{
    struct cls_trie *trie = &cls->tries[trie_idx];
    struct cls_subtable *subtable;

    if (trie_idx < cls->n_tries) {
        trie_destroy(&trie->root);
    } else {
        ovsrcu_set_hidden(&trie->root, NULL);
    }
    trie->field = field;

    /* Add existing rules to the new trie. */
    CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
        unsigned int plen;

        plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
        if (plen) {
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                trie_insert(trie, head->cls_rule, plen);
            }
        }
        /* Initialize subtable's prefix length on this field.  This will
         * allow readers to use the trie. */
        atomic_thread_fence(memory_order_release);
        subtable->trie_plen[trie_idx] = plen;
    }
}
/* Returns true if 'cls' contains no classification rules, false otherwise.
 * Checking the cmap requires no locking. */
bool
classifier_is_empty(const struct classifier *cls)
{
    return cmap_is_empty(&cls->subtables_map);
}
/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
{
    /* n_rules is an int, so in the presence of concurrent writers this will
     * return either the old or a new value. */
    return cls->n_rules;
}
static uint32_t
hash_metadata(ovs_be64 metadata)
{
    return hash_uint64((OVS_FORCE uint64_t) metadata);
}
static struct cls_partition *
find_partition(const struct classifier *cls, ovs_be64 metadata, uint32_t hash)
{
    struct cls_partition *partition;

    CMAP_FOR_EACH_WITH_HASH (partition, cmap_node, hash, &cls->partitions) {
        if (partition->metadata == metadata) {
            return partition;
        }
    }

    return NULL;
}
static struct cls_partition *
create_partition(struct classifier *cls, struct cls_subtable *subtable,
                 ovs_be64 metadata)
{
    uint32_t hash = hash_metadata(metadata);
    struct cls_partition *partition = find_partition(cls, metadata, hash);
    if (!partition) {
        partition = xmalloc(sizeof *partition);
        partition->metadata = metadata;
        partition->tags = 0;
        tag_tracker_init(&partition->tracker);
        cmap_insert(&cls->partitions, &partition->cmap_node, hash);
    }
    tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
    return partition;
}
static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
{
    /* Could optimize to use the same map if needed for fast path. */
    return MINIFLOW_GET_BE32(&match->flow, tp_src)
        & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
}
static void
subtable_replace_head_rule(struct classifier *cls OVS_UNUSED,
                           struct cls_subtable *subtable,
                           struct cls_match *head, struct cls_match *new,
                           uint32_t hash, uint32_t ihash[CLS_MAX_INDICES])
{
    /* Rule's data is already in the tries. */

    new->partition = head->partition; /* Steal partition, if any. */
    head->partition = NULL;

    for (int i = 0; i < subtable->n_indices; i++) {
        cmap_replace(&subtable->indices[i], &head->index_nodes[i],
                     &new->index_nodes[i], ihash[i]);
    }
    cmap_replace(&subtable->rules, &head->cmap_node, &new->cmap_node, hash);
}
/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * If 'cls' already contains an identical rule (including wildcards, values of
 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
 * rule that was replaced.  The caller takes ownership of the returned rule and
 * is thus responsible for destroying it with cls_rule_destroy(), after RCU
 * grace period has passed (see ovsrcu_postpone()).
 *
 * Returns NULL if 'cls' does not contain a rule with an identical key, after
 * inserting the new rule.  In this case, no rules are displaced by the new
 * rule, even rules that cannot have any effect because the new rule matches a
 * superset of their flows and has higher priority. */
const struct cls_rule *
classifier_replace(struct classifier *cls, const struct cls_rule *rule,
                   const struct cls_conjunction *conjs, size_t n_conjs)
{
    struct cls_match *new;
    struct cls_subtable *subtable;
    uint32_t ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    struct cls_match *head;
    size_t n_rules = 0;
    uint32_t basis;
    uint32_t hash;
    int i;

    ovs_assert(rule->version > 0);

    /* 'new' is initially invisible to lookups. */
    new = cls_match_alloc(rule, conjs, n_conjs);

    CONST_CAST(struct cls_rule *, rule)->cls_match = new;

    subtable = find_subtable(cls, &rule->match.mask);
    if (!subtable) {
        subtable = insert_subtable(cls, &rule->match.mask);
    }
    /* Compute hashes in segments. */
    basis = 0;
    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&rule->match, prev_be64ofs, FLOW_U64S, &basis);

    head = find_equal(subtable, &rule->match.flow, hash);
    if (!head) {
        /* Add rule to tries.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        for (i = 0; i < cls->n_tries; i++) {
            if (subtable->trie_plen[i]) {
                trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
            }
        }

        /* Add rule to ports trie. */
        if (subtable->ports_mask_len) {
            /* We mask the value to be inserted to always have the wildcarded
             * bits in known (zero) state, so we can include them in comparison
             * and they will always match (== their original value does not
             * matter). */
            ovs_be32 masked_ports = minimatch_get_ports(&rule->match);

            trie_insert_prefix(&subtable->ports_trie, &masked_ports,
                               subtable->ports_mask_len);
        }

        /* Add rule to partitions.
         *
         * Concurrent readers might miss seeing the rule until this update,
         * which might require being fixed up by revalidation later. */
        new->partition = NULL;
        if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
            ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);

            new->partition = create_partition(cls, subtable, metadata);
        }

        /* Add new node to segment indices.
         *
         * Readers may find the rule in the indices before the rule is visible
         * in the subtable's 'rules' map.  This may result in us losing the
         * opportunity to quit lookups earlier, resulting in sub-optimal
         * wildcarding.  This will be fixed later by revalidation (always
         * scheduled after flow table changes). */
        for (i = 0; i < subtable->n_indices; i++) {
            cmap_insert(&subtable->indices[i], &new->index_nodes[i], ihash[i]);
        }
        n_rules = cmap_insert(&subtable->rules, &new->cmap_node, hash);
    } else { /* Equal rules exist in the classifier already. */
        struct cls_match *prev, *iter;

        /* Scan the list for the insertion point that will keep the list in
         * order of decreasing priority.  Insert after rules marked invisible
         * in any version of the same priority. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (rule->priority > iter->priority
                || (rule->priority == iter->priority
                    && !cls_match_is_eventually_invisible(iter))) {
                break;
            }
        }

        /* Replace 'iter' with 'new' or insert 'new' between 'prev' and
         * 'iter'. */
        if (iter) {
            struct cls_rule *old;

            if (rule->priority == iter->priority) {
                cls_match_replace(prev, iter, new);
                old = CONST_CAST(struct cls_rule *, iter->cls_rule);
            } else {
                cls_match_insert(prev, iter, new);
                old = NULL;
            }

            /* Replace the existing head in data structures, if rule is the new
             * head. */
            if (iter == head) {
                subtable_replace_head_rule(cls, subtable, head, new, hash,
                                           ihash);
            }

            if (old) {
                struct cls_conjunction_set *conj_set;

                conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                                &iter->conj_set);
                if (conj_set) {
                    ovsrcu_postpone(free, conj_set);
                }

                ovsrcu_postpone(cls_match_free_cb, iter);
                old->cls_match = NULL;

                /* No change in subtable's max priority or max count. */

                /* Make 'new' visible to lookups in the appropriate version. */
                cls_match_set_visibility(new, rule->version);

                /* Make rule visible to iterators (immediately). */
                rculist_replace(CONST_CAST(struct rculist *, &rule->node),
                                &old->node);

                /* Return displaced rule.  Caller is responsible for keeping it
                 * around until all threads quiesce. */
                return old;
            }
        } else {
            /* 'new' is new node after 'prev'. */
            cls_match_insert(prev, iter, new);
        }
    }

    /* Make 'new' visible to lookups in the appropriate version. */
    cls_match_set_visibility(new, rule->version);

    /* Make rule visible to iterators (immediately). */
    rculist_push_back(&subtable->rules_list,
                      CONST_CAST(struct rculist *, &rule->node));
    /* Rule was added, not replaced.  Update 'subtable's 'max_priority' and
     * 'max_count', if necessary.
     *
     * The rule was already inserted, but concurrent readers may not see the
     * rule yet as the subtables vector is not updated yet.  This will have to
     * be fixed by revalidation later. */
    if (n_rules == 1) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_insert(&cls->subtables, subtable, rule->priority);
    } else if (rule->priority == subtable->max_priority) {
        ++subtable->max_count;
    } else if (rule->priority > subtable->max_priority) {
        subtable->max_priority = rule->priority;
        subtable->max_count = 1;
        pvector_change_priority(&cls->subtables, subtable, rule->priority);
    }

    /* Nothing was replaced. */
    cls->n_rules++;

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    return NULL;
}
/* Inserts 'rule' into 'cls'.  Until 'rule' is removed from 'cls', the caller
 * must not modify or free it.
 *
 * 'cls' must not contain an identical rule (including wildcards, values of
 * fixed fields, and priority).  Use classifier_find_rule_exactly() to find
 * one. */
void
classifier_insert(struct classifier *cls, const struct cls_rule *rule,
                  const struct cls_conjunction conj[], size_t n_conj)
{
    const struct cls_rule *displaced_rule
        = classifier_replace(cls, rule, conj, n_conj);
    ovs_assert(!displaced_rule);
}
/* Removes 'rule' from 'cls'.  It is the caller's responsibility to destroy
 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
 * resides, etc., as necessary.
 *
 * Does nothing if 'rule' has been already removed, or was never inserted.
 *
 * Returns the removed rule, or NULL, if it was already removed. */
const struct cls_rule *
classifier_remove(struct classifier *cls, const struct cls_rule *cls_rule)
{
    struct cls_match *rule, *prev, *next, *head;
    struct cls_partition *partition;
    struct cls_conjunction_set *conj_set;
    struct cls_subtable *subtable;
    int i;
    uint32_t basis = 0, hash, ihash[CLS_MAX_INDICES];
    uint8_t prev_be64ofs = 0;
    size_t n_rules;
    rule = cls_rule->cls_match;
    if (!rule) {
        return NULL;
    }
    /* Mark as removed. */
    CONST_CAST(struct cls_rule *, cls_rule)->cls_match = NULL;

    /* Remove 'cls_rule' from the subtable's rules list. */
    rculist_remove(CONST_CAST(struct rculist *, &cls_rule->node));

    subtable = find_subtable(cls, &cls_rule->match.mask);
    ovs_assert(subtable);

    for (i = 0; i < subtable->n_indices; i++) {
        ihash[i] = minimatch_hash_range(&cls_rule->match, prev_be64ofs,
                                        subtable->index_ofs[i], &basis);
        prev_be64ofs = subtable->index_ofs[i];
    }
    hash = minimatch_hash_range(&cls_rule->match, prev_be64ofs, FLOW_U64S,
                                &basis);

    head = find_equal(subtable, &cls_rule->match.flow, hash);
    /* Check if the rule is not the head rule. */
    if (rule != head) {
        struct cls_match *iter;

        /* Not the head rule, but potentially one with the same priority. */
        /* Remove from the list of equal rules. */
        FOR_EACH_RULE_IN_LIST_PROTECTED (iter, prev, head) {
            if (iter == rule) {
                break;
            }
        }
        ovs_assert(iter == rule);

        cls_match_remove(prev, rule);

        goto check_priority;
    }

    /* 'rule' is the head rule.  Check if there is another rule to
     * replace 'rule' in the data structures. */
    next = cls_match_next_protected(rule);
    if (next) {
        subtable_replace_head_rule(cls, subtable, rule, next, hash, ihash);
        goto check_priority;
    }
    /* 'rule' is last of the kind in the classifier, must remove from all the
     * data structures. */

    if (subtable->ports_mask_len) {
        ovs_be32 masked_ports = minimatch_get_ports(&cls_rule->match);

        trie_remove_prefix(&subtable->ports_trie,
                           &masked_ports, subtable->ports_mask_len);
    }
    for (i = 0; i < cls->n_tries; i++) {
        if (subtable->trie_plen[i]) {
            trie_remove(&cls->tries[i], cls_rule, subtable->trie_plen[i]);
        }
    }

    /* Remove rule node from indices. */
    for (i = 0; i < subtable->n_indices; i++) {
        cmap_remove(&subtable->indices[i], &rule->index_nodes[i], ihash[i]);
    }
    n_rules = cmap_remove(&subtable->rules, &rule->cmap_node, hash);

    partition = rule->partition;
    if (partition) {
        tag_tracker_subtract(&partition->tracker, &partition->tags,
                             subtable->tag);
        if (!partition->tags) {
            cmap_remove(&cls->partitions, &partition->cmap_node,
                        hash_metadata(partition->metadata));
            ovsrcu_postpone(free, partition);
        }
    }

    if (n_rules == 0) {
        destroy_subtable(cls, subtable);
    } else {
check_priority:
        if (subtable->max_priority == rule->priority
            && --subtable->max_count == 0) {
            /* Find the new 'max_priority' and 'max_count'. */
            int max_priority = INT_MIN;
            struct cls_match *head;

            CMAP_FOR_EACH (head, cmap_node, &subtable->rules) {
                if (head->priority > max_priority) {
                    max_priority = head->priority;
                    subtable->max_count = 1;
                } else if (head->priority == max_priority) {
                    ++subtable->max_count;
                }
            }
            subtable->max_priority = max_priority;
            pvector_change_priority(&cls->subtables, subtable, max_priority);
        }
    }

    if (cls->publish) {
        pvector_publish(&cls->subtables);
    }

    /* Free the rule. */
    conj_set = ovsrcu_get_protected(struct cls_conjunction_set *,
                                    &rule->conj_set);
    if (conj_set) {
        ovsrcu_postpone(free, conj_set);
    }
    ovsrcu_postpone(cls_match_free_cb, rule);
    cls->n_rules--;

    return cls_rule;
}
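/* Illustrative removal sequence (sketch): the caller still owns the removed
 * rule's memory and must let an RCU grace period pass before freeing it,
 * e.g.:
 *
 *     if (classifier_remove(cls, rule)) {
 *         ovsrcu_postpone(free_rule_cb, rule);   (free_rule_cb being a
 *                                                 hypothetical callback that
 *                                                 calls cls_rule_destroy())
 *     }
 */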
/* Prefix tree context.  Valid when 'lookup_done' is true.  Can skip all
 * subtables which have a prefix match on the trie field, but whose prefix
 * length is not indicated in 'match_plens'.  For example, a subtable that
 * has an 8-bit trie field prefix match can be skipped if
 * !be_get_bit_at(&match_plens, 8 - 1).  If skipped, 'maskbits' prefix bits
 * must be unwildcarded to make datapath flow only match packets it should. */
struct trie_ctx {
    const struct cls_trie *trie;
    bool lookup_done;           /* Status of the lookup. */
    uint8_t be32ofs;            /* U32 offset of the field in question. */
    unsigned int maskbits;      /* Prefix length needed to avoid false
                                 * matches. */
    union mf_value match_plens; /* Bitmask of prefix lengths with possible
                                 * matches. */
};
static void
trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
{
    ctx->trie = trie;
    ctx->be32ofs = trie->field->flow_be32ofs;
    ctx->lookup_done = false;
}
struct conjunctive_match {
    struct hmap_node hmap_node;
    uint32_t id;
    uint64_t clauses;
};
static struct conjunctive_match *
find_conjunctive_match__(struct hmap *matches, uint64_t id, uint32_t hash)
{
    struct conjunctive_match *m;

    HMAP_FOR_EACH_IN_BUCKET (m, hmap_node, hash, matches) {
        if (m->id == id) {
            return m;
        }
    }
    return NULL;
}
static bool
find_conjunctive_match(const struct cls_conjunction_set *set,
                       unsigned int max_n_clauses, struct hmap *matches,
                       struct conjunctive_match *cm_stubs, size_t n_cm_stubs,
                       uint32_t *idp)
{
    const struct cls_conjunction *c;

    if (max_n_clauses < set->min_n_clauses) {
        return false;
    }

    for (c = set->conj; c < &set->conj[set->n]; c++) {
        struct conjunctive_match *cm;
        uint32_t hash;

        if (c->n_clauses > max_n_clauses) {
            continue;
        }

        hash = hash_int(c->id, 0);
        cm = find_conjunctive_match__(matches, c->id, hash);
        if (!cm) {
            size_t n = hmap_count(matches);

            cm = n < n_cm_stubs ? &cm_stubs[n] : xmalloc(sizeof *cm);
            hmap_insert(matches, &cm->hmap_node, hash);
            cm->id = c->id;
            cm->clauses = UINT64_MAX << (c->n_clauses & 63);
        }
        cm->clauses |= UINT64_C(1) << c->clause;
        if (cm->clauses == UINT64_MAX) {
            *idp = cm->id;
            return true;
        }
    }
    return false;
}
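/* Worked example of the 'clauses' bookkeeping above: for a conjunction with
 * n_clauses == 3, 'clauses' starts as UINT64_MAX << 3, leaving only bits 0..2
 * clear.  Each matched clause ORs in UINT64_C(1) << clause, so once clauses
 * 0, 1 and 2 have all been seen, 'clauses' equals UINT64_MAX and the
 * conjunction is complete. */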
static void
free_conjunctive_matches(struct hmap *matches,
                         struct conjunctive_match *cm_stubs, size_t n_cm_stubs)
{
    if (hmap_count(matches) > n_cm_stubs) {
        struct conjunctive_match *cm, *next;

        HMAP_FOR_EACH_SAFE (cm, next, hmap_node, matches) {
            if (!(cm >= cm_stubs && cm < &cm_stubs[n_cm_stubs])) {
                free(cm);
            }
        }
    }
    hmap_destroy(matches);
}
/* Like classifier_lookup(), except that support for conjunctive matches can be
 * configured with 'allow_conjunctive_matches'.  That feature is not exposed
 * externally because turning off conjunctive matches is only useful to avoid
 * recursion within this function itself.
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
static const struct cls_rule *
classifier_lookup__(const struct classifier *cls, long long version,
                    struct flow *flow, struct flow_wildcards *wc,
                    bool allow_conjunctive_matches)
{
    const struct cls_partition *partition;
    struct trie_ctx trie_ctx[CLS_MAX_TRIES];
    const struct cls_match *match;
    tag_type tags;

    /* Highest-priority flow in 'cls' that certainly matches 'flow'. */
    const struct cls_match *hard = NULL;
    int hard_pri = INT_MIN;     /* hard ? hard->priority : INT_MIN. */
    /* Highest-priority conjunctive flows in 'cls' matching 'flow'.  Since
     * these are (components of) conjunctive flows, we can only know whether
     * the full conjunctive flow matches after seeing multiple of them.  Thus,
     * we refer to these as "soft matches". */
    struct cls_conjunction_set *soft_stub[64];
    struct cls_conjunction_set **soft = soft_stub;
    size_t n_soft = 0, allocated_soft = ARRAY_SIZE(soft_stub);
    int soft_pri = INT_MIN;    /* n_soft ? MAX(soft[*]->priority) : INT_MIN. */
    /* Synchronize for cls->n_tries and subtable->trie_plen.  They can change
     * when table configuration changes, which happens typically only on
     * startup. */
    atomic_thread_fence(memory_order_acquire);
    /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
     * then 'flow' cannot possibly match in 'subtable':
     *
     *     - If flow->metadata maps to a given 'partition', then we can use
     *       'tags' for 'partition->tags'.
     *
     *     - If flow->metadata has no partition, then no rule in 'cls' has an
     *       exact-match for flow->metadata.  That means that we don't need to
     *       search any subtable that includes flow->metadata in its mask.
     *
     * In either case, we always need to search any cls_subtables that do not
     * include flow->metadata in its mask.  One way to do that would be to
     * check the "cls_subtable"s explicitly for that, but that would require an
     * extra branch per subtable.  Instead, we mark such a cls_subtable's
     * 'tags' as TAG_ALL and make sure that 'tags' is never empty.  This means
     * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
     * need a special case. */
    partition = (cmap_is_empty(&cls->partitions)
                 ? NULL
                 : find_partition(cls, flow->metadata,
                                  hash_metadata(flow->metadata)));
    tags = partition ? partition->tags : TAG_ARBITRARY;
    /* Initialize trie contexts for find_match_wc(). */
    for (int i = 0; i < cls->n_tries; i++) {
        trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
    }
    /* Main loop. */
    struct cls_subtable *subtable;
    PVECTOR_FOR_EACH_PRIORITY (subtable, hard_pri, 2, sizeof *subtable,
                               &cls->subtables) {
        struct cls_conjunction_set *conj_set;

        /* Skip subtables not in our partition. */
        if (!tag_intersects(tags, subtable->tag)) {
            continue;
        }

        /* Skip subtables with no match, or where the match is lower-priority
         * than some certain match we've already found. */
        match = find_match_wc(subtable, version, flow, trie_ctx, cls->n_tries,
                              wc);
        if (!match || match->priority <= hard_pri) {
            continue;
        }
        conj_set = ovsrcu_get(struct cls_conjunction_set *, &match->conj_set);
        if (!conj_set) {
            /* 'match' isn't part of a conjunctive match.  It's the best
             * certain match we've got so far, since we know that it's
             * higher-priority than hard_pri.
             *
             * (There might be a higher-priority conjunctive match.  We can't
             * tell yet.) */
            hard = match;
            hard_pri = hard->priority;
        } else if (allow_conjunctive_matches) {
            /* 'match' is part of a conjunctive match.  Add it to the list. */
            if (OVS_UNLIKELY(n_soft >= allocated_soft)) {
                struct cls_conjunction_set **old_soft = soft;

                allocated_soft *= 2;
                soft = xmalloc(allocated_soft * sizeof *soft);
                memcpy(soft, old_soft, n_soft * sizeof *soft);
                if (old_soft != soft_stub) {
                    free(old_soft);
                }
            }
            soft[n_soft++] = conj_set;

            /* Keep track of the highest-priority soft match. */
            if (soft_pri < match->priority) {
                soft_pri = match->priority;
            }
        }
    }
    /* In the common case, at this point we have no soft matches and we can
     * return immediately.  (We do the same thing if we have potential soft
     * matches but none of them are higher-priority than our hard match.) */
    if (hard_pri >= soft_pri) {
        if (soft != soft_stub) {
            free(soft);
        }
        return hard ? hard->cls_rule : NULL;
    }
    /* At this point, we have some soft matches.  We might also have a hard
     * match; if so, its priority is lower than the highest-priority soft
     * match. */

    /* Soft match loop.
     *
     * Check whether soft matches are real matches. */
    for (;;) {
        /* Delete soft matches that are null.  This only happens in second and
         * subsequent iterations of the soft match loop, when we drop back from
         * a high-priority soft match to a lower-priority one.
         *
         * Also, delete soft matches whose priority is less than or equal to
         * the hard match's priority.  In the first iteration of the soft
         * match, these can be in 'soft' because the earlier main loop found
         * the soft match before the hard match.  In second and later
         * iterations of the soft match loop, these can be in 'soft' because
         * we dropped back from a high-priority soft match to a lower-priority
         * soft match.
         *
         * It is tempting to delete soft matches that cannot be satisfied
         * because there are fewer soft matches than required to satisfy any of
         * their conjunctions, but we cannot do that because there might be
         * lower priority soft or hard matches with otherwise identical
         * matches.  (We could special case those here, but there's no
         * need--we'll do so at the bottom of the soft match loop anyway and
         * this duplicates less code.)
         *
         * It's also tempting to break out of the soft match loop if 'n_soft ==
         * 1' but that would also miss lower-priority hard matches.  We could
         * special case that also but again there's no need. */
        for (int i = 0; i < n_soft; ) {
            if (!soft[i] || soft[i]->priority <= hard_pri) {
                soft[i] = soft[--n_soft];
            } else {
                i++;
            }
        }
        if (!n_soft) {
            break;
        }
        /* Find the highest priority among the soft matches.  (We know this
         * must be higher than the hard match's priority; otherwise we would
         * have deleted all of the soft matches in the previous loop.)  Count
         * the number of soft matches that have that priority. */
        soft_pri = INT_MIN;
        int n_soft_pri = 0;
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority > soft_pri) {
                soft_pri = soft[i]->priority;
                n_soft_pri = 1;
            } else if (soft[i]->priority == soft_pri) {
                n_soft_pri++;
            }
        }
        ovs_assert(soft_pri > hard_pri);
        /* Look for a real match among the highest-priority soft matches.
         *
         * It's unusual to have many conjunctive matches, so we use stubs to
         * avoid calling malloc() in the common case.  An hmap has a built-in
         * stub for up to 2 hmap_nodes; possibly, we would benefit from a
         * variant with a bigger stub. */
        struct conjunctive_match cm_stubs[16];
        struct hmap matches;

        hmap_init(&matches);
        for (int i = 0; i < n_soft; i++) {
            uint32_t id;

            if (soft[i]->priority == soft_pri
                && find_conjunctive_match(soft[i], n_soft_pri, &matches,
                                          cm_stubs, ARRAY_SIZE(cm_stubs),
                                          &id)) {
                uint32_t saved_conj_id = flow->conj_id;
                const struct cls_rule *rule;

                flow->conj_id = id;
                rule = classifier_lookup__(cls, version, flow, wc, false);
                flow->conj_id = saved_conj_id;

                if (rule) {
                    free_conjunctive_matches(&matches,
                                             cm_stubs, ARRAY_SIZE(cm_stubs));
                    if (soft != soft_stub) {
                        free(soft);
                    }
                    return rule;
                }
            }
        }
        free_conjunctive_matches(&matches, cm_stubs, ARRAY_SIZE(cm_stubs));
        /* There's no real match among the highest-priority soft matches.
         * However, if any of those soft matches has a lower-priority but
         * otherwise identical flow match, then we need to consider those for
         * soft or hard matches.
         *
         * The next iteration of the soft match loop will delete any null
         * pointers we put into 'soft' (and some others too). */
        for (int i = 0; i < n_soft; i++) {
            if (soft[i]->priority != soft_pri) {
                continue;
            }

            /* Find next-lower-priority flow with identical flow match. */
            match = next_visible_rule_in_list(soft[i]->match, version);
            if (match) {
                soft[i] = ovsrcu_get(struct cls_conjunction_set *,
                                     &match->conj_set);
                if (!soft[i]) {
                    /* The flow is a hard match; don't treat as a soft
                     * match. */
                    if (match->priority > hard_pri) {
                        hard = match;
                        hard_pri = hard->priority;
                    }
                }
            } else {
                /* No such lower-priority flow (probably the common case). */
                soft[i] = NULL;
            }
        }
    }

    if (soft != soft_stub) {
        free(soft);
    }
    return hard ? hard->cls_rule : NULL;
}
/* Finds and returns the highest-priority rule in 'cls' that matches 'flow' and
 * that is visible in 'version'.  Returns a null pointer if no rules in 'cls'
 * match 'flow'.  If multiple rules of equal priority match 'flow', returns one
 * arbitrarily.
 *
 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
 * set of bits that were significant in the lookup.  At some point
 * earlier, 'wc' should have been initialized (e.g., by
 * flow_wildcards_init_catchall()).
 *
 * 'flow' is non-const to allow for temporary modifications during the lookup.
 * Any changes are restored before returning. */
const struct cls_rule *
classifier_lookup(const struct classifier *cls, long long version,
                  struct flow *flow, struct flow_wildcards *wc)
{
    return classifier_lookup__(cls, version, flow, wc, true);
}
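/* Illustrative lookup (sketch):
 *
 *     struct flow_wildcards wc;
 *     const struct cls_rule *rule;
 *
 *     flow_wildcards_init_catchall(&wc);
 *     rule = classifier_lookup(cls, version, &flow, &wc);
 *
 * Afterwards 'wc.masks' has a bit set for each flow bit the lookup had to
 * examine, which callers such as the datapath use to build megaflow masks. */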
/* Finds and returns a rule in 'cls' with exactly the same priority and
 * matching criteria as 'target', and that is visible in 'target->version'.
 * Only one such rule may ever exist.  Returns a null pointer if 'cls' doesn't
 * contain an exact match. */
const struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
                             const struct cls_rule *target)
{
    const struct cls_match *head, *rule;
    const struct cls_subtable *subtable;

    subtable = find_subtable(cls, &target->match.mask);
    if (!subtable) {
        return NULL;
    }

    head = find_equal(subtable, &target->match.flow,
                      miniflow_hash_in_minimask(&target->match.flow,
                                                &target->match.mask, 0));
    if (!head) {
        return NULL;
    }
    CLS_MATCH_FOR_EACH (rule, head) {
        if (rule->priority < target->priority) {
            break; /* Not found. */
        }
        if (rule->priority == target->priority
            && cls_match_visible_in_version(rule, target->version)) {
            return rule->cls_rule;
        }
    }
    return NULL;
}
/* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
 * same matching criteria as 'target', and that is visible in 'version'.
 * Returns a null pointer if 'cls' doesn't contain an exact match visible in
 * 'version'. */
const struct cls_rule *
classifier_find_match_exactly(const struct classifier *cls,
                              const struct match *target, int priority,
                              long long version)
{
    const struct cls_rule *retval;
    struct cls_rule cr;

    cls_rule_init(&cr, target, priority, version);
    retval = classifier_find_rule_exactly(cls, &cr);
    cls_rule_destroy(&cr);

    return retval;
}
/* Checks if 'target' would overlap any other rule in 'cls'.  Two rules are
 * considered to overlap if both rules have the same priority and a packet
 * could match both, and if both rules are visible in the same version.
 *
 * A trivial example of overlapping rules is two rules matching disjoint sets
 * of fields.  E.g., if one rule matches only on port number, while another
 * only on dl_type, any packet from that specific port and with that specific
 * dl_type could match both, if the rules also have the same priority. */
bool
classifier_rule_overlaps(const struct classifier *cls,
                         const struct cls_rule *target)
{
    struct cls_subtable *subtable;

    /* Iterate subtables in the descending max priority order. */
    PVECTOR_FOR_EACH_PRIORITY (subtable, target->priority - 1, 2,
                               sizeof(struct cls_subtable), &cls->subtables) {
        uint64_t storage[FLOW_U64S];
        struct minimask mask;
        const struct cls_rule *rule;

        minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);

        RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
            if (rule->priority == target->priority
                && miniflow_equal_in_minimask(&target->match.flow,
                                              &rule->match.flow, &mask)
                && cls_match_visible_in_version(rule->cls_match,
                                                target->version)) {
                return true;
            }
        }
    }
    return false;
}
/* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
 * specific than 'criteria'.  That is, 'rule' matches 'criteria' and this
 * function returns true if, for every field:
 *
 *   - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
 *     field, or
 *
 *   - 'criteria' wildcards the field.
 *
 * Conversely, 'rule' does not match 'criteria' and this function returns false
 * if, for at least one field:
 *
 *   - 'criteria' and 'rule' specify different values for the field, or
 *
 *   - 'criteria' specifies a value for the field but 'rule' wildcards it.
 *
 * Equivalently, the truth table for whether a field matches is:
 *
 *                                     rule
 *
 *                   c         wildcard    exact
 *                   r        +---------+---------+
 *                   i   wild |   yes   |   yes   |
 *                   t   card |         |         |
 *                   e        +---------+---------+
 *                   r  exact |    no   |if values|
 *                   i        |         |  match  |
 *                   a        +---------+---------+
 *
 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
 * commands and by OpenFlow 1.0 aggregate and flow stats.
 *
 * Ignores rule->priority. */
bool
cls_rule_is_loose_match(const struct cls_rule *rule,
                        const struct minimatch *criteria)
{
    return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
            && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
                                          &criteria->mask));
}
/* Rule may only match a target if it is visible in the target's version.  For
 * NULL target we only return rules that are not invisible in any version. */
static bool
rule_matches(const struct cls_rule *rule, const struct cls_rule *target)
{
    /* Iterators never see duplicate rules with the same priority. */
    return target
        ? (miniflow_equal_in_minimask(&rule->match.flow, &target->match.flow,
                                      &target->match.mask)
           && cls_match_visible_in_version(rule->cls_match, target->version))
        : !cls_match_is_eventually_invisible(rule->cls_match);
}
static const struct cls_rule *
search_subtable(const struct cls_subtable *subtable,
                struct cls_cursor *cursor)
{
    if (!cursor->target
        || !minimask_has_extra(&subtable->mask, &cursor->target->match.mask)) {
        const struct cls_rule *rule;

        RCULIST_FOR_EACH (rule, node, &subtable->rules_list) {
            if (rule_matches(rule, cursor->target)) {
                return rule;
            }
        }
    }
    return NULL;
}
/* Initializes 'cursor' for iterating through rules in 'cls', with the first
 * matching cls_rule available via the returned cursor's 'rule' member, or
 * NULL if there are no matches.
 *
 *     - If 'target' is null, or if the 'target' is a catchall target and the
 *       target's version is CLS_MAX_VERSION, the cursor will visit every rule
 *       in 'cls' that is not invisible in any version.
 *
 *     - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
 *       such that cls_rule_is_loose_match(rule, target) returns true and that
 *       the rule is visible in 'target->version'.
 *
 * Ignores target->priority. */
struct cls_cursor
cls_cursor_start(const struct classifier *cls, const struct cls_rule *target)
{
    struct cls_cursor cursor;
    struct cls_subtable *subtable;

    cursor.cls = cls;
    cursor.target = target && (!cls_rule_is_catchall(target)
                               || target->version != CLS_MAX_VERSION)
        ? target : NULL;
    cursor.rule = NULL;

    /* Find first rule. */
    PVECTOR_CURSOR_FOR_EACH (subtable, &cursor.subtables,
                             &cursor.cls->subtables) {
        const struct cls_rule *rule = search_subtable(subtable, &cursor);

        if (rule) {
            cursor.subtable = subtable;
            cursor.rule = rule;
            break;
        }
    }

    return cursor;
}
static const struct cls_rule *
cls_cursor_next(struct cls_cursor *cursor)
{
    const struct cls_rule *rule;
    const struct cls_subtable *subtable;

    rule = cursor->rule;
    subtable = cursor->subtable;
    RCULIST_FOR_EACH_CONTINUE (rule, node, &subtable->rules_list) {
        if (rule_matches(rule, cursor->target)) {
            return rule;
        }
    }

    PVECTOR_CURSOR_FOR_EACH_CONTINUE (subtable, &cursor->subtables) {
        rule = search_subtable(subtable, cursor);
        if (rule) {
            cursor->subtable = subtable;
            return rule;
        }
    }

    return NULL;
}

/* Sets 'cursor->rule' to the next matching cls_rule in 'cursor''s iteration,
 * or to null if all matching rules have been visited. */
void
cls_cursor_advance(struct cls_cursor *cursor)
{
    cursor->rule = cls_cursor_next(cursor);
}
static struct cls_subtable *
find_subtable(const struct classifier *cls, const struct minimask *mask)
{
    struct cls_subtable *subtable;

    CMAP_FOR_EACH_WITH_HASH (subtable, cmap_node, minimask_hash(mask, 0),
                             &cls->subtables_map) {
        if (minimask_equal(mask, &subtable->mask)) {
            return subtable;
        }
    }
    return NULL;
}
/* The new subtable will be visible to the readers only after this. */
static struct cls_subtable *
insert_subtable(struct classifier *cls, const struct minimask *mask)
{
    uint32_t hash = minimask_hash(mask, 0);
    struct cls_subtable *subtable;
    int i, index = 0;
    struct flow_wildcards old, new;
    uint8_t prev;
    int count = count_1bits(mask->masks.map);

    subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
                       + MINIFLOW_VALUES_SIZE(count));
    cmap_init(&subtable->rules);
    miniflow_clone_inline(CONST_CAST(struct miniflow *, &subtable->mask.masks),
                          &mask->masks, count);

    /* Init indices for segmented lookup, if any. */
    flow_wildcards_init_catchall(&new);
    old = new;
    prev = 0;
    for (i = 0; i < cls->n_flow_segments; i++) {
        flow_wildcards_fold_minimask_range(&new, mask, prev,
                                           cls->flow_segments[i]);
        /* Add an index if it adds mask bits. */
        if (!flow_wildcards_equal(&new, &old)) {
            cmap_init(&subtable->indices[index]);
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index])
                = cls->flow_segments[i];
            index++;
            old = new;
        }
        prev = cls->flow_segments[i];
    }
    /* Check if the rest of the subtable's mask adds any bits,
     * and remove the last index if it doesn't. */
    if (index > 0) {
        flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U64S);
        if (flow_wildcards_equal(&new, &old)) {
            --index;
            *CONST_CAST(uint8_t *, &subtable->index_ofs[index]) = 0;
            cmap_destroy(&subtable->indices[index]);
        }
    }
    *CONST_CAST(uint8_t *, &subtable->n_indices) = index;

    *CONST_CAST(tag_type *, &subtable->tag) =
        (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
         ? tag_create_deterministic(hash)
         : TAG_ALL);

    for (i = 0; i < cls->n_tries; i++) {
        subtable->trie_plen[i] = minimask_get_prefix_len(mask,
                                                         cls->tries[i].field);
    }

    /* Ports trie. */
    ovsrcu_set_hidden(&subtable->ports_trie, NULL);
    *CONST_CAST(int *, &subtable->ports_mask_len)
        = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));

    /* List of rules. */
    rculist_init(&subtable->rules_list);

    cmap_insert(&cls->subtables_map, &subtable->cmap_node, hash);

    return subtable;
}
/* RCU readers may still access the subtable before it is actually freed. */
static void
destroy_subtable(struct classifier *cls, struct cls_subtable *subtable)
{
    int i;

    pvector_remove(&cls->subtables, subtable);
    cmap_remove(&cls->subtables_map, &subtable->cmap_node,
                minimask_hash(&subtable->mask, 0));

    ovs_assert(ovsrcu_get_protected(struct trie_node *, &subtable->ports_trie)
               == NULL);
    ovs_assert(cmap_is_empty(&subtable->rules));
    ovs_assert(rculist_is_empty(&subtable->rules_list));

    for (i = 0; i < subtable->n_indices; i++) {
        cmap_destroy(&subtable->indices[i]);
    }
    cmap_destroy(&subtable->rules);
    ovsrcu_postpone(free, subtable);
}
struct range {
    uint8_t start;
    uint8_t end;
};

static unsigned int be_get_bit_at(const ovs_be32 value[], unsigned int ofs);
/* Return 'true' if the rest of the subtable can be skipped based on the
 * prefix trie lookup results. */
static inline bool
check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
            const unsigned int field_plen[CLS_MAX_TRIES],
            const struct range ofs, const struct flow *flow,
            struct flow_wildcards *wc)
{
    int j;

    /* Check if we could avoid fully unwildcarding the next level of
     * fields using the prefix tries.  The trie checks are done only as
     * needed to avoid folding in additional bits to the wildcards mask. */
    for (j = 0; j < n_tries; j++) {
        /* Is the trie field relevant for this subtable? */
        if (field_plen[j]) {
            struct trie_ctx *ctx = &trie_ctx[j];
            uint8_t be32ofs = ctx->be32ofs;
            uint8_t be64ofs = be32ofs / 2;

            /* Is the trie field within the current range of fields? */
            if (be64ofs >= ofs.start && be64ofs < ofs.end) {
                /* On-demand trie lookup. */
                if (!ctx->lookup_done) {
                    memset(&ctx->match_plens, 0, sizeof ctx->match_plens);
                    ctx->maskbits = trie_lookup(ctx->trie, flow,
                                                &ctx->match_plens);
                    ctx->lookup_done = true;
                }
                /* Possible to skip the rest of the subtable if subtable's
                 * prefix on the field is not included in the lookup result. */
                if (!be_get_bit_at(&ctx->match_plens.be32,
                                   field_plen[j] - 1)) {
                    /* We want the trie lookup to never result in unwildcarding
                     * any bits that would not be unwildcarded otherwise.
                     * Since the trie is shared by the whole classifier, it is
                     * possible that the 'maskbits' contain bits that are
                     * irrelevant for the partition relevant for the current
                     * packet.  Hence the checks below. */

                    /* Check that the trie result will not unwildcard more bits
                     * than this subtable would otherwise. */
                    if (ctx->maskbits <= field_plen[j]) {
                        /* Unwildcard the bits and skip the rest. */
                        mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
                        /* Note: Prerequisite already unwildcarded, as the only
                         * prerequisite of the supported trie lookup fields is
                         * the ethertype, which is always unwildcarded. */
                        return true;
                    }
                    /* Can skip if the field is already unwildcarded. */
                    if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * for which 'mask' has a bit set, the value that 'flow' specifies for that
 * bit matches the corresponding bit in 'target'.
 *
 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
 * target, mask), but this is faster because of the invariant that
 * flow->map and mask->masks.map are the same. */
static inline bool
miniflow_and_mask_matches_flow(const struct miniflow *flow,
                               const struct minimask *mask,
                               const struct flow *target)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & *maskp++;

        if (diff) {
            return false;
        }
    }

    return true;
}
static inline const struct cls_match *
find_match(const struct cls_subtable *subtable, long long version,
           const struct flow *flow, uint32_t hash)
{
    const struct cls_match *head, *rule;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (OVS_LIKELY(miniflow_and_mask_matches_flow(&head->flow,
                                                      &subtable->mask,
                                                      flow))) {
            /* Return highest priority rule that is visible. */
            CLS_MATCH_FOR_EACH (rule, head) {
                if (OVS_LIKELY(cls_match_visible_in_version(rule, version))) {
                    return rule->cls_rule;
                }
            }
        }
    }

    return NULL;
}
/* Returns true if 'target' satisfies 'flow'/'mask', that is, if for each bit
 * for which 'mask' has a bit set, the value that 'flow' specifies for that
 * bit matches the corresponding bit in 'target'.
 *
 * This function is equivalent to miniflow_and_mask_matches_flow() but this
 * version fills in the mask bits in 'wc'. */
static inline bool
miniflow_and_mask_matches_flow_wc(const struct miniflow *flow,
                                  const struct minimask *mask,
                                  const struct flow *target,
                                  struct flow_wildcards *wc)
{
    const uint64_t *flowp = miniflow_get_values(flow);
    const uint64_t *maskp = miniflow_get_values(&mask->masks);
    int idx;

    MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
        uint64_t mask = *maskp++;
        uint64_t diff = (*flowp++ ^ flow_u64_value(target, idx)) & mask;

        if (diff) {
            /* Only unwildcard if none of the differing bits is already
             * unwildcarded. */
            if (!(flow_u64_value(&wc->masks, idx) & diff)) {
                /* Keep one bit of the difference.  The selected bit may be
                 * different in big-endian v.s. little-endian systems. */
                *flow_u64_lvalue(&wc->masks, idx) |= rightmost_1bit(diff);
            }
            return false;
        }
        /* Fill in the bits that were looked at. */
        *flow_u64_lvalue(&wc->masks, idx) |= mask;
    }

    return true;
}
/* Unwildcard the fields looked up so far, if any. */
static void
fill_range_wc(const struct cls_subtable *subtable, struct flow_wildcards *wc,
              uint8_t to)
{
    if (to) {
        flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, to);
    }
}

static const struct cls_match *
find_match_wc(const struct cls_subtable *subtable, long long version,
              const struct flow *flow, struct trie_ctx trie_ctx[CLS_MAX_TRIES],
              unsigned int n_tries, struct flow_wildcards *wc)
{
    uint32_t basis = 0, hash;
    const struct cls_match *rule = NULL;
    int i;
    struct range ofs;

    if (OVS_UNLIKELY(!wc)) {
        return find_match(subtable, version, flow,
                          flow_hash_in_minimask(flow, &subtable->mask, 0));
    }

    ofs.start = 0;
    /* Try to finish early by checking fields in segments. */
    for (i = 0; i < subtable->n_indices; i++) {
        const struct cmap_node *inode;

        ofs.end = subtable->index_ofs[i];

        if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
                        wc)) {
            /* 'wc' bits for the trie field set, now unwildcard the preceding
             * bits used so far. */
            fill_range_wc(subtable, wc, ofs.start);
            return NULL;
        }
        hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                           ofs.end, &basis);
        inode = cmap_find(&subtable->indices[i], hash);
        if (!inode) {
            /* No match, can stop immediately, but must fold in the bits
             * used in the lookup so far. */
            fill_range_wc(subtable, wc, ofs.end);
            return NULL;
        }

        /* If we have narrowed down to a single rule already, check whether
         * that rule matches.  Either way, we're done.
         *
         * (Rare) hash collisions may cause us to miss the opportunity for this
         * optimization. */
        if (!cmap_node_next(inode)) {
            const struct cls_match *head;

            ASSIGN_CONTAINER(head, inode - i, index_nodes);
            if (miniflow_and_mask_matches_flow_wc(&head->flow, &subtable->mask,
                                                  flow, wc)) {
                /* Return the highest priority rule that is visible. */
                CLS_MATCH_FOR_EACH (rule, head) {
                    if (OVS_LIKELY(cls_match_visible_in_version(rule,
                                                                version))) {
                        return rule;
                    }
                }
            }
            return NULL;
        }
        ofs.start = ofs.end;
    }
    ofs.end = FLOW_U64S;
    /* Trie check for the final range. */
    if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
        fill_range_wc(subtable, wc, ofs.start);
        return NULL;
    }
    hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
                                       ofs.end, &basis);
    rule = find_match(subtable, version, flow, hash);
    if (!rule && subtable->ports_mask_len) {
        /* Ports are always part of the final range, if any.
         * No match was found for the ports.  Use the ports trie to figure out
         * which ports bits to unwildcard. */
        unsigned int mbits;
        ovs_be32 value, plens, mask;

        mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
        value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
        mbits = trie_lookup_value(&subtable->ports_trie, &value, &plens, 32);

        ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
            mask & be32_prefix_mask(mbits);

        /* Unwildcard all bits in the mask up to the ports, as they were used
         * to determine that there is no match. */
        fill_range_wc(subtable, wc, TP_PORTS_OFS64);
        return NULL;
    }

    /* Must unwildcard all the fields, as they were looked at. */
    flow_wildcards_fold_minimask(wc, &subtable->mask);
    return rule;
}
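
/* Illustrative note (not from the original source): tp_src and tp_dst are 16
 * bits each and share one be32 word at TP_PORTS_OFS32.  If the ports trie
 * reports, say, mbits = 10, then only the first 10 bits of that word were
 * needed to rule out a match, so only "mask & be32_prefix_mask(10)" is folded
 * into 'wc' rather than the subtable's full ports mask. */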

static struct cls_match *
find_equal(const struct cls_subtable *subtable, const struct miniflow *flow,
           uint32_t hash)
{
    struct cls_match *head;

    CMAP_FOR_EACH_WITH_HASH (head, cmap_node, hash, &subtable->rules) {
        if (miniflow_equal(&head->flow, flow)) {
            return head;
        }
    }
    return NULL;
}

/* A longest-prefix match tree. */

/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32;      /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) {      /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}
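
/* Worked example (illustrative only): with pr[0] = htonl(0xC0A80100), i.e.
 * 192.168.1.0, ofs = 8 and plen = 16: prefix = 0xC0A80100 << 8 = 0xA8010000.
 * Since plen (16) <= 32 - ofs (24), no second word is read.  The top 16 bits
 * (0xA801) are bits 8..23 of the original prefix; the trailing low bits are
 * unspecified, as documented above. */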

/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}
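
/* Note on the final expression (illustrative): shift binds tighter than '&',
 * so it parses as raw_get_prefix(...) & (~0u << (32 - plen)).  E.g., plen = 20
 * gives the mask 0xfffff000, clearing the 12 trailing bits that
 * raw_get_prefix() leaves unspecified.  The !plen guard above also avoids an
 * undefined full-width shift when plen == 0. */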

/* Return the number of equal bits in 'n_bits' of 'prefix's MSBs and a 'value'
 * starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int n_bits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, n_bits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - n_bits));
}
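
/* Worked example (illustrative only): comparing 8 bits of 0xAB (10101011)
 * against 0xAA (10101010), both left-aligned: diff = 0x01000000, so
 * diff << 32 = 0x0100000000000000, and the sentinel is
 * UINT64_C(1) << 55 = 0x0080000000000000.  raw_clz64() of their OR is 7: the
 * first 7 bits are equal.  With diff == 0 only the sentinel remains and the
 * result is exactly n_bits (8). */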

/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->n_bits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static unsigned int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static unsigned int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}
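
/* Illustrative example: for value bytes { 0xC0, 0x40 }, be_get_bit_at(value,
 * 9) reads byte 1 (0x40) and extracts bit 7 - 9 % 8 = 6, yielding 1.
 * Similarly, get_bit_at(0x80000000, 0) == 1: offset 0 is the most
 * significant bit. */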

/* Create new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->n_bits = plen;
        ovsrcu_set_hidden(&node->edges[0], NULL);
        ovsrcu_set_hidden(&node->edges[1], NULL);
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->n_bits = TRIE_PREFIX_BITS;
        ovsrcu_set_hidden(&node->edges[bit], subnode);
        ovsrcu_set_hidden(&node->edges[!bit], NULL);
        node->n_rules = 0;
    }
    return node;
}
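
/* Illustrative sketch (assuming TRIE_PREFIX_BITS is 32): creating a branch
 * for a 48-bit prefix recursively builds a leaf holding the low 16 bits
 * (where 'n_rules' ends up) and chains it under a 32-bit intermediate node
 * with n_rules == 0, linked on the edge selected by the leaf's first bit. */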

static void
trie_node_destroy(const struct trie_node *node)
{
    ovsrcu_postpone(free, CONST_CAST(struct trie_node *, node));
}

/* Copy a trie node for modification and postpone deletion of the old one. */
static struct trie_node *
trie_node_rcu_realloc(const struct trie_node *node)
{
    struct trie_node *new_node = xmalloc(sizeof *node);

    *new_node = *node;
    trie_node_destroy(node);

    return new_node;
}

static void
trie_destroy(rcu_trie_ptr *trie)
{
    struct trie_node *node = ovsrcu_get_protected(struct trie_node *, trie);

    if (node) {
        ovsrcu_set_hidden(trie, NULL);
        trie_destroy(&node->edges[0]);
        trie_destroy(&node->edges[1]);
        trie_node_destroy(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    /* No children? */
    return !ovsrcu_get(struct trie_node *, &trie->edges[0])
        && !ovsrcu_get(struct trie_node *, &trie->edges[1]);
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < n_bits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (n_bits % 32) {
        mask[i] |= htonl(~0u << (32 - n_bits % 32));
    }
}
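
/* Illustrative example: n_bits = 20 sets no full words (20 / 32 == 0) and
 * then ORs htonl(~0u << 12) == htonl(0xfffff000) into mask[0], i.e. the
 * first 20 bits of the field starting at 'be32ofs'. */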

/* Returns true if all of the first 'n_bits' mask bits starting at 'be32ofs'
 * are set. */
static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int n_bits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < n_bits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (n_bits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - n_bits % 32));
    }

    return !zeroes; /* All 'n_bits' bits set. */
}

static rcu_trie_ptr *
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return ovsrcu_get(struct trie_node *,
                      &node->edges[be_get_bit_at(value, ofs)]);
}

/* Set the bit at ("MSB 0"-based) offset 'ofs'.  'ofs' can be greater than 31.
 */
static void
be_set_bit_at(ovs_be32 value[], unsigned int ofs)
{
    ((uint8_t *)value)[ofs / 8] |= 1u << (7 - ofs % 8);
}

/* Returns the number of bits in the prefix mask necessary to determine a
 * mismatch, in case there are longer prefixes in the tree below the one that
 * matched.
 * '*plens' will have a bit set for each prefix length that may have matching
 * rules.  The caller is responsible for clearing the '*plens' prior to
 * calling this.
 */
static unsigned int
trie_lookup_value(const rcu_trie_ptr *trie, const ovs_be32 value[],
                  ovs_be32 plens[], unsigned int n_bits)
{
    const struct trie_node *prev = NULL;
    const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
    unsigned int match_len = 0; /* Number of matching bits. */

    for (; node; prev = node, node = trie_next_node(node, value, match_len)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->n_bits, value,
                                   match_len);
        match_len += eqbits;
        if (eqbits < node->n_bits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'match_len' differed. */
            return match_len + 1; /* Includes the first mismatching bit. */
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            be_set_bit_at(plens, match_len - 1);
        }
        if (match_len >= n_bits) {
            return n_bits; /* Full prefix. */
        }
    }
    /* node == NULL.  Full match so far, but we tried to follow a
     * non-existing branch.  Need to exclude the other branch if it exists
     * (it does not if we were called on an empty trie or 'prev' is a leaf
     * node). */
    return !prev || trie_is_leaf(prev) ? match_len : match_len + 1;
}
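
/* Illustrative example (not from the original code): with 10.0.0.0/8 and
 * 10.1.0.0/16 in the trie, looking up 10.2.3.4 matches the 8-bit root node
 * (setting the plens bit for length 8), then diverges after 6 more bits
 * inside the /16 branch, returning 14 + 1 = 15: matching the first 15 bits
 * of an address is enough to reproduce this lookup result. */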

static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            union mf_value *plens)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(&trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 &plens->be32, mf->n_bits);
    }
    memset(plens, 0xff, sizeof *plens); /* All prefixes, no skipping. */
    return 0; /* Value not used in this case. */
}

/* Returns the length of a prefix match mask for the field 'mf' in 'minimask'.
 * Returns 0 if the mask is empty or is not contiguous (not CIDR). */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int n_bits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t be32_ofs = mf->flow_be32ofs;
    uint8_t be32_end = be32_ofs + mf->n_bytes / 4;

    for (; be32_ofs < be32_end; ++be32_ofs) {
        uint32_t mask = ntohl(minimask_get_be32(minimask, be32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            n_bits += 32 - mask_tz;
        }
    }

    return n_bits;
}
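
/* The contiguity test above is a common bit trick (shown here for clarity):
 * for a CIDR mask such as 0xffffff00, ~mask = 0x000000ff and ~mask + 1 =
 * 0x00000100 share no bits, so the test yields 0.  For a broken mask like
 * 0xff00ff00, ~mask = 0x00ff00ff and ~mask + 1 = 0x00ff0100, whose AND is
 * 0x00ff0000 != 0, so the mask is rejected. */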

/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return (OVS_FORCE const ovs_be32 *)
        (miniflow_get_values(&match->flow)
         + count_1bits(match->flow.map &
                       ((UINT64_C(1) << mf->flow_be32ofs / 2) - 1)))
        + (mf->flow_be32ofs & 1);
}
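
/* Reading the pointer arithmetic (a sketch, not from the original source):
 * the miniflow stores only the 64-bit words present in its map, so
 * mf->flow_be32ofs / 2 is the field's u64 index in a full struct flow, the
 * count_1bits() term counts how many of the preceding u64 words are actually
 * stored, and the final "+ (mf->flow_be32ofs & 1)" selects the upper or lower
 * 32-bit half of that 64-bit word. */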

/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(rcu_trie_ptr *edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = ovsrcu_get_protected(struct trie_node *, edge));
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->n_bits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);
            struct trie_node *new_parent;

            new_parent = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                            ofs == mlen ? 1 : 0);
            /* Copy the node to modify it. */
            node = trie_node_rcu_realloc(node);
            /* Adjust the new node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->n_bits -= eqbits;
            ovsrcu_set_hidden(&new_parent->edges[old_branch], node);

            /* Check if we need a new branch for the new rule. */
            if (ofs < mlen) {
                ovsrcu_set_hidden(&new_parent->edges[!old_branch],
                                  trie_branch_create(prefix, ofs, mlen - ofs,
                                                     1));
            }
            ovsrcu_set(edge, new_parent); /* Publish changes. */
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    ovsrcu_set(edge, trie_branch_create(prefix, ofs, mlen - ofs, 1));
}
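
/* Illustrative walk-through (not from the original source): inserting
 * 10.0.0.0/8 into an empty trie creates a single 8-bit branch.  Inserting
 * 10.1.0.0/16 afterwards matches all 8 bits of that node (ofs == 8 < mlen),
 * follows a NULL edge, and the loop exits; the final trie_branch_create()
 * call then attaches a new branch holding the remaining 8 bits. */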

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the 'prefix' being
 * removed. */
static void
trie_remove_prefix(rcu_trie_ptr *root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    rcu_trie_ptr *edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = ovsrcu_get_protected(struct trie_node *, edges[depth]));
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->n_bits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if we can prune the tree. */
            while (!node->n_rules) {
                struct trie_node *next,
                    *edge0 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[0]),
                    *edge1 = ovsrcu_get_protected(struct trie_node *,
                                                  &node->edges[1]);

                if (edge0 && edge1) {
                    break; /* A branching point, cannot prune. */
                }

                /* Else we have at most one child node; remove this node. */
                next = edge0 ? edge0 : edge1;

                if (next) {
                    if (node->n_bits + next->n_bits > TRIE_PREFIX_BITS) {
                        break;   /* Cannot combine. */
                    }
                    next = trie_node_rcu_realloc(next); /* Modify. */

                    /* Combine the node with the next one. */
                    next->prefix = node->prefix | next->prefix >> node->n_bits;
                    next->n_bits += node->n_bits;
                }
                /* Update the parent's edge. */
                ovsrcu_set(edges[depth], next); /* Publish changes. */
                trie_node_destroy(node);

                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    break;
                }
                node = ovsrcu_get_protected(struct trie_node *,
                                            edges[--depth]);
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules
     * that actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove non-existing rule from a prefix trie.");
}
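
/* Pruning sketch (illustrative): after removing the last rule at a node, a
 * childless node is deleted and the walk moves back up the recorded 'edges[]'
 * stack; a node with exactly one child is merged with that child whenever
 * their combined prefix still fits in TRIE_PREFIX_BITS, which keeps chains of
 * single-child nodes from accumulating. */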

#define CLS_MATCH_POISON (struct cls_match *)(UINTPTR_MAX / 0xf * 0xb)

static void
cls_match_free_cb(struct cls_match *rule)
{
    ovsrcu_set_hidden(&rule->next, CLS_MATCH_POISON);