2 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
18 #include "classifier.h"
20 #include <netinet/in.h>
21 #include "byte-order.h"
22 #include "dynamic-string.h"
27 #include "ovs-thread.h"
31 VLOG_DEFINE_THIS_MODULE(classifier);
36 /* Ports trie depends on both ports sharing the same ovs_be32. */
37 #define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
38 BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
40 /* Prefix trie for a 'field'. */
42 const struct mf_field *field; /* Trie field, or NULL. */
43 struct trie_node *root; /* NULL if none. */
46 struct cls_subtable_entry {
47 struct cls_subtable *subtable;
48 tag_type tag;
49 unsigned int max_priority;
52 struct cls_subtable_cache {
53 struct cls_subtable_entry *subtables;
54 size_t alloc_size; /* Number of allocated elements. */
55 size_t size; /* One past last valid array element. */
59 CLS_MAX_INDICES = 3 /* Maximum number of lookup indices per subtable. */
62 struct cls_classifier {
63 int n_rules; /* Total number of rules. */
64 uint8_t n_flow_segments;
65 uint8_t flow_segments[CLS_MAX_INDICES]; /* Flow segment boundaries to use
66 * for staged lookup. */
67 struct hmap subtables; /* Contains "struct cls_subtable"s. */
68 struct cls_subtable_cache subtables_priority;
69 struct hmap partitions; /* Contains "struct cls_partition"s. */
70 struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */
74 /* A set of rules that all have the same fields wildcarded. */
76 struct hmap_node hmap_node; /* Within struct cls_classifier 'subtables'
78 struct hmap rules; /* Contains "struct cls_rule"s. */
79 int n_rules; /* Number of rules, including duplicates. */
80 unsigned int max_priority; /* Max priority of any rule in the subtable. */
81 unsigned int max_count; /* Count of max_priority rules. */
82 tag_type tag; /* Tag generated from mask for partitioning. */
83 uint8_t n_indices; /* How many indices to use. */
84 uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 flow segment boundaries. */
85 struct hindex indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
86 unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'. */
88 struct trie_node *ports_trie; /* NULL if none. */
89 struct minimask mask; /* Wildcards for fields. */
90 /* 'mask' must be the last field. */
93 /* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
94 * field) with tags for the "cls_subtable"s that contain rules that match that
95 * metadata value. */
96 struct cls_partition {
97 struct hmap_node hmap_node; /* In struct cls_classifier's 'partitions'
99 ovs_be64 metadata; /* metadata value for this partition. */
100 tag_type tags; /* OR of each flow's cls_subtable tag. */
101 struct tag_tracker tracker; /* Tracks the bits in 'tags'. */
104 /* Internal representation of a rule in a "struct cls_subtable". */
106 struct cls_rule *cls_rule;
107 struct hindex_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
109 struct hmap_node hmap_node; /* Within struct cls_subtable 'rules'. */
110 unsigned int priority; /* Larger numbers are higher priorities. */
111 struct cls_partition *partition;
112 struct list list; /* List of identical, lower-priority rules. */
113 struct miniflow flow; /* Matching rule. Mask is in the subtable. */
114 /* 'flow' must be the last field. */
117 static struct cls_match *
118 cls_match_alloc(struct cls_rule *rule)
120 int count = count_1bits(rule->match.flow.map);
122 struct cls_match *cls_match
123 = xmalloc(sizeof *cls_match - sizeof cls_match->flow.inline_values
124 + MINIFLOW_VALUES_SIZE(count));
126 cls_match->cls_rule = rule;
127 miniflow_clone_inline(&cls_match->flow, &rule->match.flow, count);
128 cls_match->priority = rule->priority;
129 rule->cls_match = cls_match;
134 static struct cls_subtable *find_subtable(const struct cls_classifier *,
135 const struct minimask *);
136 static struct cls_subtable *insert_subtable(struct cls_classifier *,
137 const struct minimask *);
139 static void destroy_subtable(struct cls_classifier *, struct cls_subtable *);
141 static void update_subtables_after_insertion(struct cls_classifier *,
142 struct cls_subtable *,
143 unsigned int new_priority);
144 static void update_subtables_after_removal(struct cls_classifier *,
145 struct cls_subtable *,
146 unsigned int del_priority);
148 static struct cls_match *find_match_wc(const struct cls_subtable *,
149 const struct flow *, struct trie_ctx *,
150 unsigned int n_tries,
151 struct flow_wildcards *);
152 static struct cls_match *find_equal(struct cls_subtable *,
153 const struct miniflow *, uint32_t hash);
154 static struct cls_match *insert_rule(struct cls_classifier *,
155 struct cls_subtable *, struct cls_rule *);
157 /* Iterates RULE over HEAD and all of the cls_rules on HEAD->list. */
158 #define FOR_EACH_RULE_IN_LIST(RULE, HEAD) \
159 for ((RULE) = (HEAD); (RULE) != NULL; (RULE) = next_rule_in_list(RULE))
160 #define FOR_EACH_RULE_IN_LIST_SAFE(RULE, NEXT, HEAD) \
161 for ((RULE) = (HEAD); \
162 (RULE) != NULL && ((NEXT) = next_rule_in_list(RULE), true); \
163 (RULE) = (NEXT))
165 static struct cls_match *next_rule_in_list__(struct cls_match *);
166 static struct cls_match *next_rule_in_list(struct cls_match *);
168 static unsigned int minimask_get_prefix_len(const struct minimask *,
169 const struct mf_field *);
170 static void trie_init(struct cls_classifier *, int trie_idx,
171 const struct mf_field *);
172 static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
173 unsigned int *checkbits);
174 static unsigned int trie_lookup_value(const struct trie_node *,
175 const ovs_be32 value[],
176 unsigned int *checkbits);
177 static void trie_destroy(struct trie_node *);
178 static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
179 static void trie_insert_prefix(struct trie_node **, const ovs_be32 *prefix,
181 static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
182 static void trie_remove_prefix(struct trie_node **, const ovs_be32 *prefix,
184 static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
186 static bool mask_prefix_bits_set(const struct flow_wildcards *,
187 uint8_t be32ofs, unsigned int nbits);
190 cls_subtable_cache_init(struct cls_subtable_cache *array)
192 memset(array, 0, sizeof *array);
196 cls_subtable_cache_destroy(struct cls_subtable_cache *array)
198 free(array->subtables);
199 memset(array, 0, sizeof *array);
202 /* Array insertion. */
204 cls_subtable_cache_push_back(struct cls_subtable_cache *array,
205 struct cls_subtable_entry a)
207 if (array->size == array->alloc_size) {
208 array->subtables = x2nrealloc(array->subtables, &array->alloc_size,
212 array->subtables[array->size++] = a;
215 /* Only for rearranging entries in the same cache. */
217 cls_subtable_cache_splice(struct cls_subtable_entry *to,
218 struct cls_subtable_entry *start,
219 struct cls_subtable_entry *end)
222 /* Same as splicing entries to (start) from [end, to). */
223 struct cls_subtable_entry *temp = to;
224 to = start; start = end; end = temp;
227 /* Move elements [start, end) to (to) one by one. */
228 while (start != end) {
229 struct cls_subtable_entry temp = *start;
231 /* Shift array by one, making space for the element at 'temp'. */
232 memmove(to + 1, to, (start - to) * sizeof *to);
236 } /* Else nothing to be done. */
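/* Editor's illustration (worked example, not part of the original file):
 * given a cache whose array holds {A, B, C, D}, calling
 * cls_subtable_cache_splice() with 'to' = &subtables[0],
 * 'start' = &subtables[2] and 'end' = &subtables[3] moves C in front of A,
 * leaving {C, A, B, D}; the relative order of the other entries is kept. */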
241 cls_subtable_cache_remove(struct cls_subtable_cache *array,
242 struct cls_subtable_entry *elem)
244 ssize_t size = (&array->subtables[array->size]
245 - (elem + 1)) * sizeof *elem;
247 memmove(elem, elem + 1, size);
252 #define CLS_SUBTABLE_CACHE_FOR_EACH(SUBTABLE, ITER, ARRAY) \
253 for (ITER = (ARRAY)->subtables; \
254 ITER < &(ARRAY)->subtables[(ARRAY)->size] \
255 && OVS_LIKELY(SUBTABLE = ITER->subtable); \
256 ++ITER)
257 #define CLS_SUBTABLE_CACHE_FOR_EACH_CONTINUE(SUBTABLE, ITER, ARRAY) \
258 for (++ITER; \
259 ITER < &(ARRAY)->subtables[(ARRAY)->size] \
260 && OVS_LIKELY(SUBTABLE = ITER->subtable); \
261 ++ITER)
262 #define CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE(SUBTABLE, ITER, ARRAY) \
263 for (ITER = &(ARRAY)->subtables[(ARRAY)->size]; \
264 ITER > (ARRAY)->subtables \
265 && OVS_LIKELY(SUBTABLE = (--ITER)->subtable);)
268 cls_subtable_cache_verify(struct cls_subtable_cache *array)
270 struct cls_subtable *table;
271 struct cls_subtable_entry *iter;
272 unsigned int priority = 0;
274 CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE (table, iter, array) {
275 if (iter->max_priority != table->max_priority) {
276 VLOG_WARN("Subtable %p has mismatching priority in cache (%u != %u)",
277 table, iter->max_priority, table->max_priority);
279 if (iter->max_priority < priority) {
280 VLOG_WARN("Subtable cache is out of order (%u < %u)",
281 iter->max_priority, priority);
283 priority = iter->max_priority;
288 cls_subtable_cache_reset(struct cls_classifier *cls)
290 struct cls_subtable_cache old = cls->subtables_priority;
291 struct cls_subtable *subtable;
293 VLOG_WARN("Resetting subtable cache.");
295 cls_subtable_cache_verify(&cls->subtables_priority);
297 cls_subtable_cache_init(&cls->subtables_priority);
299 HMAP_FOR_EACH (subtable, hmap_node, &cls->subtables) {
300 struct cls_match *head;
301 struct cls_subtable_entry elem;
302 struct cls_subtable *table;
303 struct cls_subtable_entry *iter, *subtable_iter = NULL;
304 unsigned int new_max = 0;
305 unsigned int max_count = 0;
308 /* Verify max_priority. */
309 HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
310 if (head->priority > new_max) {
311 new_max = head->priority;
312 max_count = 1;
313 } else if (head->priority == new_max) {
314 max_count++;
317 if (new_max != subtable->max_priority ||
318 max_count != subtable->max_count) {
319 VLOG_WARN("subtable %p (%u rules) has mismatching max_priority "
320 "(%u) or max_count (%u). Highest priority found was %u, "
322 subtable, subtable->n_rules, subtable->max_priority,
323 subtable->max_count, new_max, max_count);
324 subtable->max_priority = new_max;
325 subtable->max_count = max_count;
328 /* Locate the subtable from the old cache. */
330 CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &old) {
331 if (table == subtable) {
332 if (iter->max_priority != new_max) {
333 VLOG_WARN("Subtable %p has wrong max priority (%u != %u) "
335 subtable, iter->max_priority, new_max);
338 VLOG_WARN("Subtable %p duplicated in the old cache.",
345 VLOG_WARN("Subtable %p not found from the old cache.", subtable);
348 elem.subtable = subtable;
349 elem.tag = subtable->tag;
350 elem.max_priority = subtable->max_priority;
351 cls_subtable_cache_push_back(&cls->subtables_priority, elem);
353 /* Possibly move 'subtable' earlier in the priority list. If we break
354 * out of the loop, then 'subtable_iter' should be moved just before
355 * 'iter'. If the loop terminates normally, then 'iter' will be the
356 * first list element and we'll move subtable just before that
357 * (e.g. to the front of the list). */
358 CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE (table, iter,
359 &cls->subtables_priority) {
360 if (table == subtable) {
361 subtable_iter = iter; /* Locate the subtable as we go. */
362 } else if (table->max_priority >= new_max) {
363 ovs_assert(subtable_iter != NULL);
369 /* Move 'subtable' just before 'iter' (unless it's already there). */
370 if (iter != subtable_iter) {
371 cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
375 /* Verify that the old and the new have the same size. */
376 if (old.size != cls->subtables_priority.size) {
377 VLOG_WARN("subtables cache sizes differ: old (%"PRIuSIZE
378 ") != new (%"PRIuSIZE").",
379 old.size, cls->subtables_priority.size);
382 cls_subtable_cache_destroy(&old);
384 cls_subtable_cache_verify(&cls->subtables_priority);
388 /* flow/miniflow/minimask/minimatch utilities.
389 * These are only used by the classifier, so place them here to allow
390 * for better optimization. */
392 static inline uint64_t
393 miniflow_get_map_in_range(const struct miniflow *miniflow,
394 uint8_t start, uint8_t end, unsigned int *offset)
396 uint64_t map = miniflow->map;
400 uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
401 *offset = count_1bits(map & msk);
404 if (end < FLOW_U32S) {
405 uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
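/* Editor's illustration (worked example, not part of the original file):
 * with map = 0x36 (bits 1, 2, 4 and 5 set), start = 2 and end = 5 yield
 * *offset = 1 (one miniflow value stored below bit 2) and a return value of
 * 0x14 (bits 2 and 4), i.e., the values for the range begin at values[1]. */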
411 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
412 * 'mask', given 'basis'.
414 * The hash values returned by this function are the same as those returned by
415 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
416 static inline uint32_t
417 flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
420 const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
421 const uint32_t *flow_u32 = (const uint32_t *)flow;
422 const uint32_t *p = mask_values;
427 for (map = mask->masks.map; map; map = zero_rightmost_1bit(map)) {
428 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
431 return mhash_finish(hash, (p - mask_values) * 4);
434 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
435 * 'mask', given 'basis'.
437 * The hash values returned by this function are the same as those returned by
438 * flow_hash_in_minimask(), only the form of the arguments differ. */
439 static inline uint32_t
440 miniflow_hash_in_minimask(const struct miniflow *flow,
441 const struct minimask *mask, uint32_t basis)
443 const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
444 const uint32_t *p = mask_values;
445 uint32_t hash = basis;
448 MINIFLOW_FOR_EACH_IN_MAP(flow_u32, flow, mask->masks.map) {
449 hash = mhash_add(hash, flow_u32 & *p++);
452 return mhash_finish(hash, (p - mask_values) * 4);
455 /* Returns a hash value for the bits of range [start, end) in 'flow',
456 * where there are 1-bits in 'mask', given 'basis'.
458 * The hash values returned by this function are the same as those returned by
459 * minimatch_hash_range(), only the form of the arguments differ. */
460 static inline uint32_t
461 flow_hash_in_minimask_range(const struct flow *flow,
462 const struct minimask *mask,
463 uint8_t start, uint8_t end, uint32_t *basis)
465 const uint32_t *mask_values = miniflow_get_u32_values(&mask->masks);
466 const uint32_t *flow_u32 = (const uint32_t *)flow;
468 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
470 const uint32_t *p = mask_values + offset;
471 uint32_t hash = *basis;
473 for (; map; map = zero_rightmost_1bit(map)) {
474 hash = mhash_add(hash, flow_u32[raw_ctz(map)] & *p++);
477 *basis = hash; /* Allow continuation from the unfinished value. */
478 return mhash_finish(hash, (p - mask_values) * 4);
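/* Editor's sketch (hypothetical helper, not part of the original file):
 * range hashes chain by carrying 'basis' between calls, so hashing [0, s0)
 * and then [s0, s1) gives the same result as hashing [0, s1) in one call.
 * The staged lookup code below relies on this property. */
static inline uint32_t
example_staged_hash(const struct flow *flow, const struct minimask *mask,
                    uint8_t s0, uint8_t s1)
{
    uint32_t basis = 0;

    flow_hash_in_minimask_range(flow, mask, 0, s0, &basis); /* Stage 1. */
    return flow_hash_in_minimask_range(flow, mask, s0, s1, &basis);
}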
481 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
483 flow_wildcards_fold_minimask(struct flow_wildcards *wc,
484 const struct minimask *mask)
486 flow_union_with_miniflow(&wc->masks, &mask->masks);
489 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
490 * in range [start, end). */
492 flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
493 const struct minimask *mask,
494 uint8_t start, uint8_t end)
496 uint32_t *dst_u32 = (uint32_t *)&wc->masks;
498 uint64_t map = miniflow_get_map_in_range(&mask->masks, start, end,
500 const uint32_t *p = miniflow_get_u32_values(&mask->masks) + offset;
502 for (; map; map = zero_rightmost_1bit(map)) {
503 dst_u32[raw_ctz(map)] |= *p++;
507 /* Returns a hash value for 'flow', given 'basis'. */
508 static inline uint32_t
509 miniflow_hash(const struct miniflow *flow, uint32_t basis)
511 const uint32_t *values = miniflow_get_u32_values(flow);
512 const uint32_t *p = values;
513 uint32_t hash = basis;
514 uint64_t hash_map = 0;
517 for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
518 if (*p) {
519 hash = mhash_add(hash, *p);
520 hash_map |= rightmost_1bit(map);
521 }
522 p++;
524 hash = mhash_add(hash, hash_map);
525 hash = mhash_add(hash, hash_map >> 32);
527 return mhash_finish(hash, p - values);
530 /* Returns a hash value for 'mask', given 'basis'. */
531 static inline uint32_t
532 minimask_hash(const struct minimask *mask, uint32_t basis)
534 return miniflow_hash(&mask->masks, basis);
537 /* Returns a hash value for 'match', given 'basis'. */
538 static inline uint32_t
539 minimatch_hash(const struct minimatch *match, uint32_t basis)
541 return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
544 /* Returns a hash value for the bits of range [start, end) in 'minimatch',
545 * given 'basis'.
547 * The hash values returned by this function are the same as those returned by
548 * flow_hash_in_minimask_range(), only the form of the arguments differ. */
549 static inline uint32_t
550 minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
554 const uint32_t *p, *q;
555 uint32_t hash = *basis;
558 n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
560 q = miniflow_get_u32_values(&match->mask.masks) + offset;
561 p = miniflow_get_u32_values(&match->flow) + offset;
563 for (i = 0; i < n; i++) {
564 hash = mhash_add(hash, p[i] & q[i]);
566 *basis = hash; /* Allow continuation from the unfinished value. */
567 return mhash_finish(hash, (offset + n) * 4);
573 /* Initializes 'rule' to match packets specified by 'match' at the given
574 * 'priority'. 'match' must satisfy the invariant described in the comment at
575 * the definition of struct match.
577 * The caller must eventually destroy 'rule' with cls_rule_destroy().
579 * (OpenFlow uses priorities between 0 and UINT16_MAX, inclusive, but
580 * internally Open vSwitch supports a wider range.) */
582 cls_rule_init(struct cls_rule *rule,
583 const struct match *match, unsigned int priority)
585 minimatch_init(&rule->match, match);
586 rule->priority = priority;
587 rule->cls_match = NULL;
590 /* Same as cls_rule_init() for initialization from a "struct minimatch". */
592 cls_rule_init_from_minimatch(struct cls_rule *rule,
593 const struct minimatch *match,
594 unsigned int priority)
596 minimatch_clone(&rule->match, match);
597 rule->priority = priority;
598 rule->cls_match = NULL;
601 /* Initializes 'dst' as a copy of 'src'.
603 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
605 cls_rule_clone(struct cls_rule *dst, const struct cls_rule *src)
607 minimatch_clone(&dst->match, &src->match);
608 dst->priority = src->priority;
609 dst->cls_match = NULL;
612 /* Initializes 'dst' with the data in 'src', destroying 'src'.
614 * The caller must eventually destroy 'dst' with cls_rule_destroy(). */
616 cls_rule_move(struct cls_rule *dst, struct cls_rule *src)
618 minimatch_move(&dst->match, &src->match);
619 dst->priority = src->priority;
620 dst->cls_match = NULL;
623 /* Frees memory referenced by 'rule'. Doesn't free 'rule' itself (it's
624 * normally embedded into a larger structure).
626 * ('rule' must not currently be in a classifier.) */
628 cls_rule_destroy(struct cls_rule *rule)
630 ovs_assert(!rule->cls_match);
631 minimatch_destroy(&rule->match);
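/* Editor's sketch (hypothetical usage, not part of the original file): the
 * intended life cycle of a rule, following the contracts documented above.
 * The caller is assumed to hold whatever lock the classifier requires. */
static inline void
example_rule_lifecycle(struct classifier *cls, const struct match *match)
{
    struct cls_rule rule;

    cls_rule_init(&rule, match, 5);  /* Priority 5 is arbitrary here. */
    classifier_insert(cls, &rule);   /* 'rule' must stay valid while in. */
    /* ... lookups may now return &rule ... */
    classifier_remove(cls, &rule);   /* Required before destruction. */
    cls_rule_destroy(&rule);         /* Frees memory held inside 'rule'. */
}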
634 /* Returns true if 'a' and 'b' match the same packets at the same priority,
635 * false if they differ in some way. */
637 cls_rule_equal(const struct cls_rule *a, const struct cls_rule *b)
639 return a->priority == b->priority && minimatch_equal(&a->match, &b->match);
642 /* Returns a hash value for 'rule', folding in 'basis'. */
644 cls_rule_hash(const struct cls_rule *rule, uint32_t basis)
646 return minimatch_hash(&rule->match, hash_int(rule->priority, basis));
649 /* Appends a string describing 'rule' to 's'. */
651 cls_rule_format(const struct cls_rule *rule, struct ds *s)
653 minimatch_format(&rule->match, s, rule->priority);
656 /* Returns true if 'rule' matches every packet, false otherwise. */
658 cls_rule_is_catchall(const struct cls_rule *rule)
660 return minimask_is_catchall(&rule->match.mask);
663 /* Initializes 'cls' as a classifier that initially contains no classification
666 classifier_init(struct classifier *cls_, const uint8_t *flow_segments)
668 struct cls_classifier *cls = xmalloc(sizeof *cls);
670 fat_rwlock_init(&cls_->rwlock);
675 hmap_init(&cls->subtables);
676 cls_subtable_cache_init(&cls->subtables_priority);
677 hmap_init(&cls->partitions);
678 cls->n_flow_segments = 0;
679 if (flow_segments) {
680 while (cls->n_flow_segments < CLS_MAX_INDICES
681 && *flow_segments < FLOW_U32S) {
682 cls->flow_segments[cls->n_flow_segments++] = *flow_segments++;
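/* Editor's sketch (hypothetical usage, not part of the original file):
 * initializing a classifier with the standard segment boundaries declared
 * in flow.h enables staged lookup; passing NULL disables staging. */
static inline void
example_classifier_init(struct classifier *cls)
{
    classifier_init(cls, flow_segment_u32s);
}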
688 /* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
689 * caller's responsibility. */
691 classifier_destroy(struct classifier *cls_)
694 struct cls_classifier *cls = cls_->cls;
695 struct cls_partition *partition, *next_partition;
696 struct cls_subtable *subtable, *next_subtable;
699 fat_rwlock_destroy(&cls_->rwlock);
704 for (i = 0; i < cls->n_tries; i++) {
705 trie_destroy(cls->tries[i].root);
708 HMAP_FOR_EACH_SAFE (subtable, next_subtable, hmap_node,
710 destroy_subtable(cls, subtable);
712 hmap_destroy(&cls->subtables);
714 HMAP_FOR_EACH_SAFE (partition, next_partition, hmap_node,
716 hmap_remove(&cls->partitions, &partition->hmap_node);
719 hmap_destroy(&cls->partitions);
721 cls_subtable_cache_destroy(&cls->subtables_priority);
726 /* We use uint64_t as a set for the fields below. */
727 BUILD_ASSERT_DECL(MFF_N_IDS <= 64);
729 /* Set the fields for which prefix lookup should be performed. */
731 classifier_set_prefix_fields(struct classifier *cls_,
732 const enum mf_field_id *trie_fields,
733 unsigned int n_fields)
735 struct cls_classifier *cls = cls_->cls;
739 for (i = 0, trie = 0; i < n_fields && trie < CLS_MAX_TRIES; i++) {
740 const struct mf_field *field = mf_from_id(trie_fields[i]);
741 if (field->flow_be32ofs < 0 || field->n_bits % 32) {
742 /* Incompatible field. This is the only place where we
743 * enforce these requirements, but the rest of the trie code
744 * depends on 'flow_be32ofs' being non-negative and the
745 * field length being a multiple of 32 bits. */
749 if (fields & (UINT64_C(1) << trie_fields[i])) {
750 /* Duplicate field, there is no need to build more than
751 * one index for any one field. */
754 fields |= UINT64_C(1) << trie_fields[i];
756 if (trie >= cls->n_tries || field != cls->tries[trie].field) {
757 trie_init(cls, trie, field);
762 /* Destroy the rest. */
763 for (i = trie; i < cls->n_tries; i++) {
764 trie_init(cls, i, NULL);
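/* Editor's sketch (hypothetical usage, not part of the original file):
 * enabling prefix lookup on the IPv4 source and destination address
 * fields, a typical configuration.  Assumes CLS_MAX_TRIES >= 2. */
static inline void
example_enable_prefix_lookup(struct classifier *cls)
{
    static const enum mf_field_id fields[2] = { MFF_IPV4_SRC, MFF_IPV4_DST };

    classifier_set_prefix_fields(cls, fields, ARRAY_SIZE(fields));
}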
770 trie_init(struct cls_classifier *cls, int trie_idx,
771 const struct mf_field *field)
773 struct cls_trie *trie = &cls->tries[trie_idx];
774 struct cls_subtable *subtable;
775 struct cls_subtable_entry *iter;
777 if (trie_idx < cls->n_tries) {
778 trie_destroy(trie->root);
783 /* Add existing rules to the trie. */
784 CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
787 plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
788 /* Initialize subtable's prefix length on this field. */
789 subtable->trie_plen[trie_idx] = plen;
792 struct cls_match *head;
794 HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
795 struct cls_match *match;
797 FOR_EACH_RULE_IN_LIST (match, head) {
798 trie_insert(trie, match->cls_rule, plen);
805 /* Returns true if 'cls' contains no classification rules, false otherwise. */
807 classifier_is_empty(const struct classifier *cls)
809 return cls->cls->n_rules == 0;
812 /* Returns the number of rules in 'cls'. */
814 classifier_count(const struct classifier *cls)
816 return cls->cls->n_rules;
820 hash_metadata(ovs_be64 metadata_)
822 uint64_t metadata = (OVS_FORCE uint64_t) metadata_;
823 return hash_uint64(metadata);
826 static struct cls_partition *
827 find_partition(const struct cls_classifier *cls, ovs_be64 metadata,
830 struct cls_partition *partition;
832 HMAP_FOR_EACH_IN_BUCKET (partition, hmap_node, hash, &cls->partitions) {
833 if (partition->metadata == metadata) {
841 static struct cls_partition *
842 create_partition(struct cls_classifier *cls, struct cls_subtable *subtable,
845 uint32_t hash = hash_metadata(metadata);
846 struct cls_partition *partition = find_partition(cls, metadata, hash);
847 if (!partition) {
848 partition = xmalloc(sizeof *partition);
849 partition->metadata = metadata;
851 tag_tracker_init(&partition->tracker);
852 hmap_insert(&cls->partitions, &partition->hmap_node, hash);
854 tag_tracker_add(&partition->tracker, &partition->tags, subtable->tag);
858 static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
860 /* Could optimize to use the same map if needed for fast path. */
861 return MINIFLOW_GET_BE32(&match->flow, tp_src)
862 & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
865 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
866 * must not modify or free it.
868 * If 'cls' already contains an identical rule (including wildcards, values of
869 * fixed fields, and priority), replaces the old rule by 'rule' and returns the
870 * rule that was replaced. The caller takes ownership of the returned rule and
871 * is thus responsible for destroying it with cls_rule_destroy(), freeing the
872 * memory block in which it resides, etc., as necessary.
874 * Returns NULL if 'cls' does not contain a rule with an identical key, after
875 * inserting the new rule. In this case, no rules are displaced by the new
876 * rule, even rules that cannot have any effect because the new rule matches a
877 * superset of their flows and has higher priority. */
879 classifier_replace(struct classifier *cls_, struct cls_rule *rule)
881 struct cls_classifier *cls = cls_->cls;
882 struct cls_match *old_rule;
883 struct cls_subtable *subtable;
885 subtable = find_subtable(cls, &rule->match.mask);
886 if (!subtable) {
887 subtable = insert_subtable(cls, &rule->match.mask);
888 }
890 old_rule = insert_rule(cls, subtable, rule);
894 rule->cls_match->partition = NULL;
895 if (minimask_get_metadata_mask(&rule->match.mask) == OVS_BE64_MAX) {
896 ovs_be64 metadata = miniflow_get_metadata(&rule->match.flow);
897 rule->cls_match->partition = create_partition(cls, subtable,
904 for (i = 0; i < cls->n_tries; i++) {
905 if (subtable->trie_plen[i]) {
906 trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
911 if (subtable->ports_mask_len) {
912 /* We mask the value to be inserted to always have the wildcarded
913 * bits in known (zero) state, so we can include them in comparison
914 * and they will always match (== their original value does not
915 * matter). */
916 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
918 trie_insert_prefix(&subtable->ports_trie, &masked_ports,
919 subtable->ports_mask_len);
924 struct cls_rule *old_cls_rule = old_rule->cls_rule;
926 rule->cls_match->partition = old_rule->partition;
927 old_cls_rule->cls_match = NULL;
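/* Editor's sketch (hypothetical usage, not part of the original file):
 * disposing of a displaced rule after classifier_replace(), per the
 * ownership contract above.  Assumes the displaced rule was individually
 * heap-allocated by the caller. */
static inline void
example_replace_rule(struct classifier *cls, struct cls_rule *new_rule)
{
    struct cls_rule *displaced = classifier_replace(cls, new_rule);

    if (displaced) {
        cls_rule_destroy(displaced); /* The caller now owns 'displaced'. */
        free(displaced);             /* Valid only if it was malloc()'d. */
    }
}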
933 /* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
934 * must not modify or free it.
936 * 'cls' must not contain an identical rule (including wildcards, values of
937 * fixed fields, and priority). Use classifier_find_rule_exactly() to find
940 classifier_insert(struct classifier *cls, struct cls_rule *rule)
942 struct cls_rule *displaced_rule = classifier_replace(cls, rule);
943 ovs_assert(!displaced_rule);
946 /* Removes 'rule' from 'cls'. It is the caller's responsibility to destroy
947 * 'rule' with cls_rule_destroy(), freeing the memory block in which 'rule'
948 * resides, etc., as necessary. */
950 classifier_remove(struct classifier *cls_, struct cls_rule *rule)
952 struct cls_classifier *cls = cls_->cls;
953 struct cls_partition *partition;
954 struct cls_match *cls_match = rule->cls_match;
955 struct cls_match *head;
956 struct cls_subtable *subtable;
959 ovs_assert(cls_match);
961 subtable = find_subtable(cls, &rule->match.mask);
962 ovs_assert(subtable);
964 if (subtable->ports_mask_len) {
965 ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
967 trie_remove_prefix(&subtable->ports_trie,
968 &masked_ports, subtable->ports_mask_len);
970 for (i = 0; i < cls->n_tries; i++) {
971 if (subtable->trie_plen[i]) {
972 trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
976 /* Remove rule node from indices. */
977 for (i = 0; i < subtable->n_indices; i++) {
978 hindex_remove(&subtable->indices[i], &cls_match->index_nodes[i]);
981 head = find_equal(subtable, &rule->match.flow, cls_match->hmap_node.hash);
982 if (head != cls_match) {
983 list_remove(&cls_match->list);
984 } else if (list_is_empty(&cls_match->list)) {
985 hmap_remove(&subtable->rules, &cls_match->hmap_node);
986 } else {
987 struct cls_match *next = CONTAINER_OF(cls_match->list.next,
988 struct cls_match, list);
990 list_remove(&cls_match->list);
991 hmap_replace(&subtable->rules, &cls_match->hmap_node,
995 partition = cls_match->partition;
997 tag_tracker_subtract(&partition->tracker, &partition->tags,
999 if (!partition->tags) {
1000 hmap_remove(&cls->partitions, &partition->hmap_node);
1005 if (--subtable->n_rules == 0) {
1006 destroy_subtable(cls, subtable);
1008 update_subtables_after_removal(cls, subtable, cls_match->priority);
1013 rule->cls_match = NULL;
1017 /* Prefix tree context. Valid when 'lookup_done' is true. Can skip all
1018 * subtables which have more than 'match_plen' bits in their corresponding
1019 * field at offset 'be32ofs'. If skipped, 'maskbits' prefix bits should be
1020 * unwildcarded to guarantee that the datapath flow matches only the packets it should. */
1022 const struct cls_trie *trie;
1023 bool lookup_done; /* Status of the lookup. */
1024 uint8_t be32ofs; /* U32 offset of the field in question. */
1025 unsigned int match_plen; /* Longest prefix that could possibly match. */
1026 unsigned int maskbits; /* Prefix length needed to avoid false matches. */
1030 trie_ctx_init(struct trie_ctx *ctx, const struct cls_trie *trie)
1033 ctx->be32ofs = trie->field->flow_be32ofs;
1034 ctx->lookup_done = false;
1038 lookahead_subtable(const struct cls_subtable_entry *subtables)
1040 ovs_prefetch_range(subtables->subtable, sizeof *subtables->subtable);
1043 /* Finds and returns the highest-priority rule in 'cls' that matches 'flow'.
1044 * Returns a null pointer if no rules in 'cls' match 'flow'. If multiple rules
1045 * of equal priority match 'flow', returns one arbitrarily.
1047 * If a rule is found and 'wc' is non-null, bitwise-OR's 'wc' with the
1048 * set of bits that were significant in the lookup. At some point
1049 * earlier, 'wc' should have been initialized (e.g., by
1050 * flow_wildcards_init_catchall()). */
1052 classifier_lookup(const struct classifier *cls_, const struct flow *flow,
1053 struct flow_wildcards *wc)
1055 struct cls_classifier *cls = cls_->cls;
1056 const struct cls_partition *partition;
1058 struct cls_match *best;
1059 struct trie_ctx trie_ctx[CLS_MAX_TRIES];
1061 struct cls_subtable_entry *subtables = cls->subtables_priority.subtables;
1062 int n_subtables = cls->subtables_priority.size;
1063 int64_t best_priority = -1;
1065 /* Prefetch the subtables array. */
1066 ovs_prefetch_range(subtables, n_subtables * sizeof *subtables);
1068 /* Determine 'tags' such that, if 'subtable->tag' doesn't intersect them,
1069 * then 'flow' cannot possibly match in 'subtable':
1071 * - If flow->metadata maps to a given 'partition', then we can use
1072 * 'tags' for 'partition->tags'.
1074 * - If flow->metadata has no partition, then no rule in 'cls' has an
1075 * exact-match for flow->metadata. That means that we don't need to
1076 * search any subtable that includes flow->metadata in its mask.
1078 * In either case, we always need to search any cls_subtables that do not
1079 * include flow->metadata in its mask. One way to do that would be to
1080 * check the "cls_subtable"s explicitly for that, but that would require an
1081 * extra branch per subtable. Instead, we mark such a cls_subtable's
1082 * 'tags' as TAG_ALL and make sure that 'tags' is never empty. This means
1083 * that 'tags' always intersects such a cls_subtable's 'tags', so we don't
1084 * need a special case.
1086 partition = (hmap_is_empty(&cls->partitions)
1088 : find_partition(cls, flow->metadata,
1089 hash_metadata(flow->metadata)));
1090 tags = partition ? partition->tags : TAG_ARBITRARY;
1092 /* Initialize trie contexts for match_find_wc(). */
1093 for (i = 0; i < cls->n_tries; i++) {
1094 trie_ctx_init(&trie_ctx[i], &cls->tries[i]);
1097 /* Prefetch the first subtables. */
1098 if (n_subtables > 1) {
1099 lookahead_subtable(subtables);
1100 lookahead_subtable(subtables + 1);
1104 for (i = 0; OVS_LIKELY(i < n_subtables); i++) {
1105 struct cls_match *rule;
1107 if ((int64_t)subtables[i].max_priority <= best_priority) {
1108 /* Subtables are in descending priority order,
1109 * can not find anything better. */
1113 /* Prefetch a forthcoming subtable. */
1114 if (i + 2 < n_subtables) {
1115 lookahead_subtable(&subtables[i + 2]);
1118 if (!tag_intersects(tags, subtables[i].tag)) {
1122 rule = find_match_wc(subtables[i].subtable, flow, trie_ctx,
1124 if (rule && (int64_t)rule->priority > best_priority) {
1125 best_priority = (int64_t)rule->priority;
1130 return best ? best->cls_rule : NULL;
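/* Editor's sketch (hypothetical usage, not part of the original file): a
 * lookup that also collects the wildcards relevant to the decision, e.g.
 * for building a datapath megaflow entry. */
static inline struct cls_rule *
example_lookup(const struct classifier *cls, const struct flow *flow,
               struct flow_wildcards *wc)
{
    flow_wildcards_init_catchall(wc); /* Initialization required above. */
    return classifier_lookup(cls, flow, wc);
}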
1133 /* Returns true if 'target' satisfies 'match', that is, if each bit for which
1134 * 'match' specifies a particular value has the correct value in 'target'.
1135 *
1136 * 'flow' and 'mask' must have the same map. */
1138 miniflow_and_mask_matches_miniflow(const struct miniflow *flow,
1139 const struct minimask *mask,
1140 const struct miniflow *target)
1142 const uint32_t *flowp = miniflow_get_u32_values(flow);
1143 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
1144 uint32_t target_u32;
1146 MINIFLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
1147 if ((*flowp++ ^ target_u32) & *maskp++) {
1155 static inline struct cls_match *
1156 find_match_miniflow(const struct cls_subtable *subtable,
1157 const struct miniflow *flow,
1160 struct cls_match *rule;
1162 HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
1163 if (miniflow_and_mask_matches_miniflow(&rule->flow, &subtable->mask,
1172 /* Finds and returns the highest-priority rule in 'cls' that matches the
1173 * miniflow 'flow'. Returns a null pointer if no rules in 'cls' match 'flow'.
1174 * If multiple rules of equal priority match 'flow', returns one arbitrarily.
1176 * This function is optimized for the userspace datapath, which only ever
1177 * has a single priority value for its flows. */
1179 struct cls_rule *classifier_lookup_miniflow_first(const struct classifier *cls_,
1180 const struct miniflow *flow)
1182 struct cls_classifier *cls = cls_->cls;
1183 struct cls_subtable *subtable;
1184 struct cls_subtable_entry *iter;
1186 CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
1187 struct cls_match *rule;
1189 rule = find_match_miniflow(subtable, flow,
1190 miniflow_hash_in_minimask(flow,
1194 return rule->cls_rule;
1201 /* Finds and returns a rule in 'cls' with exactly the same priority and
1202 * matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
1203 * contain an exact match. */
1205 classifier_find_rule_exactly(const struct classifier *cls_,
1206 const struct cls_rule *target)
1208 struct cls_classifier *cls = cls_->cls;
1209 struct cls_match *head, *rule;
1210 struct cls_subtable *subtable;
1212 subtable = find_subtable(cls, &target->match.mask);
1217 /* Skip if there is no hope. */
1218 if (target->priority > subtable->max_priority) {
1222 head = find_equal(subtable, &target->match.flow,
1223 miniflow_hash_in_minimask(&target->match.flow,
1224 &target->match.mask, 0));
1225 FOR_EACH_RULE_IN_LIST (rule, head) {
1226 if (target->priority >= rule->priority) {
1227 return target->priority == rule->priority ? rule->cls_rule : NULL;
1233 /* Finds and returns a rule in 'cls' with priority 'priority' and exactly the
1234 * same matching criteria as 'target'. Returns a null pointer if 'cls' doesn't
1235 * contain an exact match. */
1237 classifier_find_match_exactly(const struct classifier *cls,
1238 const struct match *target,
1239 unsigned int priority)
1241 struct cls_rule *retval;
1244 cls_rule_init(&cr, target, priority);
1245 retval = classifier_find_rule_exactly(cls, &cr);
1246 cls_rule_destroy(&cr);
1251 /* Checks if 'target' would overlap any other rule in 'cls'. Two rules are
1252 * considered to overlap if both rules have the same priority and a packet
1253 * could match both. */
1255 classifier_rule_overlaps(const struct classifier *cls_,
1256 const struct cls_rule *target)
1258 struct cls_classifier *cls = cls_->cls;
1259 struct cls_subtable *subtable;
1260 struct cls_subtable_entry *iter;
1262 /* Iterate subtables in the descending max priority order. */
1263 CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
1264 uint32_t storage[FLOW_U32S];
1265 struct minimask mask;
1266 struct cls_match *head;
1268 if (target->priority > iter->max_priority) {
1269 break; /* Can skip this and the rest of the subtables. */
1272 minimask_combine(&mask, &target->match.mask, &subtable->mask, storage);
1273 HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
1274 struct cls_match *rule;
1276 FOR_EACH_RULE_IN_LIST (rule, head) {
1277 if (rule->priority < target->priority) {
1278 break; /* Rules in descending priority order. */
1280 if (rule->priority == target->priority
1281 && miniflow_equal_in_minimask(&target->match.flow,
1282 &rule->flow, &mask)) {
1292 /* Returns true if 'rule' exactly matches 'criteria' or if 'rule' is more
1293 * specific than 'criteria'. That is, 'rule' matches 'criteria' and this
1294 * function returns true if, for every field:
1296 * - 'criteria' and 'rule' specify the same (non-wildcarded) value for the
1299 * - 'criteria' wildcards the field,
1301 * Conversely, 'rule' does not match 'criteria' and this function returns false
1302 * if, for at least one field:
1304 * - 'criteria' and 'rule' specify different values for the field, or
1306 * - 'criteria' specifies a value for the field but 'rule' wildcards it.
1308 * Equivalently, the truth table for whether a field matches is:
1309 *
1310 *                                     rule
1311 *
1312 *                                 wild     exact
1313 *                                +---------+---------+
1314 *                           wild |   yes   |   yes   |
1315 *                 criteria       +---------+---------+
1316 *                          exact |   no    |if values|
1317 *                                |         |  match  |
1318 *                                +---------+---------+
1321 * This is the matching rule used by OpenFlow 1.0 non-strict OFPT_FLOW_MOD
1322 * commands and by OpenFlow 1.0 aggregate and flow stats.
1324 * Ignores rule->priority. */
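/* Editor's illustration (not part of the original file): criteria
 * "nw_src=10.0.0.0/8" loosely matches a rule with "nw_src=10.1.2.3" (a more
 * specific value that agrees in the masked bits), but not a rule that
 * wildcards nw_src entirely, nor one with "nw_src=192.168.0.1". */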
1326 cls_rule_is_loose_match(const struct cls_rule *rule,
1327 const struct minimatch *criteria)
1329 return (!minimask_has_extra(&rule->match.mask, &criteria->mask)
1330 && miniflow_equal_in_minimask(&rule->match.flow, &criteria->flow,
1337 rule_matches(const struct cls_match *rule, const struct cls_rule *target)
1340 || miniflow_equal_in_minimask(&rule->flow,
1341 &target->match.flow,
1342 &target->match.mask));
1345 static struct cls_match *
1346 search_subtable(const struct cls_subtable *subtable,
1347 const struct cls_rule *target)
1349 if (!target || !minimask_has_extra(&subtable->mask, &target->match.mask)) {
1350 struct cls_match *rule;
1352 HMAP_FOR_EACH (rule, hmap_node, &subtable->rules) {
1353 if (rule_matches(rule, target)) {
1361 /* Initializes 'cursor' for iterating through rules in 'cls':
1363 * - If 'target' is null, the cursor will visit every rule in 'cls'.
1365 * - If 'target' is nonnull, the cursor will visit each 'rule' in 'cls'
1366 * such that cls_rule_is_loose_match(rule, target) returns true.
1368 * Ignores target->priority. */
1370 cls_cursor_init(struct cls_cursor *cursor, const struct classifier *cls,
1371 const struct cls_rule *target)
1373 cursor->cls = cls->cls;
1374 cursor->target = target && !cls_rule_is_catchall(target) ? target : NULL;
1377 /* Returns the first matching cls_rule in 'cursor''s iteration, or a null
1378 * pointer if there are no matches. */
1380 cls_cursor_first(struct cls_cursor *cursor)
1382 struct cls_subtable *subtable;
1384 HMAP_FOR_EACH (subtable, hmap_node, &cursor->cls->subtables) {
1385 struct cls_match *rule = search_subtable(subtable, cursor->target);
1387 cursor->subtable = subtable;
1388 return rule->cls_rule;
1395 /* Returns the next matching cls_rule in 'cursor''s iteration, or a null
1396 * pointer if there are no more matches. */
1398 cls_cursor_next(struct cls_cursor *cursor, const struct cls_rule *rule_)
1400 struct cls_match *rule = CONST_CAST(struct cls_match *, rule_->cls_match);
1401 const struct cls_subtable *subtable;
1402 struct cls_match *next;
1404 next = next_rule_in_list__(rule);
1405 if (next->priority < rule->priority) {
1406 return next->cls_rule;
1409 /* 'next' is the head of the list, that is, the rule that is included in
1410 * the subtable's hmap. (This is important when the classifier contains
1411 * rules that differ only in priority.) */
1413 HMAP_FOR_EACH_CONTINUE (rule, hmap_node, &cursor->subtable->rules) {
1414 if (rule_matches(rule, cursor->target)) {
1415 return rule->cls_rule;
1419 subtable = cursor->subtable;
1420 HMAP_FOR_EACH_CONTINUE (subtable, hmap_node, &cursor->cls->subtables) {
1421 rule = search_subtable(subtable, cursor->target);
1423 cursor->subtable = subtable;
1424 return rule->cls_rule;
1431 static struct cls_subtable *
1432 find_subtable(const struct cls_classifier *cls, const struct minimask *mask)
1434 struct cls_subtable *subtable;
1436 HMAP_FOR_EACH_IN_BUCKET (subtable, hmap_node, minimask_hash(mask, 0),
1438 if (minimask_equal(mask, &subtable->mask)) {
1445 static struct cls_subtable *
1446 insert_subtable(struct cls_classifier *cls, const struct minimask *mask)
1448 uint32_t hash = minimask_hash(mask, 0);
1449 struct cls_subtable *subtable;
1451 struct flow_wildcards old, new;
1453 struct cls_subtable_entry elem;
1454 int count = count_1bits(mask->masks.map);
1456 subtable = xzalloc(sizeof *subtable - sizeof mask->masks.inline_values
1457 + MINIFLOW_VALUES_SIZE(count));
1458 hmap_init(&subtable->rules);
1459 miniflow_clone_inline(&subtable->mask.masks, &mask->masks, count);
1461 /* Init indices for segmented lookup, if any. */
1462 flow_wildcards_init_catchall(&new);
1465 for (i = 0; i < cls->n_flow_segments; i++) {
1466 flow_wildcards_fold_minimask_range(&new, mask, prev,
1467 cls->flow_segments[i]);
1468 /* Add an index if it adds mask bits. */
1469 if (!flow_wildcards_equal(&new, &old)) {
1470 hindex_init(&subtable->indices[index]);
1471 subtable->index_ofs[index] = cls->flow_segments[i];
1475 prev = cls->flow_segments[i];
1477 /* Check if the rest of the subtable's mask adds any bits,
1478 * and remove the last index if it doesn't. */
1480 flow_wildcards_fold_minimask_range(&new, mask, prev, FLOW_U32S);
1481 if (flow_wildcards_equal(&new, &old)) {
1483 subtable->index_ofs[index] = 0;
1484 hindex_destroy(&subtable->indices[index]);
1487 subtable->n_indices = index;
1489 subtable->tag = (minimask_get_metadata_mask(mask) == OVS_BE64_MAX
1490 ? tag_create_deterministic(hash)
1493 for (i = 0; i < cls->n_tries; i++) {
1494 subtable->trie_plen[i] = minimask_get_prefix_len(mask,
1495 cls->tries[i].field);
1499 subtable->ports_trie = NULL;
1500 subtable->ports_mask_len
1501 = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
1503 hmap_insert(&cls->subtables, &subtable->hmap_node, hash);
1504 elem.subtable = subtable;
1505 elem.tag = subtable->tag;
1506 elem.max_priority = subtable->max_priority;
1507 cls_subtable_cache_push_back(&cls->subtables_priority, elem);
1513 destroy_subtable(struct cls_classifier *cls, struct cls_subtable *subtable)
1516 struct cls_subtable *table = NULL;
1517 struct cls_subtable_entry *iter;
1519 CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
1520 if (table == subtable) {
1521 cls_subtable_cache_remove(&cls->subtables_priority, iter);
1526 trie_destroy(subtable->ports_trie);
1528 for (i = 0; i < subtable->n_indices; i++) {
1529 hindex_destroy(&subtable->indices[i]);
1531 minimask_destroy(&subtable->mask);
1532 hmap_remove(&cls->subtables, &subtable->hmap_node);
1533 hmap_destroy(&subtable->rules);
1537 /* This function performs the following updates for 'subtable' in 'cls'
1538 * following the addition of a new rule with priority 'new_priority' to
1541 * - Update 'subtable->max_priority' and 'subtable->max_count' if necessary.
1543 * - Update 'subtable''s position in 'cls->subtables_priority' if necessary.
1545 * This function should only be called after adding a new rule, not after
1546 * replacing a rule by an identical one or modifying a rule in-place. */
1548 update_subtables_after_insertion(struct cls_classifier *cls,
1549 struct cls_subtable *subtable,
1550 unsigned int new_priority)
1552 if (new_priority == subtable->max_priority) {
1553 ++subtable->max_count;
1554 } else if (new_priority > subtable->max_priority) {
1555 struct cls_subtable *table;
1556 struct cls_subtable_entry *iter, *subtable_iter = NULL;
1558 subtable->max_priority = new_priority;
1559 subtable->max_count = 1;
1561 /* Possibly move 'subtable' earlier in the priority list. If we break
1562 * out of the loop, then 'subtable_iter' should be moved just before
1563 * 'iter'. If the loop terminates normally, then 'iter' will be the
1564 * first list element and we'll move subtable just before that
1565 * (e.g. to the front of the list). */
1566 CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE (table, iter, &cls->subtables_priority) {
1567 if (table == subtable) {
1568 subtable_iter = iter; /* Locate the subtable as we go. */
1569 iter->max_priority = new_priority;
1570 } else if (table->max_priority >= new_priority) {
1571 if (subtable_iter == NULL) {
1572 /* Corrupted cache? */
1573 cls_subtable_cache_reset(cls);
1574 VLOG_ABORT("update_subtables_after_insertion(): Subtable priority list corrupted.");
1582 /* Move 'subtable' just before 'iter' (unless it's already there). */
1583 if (iter != subtable_iter) {
1584 cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
1589 /* This function performs the following updates for 'subtable' in 'cls'
1590 * following the deletion of a rule with priority 'del_priority' from
1593 * - Update 'subtable->max_priority' and 'subtable->max_count' if necessary.
1595 * - Update 'subtable''s position in 'cls->subtables_priority' if necessary.
1597 * This function should only be called after removing a rule, not after
1598 * replacing a rule by an identical one or modifying a rule in-place. */
1600 update_subtables_after_removal(struct cls_classifier *cls,
1601 struct cls_subtable *subtable,
1602 unsigned int del_priority)
1604 if (del_priority == subtable->max_priority && --subtable->max_count == 0) {
1605 struct cls_match *head;
1606 struct cls_subtable *table;
1607 struct cls_subtable_entry *iter, *subtable_iter = NULL;
1609 subtable->max_priority = 0;
1610 HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
1611 if (head->priority > subtable->max_priority) {
1612 subtable->max_priority = head->priority;
1613 subtable->max_count = 1;
1614 } else if (head->priority == subtable->max_priority) {
1615 ++subtable->max_count;
1619 /* Possibly move 'subtable' later in the priority list. If we break
1620 * out of the loop, then 'subtable' should be moved just before that
1621 * 'iter'. If the loop terminates normally, then 'iter' will be the
1622 * list head and we'll move subtable just before that (e.g. to the back
1624 CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
1625 if (table == subtable) {
1626 subtable_iter = iter; /* Locate the subtable as we go. */
1627 iter->max_priority = subtable->max_priority;
1628 } else if (table->max_priority <= subtable->max_priority) {
1629 if (subtable_iter == NULL) {
1630 /* Corrupted cache? */
1631 cls_subtable_cache_reset(cls);
1632 VLOG_ABORT("update_subtables_after_removal(): Subtable priority list corrupted.");
1639 /* Move 'subtable' just before 'iter' (unless it's already there). */
1640 if (iter != subtable_iter) {
1641 cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
1651 /* Returns true if the rest of the subtable can be skipped based on the
1652 * prefix trie lookup results. */
1654 check_tries(struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1655 const unsigned int field_plen[CLS_MAX_TRIES],
1656 const struct range ofs, const struct flow *flow,
1657 struct flow_wildcards *wc)
1661 /* Check if we could avoid fully unwildcarding the next level of
1662 * fields using the prefix tries. The trie checks are done only as
1663 * needed to avoid folding in additional bits to the wildcards mask. */
1664 for (j = 0; j < n_tries; j++) {
1665 /* Is the trie field relevant for this subtable? */
1666 if (field_plen[j]) {
1667 struct trie_ctx *ctx = &trie_ctx[j];
1668 uint8_t be32ofs = ctx->be32ofs;
1670 /* Is the trie field within the current range of fields? */
1671 if (be32ofs >= ofs.start && be32ofs < ofs.end) {
1672 /* On-demand trie lookup. */
1673 if (!ctx->lookup_done) {
1674 ctx->match_plen = trie_lookup(ctx->trie, flow,
1676 ctx->lookup_done = true;
1678 /* Possible to skip the rest of the subtable if subtable's
1679 * prefix on the field is longer than what is known to match
1680 * based on the trie lookup. */
1681 if (field_plen[j] > ctx->match_plen) {
1682 /* RFC: We want the trie lookup to never result in
1683 * unwildcarding any bits that would not be unwildcarded
1684 * otherwise. Since the trie is shared by the whole
1685 * classifier, it is possible that the 'maskbits' contain
1686 * bits that are irrelevant for the partition of the
1687 * classifier relevant for the current flow. */
1689 /* Can skip if the field is already unwildcarded. */
1690 if (mask_prefix_bits_set(wc, be32ofs, ctx->maskbits)) {
1693 /* Check that the trie result will not unwildcard more bits
1694 * than this stage will. */
1695 if (ctx->maskbits <= field_plen[j]) {
1696 /* Unwildcard the bits and skip the rest. */
1697 mask_set_prefix_bits(wc, be32ofs, ctx->maskbits);
1698 /* Note: Prerequisite already unwildcarded, as the only
1699 * prerequisite of the supported trie lookup fields is
1700 * the ethertype, which is currently always
1701 * unwildcarded. */
1712 /* Returns true if 'target' satisfies 'flow'/'mask', that is, if each bit
1713 * that is set in 'mask' has the same value in 'flow' and in 'target'.
1716 * This function is equivalent to miniflow_equal_flow_in_minimask(flow,
1717 * target, mask) but it is faster because of the invariant that
1718 * flow->map and mask->masks.map are the same. */
1720 miniflow_and_mask_matches_flow(const struct miniflow *flow,
1721 const struct minimask *mask,
1722 const struct flow *target)
1724 const uint32_t *flowp = miniflow_get_u32_values(flow);
1725 const uint32_t *maskp = miniflow_get_u32_values(&mask->masks);
1726 uint32_t target_u32;
1728 FLOW_FOR_EACH_IN_MAP(target_u32, target, mask->masks.map) {
1729 if ((*flowp++ ^ target_u32) & *maskp++) {
1737 static inline struct cls_match *
1738 find_match(const struct cls_subtable *subtable, const struct flow *flow,
1741 struct cls_match *rule;
1743 HMAP_FOR_EACH_WITH_HASH (rule, hmap_node, hash, &subtable->rules) {
1744 if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
1753 static struct cls_match *
1754 find_match_wc(const struct cls_subtable *subtable, const struct flow *flow,
1755 struct trie_ctx trie_ctx[CLS_MAX_TRIES], unsigned int n_tries,
1756 struct flow_wildcards *wc)
1758 uint32_t basis = 0, hash;
1759 struct cls_match *rule = NULL;
1763 if (OVS_UNLIKELY(!wc)) {
1764 return find_match(subtable, flow,
1765 flow_hash_in_minimask(flow, &subtable->mask, 0));
1769 /* Try to finish early by checking fields in segments. */
1770 for (i = 0; i < subtable->n_indices; i++) {
1771 struct hindex_node *inode;
1772 ofs.end = subtable->index_ofs[i];
1774 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow,
1778 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1780 ofs.start = ofs.end;
1781 inode = hindex_node_with_hash(&subtable->indices[i], hash);
1783 /* No match, can stop immediately, but must fold in the mask
1784 * covered so far. */
1788 /* If we have narrowed down to a single rule already, check whether
1789 * that rule matches. If it does match, then we're done. If it does
1790 * not match, then we know that we will never get a match, but we do
1791 * not yet know how many wildcards we need to fold into 'wc' so we
1792 * continue iterating through indices to find that out. (We won't
1793 * waste time calling miniflow_and_mask_matches_flow() again because
1794 * we've set 'rule' nonnull.)
1796 * This check shows a measurable benefit with non-trivial flow tables.
1798 * (Rare) hash collisions may cause us to miss the opportunity for this
1800 if (!inode->s && !rule) {
1801 ASSIGN_CONTAINER(rule, inode - i, index_nodes);
1802 if (miniflow_and_mask_matches_flow(&rule->flow, &subtable->mask,
1808 ofs.end = FLOW_U32S;
1809 /* Trie check for the final range. */
1810 if (check_tries(trie_ctx, n_tries, subtable->trie_plen, ofs, flow, wc)) {
1814 /* Multiple potential matches exist, look for one. */
1815 hash = flow_hash_in_minimask_range(flow, &subtable->mask, ofs.start,
1817 rule = find_match(subtable, flow, hash);
1819 /* We already narrowed the matching candidates down to just 'rule',
1820 * but it didn't match. */
1823 if (!rule && subtable->ports_mask_len) {
1824 /* Ports are always part of the final range, if any.
1825 * No match was found for the ports. Use the ports trie to figure out
1826 * which port bits to unwildcard. */
1828 ovs_be32 value, mask;
1830 mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
1831 value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
1832 trie_lookup_value(subtable->ports_trie, &value, &mbits);
1834 ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
1835 mask & htonl(~0 << (32 - mbits));
1837 ofs.start = TP_PORTS_OFS32;
1841 /* Must unwildcard all the fields, as they were looked at. */
1842 flow_wildcards_fold_minimask(wc, &subtable->mask);
1846 /* Must unwildcard the fields looked up so far, if any. */
1848 flow_wildcards_fold_minimask_range(wc, &subtable->mask, 0, ofs.start);
1853 static struct cls_match *
1854 find_equal(struct cls_subtable *subtable, const struct miniflow *flow,
1857 struct cls_match *head;
1859 HMAP_FOR_EACH_WITH_HASH (head, hmap_node, hash, &subtable->rules) {
1860 if (miniflow_equal(&head->flow, flow)) {
1867 static struct cls_match *
1868 insert_rule(struct cls_classifier *cls, struct cls_subtable *subtable,
1869 struct cls_rule *new)
1871 struct cls_match *cls_match = cls_match_alloc(new);
1872 struct cls_match *head;
1873 struct cls_match *old = NULL;
1875 uint32_t basis = 0, hash;
1876 uint8_t prev_be32ofs = 0;
1878 /* Add new node to segment indices. */
1879 for (i = 0; i < subtable->n_indices; i++) {
1880 hash = minimatch_hash_range(&new->match, prev_be32ofs,
1881 subtable->index_ofs[i], &basis);
1882 hindex_insert(&subtable->indices[i], &cls_match->index_nodes[i], hash);
1883 prev_be32ofs = subtable->index_ofs[i];
1885 hash = minimatch_hash_range(&new->match, prev_be32ofs, FLOW_U32S, &basis);
1886 head = find_equal(subtable, &new->match.flow, hash);
1888 hmap_insert(&subtable->rules, &cls_match->hmap_node, hash);
1889 list_init(&cls_match->list);
1892 /* Scan the list for the insertion point that will keep the list in
1893 * order of decreasing priority. */
1894 struct cls_match *rule;
1896 cls_match->hmap_node.hash = hash; /* Otherwise done by hmap_insert. */
1898 FOR_EACH_RULE_IN_LIST (rule, head) {
1899 if (cls_match->priority >= rule->priority) {
1901 /* 'new' is the new highest-priority flow in the list. */
1902 hmap_replace(&subtable->rules,
1903 &rule->hmap_node, &cls_match->hmap_node);
1906 if (cls_match->priority == rule->priority) {
1907 list_replace(&cls_match->list, &rule->list);
1911 list_insert(&rule->list, &cls_match->list);
1917 /* Insert 'new' at the end of the list. */
1918 list_push_back(&head->list, &cls_match->list);
1923 update_subtables_after_insertion(cls, subtable, cls_match->priority);
1925 /* Remove old node from indices. */
1926 for (i = 0; i < subtable->n_indices; i++) {
1927 hindex_remove(&subtable->indices[i], &old->index_nodes[i]);
1933 static struct cls_match *
1934 next_rule_in_list__(struct cls_match *rule)
1936 struct cls_match *next = OBJECT_CONTAINING(rule->list.next, next, list);
1940 static struct cls_match *
1941 next_rule_in_list(struct cls_match *rule)
1943 struct cls_match *next = next_rule_in_list__(rule);
1944 return next->priority < rule->priority ? next : NULL;
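/* Editor's note (illustration, not part of the original file): the rule
 * list is circular and kept in strictly decreasing priority order, so a
 * wrap back to the list head shows up as a priority that is not lower,
 * which next_rule_in_list() maps to NULL (end of iteration). */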
/* A longest-prefix match tree. */
struct trie_node {
    uint32_t prefix;            /* Prefix bits for this node, MSB first. */
    uint8_t  nbits;             /* Never zero, except for the root node. */
    unsigned int n_rules;       /* Number of rules that have this prefix. */
    struct trie_node *edges[2]; /* Both NULL if leaf. */
};

/* Max bits per node.  Must fit in struct trie_node's 'prefix'.
 * Also tested with 16, 8, and 5 to stress the implementation. */
#define TRIE_PREFIX_BITS 32

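/* Example encoding: a node holding the IPv4 prefix 10.1.0.0/16 has
 * prefix == 0x0a010000 and nbits == 16; the prefix bits are left-aligned,
 * MSB first, and the remaining low bits of 'prefix' are zero.  Prefixes
 * longer than TRIE_PREFIX_BITS are split across a chain of nodes. */
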
/* Return at least 'plen' bits of the 'prefix', starting at bit offset 'ofs'.
 * Prefixes are in the network byte order, and the offset 0 corresponds to
 * the most significant bit of the first byte.  The offset can be read as
 * "how many bits to skip from the start of the prefix starting at 'pr'". */
static uint32_t
raw_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    uint32_t prefix;

    pr += ofs / 32; /* Where to start. */
    ofs %= 32;      /* How many bits to skip at 'pr'. */

    prefix = ntohl(*pr) << ofs; /* Get the first 32 - ofs bits. */
    if (plen > 32 - ofs) {      /* Need more than we have already? */
        prefix |= ntohl(*++pr) >> (32 - ofs);
    }
    /* Return with possible unwanted bits at the end. */
    return prefix;
}

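/* Example: if pr[] starts with the bytes 0x0a 0x01 0x02 0x03, then
 * raw_get_prefix(pr, 8, 16) skips the first 8 bits and returns 0x01020300:
 * the 16 requested bits (0x0102) are in the MSBs, and the trailing bits are
 * the "possible unwanted bits" that the caller must mask off. */
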
/* Return min(TRIE_PREFIX_BITS, plen) bits of the 'prefix', starting at bit
 * offset 'ofs'.  Prefixes are in the network byte order, and the offset 0
 * corresponds to the most significant bit of the first byte.  The offset can
 * be read as "how many bits to skip from the start of the prefix starting at
 * 'pr'". */
static uint32_t
trie_get_prefix(const ovs_be32 pr[], unsigned int ofs, unsigned int plen)
{
    if (!plen) {
        return 0;
    }
    if (plen > TRIE_PREFIX_BITS) {
        plen = TRIE_PREFIX_BITS; /* Get at most TRIE_PREFIX_BITS. */
    }
    /* Return with unwanted bits cleared. */
    return raw_get_prefix(pr, ofs, plen) & ~0u << (32 - plen);
}

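/* Continuing the example above: trie_get_prefix(pr, 8, 16) returns
 * 0x01020000, i.e. the same bits with the 16 unwanted trailing bits cleared
 * by the ~0u << (32 - plen) mask. */
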
/* Return the number of equal bits in 'nbits' of 'prefix's MSBs and a 'value'
 * starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
prefix_equal_bits(uint32_t prefix, unsigned int nbits, const ovs_be32 value[],
                  unsigned int ofs)
{
    uint64_t diff = prefix ^ raw_get_prefix(value, ofs, nbits);
    /* Set the bit after the relevant bits to limit the result. */
    return raw_clz64(diff << 32 | UINT64_C(1) << (63 - nbits));
}

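/* Example: prefix = 0xa0000000 (1010...), nbits = 4, and the 4 value bits at
 * 'ofs' are 1011...: diff = 0x10000000, diff << 32 has its highest set bit
 * at "MSB 0" position 3, so raw_clz64() reports 3 equal bits.  When all
 * 'nbits' bits agree, diff << 32 is zero and the guard bit at position
 * 63 - nbits caps the result at exactly 'nbits'. */
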
/* Return the number of equal bits in 'node' prefix and a 'prefix' of length
 * 'plen', starting at "MSB 0"-based offset 'ofs'. */
static unsigned int
trie_prefix_equal_bits(const struct trie_node *node, const ovs_be32 prefix[],
                       unsigned int ofs, unsigned int plen)
{
    return prefix_equal_bits(node->prefix, MIN(node->nbits, plen - ofs),
                             prefix, ofs);
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' can
 * be greater than 31. */
static int
be_get_bit_at(const ovs_be32 value[], unsigned int ofs)
{
    return (((const uint8_t *)value)[ofs / 8] >> (7 - ofs % 8)) & 1u;
}

/* Return the bit at ("MSB 0"-based) offset 'ofs' as an int.  'ofs' must
 * be between 0 and 31, inclusive. */
static int
get_bit_at(const uint32_t prefix, unsigned int ofs)
{
    return (prefix >> (31 - ofs)) & 1u;
}

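/* Example: for a value whose first bytes are 0x0a 0x80,
 * be_get_bit_at(value, 4) == 1 (the first '1' in 0x0a = 00001010) and
 * be_get_bit_at(value, 8) == 1 (the MSB of 0x80), while
 * get_bit_at(0x0a800000, 4) extracts the same bit from a host-order word. */
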
/* Create new branch. */
static struct trie_node *
trie_branch_create(const ovs_be32 *prefix, unsigned int ofs, unsigned int plen,
                   unsigned int n_rules)
{
    struct trie_node *node = xmalloc(sizeof *node);

    node->prefix = trie_get_prefix(prefix, ofs, plen);

    if (plen <= TRIE_PREFIX_BITS) {
        node->nbits = plen;
        node->edges[0] = NULL;
        node->edges[1] = NULL;
        node->n_rules = n_rules;
    } else { /* Need intermediate nodes. */
        struct trie_node *subnode = trie_branch_create(prefix,
                                                       ofs + TRIE_PREFIX_BITS,
                                                       plen - TRIE_PREFIX_BITS,
                                                       n_rules);
        int bit = get_bit_at(subnode->prefix, 0);
        node->nbits = TRIE_PREFIX_BITS;
        node->edges[bit] = subnode;
        node->edges[!bit] = NULL;
        node->n_rules = 0;
    }
    return node;
}

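/* Example: with TRIE_PREFIX_BITS == 32, creating a branch for an IPv6 /48
 * prefix yields a chain of two nodes: the first covers bits 0-31 (nbits ==
 * 32, n_rules == 0) and its single child covers bits 32-47 (nbits == 16)
 * and carries the rule count; the child hangs off the edge selected by bit
 * 32 of the prefix. */
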
static void
trie_node_destroy(struct trie_node *node)
{
    free(node);
}

static void
trie_destroy(struct trie_node *node)
{
    if (node) {
        trie_destroy(node->edges[0]);
        trie_destroy(node->edges[1]);
        free(node);
    }
}

static bool
trie_is_leaf(const struct trie_node *trie)
{
    return !trie->edges[0] && !trie->edges[1]; /* No children. */
}

static void
mask_set_prefix_bits(struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int nbits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;

    for (i = 0; i < nbits / 32; i++) {
        mask[i] = OVS_BE32_MAX;
    }
    if (nbits % 32) {
        mask[i] |= htonl(~0u << (32 - nbits % 32));
    }
}

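/* Example: mask_set_prefix_bits(wc, be32ofs, 20) on an all-zero mask writes
 * htonl(0xfffff000) at 'be32ofs': zero full 32-bit words, then the 20 most
 * significant bits of the partial word.  nbits == 40 would instead write one
 * full word followed by htonl(0xff000000) in the next. */
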
static bool
mask_prefix_bits_set(const struct flow_wildcards *wc, uint8_t be32ofs,
                     unsigned int nbits)
{
    ovs_be32 *mask = &((ovs_be32 *)&wc->masks)[be32ofs];
    unsigned int i;
    ovs_be32 zeroes = 0;

    for (i = 0; i < nbits / 32; i++) {
        zeroes |= ~mask[i];
    }
    if (nbits % 32) {
        zeroes |= ~mask[i] & htonl(~0u << (32 - nbits % 32));
    }

    return !zeroes; /* All 'nbits' bits set. */
}

static struct trie_node **
trie_next_edge(struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges + be_get_bit_at(value, ofs);
}

static const struct trie_node *
trie_next_node(const struct trie_node *node, const ovs_be32 value[],
               unsigned int ofs)
{
    return node->edges[be_get_bit_at(value, ofs)];
}

/* Returns the length of the longest prefix match found for the '*value' in
 * the prefix tree 'node', or 0 if no prefix matches.
 * '*checkbits' is set to the number of bits in the prefix mask necessary to
 * determine a mismatch, in case there are longer prefixes in the tree below
 * the one that matched. */
static unsigned int
trie_lookup_value(const struct trie_node *node, const ovs_be32 value[],
                  unsigned int *checkbits)
{
    unsigned int plen = 0, match_len = 0;
    const struct trie_node *prev = NULL;

    for (; node; prev = node, node = trie_next_node(node, value, plen)) {
        unsigned int eqbits;
        /* Check if this edge can be followed. */
        eqbits = prefix_equal_bits(node->prefix, node->nbits, value, plen);
        plen += eqbits;
        if (eqbits < node->nbits) { /* Mismatch, nothing more to be found. */
            /* Bit at offset 'plen' differed. */
            *checkbits = plen + 1; /* Includes the first mismatching bit. */
            return match_len;
        }
        /* Full match, check if rules exist at this prefix length. */
        if (node->n_rules > 0) {
            match_len = plen;
        }
    }
    /* Dead end, exclude the other branch if it exists. */
    *checkbits = !prev || trie_is_leaf(prev) ? plen : plen + 1;
    return match_len;
}

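/* Worked example (illustrative): a trie holding 10.0.0.0/8 (n_rules > 0)
 * with a child branch for 10.128.0.0/9.  Looking up 10.64.1.1 matches the /8
 * node completely (match_len = 8); then bit 8 of the value selects a NULL
 * edge while the sibling /9 branch exists, so *checkbits = 9: examining the
 * 9 most significant bits is enough to prove that no longer prefix matches
 * this value. */
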
static unsigned int
trie_lookup(const struct cls_trie *trie, const struct flow *flow,
            unsigned int *checkbits)
{
    const struct mf_field *mf = trie->field;

    /* Check that current flow matches the prerequisites for the trie
     * field.  Some match fields are used for multiple purposes, so we
     * must check that the trie is relevant for this flow. */
    if (mf_are_prereqs_ok(mf, flow)) {
        return trie_lookup_value(trie->root,
                                 &((ovs_be32 *)flow)[mf->flow_be32ofs],
                                 checkbits);
    }
    *checkbits = 0; /* Value not used in this case. */
    return UINT_MAX; /* Any match. */
}

/* Returns the length of a prefix match mask for the field 'mf' in 'minimask',
 * or 0 if the mask is not a contiguous CIDR prefix. */
static unsigned int
minimask_get_prefix_len(const struct minimask *minimask,
                        const struct mf_field *mf)
{
    unsigned int nbits = 0, mask_tz = 0; /* Non-zero when end of mask seen. */
    uint8_t u32_ofs = mf->flow_be32ofs;
    uint8_t u32_end = u32_ofs + mf->n_bytes / 4;

    for (; u32_ofs < u32_end; ++u32_ofs) {
        uint32_t mask;
        mask = ntohl((OVS_FORCE ovs_be32)minimask_get(minimask, u32_ofs));

        /* Validate mask, count the mask length. */
        if (mask_tz) {
            if (mask) {
                return 0; /* No bits allowed after mask ended. */
            }
        } else {
            if (~mask & (~mask + 1)) {
                return 0; /* Mask not contiguous. */
            }
            mask_tz = ctz32(mask);
            nbits += 32 - mask_tz;
        }
    }

    return nbits;
}

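/* Example: mask 0xffffff00 is CIDR, since ~mask & (~mask + 1) ==
 * 0x000000ff & 0x00000100 == 0; ctz32() reports 8 trailing zeros, adding
 * 32 - 8 == 24 bits.  A mask like 0xff00ff00 fails the same test
 * (0x00ff00ff & 0x00ff0100 != 0), so the function returns 0. */
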
/*
 * This is called only when mask prefix is known to be CIDR and non-zero.
 * Relies on the fact that the flow and mask have the same map, and since
 * the mask is CIDR, the storage for the flow field exists even if it
 * happened to be zeros.
 */
static const ovs_be32 *
minimatch_get_prefix(const struct minimatch *match, const struct mf_field *mf)
{
    return miniflow_get_be32_values(&match->flow) +
        count_1bits(match->flow.map & ((UINT64_C(1) << mf->flow_be32ofs) - 1));
}

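/* Example: if the miniflow map has bits set for u32 offsets 0, 5 and 6 and
 * mf->flow_be32ofs == 6, then count_1bits(map & ((UINT64_C(1) << 6) - 1))
 * == 2, so the returned pointer addresses the third compressed value, the
 * one that stores the flow data for offset 6. */
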
/* Insert rule into the prefix tree.
 * 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_insert_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

static void
trie_insert_prefix(struct trie_node **edge, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    int ofs = 0;

    /* Walk the tree. */
    for (; (node = *edge) != NULL;
         edge = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
        ofs += eqbits;
        if (eqbits < node->nbits) {
            /* Mismatch, new node needs to be inserted above. */
            int old_branch = get_bit_at(node->prefix, eqbits);

            /* New parent node. */
            *edge = trie_branch_create(prefix, ofs - eqbits, eqbits,
                                       ofs == mlen ? 1 : 0);

            /* Adjust old node for its new position in the tree. */
            node->prefix <<= eqbits;
            node->nbits -= eqbits;
            (*edge)->edges[old_branch] = node;

            /* Check if need a new branch for the new rule. */
            if (ofs < mlen) {
                (*edge)->edges[!old_branch]
                    = trie_branch_create(prefix, ofs, mlen - ofs, 1);
            }
            return;
        }
        /* Full match so far. */

        if (ofs == mlen) {
            /* Full match at the current node, rule needs to be added here. */
            node->n_rules++;
            return;
        }
    }
    /* Must insert a new tree branch for the new rule. */
    *edge = trie_branch_create(prefix, ofs, mlen - ofs, 1);
}

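/* Example: inserting 10.0.0.0/8 into an empty trie creates one node with
 * nbits == 8 and n_rules == 1.  Inserting 12.0.0.0/8 next shares only 5 bits
 * with it (0x0a = 00001010, 0x0c = 00001100), so a 5-bit parent with
 * n_rules == 0 is created and the two /8s become 3-bit leaves beneath it. */
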
/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
 * in 'rule'. */
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
    trie_remove_prefix(&trie->root,
                       minimatch_get_prefix(&rule->match, trie->field), mlen);
}

/* 'mlen' must be the (non-zero) CIDR prefix length of the mask whose prefix
 * bits '*prefix' holds. */
static void
trie_remove_prefix(struct trie_node **root, const ovs_be32 *prefix, int mlen)
{
    struct trie_node *node;
    struct trie_node **edges[sizeof(union mf_value) * 8];
    int depth = 0, ofs = 0;

    /* Walk the tree. */
    for (edges[0] = root;
         (node = *edges[depth]) != NULL;
         edges[++depth] = trie_next_edge(node, prefix, ofs)) {
        unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);

        if (eqbits < node->nbits) {
            /* Mismatch, nothing to be removed.  This should never happen, as
             * only rules in the classifier are ever removed. */
            break; /* Log a warning. */
        }
        /* Full match so far. */
        ofs += eqbits;

        if (ofs == mlen) {
            /* Full prefix match at the current node, remove rule here. */
            if (!node->n_rules) {
                break; /* Log a warning. */
            }
            node->n_rules--;

            /* Check if can prune the tree. */
            while (!node->n_rules && !(node->edges[0] && node->edges[1])) {
                /* No rules and at most one child node, remove this node. */
                struct trie_node *next;
                next = node->edges[0] ? node->edges[0] : node->edges[1];

                if (next) {
                    if (node->nbits + next->nbits > TRIE_PREFIX_BITS) {
                        break;   /* Cannot combine. */
                    }
                    /* Combine node with next. */
                    next->prefix = node->prefix | next->prefix >> node->nbits;
                    next->nbits += node->nbits;
                }
                trie_node_destroy(node);
                /* Update the parent's edge. */
                *edges[depth] = next;
                if (next || !depth) {
                    /* Branch not pruned or at root, nothing more to do. */
                    return;
                }
                node = *edges[--depth];
            }
            return;
        }
    }
    /* Cannot go deeper.  This should never happen, since only rules that
     * actually exist in the classifier are ever removed. */
    VLOG_WARN("Trying to remove non-existing rule from a prefix trie.");
}

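/* Continuing the insertion example above: removing 12.0.0.0/8 drops its
 * 3-bit leaf, after which the 5-bit parent has no rules and a single child,
 * so the pruning loop combines them (5 + 3 <= TRIE_PREFIX_BITS) back into
 * one 8-bit node for 10.0.0.0/8. */
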