#include "dynamic-string.h"
#include "flow.h"
#include "hash.h"
+#include "hindex.h"
+#include "hmap.h"
+#include "list.h"
#include "odp-util.h"
#include "ofp-util.h"
#include "ovs-thread.h"
#include "packets.h"
+#include "tag.h"
+#include "util.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(classifier);
struct trie_node;
+struct trie_ctx;
+
+/* Ports trie depends on both ports sharing the same ovs_be32. */
+#define TP_PORTS_OFS32 (offsetof(struct flow, tp_src) / 4)
+BUILD_ASSERT_DECL(TP_PORTS_OFS32 == offsetof(struct flow, tp_dst) / 4);
/* Prefix trie for a 'field' */
struct cls_trie {
unsigned int max_priority;
};
-struct cls_subtable_cache {
- struct cls_subtable_entry *subtables;
+struct cls_subtables {
+ size_t count; /* One past last valid array element. */
size_t alloc_size; /* Number of allocated elements. */
- size_t size; /* One past last valid array element. */
+ struct cls_subtable_entry *array;
};
enum {
};
struct cls_classifier {
- int n_rules; /* Total number of rules. */
+ int n_rules; /* Total number of rules. */
uint8_t n_flow_segments;
uint8_t flow_segments[CLS_MAX_INDICES]; /* Flow segment boundaries to use
* for staged lookup. */
- struct hmap subtables; /* Contains "struct cls_subtable"s. */
- struct cls_subtable_cache subtables_priority;
- struct hmap partitions; /* Contains "struct cls_partition"s. */
+ struct hmap subtables_map; /* Contains "struct cls_subtable"s. */
+ struct cls_subtables subtables;
+ struct hmap partitions; /* Contains "struct cls_partition"s. */
struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */
unsigned int n_tries;
};
/* A set of rules that all have the same fields wildcarded. */
struct cls_subtable {
- struct hmap_node hmap_node; /* Within struct cls_classifier 'subtables'
+ struct hmap_node hmap_node; /* Within struct cls_classifier 'subtables_map'
* hmap. */
struct hmap rules; /* Contains "struct cls_rule"s. */
int n_rules; /* Number of rules, including duplicates. */
uint8_t index_ofs[CLS_MAX_INDICES]; /* u32 flow segment boundaries. */
struct hindex indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'. */
+ int ports_mask_len;
+ struct trie_node *ports_trie; /* NULL if none. */
struct minimask mask; /* Wildcards for fields. */
/* 'mask' must be the last field. */
};
return cls_match;
}
-struct trie_ctx;
static struct cls_subtable *find_subtable(const struct cls_classifier *,
const struct minimask *);
static struct cls_subtable *insert_subtable(struct cls_classifier *,
const struct mf_field *);
static unsigned int trie_lookup(const struct cls_trie *, const struct flow *,
unsigned int *checkbits);
-
+static unsigned int trie_lookup_value(const struct trie_node *,
+ const ovs_be32 value[],
+ unsigned int *checkbits);
static void trie_destroy(struct trie_node *);
static void trie_insert(struct cls_trie *, const struct cls_rule *, int mlen);
+static void trie_insert_prefix(struct trie_node **, const ovs_be32 *prefix,
+ int mlen);
static void trie_remove(struct cls_trie *, const struct cls_rule *, int mlen);
+static void trie_remove_prefix(struct trie_node **, const ovs_be32 *prefix,
+ int mlen);
static void mask_set_prefix_bits(struct flow_wildcards *, uint8_t be32ofs,
unsigned int nbits);
static bool mask_prefix_bits_set(const struct flow_wildcards *,
uint8_t be32ofs, unsigned int nbits);
+/* Initializes 'subtables' to an empty, unallocated state. */
static void
-cls_subtable_cache_init(struct cls_subtable_cache *array)
+cls_subtables_init(struct cls_subtables *subtables)
{
-    memset(array, 0, sizeof *array);
+    memset(subtables, 0, sizeof *subtables);
}
+/* Frees the array owned by 'subtables' and resets it to the empty state.
+ * Does not free the subtables themselves; those are owned elsewhere (see
+ * 'subtables_map' handling in classifier destruction). */
static void
-cls_subtable_cache_destroy(struct cls_subtable_cache *array)
+cls_subtables_destroy(struct cls_subtables *subtables)
{
-    free(array->subtables);
-    memset(array, 0, sizeof *array);
+    free(subtables->array);
+    memset(subtables, 0, sizeof *subtables);
}
-/* Array insertion. */
+/* Appends 'a' at the end of 'subtables', growing the allocation if full. */
static void
-cls_subtable_cache_push_back(struct cls_subtable_cache *array,
-                             struct cls_subtable_entry a)
+cls_subtables_push_back(struct cls_subtables *subtables,
+                        struct cls_subtable_entry a)
{
-    if (array->size == array->alloc_size) {
-        array->subtables = x2nrealloc(array->subtables, &array->alloc_size,
+    if (subtables->count == subtables->alloc_size) {
+        subtables->array = x2nrealloc(subtables->array, &subtables->alloc_size,
                                      sizeof a);
    }
-    array->subtables[array->size++] = a;
+    subtables->array[subtables->count++] = a;
}
-/* Only for rearranging entries in the same cache. */
+/* Moves the subtable entry at 'from' to 'to' within the same array, shifting
+ * the entries in between (including the one at 'to') by one position to fill
+ * the gap.  Works in either direction; a no-op when 'to' == 'from'. */
static inline void
-cls_subtable_cache_splice(struct cls_subtable_entry *to,
-                          struct cls_subtable_entry *start,
-                          struct cls_subtable_entry *end)
-{
-    if (to > end) {
-        /* Same as splicing entries to (start) from [end, to). */
-        struct cls_subtable_entry *temp = to;
-        to = start; start = end; end = temp;
-    }
-    if (to < start) {
-        while (start != end) {
-            struct cls_subtable_entry temp = *start;
-
-            memmove(to + 1, to, (start - to) * sizeof *to);
-            *to = temp;
-            start++;
+cls_subtables_move(struct cls_subtable_entry *to,
+                   struct cls_subtable_entry *from)
+{
+    if (to != from) {
+        struct cls_subtable_entry temp = *from;
+
+        if (to > from) {
+            /* Shift entries (from,to] backwards to make space at 'to'. */
+            memmove(from, from + 1, (to - from) * sizeof *to);
+        } else {
+            /* Shift entries [to,from) forward to make space at 'to'. */
+            memmove(to + 1, to, (from - to) * sizeof *to);
        }
-    } /* Else nothing to be done. */
+
+        *to = temp;
+    }
}
-/* Array removal. */
+/* Removes 'elem' from 'subtables', shifting any following entries down to
+ * fill the gap. */
static inline void
-cls_subtable_cache_remove(struct cls_subtable_cache *array,
-                          struct cls_subtable_entry *elem)
+cls_subtables_remove(struct cls_subtables *subtables,
+                     struct cls_subtable_entry *elem)
{
-    ssize_t size = (&array->subtables[array->size]
+    ssize_t size = (&subtables->array[subtables->count]
                   - (elem + 1)) * sizeof *elem;
    if (size > 0) {
        memmove(elem, elem + 1, size);
    }
-    array->size--;
+    subtables->count--;
}
-#define CLS_SUBTABLE_CACHE_FOR_EACH(SUBTABLE, ITER, ARRAY)    \
-    for (ITER = (ARRAY)->subtables;                           \
-         ITER < &(ARRAY)->subtables[(ARRAY)->size]            \
-             && OVS_LIKELY(SUBTABLE = ITER->subtable);        \
-         ++ITER)
-#define CLS_SUBTABLE_CACHE_FOR_EACH_CONTINUE(SUBTABLE, ITER, ARRAY) \
-    for (++ITER;                                              \
-         ITER < &(ARRAY)->subtables[(ARRAY)->size]            \
-             && OVS_LIKELY(SUBTABLE = ITER->subtable);        \
-         ++ITER)
-#define CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE(SUBTABLE, ITER, ARRAY)  \
-    for (ITER = &(ARRAY)->subtables[(ARRAY)->size];           \
-         ITER > (ARRAY)->subtables                            \
-             && OVS_LIKELY(SUBTABLE = (--ITER)->subtable);)
+/* Iteration helpers for 'struct cls_subtables': forward (array order, i.e.,
+ * descending max priority), continue-from-'ITER', and reverse.  Each sets
+ * SUBTABLE to the current entry's subtable before the loop body runs. */
+#define CLS_SUBTABLES_FOR_EACH(SUBTABLE, ITER, SUBTABLES)      \
+    for ((ITER) = (SUBTABLES)->array;                          \
+         (ITER) < &(SUBTABLES)->array[(SUBTABLES)->count]      \
+             && OVS_LIKELY((SUBTABLE) = (ITER)->subtable);     \
+         ++(ITER))
+#define CLS_SUBTABLES_FOR_EACH_CONTINUE(SUBTABLE, ITER, SUBTABLES) \
+    for (++(ITER);                                             \
+         (ITER) < &(SUBTABLES)->array[(SUBTABLES)->count]      \
+             && OVS_LIKELY((SUBTABLE) = (ITER)->subtable);     \
+         ++(ITER))
+#define CLS_SUBTABLES_FOR_EACH_REVERSE(SUBTABLE, ITER, SUBTABLES)  \
+    for ((ITER) = &(SUBTABLES)->array[(SUBTABLES)->count];     \
+         (ITER) > (SUBTABLES)->array                           \
+             && OVS_LIKELY((SUBTABLE) = (--(ITER))->subtable);)
+
+/* Sanity-checks the 'subtables' cache: each entry's cached 'max_priority'
+ * must match its subtable's, and the entries must be sorted in descending
+ * priority order.  Inconsistencies are only logged, not fixed. */
+static void
+cls_subtables_verify(struct cls_subtables *subtables)
+{
+    struct cls_subtable *table;
+    struct cls_subtable_entry *iter;
+    unsigned int priority = 0;
+
+    /* Walk back-to-front, so priorities should be non-decreasing. */
+    CLS_SUBTABLES_FOR_EACH_REVERSE (table, iter, subtables) {
+        if (iter->max_priority != table->max_priority) {
+            VLOG_WARN("Subtable %p has mismatching priority in cache (%u != %u)",
+                      table, iter->max_priority, table->max_priority);
+        }
+        if (iter->max_priority < priority) {
+            VLOG_WARN("Subtable cache is out of order (%u < %u)",
+                      iter->max_priority, priority);
+        }
+        priority = iter->max_priority;
+    }
+}
+
+/* Rebuilds the 'cls->subtables' priority-ordered cache from scratch, using
+ * 'cls->subtables_map' as the authoritative source, and logs every
+ * inconsistency found on the way (stale subtable max_priority/max_count,
+ * mismatching cached priorities, duplicate or missing cache entries, and
+ * differing old/new cache sizes).  Called when cache corruption has been
+ * detected. */
+static void
+cls_subtables_reset(struct cls_classifier *cls)
+{
+    struct cls_subtables old = cls->subtables;
+    struct cls_subtable *subtable;
+
+    VLOG_WARN("Resetting subtable cache.");
+
+    cls_subtables_verify(&cls->subtables);
+
+    cls_subtables_init(&cls->subtables);
+
+    HMAP_FOR_EACH (subtable, hmap_node, &cls->subtables_map) {
+        struct cls_match *head;
+        struct cls_subtable_entry elem;
+        struct cls_subtable *table;
+        struct cls_subtable_entry *iter, *from = NULL;
+        unsigned int new_max = 0;
+        unsigned int max_count = 0;
+        bool found;
+
+        /* Verify max_priority. */
+        HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
+            if (head->priority > new_max) {
+                new_max = head->priority;
+                max_count = 1;
+            } else if (head->priority == new_max) {
+                max_count++;
+            }
+        }
+        if (new_max != subtable->max_priority ||
+            max_count != subtable->max_count) {
+            VLOG_WARN("subtable %p (%u rules) has mismatching max_priority "
+                      "(%u) or max_count (%u). Highest priority found was %u, "
+                      "count: %u",
+                      subtable, subtable->n_rules, subtable->max_priority,
+                      subtable->max_count, new_max, max_count);
+            subtable->max_priority = new_max;
+            subtable->max_count = max_count;
+        }
+
+        /* Locate the subtable from the old cache. */
+        found = false;
+        CLS_SUBTABLES_FOR_EACH (table, iter, &old) {
+            if (table == subtable) {
+                if (iter->max_priority != new_max) {
+                    VLOG_WARN("Subtable %p has wrong max priority (%u != %u) "
+                              "in the old cache.",
+                              subtable, iter->max_priority, new_max);
+                }
+                if (found) {
+                    VLOG_WARN("Subtable %p duplicated in the old cache.",
+                              subtable);
+                }
+                found = true;
+            }
+        }
+        if (!found) {
+            VLOG_WARN("Subtable %p not found from the old cache.", subtable);
+        }
+
+        elem.subtable = subtable;
+        elem.tag = subtable->tag;
+        elem.max_priority = subtable->max_priority;
+        cls_subtables_push_back(&cls->subtables, elem);
+
+        /* Possibly move 'subtable' earlier in the priority array.  If
+         * we break out of the loop, then the subtable (at 'from')
+         * should be moved to the position right after the current
+         * element.  If the loop terminates normally, then 'iter' will
+         * be at the first array element and we'll move the subtable
+         * to the front of the array.
+         * Note: 'from' is always found before any break, since the
+         * reverse walk starts at the entry just pushed above. */
+        CLS_SUBTABLES_FOR_EACH_REVERSE (table, iter, &cls->subtables) {
+            if (table == subtable) {
+                from = iter; /* Locate the subtable as we go. */
+            } else if (table->max_priority >= new_max) {
+                ovs_assert(from != NULL);
+                iter++; /* After this. */
+                break;
+            }
+        }
+
+        /* Move subtable at 'from' to 'iter'. */
+        cls_subtables_move(iter, from);
+    }
+
+    /* Verify that the old and the new have the same size. */
+    if (old.count != cls->subtables.count) {
+        VLOG_WARN("subtables cache sizes differ: old (%"PRIuSIZE
+                  ") != new (%"PRIuSIZE").",
+                  old.count, cls->subtables.count);
+    }
+
+    cls_subtables_destroy(&old);
+
+    cls_subtables_verify(&cls->subtables);
+}
\f
/* flow/miniflow/minimask/minimatch utilities.
cls_->cls = cls;
cls->n_rules = 0;
- hmap_init(&cls->subtables);
- cls_subtable_cache_init(&cls->subtables_priority);
+ hmap_init(&cls->subtables_map);
+ cls_subtables_init(&cls->subtables);
hmap_init(&cls->partitions);
cls->n_flow_segments = 0;
if (flow_segments) {
{
if (cls_) {
struct cls_classifier *cls = cls_->cls;
- struct cls_subtable *partition, *next_partition;
+ struct cls_partition *partition, *next_partition;
struct cls_subtable *subtable, *next_subtable;
int i;
}
HMAP_FOR_EACH_SAFE (subtable, next_subtable, hmap_node,
- &cls->subtables) {
+ &cls->subtables_map) {
destroy_subtable(cls, subtable);
}
- hmap_destroy(&cls->subtables);
+ hmap_destroy(&cls->subtables_map);
HMAP_FOR_EACH_SAFE (partition, next_partition, hmap_node,
&cls->partitions) {
}
hmap_destroy(&cls->partitions);
- cls_subtable_cache_destroy(&cls->subtables_priority);
+ cls_subtables_destroy(&cls->subtables);
free(cls);
}
}
trie->field = field;
/* Add existing rules to the trie. */
- CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
+ CLS_SUBTABLES_FOR_EACH (subtable, iter, &cls->subtables) {
unsigned int plen;
plen = field ? minimask_get_prefix_len(&subtable->mask, field) : 0;
return partition;
}
+/* Returns the wildcard-masked value of the single 32-bit word holding both
+ * transport ports ('tp_src' and 'tp_dst') of 'match'; see TP_PORTS_OFS32. */
+static inline ovs_be32 minimatch_get_ports(const struct minimatch *match)
+{
+    /* Could optimize to use the same map if needed for fast path. */
+    return MINIFLOW_GET_BE32(&match->flow, tp_src)
+        & MINIFLOW_GET_BE32(&match->mask.masks, tp_src);
+}
+
/* Inserts 'rule' into 'cls'. Until 'rule' is removed from 'cls', the caller
* must not modify or free it.
*
trie_insert(&cls->tries[i], rule, subtable->trie_plen[i]);
}
}
+
+ /* Ports trie. */
+ if (subtable->ports_mask_len) {
+ /* We mask the value to be inserted to always have the wildcarded
+ * bits in known (zero) state, so we can include them in comparison
+ * and they will always match (== their original value does not
+ * matter). */
+ ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
+
+ trie_insert_prefix(&subtable->ports_trie, &masked_ports,
+ subtable->ports_mask_len);
+ }
+
return NULL;
} else {
struct cls_rule *old_cls_rule = old_rule->cls_rule;
ovs_assert(cls_match);
subtable = find_subtable(cls, &rule->match.mask);
-
ovs_assert(subtable);
+ if (subtable->ports_mask_len) {
+ ovs_be32 masked_ports = minimatch_get_ports(&rule->match);
+
+ trie_remove_prefix(&subtable->ports_trie,
+ &masked_ports, subtable->ports_mask_len);
+ }
for (i = 0; i < cls->n_tries; i++) {
if (subtable->trie_plen[i]) {
trie_remove(&cls->tries[i], rule, subtable->trie_plen[i]);
struct cls_match *best;
struct trie_ctx trie_ctx[CLS_MAX_TRIES];
int i;
- struct cls_subtable_entry *subtables = cls->subtables_priority.subtables;
- int n_subtables = cls->subtables_priority.size;
+ struct cls_subtable_entry *subtables = cls->subtables.array;
+ int n_subtables = cls->subtables.count;
int64_t best_priority = -1;
/* Prefetch the subtables array. */
/* Prefetch the first subtables. */
if (n_subtables > 1) {
- lookahead_subtable(subtables);
- lookahead_subtable(subtables + 1);
+ lookahead_subtable(subtables);
+ lookahead_subtable(subtables + 1);
}
best = NULL;
struct cls_subtable *subtable;
struct cls_subtable_entry *iter;
- CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
+ CLS_SUBTABLES_FOR_EACH (subtable, iter, &cls->subtables) {
struct cls_match *rule;
rule = find_match_miniflow(subtable, flow,
struct cls_subtable_entry *iter;
/* Iterate subtables in the descending max priority order. */
- CLS_SUBTABLE_CACHE_FOR_EACH (subtable, iter, &cls->subtables_priority) {
+ CLS_SUBTABLES_FOR_EACH (subtable, iter, &cls->subtables) {
uint32_t storage[FLOW_U32S];
struct minimask mask;
struct cls_match *head;
{
struct cls_subtable *subtable;
- HMAP_FOR_EACH (subtable, hmap_node, &cursor->cls->subtables) {
+ HMAP_FOR_EACH (subtable, hmap_node, &cursor->cls->subtables_map) {
struct cls_match *rule = search_subtable(subtable, cursor->target);
if (rule) {
cursor->subtable = subtable;
}
subtable = cursor->subtable;
- HMAP_FOR_EACH_CONTINUE (subtable, hmap_node, &cursor->cls->subtables) {
+ HMAP_FOR_EACH_CONTINUE (subtable, hmap_node, &cursor->cls->subtables_map) {
rule = search_subtable(subtable, cursor->target);
if (rule) {
cursor->subtable = subtable;
struct cls_subtable *subtable;
HMAP_FOR_EACH_IN_BUCKET (subtable, hmap_node, minimask_hash(mask, 0),
- &cls->subtables) {
+ &cls->subtables_map) {
if (minimask_equal(mask, &subtable->mask)) {
return subtable;
}
cls->tries[i].field);
}
- hmap_insert(&cls->subtables, &subtable->hmap_node, hash);
+ /* Ports trie. */
+ subtable->ports_trie = NULL;
+ subtable->ports_mask_len
+ = 32 - ctz32(ntohl(MINIFLOW_GET_BE32(&mask->masks, tp_src)));
+
+ hmap_insert(&cls->subtables_map, &subtable->hmap_node, hash);
elem.subtable = subtable;
elem.tag = subtable->tag;
elem.max_priority = subtable->max_priority;
- cls_subtable_cache_push_back(&cls->subtables_priority, elem);
+ cls_subtables_push_back(&cls->subtables, elem);
return subtable;
}
struct cls_subtable *table = NULL;
struct cls_subtable_entry *iter;
- CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
+ CLS_SUBTABLES_FOR_EACH (table, iter, &cls->subtables) {
if (table == subtable) {
- cls_subtable_cache_remove(&cls->subtables_priority, iter);
+ cls_subtables_remove(&cls->subtables, iter);
break;
}
}
+ trie_destroy(subtable->ports_trie);
+
for (i = 0; i < subtable->n_indices; i++) {
hindex_destroy(&subtable->indices[i]);
}
minimask_destroy(&subtable->mask);
- hmap_remove(&cls->subtables, &subtable->hmap_node);
+ hmap_remove(&cls->subtables_map, &subtable->hmap_node);
hmap_destroy(&subtable->rules);
free(subtable);
}
*
* - Update 'subtable->max_priority' and 'subtable->max_count' if necessary.
*
- * - Update 'subtable''s position in 'cls->subtables_priority' if necessary.
+ * - Update 'subtable''s position in 'cls->subtables' if necessary.
*
* This function should only be called after adding a new rule, not after
* replacing a rule by an identical one or modifying a rule in-place. */
++subtable->max_count;
} else if (new_priority > subtable->max_priority) {
struct cls_subtable *table;
- struct cls_subtable_entry *iter, *subtable_iter = NULL;
+ struct cls_subtable_entry *iter, *from = NULL;
subtable->max_priority = new_priority;
subtable->max_count = 1;
- /* Possibly move 'subtable' earlier in the priority list. If we break
- * out of the loop, then 'subtable_iter' should be moved just before
- * 'iter'. If the loop terminates normally, then 'iter' will be the
- * first list element and we'll move subtable just before that
- * (e.g. to the front of the list). */
- CLS_SUBTABLE_CACHE_FOR_EACH_REVERSE (table, iter, &cls->subtables_priority) {
+ /* Possibly move 'subtable' earlier in the priority array. If
+ * we break out of the loop, then the subtable (at 'from')
+ * should be moved to the position right after the current
+ * element. If the loop terminates normally, then 'iter' will
+ * be at the first array element and we'll move the subtable
+ * to the front of the array. */
+ CLS_SUBTABLES_FOR_EACH_REVERSE (table, iter, &cls->subtables) {
if (table == subtable) {
- subtable_iter = iter; /* Locate the subtable as we go. */
+ from = iter; /* Locate the subtable as we go. */
iter->max_priority = new_priority;
} else if (table->max_priority >= new_priority) {
- ovs_assert(subtable_iter != NULL);
- iter++;
+ if (from == NULL) {
+ /* Corrupted cache? */
+ cls_subtables_reset(cls);
+ VLOG_ABORT("update_subtables_after_insertion(): Subtable priority list corrupted.");
+ OVS_NOT_REACHED();
+ }
+ iter++; /* After this. */
break;
}
}
- /* Move 'subtable' just before 'iter' (unless it's already there). */
- if (iter != subtable_iter) {
- cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
- }
+ /* Move subtable at 'from' to 'iter'. */
+ cls_subtables_move(iter, from);
}
}
*
* - Update 'subtable->max_priority' and 'subtable->max_count' if necessary.
*
- * - Update 'subtable''s position in 'cls->subtables_priority' if necessary.
+ * - Update 'subtable''s position in 'cls->subtables' if necessary.
*
* This function should only be called after removing a rule, not after
* replacing a rule by an identical one or modifying a rule in-place. */
if (del_priority == subtable->max_priority && --subtable->max_count == 0) {
struct cls_match *head;
struct cls_subtable *table;
- struct cls_subtable_entry *iter, *subtable_iter = NULL;
+ struct cls_subtable_entry *iter, *from = NULL;
subtable->max_priority = 0;
HMAP_FOR_EACH (head, hmap_node, &subtable->rules) {
}
}
- /* Possibly move 'subtable' later in the priority list. If we break
- * out of the loop, then 'subtable' should be moved just before that
- * 'iter'. If the loop terminates normally, then 'iter' will be the
- * list head and we'll move subtable just before that (e.g. to the back
- * of the list). */
- CLS_SUBTABLE_CACHE_FOR_EACH (table, iter, &cls->subtables_priority) {
+ /* Possibly move 'subtable' later in the priority array.
+ * After the loop the 'iter' will point right after the position
+ * at which the subtable should be moved (either at a subtable
+ * with an equal or lower priority, or just past the array),
+ * so it is decremented once. */
+ CLS_SUBTABLES_FOR_EACH (table, iter, &cls->subtables) {
if (table == subtable) {
- subtable_iter = iter; /* Locate the subtable as we go. */
+ from = iter; /* Locate the subtable as we go. */
iter->max_priority = subtable->max_priority;
} else if (table->max_priority <= subtable->max_priority) {
- ovs_assert(subtable_iter != NULL);
+ if (from == NULL) {
+ /* Corrupted cache? */
+ cls_subtables_reset(cls);
+ VLOG_ABORT("update_subtables_after_removal(): Subtable priority list corrupted.");
+ OVS_NOT_REACHED();
+ }
break;
}
}
+ /* Now at one past the destination. */
+ iter--;
- /* Move 'subtable' just before 'iter' (unless it's already there). */
- if (iter != subtable_iter) {
- cls_subtable_cache_splice(iter, subtable_iter, subtable_iter + 1);
- }
+ /* Move subtable at 'from' to 'iter'. */
+ cls_subtables_move(iter, from);
}
}
* but it didn't match. */
rule = NULL;
}
+ if (!rule && subtable->ports_mask_len) {
+ /* Ports are always part of the final range, if any.
+ * No match was found for the ports. Use the ports trie to figure out
+ * which ports bits to unwildcard. */
+ unsigned int mbits;
+ ovs_be32 value, mask;
+
+ mask = MINIFLOW_GET_BE32(&subtable->mask.masks, tp_src);
+ value = ((OVS_FORCE ovs_be32 *)flow)[TP_PORTS_OFS32] & mask;
+ trie_lookup_value(subtable->ports_trie, &value, &mbits);
+
+ ((OVS_FORCE ovs_be32 *)&wc->masks)[TP_PORTS_OFS32] |=
+ mask & htonl(~0 << (32 - mbits));
+
+ ofs.start = TP_PORTS_OFS32;
+ goto range_out;
+ }
out:
/* Must unwildcard all the fields, as they were looked at. */
flow_wildcards_fold_minimask(wc, &subtable->mask);
static void
trie_insert(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
- const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field);
+ trie_insert_prefix(&trie->root,
+ minimatch_get_prefix(&rule->match, trie->field), mlen);
+}
+
+static void
+trie_insert_prefix(struct trie_node **edge, const ovs_be32 *prefix, int mlen)
+{
struct trie_node *node;
- struct trie_node **edge;
int ofs = 0;
/* Walk the tree. */
- for (edge = &trie->root;
- (node = *edge) != NULL;
+ for (; (node = *edge) != NULL;
edge = trie_next_edge(node, prefix, ofs)) {
unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
ofs += eqbits;
static void
trie_remove(struct cls_trie *trie, const struct cls_rule *rule, int mlen)
{
- const ovs_be32 *prefix = minimatch_get_prefix(&rule->match, trie->field);
+ trie_remove_prefix(&trie->root,
+ minimatch_get_prefix(&rule->match, trie->field), mlen);
+}
+
+/* 'mlen' must be the (non-zero) CIDR prefix length of the 'trie->field' mask
+ * in 'rule'. */
+static void
+trie_remove_prefix(struct trie_node **root, const ovs_be32 *prefix, int mlen)
+{
struct trie_node *node;
struct trie_node **edges[sizeof(union mf_value) * 8];
int depth = 0, ofs = 0;
/* Walk the tree. */
- for (edges[depth] = &trie->root;
+ for (edges[0] = root;
(node = *edges[depth]) != NULL;
edges[++depth] = trie_next_edge(node, prefix, ofs)) {
unsigned int eqbits = trie_prefix_equal_bits(node, prefix, ofs, mlen);
+
if (eqbits < node->nbits) {
/* Mismatch, nothing to be removed. This should never happen, as
* only rules in the classifier are ever removed. */