#include "flow.h"
#include "hash.h"
#include "rculist.h"
-#include "tag.h"
/* Classifier internal definitions, subject to change at any time. */
* following data structures. */
/* These fields are accessed by readers who care about wildcarding. */
- const tag_type tag; /* Tag generated from mask for partitioning. */
const uint8_t n_indices; /* How many indices to use. */
- const uint8_t index_ofs[CLS_MAX_INDICES]; /* u64 segment boundaries. */
+ const struct flowmap index_maps[CLS_MAX_INDICES + 1]; /* Stage maps. */
unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'
* (runtime configurable). */
const int ports_mask_len;
/* 'mask' must be the last field. */
};
-/* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
- * field) with tags for the "cls_subtable"s that contain rules that match that
- * metadata value. */
-struct cls_partition {
- struct cmap_node cmap_node; /* In struct classifier's 'partitions' map. */
- ovs_be64 metadata; /* metadata value for this partition. */
- tag_type tags; /* OR of each flow's cls_subtable tag. */
- struct tag_tracker tracker; /* Tracks the bits in 'tags'. */
-};
-
/* Internal representation of a rule in a "struct cls_subtable".
*
* The 'next' member is an element in a singly linked, null-terminated list.
OVSRCU_TYPE(struct cls_match *) next; /* Equal, lower-priority matches. */
OVSRCU_TYPE(struct cls_conjunction_set *) conj_set;
- /* Accessed only by writers. */
- struct cls_partition *partition;
-
/* Accessed by readers interested in wildcarding. */
const int priority; /* Larger numbers are higher priorities. */
struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
/* Accessed by all readers. */
struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */
- /* Controls rule's visibility to lookups.
- *
- * When 'visibility' is:
- *
- * > 0 - rule is visible starting from version 'visibility'
- * <= 0 - rule is invisible starting from version '-(visibility)'
+ /* Rule versioning.
*
- * The minimum version number used in lookups is 1 (== CLS_NO_VERSION),
- * which implies that when 'visibility' is:
- *
- * 1 - rule is visible in all lookup versions
- * 0 - rule is invisible in all lookup versions. */
- atomic_llong visibility;
+ * A 'remove_version' equal to CLS_NOT_REMOVED_VERSION (above the valid
+ * lookup version range) means that the rule has been added but not yet
+ * removed.
+ */
+ const cls_version_t add_version; /* Version rule was added in. */
+ ATOMIC(cls_version_t) remove_version; /* Version rule is removed in. */
const struct cls_rule *cls_rule;
const struct miniflow flow; /* Matching rule. Mask is in the subtable. */
void cls_match_free_cb(struct cls_match *);
static inline void
-cls_match_set_visibility(struct cls_match *rule, long long version)
+cls_match_set_remove_version(struct cls_match *rule, cls_version_t version)
{
- atomic_store_relaxed(&rule->visibility, version);
+ atomic_store_relaxed(&rule->remove_version, version);
}
static inline bool
-cls_match_visible_in_version(const struct cls_match *rule, long long version)
+cls_match_visible_in_version(const struct cls_match *rule,
+ cls_version_t version)
{
- long long visibility;
+ cls_version_t remove_version;
/* C11 does not want to access an atomic via a const object pointer. */
- atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->visibility,
- &visibility);
-
- if (OVS_LIKELY(visibility > 0)) {
- /* Rule is visible starting from version 'visibility'. */
- return version >= visibility;
- } else {
- /* Rule is invisible starting from version '-visibility'. */
- return version < -visibility;
- }
+ atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->remove_version,
+ &remove_version);
+
+ return rule->add_version <= version && version < remove_version;
}
static inline bool
cls_match_is_eventually_invisible(const struct cls_match *rule)
{
- long long visibility;
+ cls_version_t remove_version;
/* C11 does not want to access an atomic via a const object pointer. */
- atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->visibility,
- &visibility);
+ atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->remove_version,
+ &remove_version);
- return visibility <= 0;
+ return remove_version <= CLS_MAX_VERSION;
}
\f
* These are only used by the classifier, so place them here to allow
* for better optimization. */
-static inline uint64_t
-miniflow_get_map_in_range(const struct miniflow *miniflow,
- uint8_t start, uint8_t end, unsigned int *offset)
-{
- uint64_t map = miniflow->map;
- *offset = 0;
-
- if (start > 0) {
- uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
- *offset = count_1bits(map & msk);
- map &= ~msk;
- }
- if (end < FLOW_U64S) {
- uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
- map &= msk;
- }
- return map;
-}
-
/* Returns a hash value for the bits of 'flow' where there are 1-bits in
* 'mask', given 'basis'.
*
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
const uint64_t *flow_u64 = (const uint64_t *)flow;
const uint64_t *p = mask_values;
- uint32_t hash;
- int idx;
+ uint32_t hash = basis;
+ map_t map;
- hash = basis;
- MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ size_t idx;
+
+ MAP_FOR_EACH_INDEX (idx, map) {
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
+ }
+ flow_u64 += MAP_T_BITS;
}
return hash_finish(hash, (p - mask_values) * 8);
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
const uint64_t *p = mask_values;
uint32_t hash = basis;
- uint64_t flow_u64;
+ uint64_t value;
- MINIFLOW_FOR_EACH_IN_MAP(flow_u64, flow, mask->masks.map) {
- hash = hash_add64(hash, flow_u64 & *p++);
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(value, flow, mask->masks.map) {
+ hash = hash_add64(hash, value & *p++);
}
return hash_finish(hash, (p - mask_values) * 8);
}
-/* Returns a hash value for the bits of range [start, end) in 'flow',
- * where there are 1-bits in 'mask', given 'hash'.
+/* Returns a hash value for the values of 'flow', indicated by 'range', where
+ * there are 1-bits in 'mask', given 'basis'. 'range' must be a continuous
+ * subset of the bits in 'mask''s map, representing a continuous range of the
+ * minimask's mask data. '*offset' must be the number of 64-bit units of the
+ * minimask's data to skip to get to the first unit covered by 'range'. On
+ * return '*offset' is updated with the number of 64-bit units of the minimask
+ * consumed.
+ *
+ * Typically this function is called for successive ranges of minimask's masks,
+ * and the first invocation passes '*offset' as zero.
*
* The hash values returned by this function are the same as those returned by
* minimatch_hash_range(), only the form of the arguments differ. */
static inline uint32_t
flow_hash_in_minimask_range(const struct flow *flow,
const struct minimask *mask,
- uint8_t start, uint8_t end, uint32_t *basis)
+ const struct flowmap range,
+ unsigned int *offset,
+ uint32_t *basis)
{
const uint64_t *mask_values = miniflow_get_values(&mask->masks);
const uint64_t *flow_u64 = (const uint64_t *)flow;
- unsigned int offset;
- uint64_t map;
- const uint64_t *p;
+ const uint64_t *p = mask_values + *offset;
uint32_t hash = *basis;
- int idx;
+ map_t map;
+
+ FLOWMAP_FOR_EACH_MAP (map, range) {
+ size_t idx;
- map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
- p = mask_values + offset;
- MAP_FOR_EACH_INDEX(idx, map) {
- hash = hash_add64(hash, flow_u64[idx] & *p++);
+ MAP_FOR_EACH_INDEX (idx, map) {
+ hash = hash_add64(hash, flow_u64[idx] & *p++);
+ }
+ flow_u64 += MAP_T_BITS;
}
*basis = hash; /* Allow continuation from the unfinished value. */
- return hash_finish(hash, (p - mask_values) * 8);
+ *offset = p - mask_values;
+ return hash_finish(hash, *offset * 8);
}
/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
flow_union_with_miniflow(&wc->masks, &mask->masks);
}
-/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
- * in range [start, end). */
+/* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask for bits in
+ * 'fmap'. The 1-bits in 'fmap' must be a subset of the 1-bits in 'mask''s
+ * map. */
static inline void
-flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
- const struct minimask *mask,
- uint8_t start, uint8_t end)
+flow_wildcards_fold_minimask_in_map(struct flow_wildcards *wc,
+ const struct minimask *mask,
+ const struct flowmap fmap)
{
- uint64_t *dst_u64 = (uint64_t *)&wc->masks;
- unsigned int offset;
- uint64_t map;
- const uint64_t *p;
- int idx;
-
- map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
- p = miniflow_get_values(&mask->masks) + offset;
- MAP_FOR_EACH_INDEX(idx, map) {
- dst_u64[idx] |= *p++;
- }
+ flow_union_with_miniflow_subset(&wc->masks, &mask->masks, fmap);
}
-/* Returns a hash value for 'flow', given 'basis'. */
+/* Returns a hash value for 'mask', given 'basis'. */
static inline uint32_t
-miniflow_hash(const struct miniflow *flow, uint32_t basis)
+minimask_hash(const struct minimask *mask, uint32_t basis)
{
- const uint64_t *values = miniflow_get_values(flow);
- const uint64_t *p = values;
+ const uint64_t *p = miniflow_get_values(&mask->masks);
+ size_t n_values = miniflow_n_values(&mask->masks);
uint32_t hash = basis;
- uint64_t hash_map = 0;
- uint64_t map;
- for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
- if (*p) {
- hash = hash_add64(hash, *p);
- hash_map |= rightmost_1bit(map);
- }
- p++;
+ for (size_t i = 0; i < n_values; i++) {
+ hash = hash_add64(hash, *p++);
}
- hash = hash_add64(hash, hash_map);
-
- return hash_finish(hash, p - values);
-}
-/* Returns a hash value for 'mask', given 'basis'. */
-static inline uint32_t
-minimask_hash(const struct minimask *mask, uint32_t basis)
-{
- return miniflow_hash(&mask->masks, basis);
-}
+ map_t map;
+ FLOWMAP_FOR_EACH_MAP (map, mask->masks.map) {
+ hash = hash_add64(hash, map);
+ }
-/* Returns a hash value for 'match', given 'basis'. */
-static inline uint32_t
-minimatch_hash(const struct minimatch *match, uint32_t basis)
-{
- return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
+ return hash_finish(hash, n_values);
}
-/* Returns a hash value for the bits of range [start, end) in 'minimatch',
- * given 'basis'.
+/* Returns a hash value for the values of 'match->flow', indicated by 'range',
+ * where there are 1-bits in 'match->mask', given 'basis'. 'range' must be a
+ * continuous subset of the bits in the map of 'match', representing a
+ * continuous range of the mask data of 'match'. '*offset' must be the number
+ * of 64-bit units of the match data to skip to get to the first unit covered
+ * by 'range'. On return '*offset' is updated with the number of 64-bit units
+ * of the match consumed.
+ *
+ * Typically this function is called for successive ranges of the match's
+ * mask data, and the first invocation passes '*offset' as zero.
*
* The hash values returned by this function are the same as those returned by
* flow_hash_in_minimask_range(), only the form of the arguments differ. */
static inline uint32_t
-minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
+minimatch_hash_range(const struct minimatch *match,
+ const struct flowmap range, unsigned int *offset,
uint32_t *basis)
{
- unsigned int offset;
- const uint64_t *p, *q;
+ const uint64_t *p = miniflow_get_values(match->flow) + *offset;
+ const uint64_t *q = miniflow_get_values(&match->mask->masks) + *offset;
+ unsigned int n = flowmap_n_1bits(range);
uint32_t hash = *basis;
- int n, i;
-
- n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
- &offset));
- q = miniflow_get_values(&match->mask.masks) + offset;
- p = miniflow_get_values(&match->flow) + offset;
- for (i = 0; i < n; i++) {
+ for (unsigned int i = 0; i < n; i++) {
hash = hash_add64(hash, p[i] & q[i]);
}
*basis = hash; /* Allow continuation from the unfinished value. */
- return hash_finish(hash, (offset + n) * 8);
+ *offset += n;
+ return hash_finish(hash, *offset * 8);
}
#endif