/*
 * Copyright (c) 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef CLASSIFIER_PRIVATE_H
#define CLASSIFIER_PRIVATE_H 1

/* Classifier internal definitions, subject to change at any time. */
28 /* A set of rules that all have the same fields wildcarded. */
30 struct cmap_node cmap_node; /* Within classifier's 'subtables_map'. */
32 /* These fields are only used by writers. */
33 int max_priority; /* Max priority of any rule in subtable. */
34 unsigned int max_count; /* Count of max_priority rules. */
36 /* Accessed by iterators. */
37 struct rculist rules_list; /* Unordered. */
39 /* Identical, but lower priority rules are not inserted to any of the
40 * following data structures. */
42 /* These fields are accessed by readers who care about wildcarding. */
43 const tag_type tag; /* Tag generated from mask for partitioning. */
44 const uint8_t n_indices; /* How many indices to use. */
45 const uint8_t index_ofs[CLS_MAX_INDICES]; /* u64 segment boundaries. */
46 unsigned int trie_plen[CLS_MAX_TRIES]; /* Trie prefix length in 'mask'
47 * (runtime configurable). */
48 const int ports_mask_len;
49 struct cmap indices[CLS_MAX_INDICES]; /* Staged lookup indices. */
50 rcu_trie_ptr ports_trie; /* NULL if none. */
52 /* These fields are accessed by all readers. */
53 struct cmap rules; /* Contains 'cls_match'es. */
54 const struct minimask mask; /* Wildcards for fields. */
55 /* 'mask' must be the last field. */
58 /* Associates a metadata value (that is, a value of the OpenFlow 1.1+ metadata
59 * field) with tags for the "cls_subtable"s that contain rules that match that
61 struct cls_partition {
62 struct cmap_node cmap_node; /* In struct classifier's 'partitions' map. */
63 ovs_be64 metadata; /* metadata value for this partition. */
64 tag_type tags; /* OR of each flow's cls_subtable tag. */
65 struct tag_tracker tracker; /* Tracks the bits in 'tags'. */
68 /* Internal representation of a rule in a "struct cls_subtable". */
70 /* Accessed by everybody. */
71 struct rculist list; /* Identical, lower-priority "cls_match"es. */
73 /* Accessed only by writers. */
74 struct cls_partition *partition;
76 /* Accessed by readers interested in wildcarding. */
77 const int priority; /* Larger numbers are higher priorities. */
78 struct cmap_node index_nodes[CLS_MAX_INDICES]; /* Within subtable's
80 /* Accessed by all readers. */
81 struct cmap_node cmap_node; /* Within struct cls_subtable 'rules'. */
83 /* Controls rule's visibility to lookups.
85 * When 'visibility' is:
87 * > 0 - rule is visible starting from version 'visibility'
88 * <= 0 - rule is invisible starting from version '-(visibility)'
90 * The minimum version number used in lookups is 1 (== CLS_NO_VERSION),
91 * which implies that when 'visibility' is:
93 * 1 - rule is visible in all lookup versions
94 * 0 - rule is invisible in all lookup versions. */
95 atomic_llong visibility;
97 const struct cls_rule *cls_rule;
98 OVSRCU_TYPE(struct cls_conjunction_set *) conj_set;
99 const struct miniflow flow; /* Matching rule. Mask is in the subtable. */
100 /* 'flow' must be the last field. */
104 cls_match_set_visibility(struct cls_match *rule, long long version)
106 atomic_store_relaxed(&rule->visibility, version);
110 cls_match_visible_in_version(const struct cls_match *rule, long long version)
112 long long visibility;
114 /* C11 does not want to access an atomic via a const object pointer. */
115 atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->visibility,
118 if (OVS_LIKELY(visibility > 0)) {
119 /* Rule is visible starting from version 'visibility'. */
120 return version >= visibility;
122 /* Rule is invisible starting from version '-visibility'. */
123 return version < -visibility;
128 cls_match_is_eventually_invisible(const struct cls_match *rule)
130 long long visibility;
132 /* C11 does not want to access an atomic via a const object pointer. */
133 atomic_read_relaxed(&CONST_CAST(struct cls_match *, rule)->visibility,
136 return visibility <= 0;
139 /* A longest-prefix match tree. */
141 uint32_t prefix; /* Prefix bits for this node, MSB first. */
142 uint8_t n_bits; /* Never zero, except for the root node. */
143 unsigned int n_rules; /* Number of rules that have this prefix. */
144 rcu_trie_ptr edges[2]; /* Both NULL if leaf. */
147 /* Max bits per node. Must fit in struct trie_node's 'prefix'.
148 * Also tested with 16, 8, and 5 to stress the implementation. */
149 #define TRIE_PREFIX_BITS 32
/* flow/miniflow/minimask/minimatch utilities.
 * These are only used by the classifier, so place them here to allow
 * for better optimization. */
155 static inline uint64_t
156 miniflow_get_map_in_range(const struct miniflow *miniflow,
157 uint8_t start, uint8_t end, unsigned int *offset)
159 uint64_t map = miniflow->map;
163 uint64_t msk = (UINT64_C(1) << start) - 1; /* 'start' LSBs set */
164 *offset = count_1bits(map & msk);
167 if (end < FLOW_U64S) {
168 uint64_t msk = (UINT64_C(1) << end) - 1; /* 'end' LSBs set */
174 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
175 * 'mask', given 'basis'.
177 * The hash values returned by this function are the same as those returned by
178 * miniflow_hash_in_minimask(), only the form of the arguments differ. */
179 static inline uint32_t
180 flow_hash_in_minimask(const struct flow *flow, const struct minimask *mask,
183 const uint64_t *mask_values = miniflow_get_values(&mask->masks);
184 const uint64_t *flow_u64 = (const uint64_t *)flow;
185 const uint64_t *p = mask_values;
190 MAP_FOR_EACH_INDEX(idx, mask->masks.map) {
191 hash = hash_add64(hash, flow_u64[idx] & *p++);
194 return hash_finish(hash, (p - mask_values) * 8);
197 /* Returns a hash value for the bits of 'flow' where there are 1-bits in
198 * 'mask', given 'basis'.
200 * The hash values returned by this function are the same as those returned by
201 * flow_hash_in_minimask(), only the form of the arguments differ. */
202 static inline uint32_t
203 miniflow_hash_in_minimask(const struct miniflow *flow,
204 const struct minimask *mask, uint32_t basis)
206 const uint64_t *mask_values = miniflow_get_values(&mask->masks);
207 const uint64_t *p = mask_values;
208 uint32_t hash = basis;
211 MINIFLOW_FOR_EACH_IN_MAP(flow_u64, flow, mask->masks.map) {
212 hash = hash_add64(hash, flow_u64 & *p++);
215 return hash_finish(hash, (p - mask_values) * 8);
218 /* Returns a hash value for the bits of range [start, end) in 'flow',
219 * where there are 1-bits in 'mask', given 'hash'.
221 * The hash values returned by this function are the same as those returned by
222 * minimatch_hash_range(), only the form of the arguments differ. */
223 static inline uint32_t
224 flow_hash_in_minimask_range(const struct flow *flow,
225 const struct minimask *mask,
226 uint8_t start, uint8_t end, uint32_t *basis)
228 const uint64_t *mask_values = miniflow_get_values(&mask->masks);
229 const uint64_t *flow_u64 = (const uint64_t *)flow;
233 uint32_t hash = *basis;
236 map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
237 p = mask_values + offset;
238 MAP_FOR_EACH_INDEX(idx, map) {
239 hash = hash_add64(hash, flow_u64[idx] & *p++);
242 *basis = hash; /* Allow continuation from the unfinished value. */
243 return hash_finish(hash, (p - mask_values) * 8);
246 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask. */
248 flow_wildcards_fold_minimask(struct flow_wildcards *wc,
249 const struct minimask *mask)
251 flow_union_with_miniflow(&wc->masks, &mask->masks);
254 /* Fold minimask 'mask''s wildcard mask into 'wc's wildcard mask
255 * in range [start, end). */
257 flow_wildcards_fold_minimask_range(struct flow_wildcards *wc,
258 const struct minimask *mask,
259 uint8_t start, uint8_t end)
261 uint64_t *dst_u64 = (uint64_t *)&wc->masks;
267 map = miniflow_get_map_in_range(&mask->masks, start, end, &offset);
268 p = miniflow_get_values(&mask->masks) + offset;
269 MAP_FOR_EACH_INDEX(idx, map) {
270 dst_u64[idx] |= *p++;
274 /* Returns a hash value for 'flow', given 'basis'. */
275 static inline uint32_t
276 miniflow_hash(const struct miniflow *flow, uint32_t basis)
278 const uint64_t *values = miniflow_get_values(flow);
279 const uint64_t *p = values;
280 uint32_t hash = basis;
281 uint64_t hash_map = 0;
284 for (map = flow->map; map; map = zero_rightmost_1bit(map)) {
286 hash = hash_add64(hash, *p);
287 hash_map |= rightmost_1bit(map);
291 hash = hash_add64(hash, hash_map);
293 return hash_finish(hash, p - values);
296 /* Returns a hash value for 'mask', given 'basis'. */
297 static inline uint32_t
298 minimask_hash(const struct minimask *mask, uint32_t basis)
300 return miniflow_hash(&mask->masks, basis);
303 /* Returns a hash value for 'match', given 'basis'. */
304 static inline uint32_t
305 minimatch_hash(const struct minimatch *match, uint32_t basis)
307 return miniflow_hash(&match->flow, minimask_hash(&match->mask, basis));
310 /* Returns a hash value for the bits of range [start, end) in 'minimatch',
313 * The hash values returned by this function are the same as those returned by
314 * flow_hash_in_minimask_range(), only the form of the arguments differ. */
315 static inline uint32_t
316 minimatch_hash_range(const struct minimatch *match, uint8_t start, uint8_t end,
320 const uint64_t *p, *q;
321 uint32_t hash = *basis;
324 n = count_1bits(miniflow_get_map_in_range(&match->mask.masks, start, end,
326 q = miniflow_get_values(&match->mask.masks) + offset;
327 p = miniflow_get_values(&match->flow) + offset;
329 for (i = 0; i < n; i++) {
330 hash = hash_add64(hash, p[i] & q[i]);
332 *basis = hash; /* Allow continuation from the unfinished value. */
333 return hash_finish(hash, (offset + n) * 8);