2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 /* "White box" tests for classifier.
19 * With very few exceptions, these tests obtain complete coverage of every
20 * basic block and every branch in the classifier implementation, e.g. a clean
21 * report from "gcov -b". (Covering the exceptions would require finding
22 * collisions in the hash function used for flow data, etc.)
24 * This test should receive a clean report from "valgrind --leak-check=full":
25 * it frees every heap block that it allocates.
31 #include "byte-order.h"
32 #include "command-line.h"
37 #include "unaligned.h"
42 /* We need access to classifier internal definitions to be able to fully
43 * test them. The alternative would be to expose them all in the classifier
45 #include "classifier.c"
47 /* Fields in a rule. */
/* X-macro listing every flow field this test exercises, pairing each
 * 'struct flow' member with an all-caps name used to build enum values.
 * NOTE(review): the "#define CLS_FIELDS" line itself (original line 48)
 * is missing from this extraction — confirm against upstream. */
49 /* struct flow all-caps */ \
50 /* member name name */ \
51 /* ----------- -------- */ \
52 CLS_FIELD(tunnel.tun_id, TUN_ID) \
53 CLS_FIELD(metadata, METADATA) \
54 CLS_FIELD(nw_src, NW_SRC) \
55 CLS_FIELD(nw_dst, NW_DST) \
56 CLS_FIELD(in_port, IN_PORT) \
57 CLS_FIELD(vlan_tci, VLAN_TCI) \
58 CLS_FIELD(dl_type, DL_TYPE) \
59 CLS_FIELD(tp_src, TP_SRC) \
60 CLS_FIELD(tp_dst, TP_DST) \
61 CLS_FIELD(dl_src, DL_SRC) \
62 CLS_FIELD(dl_dst, DL_DST) \
63 CLS_FIELD(nw_proto, NW_PROTO) \
64 CLS_FIELD(nw_tos, NW_DSCP)
/* Expands CLS_FIELDS into enum constants CLS_F_IDX_<NAME>. */
68 * (These are also indexed into struct classifier's 'tables' array.) */
70 #define CLS_FIELD(MEMBER, NAME) CLS_F_IDX_##NAME,
76 /* Field information. */
/* Per-field metadata used by make_rule() to copy field values into a
 * 'struct match' by offset/length. */
78 int ofs; /* Offset in struct flow. */
79 int len; /* Length in bytes. */
80 const char *name; /* Name (for debugging). */
/* Table of field metadata, one entry per CLS_F_IDX_* value, generated by
 * re-expanding the CLS_FIELDS X-macro with offsetof/sizeof initializers. */
83 static const struct cls_field cls_fields[CLS_N_FIELDS] = {
84 #define CLS_FIELD(MEMBER, NAME) \
85 { offsetof(struct flow, MEMBER), \
86 sizeof ((struct flow *)0)->MEMBER, \
/* Test wrapper around a classifier rule; 'aux' carries arbitrary test
 * data that compare_classifiers() checks for equality across lookups. */
93 int aux; /* Auxiliary data. */
94 struct cls_rule cls_rule; /* Classifier rule data. */
/* Returns the 'struct test_rule' that embeds 'rule', or NULL if 'rule'
 * is NULL.  NOTE(review): function braces appear lost in extraction. */
97 static struct test_rule *
98 test_rule_from_cls_rule(const struct cls_rule *rule)
100 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
/* Destroys the classifier rule embedded in 'rule' (does not free the
 * wrapper itself — see free_rule() for that). */
104 test_rule_destroy(struct test_rule *rule)
107 cls_rule_destroy(&rule->cls_rule);
/* Forward declarations for the rule constructors/destructor used below. */
112 static struct test_rule *make_rule(int wc_fields, unsigned int priority,
114 static void free_rule(struct test_rule *);
115 static struct test_rule *clone_rule(const struct test_rule *);
117 /* Trivial (linear) classifier. */
/* 'rules' is kept sorted by decreasing priority (see tcls_insert());
 * 'allocated_rules' tracks the x2nrealloc capacity. */
120 size_t allocated_rules;
121 struct test_rule **rules;
/* Initializes 'tcls' as an empty trivial classifier. */
125 tcls_init(struct tcls *tcls)
128 tcls->allocated_rules = 0;
/* Frees every rule owned by 'tcls' along with its rule array. */
133 tcls_destroy(struct tcls *tcls)
138 for (i = 0; i < tcls->n_rules; i++) {
139 test_rule_destroy(tcls->rules[i]);
/* Returns true if 'tcls' contains no rules. */
146 tcls_is_empty(const struct tcls *tcls)
148 return tcls->n_rules == 0;
/* Inserts a clone of 'rule' into 'tcls', keeping the rule array sorted by
 * decreasing priority.  If an equal rule already exists it is replaced
 * in place.  Returns the inserted (cloned) rule, owned by 'tcls'. */
151 static struct test_rule *
152 tcls_insert(struct tcls *tcls, const struct test_rule *rule)
156 for (i = 0; i < tcls->n_rules; i++) {
157 const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
158 if (cls_rule_equal(pos, &rule->cls_rule)) {
/* Exact duplicate: replace the existing rule. */
160 free_rule(tcls->rules[i]);
161 tcls->rules[i] = clone_rule(rule);
162 return tcls->rules[i];
163 } else if (pos->priority < rule->cls_rule.priority) {
/* Grow the array if full, then shift lower-priority rules right to
 * open a slot at index 'i'. */
168 if (tcls->n_rules >= tcls->allocated_rules) {
169 tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
170 sizeof *tcls->rules);
172 if (i != tcls->n_rules) {
173 memmove(&tcls->rules[i + 1], &tcls->rules[i],
174 sizeof *tcls->rules * (tcls->n_rules - i));
176 tcls->rules[i] = clone_rule(rule);
178 return tcls->rules[i];
/* Removes and destroys the entry of 'cls' that matches 'rule', closing
 * the gap in the rule array with memmove. */
182 tcls_remove(struct tcls *cls, const struct test_rule *rule)
186 for (i = 0; i < cls->n_rules; i++) {
187 struct test_rule *pos = cls->rules[i];
189 test_rule_destroy(pos);
191 memmove(&cls->rules[i], &cls->rules[i + 1],
192 sizeof *cls->rules * (cls->n_rules - i - 1));
/* Returns true if the wildcarded rule 'wild_' matches the concrete flow
 * 'fixed'.  Expands the rule's minimatch and compares each tested field
 * under its mask; for nw_tos only the DSCP bits are compared. */
202 match(const struct cls_rule *wild_, const struct flow *fixed)
207 minimatch_expand(&wild_->match, &wild);
208 for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
/* Each arm computes 'eq' = "field matches under mask". */
211 if (f_idx == CLS_F_IDX_NW_SRC) {
212 eq = !((fixed->nw_src ^ wild.flow.nw_src)
213 & wild.wc.masks.nw_src);
214 } else if (f_idx == CLS_F_IDX_NW_DST) {
215 eq = !((fixed->nw_dst ^ wild.flow.nw_dst)
216 & wild.wc.masks.nw_dst);
217 } else if (f_idx == CLS_F_IDX_TP_SRC) {
218 eq = !((fixed->tp_src ^ wild.flow.tp_src)
219 & wild.wc.masks.tp_src);
220 } else if (f_idx == CLS_F_IDX_TP_DST) {
221 eq = !((fixed->tp_dst ^ wild.flow.tp_dst)
222 & wild.wc.masks.tp_dst);
223 } else if (f_idx == CLS_F_IDX_DL_SRC) {
224 eq = eth_addr_equal_except(fixed->dl_src, wild.flow.dl_src,
225 wild.wc.masks.dl_src);
226 } else if (f_idx == CLS_F_IDX_DL_DST) {
227 eq = eth_addr_equal_except(fixed->dl_dst, wild.flow.dl_dst,
228 wild.wc.masks.dl_dst);
229 } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
230 eq = !((fixed->vlan_tci ^ wild.flow.vlan_tci)
231 & wild.wc.masks.vlan_tci);
232 } else if (f_idx == CLS_F_IDX_TUN_ID) {
233 eq = !((fixed->tunnel.tun_id ^ wild.flow.tunnel.tun_id)
234 & wild.wc.masks.tunnel.tun_id);
235 } else if (f_idx == CLS_F_IDX_METADATA) {
236 eq = !((fixed->metadata ^ wild.flow.metadata)
237 & wild.wc.masks.metadata);
238 } else if (f_idx == CLS_F_IDX_NW_DSCP) {
/* Only the DSCP portion of nw_tos participates in this field. */
239 eq = !((fixed->nw_tos ^ wild.flow.nw_tos) &
240 (wild.wc.masks.nw_tos & IP_DSCP_MASK));
241 } else if (f_idx == CLS_F_IDX_NW_PROTO) {
242 eq = !((fixed->nw_proto ^ wild.flow.nw_proto)
243 & wild.wc.masks.nw_proto);
244 } else if (f_idx == CLS_F_IDX_DL_TYPE) {
245 eq = !((fixed->dl_type ^ wild.flow.dl_type)
246 & wild.wc.masks.dl_type);
247 } else if (f_idx == CLS_F_IDX_IN_PORT) {
248 eq = !((fixed->in_port.ofp_port
249 ^ wild.flow.in_port.ofp_port)
250 & wild.wc.masks.in_port.ofp_port);
/* Linear-scan lookup: returns the first (highest-priority, since the
 * array is priority-sorted) rule in 'cls' matching 'flow', or NULL. */
262 static struct cls_rule *
263 tcls_lookup(const struct tcls *cls, const struct flow *flow)
267 for (i = 0; i < cls->n_rules; i++) {
268 struct test_rule *pos = cls->rules[i];
269 if (match(&pos->cls_rule, flow)) {
270 return &pos->cls_rule;
/* Deletes every rule in 'cls' that 'target' matches, i.e. rules whose
 * mask has no bits beyond target's and whose flow matches 'target'. */
277 tcls_delete_matches(struct tcls *cls, const struct cls_rule *target)
281 for (i = 0; i < cls->n_rules; ) {
282 struct test_rule *pos = cls->rules[i];
283 if (!minimask_has_extra(&pos->cls_rule.match.mask,
284 &target->match.mask)) {
287 miniflow_expand(&pos->cls_rule.match.flow, &flow);
288 if (match(target, &flow)) {
/* Note: tcls_remove() shifts the array, so 'i' is not advanced
 * on removal (loop increment handled in the omitted else path). */
289 tcls_remove(cls, pos);
/* Two candidate values per field; tests combine these to enumerate every
 * distinct flow the classifier tests can see (N_FLOW_VALUES total). */
297 static ovs_be32 nw_src_values[] = { CONSTANT_HTONL(0xc0a80001),
298 CONSTANT_HTONL(0xc0a04455) };
299 static ovs_be32 nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
300 CONSTANT_HTONL(0xc0a04455) };
301 static ovs_be64 tun_id_values[] = {
303 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
304 static ovs_be64 metadata_values[] = {
306 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
307 static ofp_port_t in_port_values[] = { OFP_PORT_C(1), OFPP_LOCAL };
308 static ovs_be16 vlan_tci_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
309 static ovs_be16 dl_type_values[]
310 = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) };
311 static ovs_be16 tp_src_values[] = { CONSTANT_HTONS(49362),
312 CONSTANT_HTONS(80) };
313 static ovs_be16 tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) };
314 static uint8_t dl_src_values[][6] = { { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
315 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
316 static uint8_t dl_dst_values[][6] = { { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
317 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
318 static uint8_t nw_proto_values[] = { IPPROTO_TCP, IPPROTO_ICMP };
319 static uint8_t nw_dscp_values[] = { 48, 0 };
/* values[f][i] points at the i'th candidate value for field 'f'; filled
 * in by the initializer below so make_rule() can memcpy by field index. */
321 static void *values[CLS_N_FIELDS][2];
326 values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0];
327 values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1];
329 values[CLS_F_IDX_METADATA][0] = &metadata_values[0];
330 values[CLS_F_IDX_METADATA][1] = &metadata_values[1];
332 values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
333 values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
335 values[CLS_F_IDX_VLAN_TCI][0] = &vlan_tci_values[0];
336 values[CLS_F_IDX_VLAN_TCI][1] = &vlan_tci_values[1];
338 values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
339 values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
341 values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
342 values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
344 values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
345 values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
347 values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
348 values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
350 values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
351 values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
353 values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
354 values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
356 values[CLS_F_IDX_NW_DSCP][0] = &nw_dscp_values[0];
357 values[CLS_F_IDX_NW_DSCP][1] = &nw_dscp_values[1];
359 values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
360 values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
362 values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
363 values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
/* Counts of candidate values per field, and the total number of distinct
 * flows obtainable by combining one value from each field. */
366 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
367 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
368 #define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values)
369 #define N_METADATA_VALUES ARRAY_SIZE(metadata_values)
370 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
371 #define N_VLAN_TCI_VALUES ARRAY_SIZE(vlan_tci_values)
372 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
373 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
374 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
375 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
376 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
377 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
378 #define N_NW_DSCP_VALUES ARRAY_SIZE(nw_dscp_values)
380 #define N_FLOW_VALUES (N_NW_SRC_VALUES * \
384 N_VLAN_TCI_VALUES * \
390 N_NW_PROTO_VALUES * \
/* Extracts the next mixed-radix digit from '*x': returns *x mod n_values
 * and divides *x by n_values (division line omitted in extraction). */
394 get_value(unsigned int *x, unsigned n_values)
396 unsigned int rem = *x % n_values;
/* Cross-checks the real classifier 'cls' against the trivial reference
 * implementation 'tcls': for 'confidence' random flows, both must agree
 * on whether a rule matches and on which rule (same cls_rule and aux). */
402 compare_classifiers(struct classifier *cls, struct tcls *tcls)
404 static const int confidence = 500;
407 assert(classifier_count(cls) == tcls->n_rules);
408 for (i = 0; i < confidence; i++) {
409 struct cls_rule *cr0, *cr1, *cr2;
411 struct flow_wildcards wc;
/* Decode a random index in [0, N_FLOW_VALUES) into one concrete
 * value per flow field, mixed-radix style via get_value(). */
414 flow_wildcards_init_catchall(&wc);
415 x = random_range(N_FLOW_VALUES);
416 memset(&flow, 0, sizeof flow);
417 flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
418 flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
419 flow.tunnel.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
420 flow.metadata = metadata_values[get_value(&x, N_METADATA_VALUES)];
421 flow.in_port.ofp_port = in_port_values[get_value(&x,
423 flow.vlan_tci = vlan_tci_values[get_value(&x, N_VLAN_TCI_VALUES)];
424 flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
425 flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
426 flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
427 memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
429 memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
431 flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
432 flow.nw_tos = nw_dscp_values[get_value(&x, N_NW_DSCP_VALUES)];
434 /* This assertion is here to suppress a GCC 4.9 array-bounds warning */
435 ovs_assert(cls->n_tries <= CLS_MAX_TRIES);
/* Both classifiers must agree on hit/miss and on the winning rule. */
437 cr0 = classifier_lookup(cls, &flow, &wc);
438 cr1 = tcls_lookup(tcls, &flow);
439 assert((cr0 == NULL) == (cr1 == NULL));
441 const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
442 const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
444 assert(cls_rule_equal(cr0, cr1));
445 assert(tr0->aux == tr1->aux);
/* Lookup without wildcard tracking must give the same answer. */
447 cr2 = classifier_lookup(cls, &flow, NULL);
/* Removes and frees every rule in 'cls', then destroys 'cls' itself. */
453 destroy_classifier(struct classifier *cls)
455 struct test_rule *rule;
457 CLS_FOR_EACH_SAFE (rule, cls_rule, cls) {
458 classifier_remove(cls, &rule->cls_rule);
461 classifier_destroy(cls);
/* Asserts that 'pvec' is sorted by non-increasing priority, aborting
 * with a diagnostic on the first out-of-order pair. */
465 pvector_verify(const struct pvector *pvec)
467 void *ptr OVS_UNUSED;
468 unsigned int priority, prev_priority = UINT_MAX;
470 PVECTOR_FOR_EACH (ptr, pvec) {
/* 'cursor__' is the iterator variable declared by the macro. */
471 priority = cursor__.vector[cursor__.entry_idx].priority;
472 if (priority > prev_priority) {
473 VLOG_ABORT("Priority vector is out of order (%u > %u)",
474 priority, prev_priority);
476 prev_priority = priority;
/* Recursively checks trie invariants, returning the rule count of the
 * subtree rooted at 'trie'; 'ofs'/'n_bits' track prefix position. */
481 trie_verify(const rcu_trie_ptr *trie, unsigned int ofs, unsigned int n_bits)
483 const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
486 assert(node->n_rules == 0 || node->n_bits > 0);
488 assert((ofs > 0 || (ofs == 0 && node->n_bits == 0)) && ofs <= n_bits);
491 + trie_verify(&node->edges[0], ofs, n_bits)
492 + trie_verify(&node->edges[1], ofs, n_bits)
/* Verifies every prefix trie of 'cls'; the tries can never hold more
 * rules than the classifier itself. */
498 verify_tries(struct classifier *cls)
500 unsigned int n_rules = 0;
503 for (i = 0; i < cls->n_tries; i++) {
504 n_rules += trie_verify(&cls->tries[i].root, 0,
505 cls->tries[i].field->n_bits);
507 ovs_mutex_lock(&cls->mutex);
508 assert(n_rules <= cls->n_rules);
509 ovs_mutex_unlock(&cls->mutex);
/* Validates the internal structure of 'cls': subtable bookkeeping,
 * priority ordering within rule lists, and the ports trie.  Expected
 * counts of -1 mean "don't check". */
513 check_tables(const struct classifier *cls, int n_tables, int n_rules,
516 const struct cls_subtable *table;
517 struct test_rule *test_rule;
518 int found_tables = 0;
521 int found_rules2 = 0;
523 pvector_verify(&cls->subtables);
524 CMAP_FOR_EACH (table, cmap_node, &cls->subtables_map) {
525 const struct cls_match *head;
526 unsigned int max_priority = 0;
527 unsigned int max_count = 0;
529 const struct cls_subtable *iter;
531 /* Locate the subtable from 'subtables'. */
532 PVECTOR_FOR_EACH (iter, &cls->subtables) {
535 VLOG_ABORT("Subtable %p duplicated in 'subtables'.",
542 VLOG_ABORT("Subtable %p not found from 'subtables'.", table);
545 assert(!cmap_is_empty(&table->rules));
/* The ports trie should account for every rule iff a ports mask
 * is in use for this subtable. */
547 ovs_mutex_lock(&cls->mutex);
548 assert(trie_verify(&table->ports_trie, 0, table->ports_mask_len)
549 == (table->ports_mask_len ? table->n_rules : 0));
550 ovs_mutex_unlock(&cls->mutex);
/* Walk each identical-match list; it must be sorted by strictly
 * decreasing priority, bounded by the subtable's max_priority. */
553 CMAP_FOR_EACH (head, cmap_node, &table->rules) {
554 unsigned int prev_priority = UINT_MAX;
555 const struct cls_match *rule;
557 if (head->priority > max_priority) {
558 max_priority = head->priority;
560 } else if (head->priority == max_priority) {
565 ovs_mutex_lock(&cls->mutex);
566 LIST_FOR_EACH (rule, list, &head->list) {
567 assert(rule->priority < prev_priority);
568 assert(rule->priority <= table->max_priority);
570 prev_priority = rule->priority;
573 ovs_mutex_unlock(&cls->mutex);
574 assert(classifier_find_rule_exactly(cls, rule->cls_rule)
576 ovs_mutex_lock(&cls->mutex);
578 ovs_mutex_unlock(&cls->mutex);
580 ovs_mutex_lock(&cls->mutex);
581 assert(table->max_priority == max_priority);
582 assert(table->max_count == max_count);
583 ovs_mutex_unlock(&cls->mutex);
/* Aggregate counts must agree across both subtable containers and
 * with the caller-supplied expectations (when not -1). */
586 assert(found_tables == cmap_count(&cls->subtables_map));
587 assert(found_tables == pvector_count(&cls->subtables));
588 assert(n_tables == -1 || n_tables == cmap_count(&cls->subtables_map));
589 assert(n_rules == -1 || found_rules == n_rules);
590 assert(n_dups == -1 || found_dups == n_dups);
/* Re-count rules via iteration as an independent cross-check. */
592 CLS_FOR_EACH (test_rule, cls_rule, cls) {
595 assert(found_rules == found_rules2);
/* Builds a test rule.  'wc_fields' is a bitmap of CLS_F_IDX_* indexes of
 * fields to match exactly; 'value_pat' selects, per field, which of the
 * two candidate values to use.  Catch-all rules (wc_fields == 0) get
 * priority UINT_MAX so they never tie with wildcarded rules. */
598 static struct test_rule *
599 make_rule(int wc_fields, unsigned int priority, int value_pat)
601 const struct cls_field *f;
602 struct test_rule *rule;
605 match_init_catchall(&match);
606 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
607 int f_idx = f - cls_fields;
608 int value_idx = (value_pat & (1u << f_idx)) != 0;
/* Copy the chosen candidate value into the flow by offset. */
609 memcpy((char *) &match.flow + f->ofs,
610 values[f_idx][value_idx], f->len);
/* Set an exact-match mask for each field selected by wc_fields. */
612 if (f_idx == CLS_F_IDX_NW_SRC) {
613 match.wc.masks.nw_src = OVS_BE32_MAX;
614 } else if (f_idx == CLS_F_IDX_NW_DST) {
615 match.wc.masks.nw_dst = OVS_BE32_MAX;
616 } else if (f_idx == CLS_F_IDX_TP_SRC) {
617 match.wc.masks.tp_src = OVS_BE16_MAX;
618 } else if (f_idx == CLS_F_IDX_TP_DST) {
619 match.wc.masks.tp_dst = OVS_BE16_MAX;
620 } else if (f_idx == CLS_F_IDX_DL_SRC) {
621 memset(match.wc.masks.dl_src, 0xff, ETH_ADDR_LEN);
622 } else if (f_idx == CLS_F_IDX_DL_DST) {
623 memset(match.wc.masks.dl_dst, 0xff, ETH_ADDR_LEN);
624 } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
625 match.wc.masks.vlan_tci = OVS_BE16_MAX;
626 } else if (f_idx == CLS_F_IDX_TUN_ID) {
627 match.wc.masks.tunnel.tun_id = OVS_BE64_MAX;
628 } else if (f_idx == CLS_F_IDX_METADATA) {
629 match.wc.masks.metadata = OVS_BE64_MAX;
630 } else if (f_idx == CLS_F_IDX_NW_DSCP) {
631 match.wc.masks.nw_tos |= IP_DSCP_MASK;
632 } else if (f_idx == CLS_F_IDX_NW_PROTO) {
633 match.wc.masks.nw_proto = UINT8_MAX;
634 } else if (f_idx == CLS_F_IDX_DL_TYPE) {
635 match.wc.masks.dl_type = OVS_BE16_MAX;
636 } else if (f_idx == CLS_F_IDX_IN_PORT) {
637 match.wc.masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX);
643 rule = xzalloc(sizeof *rule);
644 cls_rule_init(&rule->cls_rule, &match, wc_fields ? priority : UINT_MAX);
/* Returns a fresh heap copy of 'src' (cls_rule cloned deeply). */
648 static struct test_rule *
649 clone_rule(const struct test_rule *src)
651 struct test_rule *dst;
653 dst = xmalloc(sizeof *dst);
655 cls_rule_clone(&dst->cls_rule, &src->cls_rule);
/* Destroys the embedded cls_rule and frees the wrapper. */
660 free_rule(struct test_rule *rule)
662 cls_rule_destroy(&rule->cls_rule);
/* Fisher-Yates shuffle of the 'n' unsigned ints at 'p'. */
667 shuffle(unsigned int *p, size_t n)
669 for (; n > 1; n--, p++) {
670 unsigned int *q = &p[random_range(n)];
671 unsigned int tmp = *p;
/* Same shuffle, for uint32_t arrays. */
678 shuffle_u32s(uint32_t *p, size_t n)
680 for (; n > 1; n--, p++) {
681 uint32_t *q = &p[random_range(n)];
688 /* Classifier tests. */
/* Prefix-trie fields installed into every test classifier. */
690 static enum mf_field_id trie_fields[2] = {
691 MFF_IPV4_DST, MFF_IPV4_SRC
/* Enables prefix tries on IPv4 src/dst for 'cls'. */
695 set_prefix_fields(struct classifier *cls)
698 classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
702 /* Tests an empty classifier. */
704 test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
706 struct classifier cls;
709 classifier_init(&cls, flow_segment_u32s);
710 set_prefix_fields(&cls);
712 assert(classifier_is_empty(&cls));
713 assert(tcls_is_empty(&tcls));
714 compare_classifiers(&cls, &tcls);
715 classifier_destroy(&cls);
719 /* Destroys a null classifier. */
/* classifier_destroy(NULL) must be a harmless no-op. */
721 test_destroy_null(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
723 classifier_destroy(NULL);
726 /* Tests classification with one rule at a time. */
/* Iterates over every possible wildcard bitmap, inserting then removing
 * a single rule, cross-checking against the trivial classifier. */
728 test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
730 unsigned int wc_fields; /* Hilarious. */
732 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
733 struct classifier cls;
734 struct test_rule *rule, *tcls_rule;
/* Priority derived from wc_fields so it is deterministic but varied. */
737 rule = make_rule(wc_fields,
738 hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
740 classifier_init(&cls, flow_segment_u32s);
741 set_prefix_fields(&cls);
744 tcls_rule = tcls_insert(&tcls, rule);
745 classifier_insert(&cls, &rule->cls_rule);
746 compare_classifiers(&cls, &tcls);
747 check_tables(&cls, 1, 1, 0);
749 classifier_remove(&cls, &rule->cls_rule);
750 tcls_remove(&tcls, tcls_rule);
751 assert(classifier_is_empty(&cls));
752 assert(tcls_is_empty(&tcls));
753 compare_classifiers(&cls, &tcls);
756 classifier_destroy(&cls);
761 /* Tests replacing one rule by another. */
763 test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
765 unsigned int wc_fields;
767 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
768 struct classifier cls;
769 struct test_rule *rule1;
770 struct test_rule *rule2;
/* Two rules with identical match and priority: inserting the second
 * must displace the first. */
773 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
774 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX);
778 classifier_init(&cls, flow_segment_u32s);
779 set_prefix_fields(&cls);
781 tcls_insert(&tcls, rule1);
782 classifier_insert(&cls, &rule1->cls_rule);
783 compare_classifiers(&cls, &tcls);
784 check_tables(&cls, 1, 1, 0);
788 tcls_insert(&tcls, rule2);
/* classifier_replace() returns the displaced rule, which must be
 * rule1. */
790 assert(test_rule_from_cls_rule(
791 classifier_replace(&cls, &rule2->cls_rule)) == rule1);
793 compare_classifiers(&cls, &tcls);
794 check_tables(&cls, 1, 1, 0);
797 destroy_classifier(&cls);
/* Returns n_items! (factorial). */
802 factorial(int n_items)
807 for (i = 2; i <= n_items; i++) {
/* Reverses the 'n' ints at 'a' in place. */
822 reverse(int *a, int n)
826 for (i = 0; i < n / 2; i++) {
/* Advances 'a' (length 'n') to the next lexicographic permutation;
 * standard next-permutation algorithm (find pivot, swap, reverse tail). */
833 next_permutation(int *a, int n)
837 for (k = n - 2; k >= 0; k--) {
838 if (a[k] < a[k + 1]) {
841 for (l = n - 1; ; l--) {
844 reverse(a + (k + 1), n - (k + 1));
853 /* Tests classification with rules that have the same matching criteria. */
/* Exercises the identical-match rule list: N_RULES rules with the same
 * match but (possibly duplicated) priorities, inserted/removed in every
 * permutation of 2*N_RULES insert/remove operations. */
855 test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
857 enum { N_RULES = 3 };
860 for (n_pris = N_RULES; n_pris >= 1; n_pris--) {
861 int ops[N_RULES * 2];
/* Build priorities with exactly 'n_pris' distinct values. */
867 for (i = 1; i < N_RULES; i++) {
868 pris[i] = pris[i - 1] + (n_pris > i);
871 for (i = 0; i < N_RULES * 2; i++) {
877 struct test_rule *rules[N_RULES];
878 struct test_rule *tcls_rules[N_RULES];
879 int pri_rules[N_RULES];
880 struct classifier cls;
885 for (i = 0; i < N_RULES; i++) {
886 rules[i] = make_rule(456, pris[i], 0);
887 tcls_rules[i] = NULL;
891 classifier_init(&cls, flow_segment_u32s);
892 set_prefix_fields(&cls);
895 for (i = 0; i < ARRAY_SIZE(ops); i++) {
/* Even occurrences of rule 'j' insert it, odd ones remove it. */
899 if (!tcls_rules[j]) {
900 struct test_rule *displaced_rule;
902 tcls_rules[j] = tcls_insert(&tcls, rules[j]);
903 displaced_rule = test_rule_from_cls_rule(
904 classifier_replace(&cls, &rules[j]->cls_rule));
/* Same-priority insert must displace the earlier rule. */
905 if (pri_rules[pris[j]] >= 0) {
906 int k = pri_rules[pris[j]];
907 assert(displaced_rule != NULL);
908 assert(displaced_rule != rules[j]);
909 assert(pris[j] == displaced_rule->cls_rule.priority);
910 tcls_rules[k] = NULL;
912 assert(displaced_rule == NULL);
914 pri_rules[pris[j]] = j;
916 classifier_remove(&cls, &rules[j]->cls_rule);
917 tcls_remove(&tcls, tcls_rules[j]);
918 tcls_rules[j] = NULL;
919 pri_rules[pris[j]] = -1;
921 compare_classifiers(&cls, &tcls);
924 for (m = 0; m < N_RULES; m++) {
925 n += tcls_rules[m] != NULL;
927 check_tables(&cls, n > 0, n, n - 1);
930 for (i = 0; i < N_RULES; i++) {
931 if (rules[i]->cls_rule.cls_match) {
932 classifier_remove(&cls, &rules[i]->cls_rule);
936 classifier_destroy(&cls);
938 } while (next_permutation(ops, ARRAY_SIZE(ops)));
939 assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES));
/* Population count via Kernighan's trick (clear lowest set bit). */
944 count_ones(unsigned long int x)
949 x = zero_rightmost_1bit(x);
/* Returns true if 'value' occurs among the first 'n' elements of
 * 'array'. */
957 array_contains(int *array, int n, int value)
961 for (i = 0; i < n; i++) {
962 if (array[i] == value) {
970 /* Tests classification with two rules at a time that fall into the same
971 * table but different lists. */
973 test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
977 for (iteration = 0; iteration < 50; iteration++) {
978 enum { N_RULES = 20 };
979 struct test_rule *rules[N_RULES];
980 struct test_rule *tcls_rules[N_RULES];
981 struct classifier cls;
983 int value_pats[N_RULES];
/* Pick a wildcard set that leaves enough non-wildcarded bits to
 * generate N_RULES distinct value patterns. */
989 wcf = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
990 value_mask = ~wcf & ((1u << CLS_N_FIELDS) - 1);
991 } while ((1 << count_ones(value_mask)) < N_RULES);
993 classifier_init(&cls, flow_segment_u32s);
994 set_prefix_fields(&cls);
997 for (i = 0; i < N_RULES; i++) {
998 unsigned int priority = random_uint32();
/* Ensure each rule's value pattern is unique. */
1001 value_pats[i] = random_uint32() & value_mask;
1002 } while (array_contains(value_pats, i, value_pats[i]));
1004 rules[i] = make_rule(wcf, priority, value_pats[i]);
1005 tcls_rules[i] = tcls_insert(&tcls, rules[i]);
1007 classifier_insert(&cls, &rules[i]->cls_rule);
1008 compare_classifiers(&cls, &tcls);
1010 check_tables(&cls, 1, i + 1, 0);
1013 for (i = 0; i < N_RULES; i++) {
1014 tcls_remove(&tcls, tcls_rules[i]);
1015 classifier_remove(&cls, &rules[i]->cls_rule);
1016 compare_classifiers(&cls, &tcls);
1017 free_rule(rules[i]);
1019 check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0);
1022 classifier_destroy(&cls);
1023 tcls_destroy(&tcls);
1027 /* Tests classification with many rules at a time that fall into random lists
/* Core of the multi-table tests: distributes MAX_RULES rules across
 * 'n_tables' distinct wildcard sets, then deletes by random targets. */
1030 test_many_rules_in_n_tables(int n_tables)
1032 enum { MAX_RULES = 50 };
/* Choose n_tables distinct wildcard bitmaps. */
1037 assert(n_tables < 10);
1038 for (i = 0; i < n_tables; i++) {
1040 wcfs[i] = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
1041 } while (array_contains(wcfs, i, wcfs[i]));
1044 for (iteration = 0; iteration < 30; iteration++) {
1045 unsigned int priorities[MAX_RULES];
1046 struct classifier cls;
/* Re-seed per iteration so failures are reproducible. */
1049 random_set_seed(iteration + 1);
1050 for (i = 0; i < MAX_RULES; i++) {
1051 priorities[i] = i * 129;
1053 shuffle(priorities, ARRAY_SIZE(priorities));
1055 classifier_init(&cls, flow_segment_u32s);
1056 set_prefix_fields(&cls);
1059 for (i = 0; i < MAX_RULES; i++) {
1060 struct test_rule *rule;
1061 unsigned int priority = priorities[i];
1062 int wcf = wcfs[random_range(n_tables)];
1063 int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
1064 rule = make_rule(wcf, priority, value_pat);
1065 tcls_insert(&tcls, rule);
1066 classifier_insert(&cls, &rule->cls_rule);
1067 compare_classifiers(&cls, &tcls);
1068 check_tables(&cls, -1, i + 1, -1);
/* Drain the classifier by repeatedly deleting everything that
 * matches a randomly chosen existing rule. */
1071 while (!classifier_is_empty(&cls)) {
1072 struct test_rule *target;
1073 struct test_rule *rule;
1075 target = clone_rule(tcls.rules[random_range(tcls.n_rules)]);
1077 CLS_FOR_EACH_TARGET_SAFE (rule, cls_rule, &cls,
1078 &target->cls_rule) {
1079 classifier_remove(&cls, &rule->cls_rule);
1083 tcls_delete_matches(&tcls, &target->cls_rule);
1084 compare_classifiers(&cls, &tcls);
1085 check_tables(&cls, -1, -1, -1);
1089 destroy_classifier(&cls);
1090 tcls_destroy(&tcls);
/* Thin wrappers selecting the table count for the command table. */
1095 test_many_rules_in_two_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1097 test_many_rules_in_n_tables(2);
1101 test_many_rules_in_five_tables(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1103 test_many_rules_in_n_tables(5);
1106 /* Miniflow tests. */
/* Returns one of a fixed set of "interesting" 32-bit values chosen to
 * stress miniflow bit handling. */
1111 static const uint32_t values[] =
1112 { 0xffffffff, 0xaaaaaaaa, 0x55555555, 0x80000000,
1113 0x00000001, 0xface0000, 0x00d00d1e, 0xdeadbeef };
1115 return values[random_range(ARRAY_SIZE(values))];
/* Consumes 'n' choices from '*idxp'; helper for enumerating a flat
 * sequence of test cases by index. */
1119 choose(unsigned int n, unsigned int *idxp)
/* Fills 'n_consecutive' consecutive u32s of 'flow' (at a position chosen
 * from '*idxp') with random values; returns true if this case applied. */
1130 init_consecutive_values(int n_consecutive, struct flow *flow,
1133 uint32_t *flow_u32 = (uint32_t *) flow;
1135 if (choose(FLOW_U32S - n_consecutive + 1, idxp)) {
1138 for (i = 0; i < n_consecutive; i++) {
1139 flow_u32[*idxp + i] = random_value();
/* Enumerates a deterministic family of test flows by index 'idx':
 * zero flow, short/long consecutive runs, two-nonzero patterns, and a
 * tail of random flows.  Returns false when 'idx' is exhausted. */
1148 next_random_flow(struct flow *flow, unsigned int idx)
1150 uint32_t *flow_u32 = (uint32_t *) flow;
1153 memset(flow, 0, sizeof *flow);
1156 if (choose(1, &idx)) {
1160 /* All flows with a small number of consecutive nonzero values. */
1161 for (i = 1; i <= 4; i++) {
1162 if (init_consecutive_values(i, flow, &idx)) {
1167 /* All flows with a large number of consecutive nonzero values. */
1168 for (i = FLOW_U32S - 4; i <= FLOW_U32S; i++) {
1169 if (init_consecutive_values(i, flow, &idx)) {
1174 /* All flows with exactly two nonconsecutive nonzero values. */
1175 if (choose((FLOW_U32S - 1) * (FLOW_U32S - 2) / 2, &idx)) {
1178 for (ofs1 = 0; ofs1 < FLOW_U32S - 2; ofs1++) {
1181 for (ofs2 = ofs1 + 2; ofs2 < FLOW_U32S; ofs2++) {
1182 if (choose(1, &idx)) {
1183 flow_u32[ofs1] = random_value();
1184 flow_u32[ofs2] = random_value();
1192 /* 16 randomly chosen flows with N >= 3 nonzero values. */
1193 if (choose(16 * (FLOW_U32S - 4), &idx)) {
1194 int n = idx / 16 + 3;
1197 for (i = 0; i < n; i++) {
1198 flow_u32[i] = random_value();
1200 shuffle_u32s(flow_u32, FLOW_U32S);
/* Picks a uniformly random flow from the next_random_flow() family;
 * lazily discovers the family size 'max' on first call. */
1209 any_random_flow(struct flow *flow)
1211 static unsigned int max;
1213 while (next_random_flow(flow, max)) {
1218 next_random_flow(flow, random_range(max));
/* Flips one random bit of 'flow' in each u32 where 'mask' has any bit
 * set, guaranteeing the result differs under the mask. */
1222 toggle_masked_flow_bits(struct flow *flow, const struct flow_wildcards *mask)
1224 const uint32_t *mask_u32 = (const uint32_t *) &mask->masks;
1225 uint32_t *flow_u32 = (uint32_t *) flow;
1228 for (i = 0; i < FLOW_U32S; i++) {
1229 if (mask_u32[i] != 0) {
1233 bit = 1u << random_range(32);
1234 } while (!(bit & mask_u32[i]));
/* Clears one random set bit in each nonzero u32 of 'mask', producing a
 * strictly narrower mask. */
1241 wildcard_extra_bits(struct flow_wildcards *mask)
1243 uint32_t *mask_u32 = (uint32_t *) &mask->masks;
1246 for (i = 0; i < FLOW_U32S; i++) {
1247 if (mask_u32[i] != 0) {
1251 bit = 1u << random_range(32);
1252 } while (!(bit & mask_u32[i]));
1253 mask_u32[i] &= ~bit;
/* Round-trip and equality tests for miniflow/minimask: flow -> miniflow
 * -> flow must be lossless, and masked comparisons must agree between
 * the compact and expanded representations. */
1259 test_miniflow(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1264 random_set_seed(0xb3faca38);
1265 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1266 const uint32_t *flow_u32 = (const uint32_t *) &flow;
1267 struct miniflow miniflow, miniflow2, miniflow3;
1268 struct flow flow2, flow3;
1269 struct flow_wildcards mask;
1270 struct minimask minimask;
1273 /* Convert flow to miniflow. */
1274 miniflow_init(&miniflow, &flow);
1276 /* Check that the flow equals its miniflow. */
1277 assert(miniflow_get_vid(&miniflow) == vlan_tci_to_vid(flow.vlan_tci));
1278 for (i = 0; i < FLOW_U32S; i++) {
1279 assert(MINIFLOW_GET_TYPE(&miniflow, uint32_t, i * 4)
1283 /* Check that the miniflow equals itself. */
1284 assert(miniflow_equal(&miniflow, &miniflow));
1286 /* Convert miniflow back to flow and verify that it's the same. */
1287 miniflow_expand(&miniflow, &flow2);
1288 assert(flow_equal(&flow, &flow2));
1290 /* Check that copying a miniflow works properly. */
1291 miniflow_clone(&miniflow2, &miniflow);
1292 assert(miniflow_equal(&miniflow, &miniflow2));
1293 assert(miniflow_hash(&miniflow, 0) == miniflow_hash(&miniflow2, 0));
1294 miniflow_expand(&miniflow2, &flow3);
1295 assert(flow_equal(&flow, &flow3));
1297 /* Check that masked matches work as expected for identical flows and
/* Pick a non-catchall random mask for the masked-compare checks. */
1300 next_random_flow(&mask.masks, 1);
1301 } while (flow_wildcards_is_catchall(&mask));
1302 minimask_init(&minimask, &mask);
1303 assert(minimask_is_catchall(&minimask)
1304 == flow_wildcards_is_catchall(&mask));
1305 assert(miniflow_equal_in_minimask(&miniflow, &miniflow2, &minimask));
1306 assert(miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
1307 assert(miniflow_hash_in_minimask(&miniflow, &minimask, 0x12345678) ==
1308 flow_hash_in_minimask(&flow, &minimask, 0x12345678));
1310 /* Check that masked matches work as expected for differing flows and
/* Flip a masked bit so the flows now differ under the mask. */
1312 toggle_masked_flow_bits(&flow2, &mask);
1313 assert(!miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
1314 miniflow_init(&miniflow3, &flow2);
1315 assert(!miniflow_equal_in_minimask(&miniflow, &miniflow3, &minimask));
1318 miniflow_destroy(&miniflow);
1319 miniflow_destroy(&miniflow2);
1320 miniflow_destroy(&miniflow3);
1321 minimask_destroy(&minimask);
/* Verifies minimask_has_extra(): a mask never has extra bits relative to
 * itself; the catch-all mask has extras iff the other is not catch-all;
 * narrowing a mask makes it have extras relative to the original. */
1326 test_minimask_has_extra(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1328 struct flow_wildcards catchall;
1329 struct minimask minicatchall;
1333 flow_wildcards_init_catchall(&catchall);
1334 minimask_init(&minicatchall, &catchall);
1335 assert(minimask_is_catchall(&minicatchall));
1337 random_set_seed(0x2ec7905b);
1338 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1339 struct flow_wildcards mask;
1340 struct minimask minimask;
1343 minimask_init(&minimask, &mask);
1344 assert(!minimask_has_extra(&minimask, &minimask));
1345 assert(minimask_has_extra(&minicatchall, &minimask)
1346 == !minimask_is_catchall(&minimask));
1347 if (!minimask_is_catchall(&minimask)) {
1348 struct minimask minimask2;
/* Clear some mask bits: the narrower mask must report extras. */
1350 wildcard_extra_bits(&mask);
1351 minimask_init(&minimask2, &mask);
1352 assert(minimask_has_extra(&minimask2, &minimask));
1353 assert(!minimask_has_extra(&minimask, &minimask2));
1354 minimask_destroy(&minimask2);
1357 minimask_destroy(&minimask);
1360 minimask_destroy(&minicatchall);
/* Verifies minimask_combine(): combining with the catch-all mask yields
 * catch-all, and combining two masks equals the AND of their expanded
 * flow_wildcards forms. */
1364 test_minimask_combine(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
1366 struct flow_wildcards catchall;
1367 struct minimask minicatchall;
1371 flow_wildcards_init_catchall(&catchall);
1372 minimask_init(&minicatchall, &catchall);
1373 assert(minimask_is_catchall(&minicatchall));
1375 random_set_seed(0x181bf0cd);
1376 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1377 struct minimask minimask, minimask2, minicombined;
1378 struct flow_wildcards mask, mask2, combined, combined2;
1379 uint32_t storage[FLOW_U32S];
1383 minimask_init(&minimask, &mask);
1385 minimask_combine(&minicombined, &minimask, &minicatchall, storage);
1386 assert(minimask_is_catchall(&minicombined));
1388 any_random_flow(&flow2);
1389 mask2.masks = flow2;
1390 minimask_init(&minimask2, &mask2);
1392 minimask_combine(&minicombined, &minimask, &minimask2, storage);
1393 flow_wildcards_and(&combined, &mask, &mask2);
1394 minimask_expand(&minicombined, &combined2);
1395 assert(flow_wildcards_equal(&combined, &combined2));
1397 minimask_destroy(&minimask);
1398 minimask_destroy(&minimask2);
1401 minimask_destroy(&minicatchall);
/* Command table mapping test names (as given on the command line) to
 * test functions; the two 0s are min/max extra-argument counts. */
1404 static const struct command commands[] = {
1405 /* Classifier tests. */
1406 {"empty", 0, 0, test_empty},
1407 {"destroy-null", 0, 0, test_destroy_null},
1408 {"single-rule", 0, 0, test_single_rule},
1409 {"rule-replacement", 0, 0, test_rule_replacement},
1410 {"many-rules-in-one-list", 0, 0, test_many_rules_in_one_list},
1411 {"many-rules-in-one-table", 0, 0, test_many_rules_in_one_table},
1412 {"many-rules-in-two-tables", 0, 0, test_many_rules_in_two_tables},
1413 {"many-rules-in-five-tables", 0, 0, test_many_rules_in_five_tables},
1415 /* Miniflow and minimask tests. */
1416 {"miniflow", 0, 0, test_miniflow},
1417 {"minimask_has_extra", 0, 0, test_minimask_has_extra},
1418 {"minimask_combine", 0, 0, test_minimask_combine},
/* Entry point: dispatches argv[1] to the matching command above. */
1424 test_classifier_main(int argc, char *argv[])
1426 set_program_name(argv[0]);
1428 run_command(argc - 1, argv + 1, commands);
1431 OVSTEST_REGISTER("test-classifier", test_classifier_main);