2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 /* "White box" tests for classifier.
19 * With very few exceptions, these tests obtain complete coverage of every
20 * basic block and every branch in the classifier implementation, e.g. a clean
21 * report from "gcov -b". (Covering the exceptions would require finding
22 * collisions in the hash function used for flow data, etc.)
24 * This test should receive a clean report from "valgrind --leak-check=full":
25 * it frees every heap block that it allocates.
30 #include "classifier.h"
34 #include "byte-order.h"
35 #include "classifier-private.h"
36 #include "command-line.h"
42 #include "unaligned.h"
/* NOTE(review): this listing appears subsampled -- lines are missing
 * throughout and each surviving line carries a residual original line
 * number.  Code below is kept verbatim; only comments are added. */

/* When true, rule insert/remove tests exercise classifier versioning. */
45 static bool versioned = false;
47 /* Fields in a rule. */
/* X-macro enumerating the struct flow members these tests exercise.
 * CLS_FIELD is redefined at each use site: below it expands to enum
 * indexes (CLS_F_IDX_*), then to entries of the cls_fields[] metadata
 * table. */
49 /* struct flow all-caps */ \
50 /* member name name */ \
51 /* ----------- -------- */ \
52 CLS_FIELD(tunnel.tun_id, TUN_ID) \
53 CLS_FIELD(metadata, METADATA) \
54 CLS_FIELD(nw_src, NW_SRC) \
55 CLS_FIELD(nw_dst, NW_DST) \
56 CLS_FIELD(in_port, IN_PORT) \
57 CLS_FIELD(vlan_tci, VLAN_TCI) \
58 CLS_FIELD(dl_type, DL_TYPE) \
59 CLS_FIELD(tp_src, TP_SRC) \
60 CLS_FIELD(tp_dst, TP_DST) \
61 CLS_FIELD(dl_src, DL_SRC) \
62 CLS_FIELD(dl_dst, DL_DST) \
63 CLS_FIELD(nw_proto, NW_PROTO) \
64 CLS_FIELD(nw_tos, NW_DSCP)
68 * (These are also indexed into struct classifier's 'tables' array.) */
70 #define CLS_FIELD(MEMBER, NAME) CLS_F_IDX_##NAME,
76 /* Field information. */
/* Per-field metadata: byte offset and length inside struct flow, plus a
 * human-readable name.  (Struct declaration lines are missing from this
 * listing.) */
78 int ofs; /* Offset in struct flow. */
79 int len; /* Length in bytes. */
80 const char *name; /* Name (for debugging). */
/* One cls_field entry per CLS_FIELD above, generated by re-expanding the
 * x-macro with offsetof/sizeof. */
83 static const struct cls_field cls_fields[CLS_N_FIELDS] = {
84 #define CLS_FIELD(MEMBER, NAME) \
85 { offsetof(struct flow, MEMBER), \
86 sizeof ((struct flow *)0)->MEMBER, \
/* Test rule: a cls_rule wrapped with auxiliary test data and a list
 * node used by the versioned-removal tests. */
93 struct ovs_list list_node;
94 int aux; /* Auxiliary data. */
95 struct cls_rule cls_rule; /* Classifier rule data. */
/* Maps an embedded cls_rule back to its enclosing test_rule; tolerates
 * NULL so failed lookups pass through. */
98 static struct test_rule *
99 test_rule_from_cls_rule(const struct cls_rule *rule)
101 return rule ? CONTAINER_OF(rule, struct test_rule, cls_rule) : NULL;
/* Destroys the classifier-rule part of 'rule' (ownership of the
 * test_rule allocation itself is handled elsewhere -- lines missing). */
105 test_rule_destroy(struct test_rule *rule)
108 cls_rule_destroy(&rule->cls_rule);
118 /* Trivial (linear) classifier. */
/* Reference implementation used to cross-check the real classifier: an
 * array of cloned rules kept sorted by decreasing priority. */
121 size_t allocated_rules;
122 struct test_rule **rules;
/* Initializes 'tcls' to the empty state. */
126 tcls_init(struct tcls *tcls)
129 tcls->allocated_rules = 0;
/* Frees every rule owned by 'tcls' (array free itself is on a line
 * missing from this listing). */
134 tcls_destroy(struct tcls *tcls)
139 for (i = 0; i < tcls->n_rules; i++) {
140 test_rule_destroy(tcls->rules[i]);
147 tcls_is_empty(const struct tcls *tcls)
149 return tcls->n_rules == 0;
/* Inserts a clone of 'rule' into 'tcls', replacing any rule with equal
 * match criteria (the replaced clone is RCU-deferred to free_rule), and
 * otherwise keeping the array ordered by decreasing priority. */
152 static struct test_rule *
153 tcls_insert(struct tcls *tcls, const struct test_rule *rule)
157 for (i = 0; i < tcls->n_rules; i++) {
158 const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
159 if (cls_rule_equal(pos, &rule->cls_rule)) {
161 ovsrcu_postpone(free_rule, tcls->rules[i]);
162 tcls->rules[i] = clone_rule(rule);
163 return tcls->rules[i];
164 } else if (pos->priority < rule->cls_rule.priority) {
/* Grow the array geometrically when full. */
169 if (tcls->n_rules >= tcls->allocated_rules) {
170 tcls->rules = x2nrealloc(tcls->rules, &tcls->allocated_rules,
171 sizeof *tcls->rules)
173 if (i != tcls->n_rules) {
174 memmove(&tcls->rules[i + 1], &tcls->rules[i],
175 sizeof *tcls->rules * (tcls->n_rules - i));
177 tcls->rules[i] = clone_rule(rule);
179 return tcls->rules[i];
/* Removes the clone of 'rule' from 'cls', compacting the array.
 * (The equality test selecting 'pos' is on a missing line.) */
183 tcls_remove(struct tcls *cls, const struct test_rule *rule)
187 for (i = 0; i < cls->n_rules; i++) {
188 struct test_rule *pos = cls->rules[i];
190 test_rule_destroy(pos);
192 memmove(&cls->rules[i], &cls->rules[i + 1],
193 sizeof *cls->rules * (cls->n_rules - i - 1));
/* Returns whether the concrete flow 'fixed' matches the wildcarded rule
 * 'wild_', checking every test field by hand (one branch per
 * CLS_F_IDX_*) so the result is independent of the classifier's own
 * matching code. */
203 match(const struct cls_rule *wild_, const struct flow *fixed)
208 minimatch_expand(&wild_->match, &wild);
209 for (f_idx = 0; f_idx < CLS_N_FIELDS; f_idx++) {
/* Each comparison XORs the two values and masks with the rule's
 * wildcard mask: zero means the unmasked bits agree. */
212 if (f_idx == CLS_F_IDX_NW_SRC) {
213 eq = !((fixed->nw_src ^ wild.flow.nw_src)
214 & wild.wc.masks.nw_src);
215 } else if (f_idx == CLS_F_IDX_NW_DST) {
216 eq = !((fixed->nw_dst ^ wild.flow.nw_dst)
217 & wild.wc.masks.nw_dst);
218 } else if (f_idx == CLS_F_IDX_TP_SRC) {
219 eq = !((fixed->tp_src ^ wild.flow.tp_src)
220 & wild.wc.masks.tp_src);
221 } else if (f_idx == CLS_F_IDX_TP_DST) {
222 eq = !((fixed->tp_dst ^ wild.flow.tp_dst)
223 & wild.wc.masks.tp_dst);
224 } else if (f_idx == CLS_F_IDX_DL_SRC) {
225 eq = eth_addr_equal_except(fixed->dl_src, wild.flow.dl_src,
226 wild.wc.masks.dl_src);
227 } else if (f_idx == CLS_F_IDX_DL_DST) {
228 eq = eth_addr_equal_except(fixed->dl_dst, wild.flow.dl_dst,
229 wild.wc.masks.dl_dst);
230 } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
231 eq = !((fixed->vlan_tci ^ wild.flow.vlan_tci)
232 & wild.wc.masks.vlan_tci);
233 } else if (f_idx == CLS_F_IDX_TUN_ID) {
234 eq = !((fixed->tunnel.tun_id ^ wild.flow.tunnel.tun_id)
235 & wild.wc.masks.tunnel.tun_id);
236 } else if (f_idx == CLS_F_IDX_METADATA) {
237 eq = !((fixed->metadata ^ wild.flow.metadata)
238 & wild.wc.masks.metadata);
239 } else if (f_idx == CLS_F_IDX_NW_DSCP) {
/* Only the DSCP bits of nw_tos belong to this test field. */
240 eq = !((fixed->nw_tos ^ wild.flow.nw_tos) &
241 (wild.wc.masks.nw_tos & IP_DSCP_MASK));
242 } else if (f_idx == CLS_F_IDX_NW_PROTO) {
243 eq = !((fixed->nw_proto ^ wild.flow.nw_proto)
244 & wild.wc.masks.nw_proto);
245 } else if (f_idx == CLS_F_IDX_DL_TYPE) {
246 eq = !((fixed->dl_type ^ wild.flow.dl_type)
247 & wild.wc.masks.dl_type);
248 } else if (f_idx == CLS_F_IDX_IN_PORT) {
249 eq = !((fixed->in_port.ofp_port
250 ^ wild.flow.in_port.ofp_port)
251 & wild.wc.masks.in_port.ofp_port);
/* Linear lookup in the trivial classifier: first rule whose match()
 * succeeds wins (the array is priority-ordered by tcls_insert). */
263 static struct cls_rule *
264 tcls_lookup(const struct tcls *cls, const struct flow *flow)
268 for (i = 0; i < cls->n_rules; i++) {
269 struct test_rule *pos = cls->rules[i];
270 if (match(&pos->cls_rule, flow)) {
271 return &pos->cls_rule;
/* Removes from 'cls' every rule that 'target' would match: the rule's
 * mask must not wildcard anything 'target' cares about, and the rule's
 * expanded flow must match 'target'. */
278 tcls_delete_matches(struct tcls *cls, const struct cls_rule *target)
282 for (i = 0; i < cls->n_rules; ) {
283 struct test_rule *pos = cls->rules[i];
284 if (!minimask_has_extra(&pos->cls_rule.match.mask,
285 &target->match.mask)) {
288 miniflow_expand(&pos->cls_rule.match.flow, &flow);
289 if (match(target, &flow)) {
290 tcls_remove(cls, pos);
/* Two candidate values per test field; test flows and rules are built
 * by picking one of the two for each field. */
298 static ovs_be32 nw_src_values[] = { CONSTANT_HTONL(0xc0a80001),
299 CONSTANT_HTONL(0xc0a04455) };
300 static ovs_be32 nw_dst_values[] = { CONSTANT_HTONL(0xc0a80002),
301 CONSTANT_HTONL(0xc0a04455) };
302 static ovs_be64 tun_id_values[] = {
304 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
305 static ovs_be64 metadata_values[] = {
307 CONSTANT_HTONLL(UINT64_C(0xfedcba9876543210)) };
308 static ofp_port_t in_port_values[] = { OFP_PORT_C(1), OFPP_LOCAL };
309 static ovs_be16 vlan_tci_values[] = { CONSTANT_HTONS(101), CONSTANT_HTONS(0) };
310 static ovs_be16 dl_type_values[]
311 = { CONSTANT_HTONS(ETH_TYPE_IP), CONSTANT_HTONS(ETH_TYPE_ARP) };
312 static ovs_be16 tp_src_values[] = { CONSTANT_HTONS(49362),
313 CONSTANT_HTONS(80) };
314 static ovs_be16 tp_dst_values[] = { CONSTANT_HTONS(6667), CONSTANT_HTONS(22) };
315 static uint8_t dl_src_values[][ETH_ADDR_LEN] = {
316 { 0x00, 0x02, 0xe3, 0x0f, 0x80, 0xa4 },
317 { 0x5e, 0x33, 0x7f, 0x5f, 0x1e, 0x99 } };
318 static uint8_t dl_dst_values[][ETH_ADDR_LEN] = {
319 { 0x4a, 0x27, 0x71, 0xae, 0x64, 0xc1 },
320 { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
321 static uint8_t nw_proto_values[] = { IPPROTO_TCP, IPPROTO_ICMP };
322 static uint8_t nw_dscp_values[] = { 48, 0 };
/* values[field][i] points at the i'th candidate value for that field,
 * indexed by CLS_F_IDX_*; filled in by the initializer below. */
324 static void *values[CLS_N_FIELDS][2];
329 values[CLS_F_IDX_TUN_ID][0] = &tun_id_values[0];
330 values[CLS_F_IDX_TUN_ID][1] = &tun_id_values[1];
332 values[CLS_F_IDX_METADATA][0] = &metadata_values[0];
333 values[CLS_F_IDX_METADATA][1] = &metadata_values[1];
335 values[CLS_F_IDX_IN_PORT][0] = &in_port_values[0];
336 values[CLS_F_IDX_IN_PORT][1] = &in_port_values[1];
338 values[CLS_F_IDX_VLAN_TCI][0] = &vlan_tci_values[0];
339 values[CLS_F_IDX_VLAN_TCI][1] = &vlan_tci_values[1];
341 values[CLS_F_IDX_DL_SRC][0] = dl_src_values[0];
342 values[CLS_F_IDX_DL_SRC][1] = dl_src_values[1];
344 values[CLS_F_IDX_DL_DST][0] = dl_dst_values[0];
345 values[CLS_F_IDX_DL_DST][1] = dl_dst_values[1];
347 values[CLS_F_IDX_DL_TYPE][0] = &dl_type_values[0];
348 values[CLS_F_IDX_DL_TYPE][1] = &dl_type_values[1];
350 values[CLS_F_IDX_NW_SRC][0] = &nw_src_values[0];
351 values[CLS_F_IDX_NW_SRC][1] = &nw_src_values[1];
353 values[CLS_F_IDX_NW_DST][0] = &nw_dst_values[0];
354 values[CLS_F_IDX_NW_DST][1] = &nw_dst_values[1];
356 values[CLS_F_IDX_NW_PROTO][0] = &nw_proto_values[0];
357 values[CLS_F_IDX_NW_PROTO][1] = &nw_proto_values[1];
359 values[CLS_F_IDX_NW_DSCP][0] = &nw_dscp_values[0];
360 values[CLS_F_IDX_NW_DSCP][1] = &nw_dscp_values[1];
362 values[CLS_F_IDX_TP_SRC][0] = &tp_src_values[0];
363 values[CLS_F_IDX_TP_SRC][1] = &tp_src_values[1];
365 values[CLS_F_IDX_TP_DST][0] = &tp_dst_values[0];
366 values[CLS_F_IDX_TP_DST][1] = &tp_dst_values[1];
/* Per-field candidate counts (all 2, via ARRAY_SIZE). */
369 #define N_NW_SRC_VALUES ARRAY_SIZE(nw_src_values)
370 #define N_NW_DST_VALUES ARRAY_SIZE(nw_dst_values)
371 #define N_TUN_ID_VALUES ARRAY_SIZE(tun_id_values)
372 #define N_METADATA_VALUES ARRAY_SIZE(metadata_values)
373 #define N_IN_PORT_VALUES ARRAY_SIZE(in_port_values)
374 #define N_VLAN_TCI_VALUES ARRAY_SIZE(vlan_tci_values)
375 #define N_DL_TYPE_VALUES ARRAY_SIZE(dl_type_values)
376 #define N_TP_SRC_VALUES ARRAY_SIZE(tp_src_values)
377 #define N_TP_DST_VALUES ARRAY_SIZE(tp_dst_values)
378 #define N_DL_SRC_VALUES ARRAY_SIZE(dl_src_values)
379 #define N_DL_DST_VALUES ARRAY_SIZE(dl_dst_values)
380 #define N_NW_PROTO_VALUES ARRAY_SIZE(nw_proto_values)
381 #define N_NW_DSCP_VALUES ARRAY_SIZE(nw_dscp_values)
/* Total number of distinct test flows: product of the per-field counts
 * (several factors are on lines missing from this listing). */
383 #define N_FLOW_VALUES (N_NW_SRC_VALUES * \
387 N_VLAN_TCI_VALUES * \
393 N_NW_PROTO_VALUES * \
/* Mixed-radix digit extraction: yields *x mod n_values and divides *x
 * down, so one integer encodes a choice for every field. */
397 get_value(unsigned int *x, unsigned n_values)
399 unsigned int rem = *x % n_values;
/* Cross-checks 'cls' against the trivial 'tcls': for 'confidence'
 * randomly chosen test flows, both classifiers must agree on whether a
 * rule matches and on which rule it is. 'n_invisible_rules' accounts
 * for rules present in 'cls' but hidden from lookups in 'version'. */
405 compare_classifiers(struct classifier *cls, size_t n_invisible_rules,
406 long long version, struct tcls *tcls)
408 static const int confidence = 500;
411 assert(classifier_count(cls) == tcls->n_rules + n_invisible_rules);
412 for (i = 0; i < confidence; i++) {
413 const struct cls_rule *cr0, *cr1, *cr2;
415 struct flow_wildcards wc;
418 flow_wildcards_init_catchall(&wc);
/* Decode one random integer into a full flow, one field at a time. */
419 x = random_range(N_FLOW_VALUES);
420 memset(&flow, 0, sizeof flow);
421 flow.nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
422 flow.nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
423 flow.tunnel.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
424 flow.metadata = metadata_values[get_value(&x, N_METADATA_VALUES)];
425 flow.in_port.ofp_port = in_port_values[get_value(&x,
427 flow.vlan_tci = vlan_tci_values[get_value(&x, N_VLAN_TCI_VALUES)];
428 flow.dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
429 flow.tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
430 flow.tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
431 memcpy(flow.dl_src, dl_src_values[get_value(&x, N_DL_SRC_VALUES)],
433 memcpy(flow.dl_dst, dl_dst_values[get_value(&x, N_DL_DST_VALUES)],
435 flow.nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
436 flow.nw_tos = nw_dscp_values[get_value(&x, N_NW_DSCP_VALUES)];
438 /* This assertion is here to suppress a GCC 4.9 array-bounds warning */
439 ovs_assert(cls->n_tries <= CLS_MAX_TRIES);
/* Both classifiers must agree: found/not-found, then exact rule. */
441 cr0 = classifier_lookup(cls, version, &flow, &wc);
442 cr1 = tcls_lookup(tcls, &flow);
443 assert((cr0 == NULL) == (cr1 == NULL));
445 const struct test_rule *tr0 = test_rule_from_cls_rule(cr0);
446 const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
448 assert(cls_rule_equal(cr0, cr1));
449 assert(tr0->aux == tr1->aux);
451 /* Make sure the rule should have been visible. */
452 assert(cr0->cls_match);
453 assert(cls_match_visible_in_version(cr0->cls_match, version));
/* Lookup without wildcards must give the same answer (comparison
 * against cr2 is on a line missing from this listing). */
455 cr2 = classifier_lookup(cls, version, &flow, NULL);
/* Removes and frees (RCU-deferred) every rule in 'cls', then destroys
 * the classifier itself. */
461 destroy_classifier(struct classifier *cls)
463 struct test_rule *rule;
465 classifier_defer(cls);
466 CLS_FOR_EACH (rule, cls_rule, cls) {
467 if (classifier_remove(cls, &rule->cls_rule)) {
468 ovsrcu_postpone(free_rule, rule);
471 classifier_destroy(cls);
/* Verifies that the priority vector iterates in non-increasing priority
 * order, aborting with a diagnostic otherwise. */
475 pvector_verify(const struct pvector *pvec)
477 void *ptr OVS_UNUSED;
478 int prev_priority = INT_MAX;
480 PVECTOR_FOR_EACH (ptr, pvec) {
/* Peek at the cursor's current entry to read its priority. */
481 int priority = cursor__.vector[cursor__.entry_idx].priority;
482 if (priority > prev_priority) {
483 ovs_abort(0, "Priority vector is out of order (%u > %u)",
484 priority, prev_priority);
486 prev_priority = priority;
/* Recursively checks structural invariants of a prefix trie node at bit
 * offset 'ofs' of an 'n_bits'-wide field; returns a rule count summed
 * over the subtree (return statement spans missing lines). */
491 trie_verify(const rcu_trie_ptr *trie, unsigned int ofs, unsigned int n_bits)
493 const struct trie_node *node = ovsrcu_get(struct trie_node *, trie);
496 assert(node->n_rules == 0 || node->n_bits > 0);
498 assert((ofs > 0 || (ofs == 0 && node->n_bits == 0)) && ofs <= n_bits);
501 + trie_verify(&node->edges[0], ofs, n_bits)
502 + trie_verify(&node->edges[1], ofs, n_bits);
/* Verifies every prefix trie of 'cls' and that their combined rule
 * count does not exceed the classifier's. */
508 verify_tries(struct classifier *cls)
509 OVS_NO_THREAD_SAFETY_ANALYSIS
511 unsigned int n_rules = 0;
514 for (i = 0; i < cls->n_tries; i++) {
515 n_rules += trie_verify(&cls->tries[i].root, 0,
516 cls->tries[i].field->n_bits);
518 assert(n_rules <= cls->n_rules);
/* Whitebox invariant checker: walks the classifier's subtables, rule
 * lists, and version metadata and asserts they agree with the expected
 * counts.  Any of n_tables/n_rules/n_dups passed as -1 means "don't
 * check that count". */
522 check_tables(const struct classifier *cls, int n_tables, int n_rules,
523 int n_dups, int n_invisible, long long version)
524 OVS_NO_THREAD_SAFETY_ANALYSIS
526 const struct cls_subtable *table;
527 struct test_rule *test_rule;
528 int found_tables = 0;
529 int found_tables_with_visible_rules = 0;
532 int found_invisible = 0;
533 int found_visible_but_removable = 0;
534 int found_rules2 = 0;
536 pvector_verify(&cls->subtables);
537 CMAP_FOR_EACH (table, cmap_node, &cls->subtables_map) {
538 const struct cls_match *head;
539 int max_priority = INT_MIN;
540 unsigned int max_count = 0;
542 bool found_visible_rules = false;
543 const struct cls_subtable *iter;
545 /* Locate the subtable from 'subtables'. */
546 PVECTOR_FOR_EACH (iter, &cls->subtables) {
549 ovs_abort(0, "Subtable %p duplicated in 'subtables'.",
556 ovs_abort(0, "Subtable %p not found from 'subtables'.", table);
559 assert(!cmap_is_empty(&table->rules));
/* The ports trie, if in use, must account for every rule. */
560 assert(trie_verify(&table->ports_trie, 0, table->ports_mask_len)
561 == (table->ports_mask_len ? cmap_count(&table->rules) : 0));
565 CMAP_FOR_EACH (head, cmap_node, &table->rules) {
566 int prev_priority = INT_MAX;
567 long long prev_version = 0;
568 const struct cls_match *rule, *prev;
569 bool found_visible_rules_in_list = false;
571 assert(head->priority <= table->max_priority);
573 if (head->priority > max_priority) {
574 max_priority = head->priority;
578 FOR_EACH_RULE_IN_LIST_PROTECTED(rule, prev, head) {
579 long long rule_version;
580 const struct cls_rule *found_rule;
582 /* Priority may not increase. */
583 assert(rule->priority <= prev_priority);
585 if (rule->priority == max_priority) {
589 /* Count invisible rules and visible duplicates. */
590 if (!cls_match_visible_in_version(rule, version)) {
593 if (cls_match_is_eventually_invisible(rule)) {
594 found_visible_but_removable++;
596 if (found_visible_rules_in_list) {
599 found_visible_rules_in_list = true;
600 found_visible_rules = true;
603 /* Rule must be visible in the version it was inserted. */
604 rule_version = rule->cls_rule->version;
605 assert(cls_match_visible_in_version(rule, rule_version));
607 /* We should always find the latest version of the rule,
608 * unless all rules have been marked for removal.
609 * Later versions must always be later in the list. */
610 found_rule = classifier_find_rule_exactly(cls, rule->cls_rule);
611 if (found_rule && found_rule != rule->cls_rule) {
613 assert(found_rule->priority == rule->priority);
615 /* Found rule may not have a lower version. */
616 assert(found_rule->version >= rule_version);
618 /* This rule must not be visible in the found rule's
620 assert(!cls_match_visible_in_version(rule,
621 found_rule->version));
624 if (rule->priority == prev_priority) {
625 /* Exact duplicate rule may not have a lower version. */
626 assert(rule_version >= prev_version);
628 /* Previous rule must not be visible in rule's version. */
629 assert(!cls_match_visible_in_version(prev, rule_version));
632 prev_priority = rule->priority;
633 prev_version = rule_version;
638 if (found_visible_rules) {
639 found_tables_with_visible_rules++;
642 assert(table->max_priority == max_priority);
643 assert(table->max_count == max_count);
/* Cross-check aggregate counts against expectations. */
646 assert(found_tables == cmap_count(&cls->subtables_map));
647 assert(found_tables == pvector_count(&cls->subtables));
648 assert(n_tables == -1 || n_tables == found_tables_with_visible_rules);
649 assert(n_rules == -1 || found_rules == n_rules + found_invisible);
650 assert(n_dups == -1 || found_dups == n_dups);
651 assert(found_invisible == n_invisible);
653 CLS_FOR_EACH (test_rule, cls_rule, cls) {
656 /* Iteration does not see removable rules. */
658 == found_rules2 + found_visible_but_removable + found_invisible);
/* Builds a test rule: fields selected by the 'wc_fields' bitmap are
 * matched exactly (value chosen per-field by the corresponding bit of
 * 'value_pat'); all other fields are wildcarded. */
661 static struct test_rule *
662 make_rule(int wc_fields, int priority, int value_pat, long long version)
664 const struct cls_field *f;
665 struct test_rule *rule;
668 match_init_catchall(&match);
669 for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
670 int f_idx = f - cls_fields;
671 int value_idx = (value_pat & (1u << f_idx)) != 0;
/* Copy the chosen candidate value straight into the flow via the
 * field's offset/length metadata. */
672 memcpy((char *) &match.flow + f->ofs,
673 values[f_idx][value_idx], f->len);
/* Set the exact-match mask for this field. */
675 if (f_idx == CLS_F_IDX_NW_SRC) {
676 match.wc.masks.nw_src = OVS_BE32_MAX;
677 } else if (f_idx == CLS_F_IDX_NW_DST) {
678 match.wc.masks.nw_dst = OVS_BE32_MAX;
679 } else if (f_idx == CLS_F_IDX_TP_SRC) {
680 match.wc.masks.tp_src = OVS_BE16_MAX;
681 } else if (f_idx == CLS_F_IDX_TP_DST) {
682 match.wc.masks.tp_dst = OVS_BE16_MAX;
683 } else if (f_idx == CLS_F_IDX_DL_SRC) {
684 memset(match.wc.masks.dl_src, 0xff, ETH_ADDR_LEN);
685 } else if (f_idx == CLS_F_IDX_DL_DST) {
686 memset(match.wc.masks.dl_dst, 0xff, ETH_ADDR_LEN);
687 } else if (f_idx == CLS_F_IDX_VLAN_TCI) {
688 match.wc.masks.vlan_tci = OVS_BE16_MAX;
689 } else if (f_idx == CLS_F_IDX_TUN_ID) {
690 match.wc.masks.tunnel.tun_id = OVS_BE64_MAX;
691 } else if (f_idx == CLS_F_IDX_METADATA) {
692 match.wc.masks.metadata = OVS_BE64_MAX;
693 } else if (f_idx == CLS_F_IDX_NW_DSCP) {
694 match.wc.masks.nw_tos |= IP_DSCP_MASK;
695 } else if (f_idx == CLS_F_IDX_NW_PROTO) {
696 match.wc.masks.nw_proto = UINT8_MAX;
697 } else if (f_idx == CLS_F_IDX_DL_TYPE) {
698 match.wc.masks.dl_type = OVS_BE16_MAX;
699 } else if (f_idx == CLS_F_IDX_IN_PORT) {
700 match.wc.masks.in_port.ofp_port = u16_to_ofp(UINT16_MAX);
706 rule = xzalloc(sizeof *rule);
/* Clamp priority away from INT_MIN/INT_MAX for wildcarded rules
 * (presumably to avoid classifier edge cases -- full expression is
 * split across missing lines; confirm against upstream). */
707 cls_rule_init(&rule->cls_rule, &match, wc_fields
708 ? (priority == INT_MIN ? priority + 1 :
709 priority == INT_MAX ? priority - 1 : priority)
/* Deep-copies a test rule, cloning its embedded cls_rule. */
714 static struct test_rule *
715 clone_rule(const struct test_rule *src)
717 struct test_rule *dst;
719 dst = xmalloc(sizeof *dst);
721 cls_rule_clone(&dst->cls_rule, &src->cls_rule);
/* Frees a test rule and its embedded cls_rule (used directly and as an
 * ovsrcu_postpone callback). */
726 free_rule(struct test_rule *rule)
728 cls_rule_destroy(&rule->cls_rule);
/* Fisher-Yates shuffle of an int array. */
733 shuffle(int *p, size_t n)
735 for (; n > 1; n--, p++) {
736 int *q = &p[random_range(n)];
/* Fisher-Yates shuffle of a uint32_t array. */
744 shuffle_u32s(uint32_t *p, size_t n)
746 for (; n > 1; n--, p++) {
747 uint32_t *q = &p[random_range(n)];
754 /* Classifier tests. */
/* Prefix-trie fields enabled for every test classifier. */
756 static enum mf_field_id trie_fields[2] = {
757 MFF_IPV4_DST, MFF_IPV4_SRC
761 set_prefix_fields(struct classifier *cls)
764 classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
768 /* Tests an empty classifier. */
770 test_empty(struct ovs_cmdl_context *ctx OVS_UNUSED)
772 struct classifier cls;
775 classifier_init(&cls, flow_segment_u64s);
776 set_prefix_fields(&cls);
778 assert(classifier_is_empty(&cls));
779 assert(tcls_is_empty(&tcls));
780 compare_classifiers(&cls, 0, CLS_MIN_VERSION, &tcls);
781 classifier_destroy(&cls);
785 /* Destroys a null classifier. */
787 test_destroy_null(struct ovs_cmdl_context *ctx OVS_UNUSED)
789 classifier_destroy(NULL);
792 /* Tests classification with one rule at a time. */
/* Iterates over every subset of fields to wildcard; for each, inserts
 * one rule, verifies lookups against tcls, then removes it and checks
 * both classifiers are empty again. */
794 test_single_rule(struct ovs_cmdl_context *ctx OVS_UNUSED)
796 unsigned int wc_fields; /* Hilarious. */
798 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
799 struct classifier cls;
800 struct test_rule *rule, *tcls_rule;
/* Priority derived from wc_fields so it is deterministic. */
803 rule = make_rule(wc_fields,
804 hash_bytes(&wc_fields, sizeof wc_fields, 0), 0,
806 classifier_init(&cls, flow_segment_u64s);
807 set_prefix_fields(&cls);
809 tcls_rule = tcls_insert(&tcls, rule);
811 classifier_insert(&cls, &rule->cls_rule, NULL, 0);
812 compare_classifiers(&cls, 0, CLS_MIN_VERSION, &tcls);
813 check_tables(&cls, 1, 1, 0, 0, CLS_MIN_VERSION);
815 classifier_remove(&cls, &rule->cls_rule);
816 tcls_remove(&tcls, tcls_rule);
817 assert(classifier_is_empty(&cls));
818 assert(tcls_is_empty(&tcls));
819 compare_classifiers(&cls, 0, CLS_MIN_VERSION, &tcls);
821 ovsrcu_postpone(free_rule, rule);
822 classifier_destroy(&cls);
827 /* Tests replacing one rule by another. */
/* For every wildcard subset: inserts rule1, then replaces it with an
 * identically-matching rule2 via classifier_replace(), verifying the
 * displaced rule is rule1 and the table counts stay at one rule. */
829 test_rule_replacement(struct ovs_cmdl_context *ctx OVS_UNUSED)
831 unsigned int wc_fields;
833 for (wc_fields = 0; wc_fields < (1u << CLS_N_FIELDS); wc_fields++) {
834 struct classifier cls;
835 struct test_rule *rule1;
836 struct test_rule *rule2;
839 rule1 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX,
841 rule2 = make_rule(wc_fields, OFP_DEFAULT_PRIORITY, UINT_MAX,
846 classifier_init(&cls, flow_segment_u64s);
847 set_prefix_fields(&cls);
849 tcls_insert(&tcls, rule1);
850 classifier_insert(&cls, &rule1->cls_rule, NULL, 0);
851 compare_classifiers(&cls, 0, CLS_MIN_VERSION, &tcls);
852 check_tables(&cls, 1, 1, 0, 0, CLS_MIN_VERSION);
856 tcls_insert(&tcls, rule2);
/* classifier_replace() must return the displaced rule1. */
858 assert(test_rule_from_cls_rule(
859 classifier_replace(&cls, &rule2->cls_rule,
861 ovsrcu_postpone(free_rule, rule1);
862 compare_classifiers(&cls, 0, CLS_MIN_VERSION, &tcls);
863 check_tables(&cls, 1, 1, 0, 0, CLS_MIN_VERSION);
864 classifier_defer(&cls);
865 classifier_remove(&cls, &rule2->cls_rule);
868 destroy_classifier(&cls);
/* n! -- used to count expected permutations in the tests below. */
873 factorial(int n_items)
878 for (i = 2; i <= n_items; i++) {
/* Reverses the first n elements of 'a' in place. */
893 reverse(int *a, int n)
897 for (i = 0; i < n / 2; i++) {
/* Advances 'a' to the lexicographically next permutation; presumably
 * returns false when 'a' was the final permutation (return statements
 * are on missing lines). */
904 next_permutation(int *a, int n)
908 for (k = n - 2; k >= 0; k--) {
909 if (a[k] < a[k + 1]) {
912 for (l = n - 1; ; l--) {
915 reverse(a + (k + 1), n - (k + 1));
924 /* Tests classification with rules that have the same matching criteria. */
/* Exercises rule lists: N_RULES rules share match criteria but may
 * share priorities; every permutation of insert/remove operations is
 * tried, optionally with versioning ('versioned'). */
926 test_many_rules_in_one_list (struct ovs_cmdl_context *ctx OVS_UNUSED)
928 enum { N_RULES = 3 };
931 for (n_pris = N_RULES; n_pris >= 1; n_pris--) {
932 int ops[N_RULES * 2];
/* Build a priority assignment with exactly n_pris distinct values. */
938 for (i = 1; i < N_RULES; i++) {
939 pris[i] = pris[i - 1] + (n_pris > i);
942 for (i = 0; i < N_RULES * 2; i++) {
948 struct test_rule *rules[N_RULES];
949 struct test_rule *tcls_rules[N_RULES];
950 int pri_rules[N_RULES];
951 struct classifier cls;
953 long long version = CLS_MIN_VERSION;
954 size_t n_invisible_rules = 0;
958 for (i = 0; i < N_RULES; i++) {
959 rules[i] = make_rule(456, pris[i], 0, version);
960 tcls_rules[i] = NULL;
964 classifier_init(&cls, flow_segment_u64s);
965 set_prefix_fields(&cls);
/* Each op either inserts or removes rule j (selection of j and
 * insert-vs-remove is on missing lines). */
968 for (i = 0; i < ARRAY_SIZE(ops); i++) {
969 struct test_rule *displaced_rule = NULL;
970 struct cls_rule *removable_rule = NULL;
974 if (!tcls_rules[j]) {
975 tcls_rules[j] = tcls_insert(&tcls, rules[j]);
977 /* Insert the new rule in the next version. */
978 *CONST_CAST(cls_version_t *,
979 &rules[j]->cls_rule.version)
982 displaced_rule = test_rule_from_cls_rule(
983 classifier_find_rule_exactly(&cls,
984 &rules[j]->cls_rule));
985 if (displaced_rule) {
986 /* Mark the old rule for removal after the current
988 cls_rule_make_invisible_in_version(
989 &displaced_rule->cls_rule, version);
991 removable_rule = &displaced_rule->cls_rule;
993 classifier_insert(&cls, &rules[j]->cls_rule, NULL, 0);
995 displaced_rule = test_rule_from_cls_rule(
996 classifier_replace(&cls, &rules[j]->cls_rule,
/* A same-priority rule must have been displaced iff one was
 * already present. */
999 if (pri_rules[pris[j]] >= 0) {
1000 int k = pri_rules[pris[j]];
1001 assert(displaced_rule != NULL);
1002 assert(displaced_rule != rules[j]);
1003 assert(pris[j] == displaced_rule->cls_rule.priority);
1004 tcls_rules[k] = NULL;
1006 assert(displaced_rule == NULL);
1008 pri_rules[pris[j]] = j;
1011 /* Mark the rule for removal after the current
1013 cls_rule_make_invisible_in_version(
1014 &rules[j]->cls_rule, version + 1);
1016 n_invisible_rules++;
1017 removable_rule = &rules[j]->cls_rule;
1019 classifier_remove(&cls, &rules[j]->cls_rule);
1021 tcls_remove(&tcls, tcls_rules[j]);
1022 tcls_rules[j] = NULL;
1023 pri_rules[pris[j]] = -1;
1025 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1027 for (m = 0; m < N_RULES; m++) {
1028 n += tcls_rules[m] != NULL;
1030 check_tables(&cls, n > 0, n, n - 1, n_invisible_rules,
1033 if (versioned && removable_rule) {
1034 /* Removable rule is no longer visible. */
1035 assert(removable_rule->cls_match);
1036 assert(!cls_match_visible_in_version(
1037 removable_rule->cls_match, version));
1038 classifier_remove(&cls, removable_rule);
1039 n_invisible_rules--;
1043 classifier_defer(&cls);
1044 for (i = 0; i < N_RULES; i++) {
1045 if (classifier_remove(&cls, &rules[i]->cls_rule)) {
1046 ovsrcu_postpone(free_rule, rules[i]);
1049 classifier_destroy(&cls);
1050 tcls_destroy(&tcls);
1051 } while (next_permutation(ops, ARRAY_SIZE(ops)));
1052 assert(n_permutations == (factorial(N_RULES * 2) >> N_RULES));
/* Population count via repeated clearing of the rightmost 1-bit. */
1057 count_ones(unsigned long int x)
1062 x = zero_rightmost_1bit(x);
/* Returns whether 'value' occurs among the first n elements of
 * 'array'. */
1070 array_contains(int *array, int n, int value)
1074 for (i = 0; i < n; i++) {
1075 if (array[i] == value) {
1083 /* Tests classification with two rules at a time that fall into the same
1084 * table but different lists. */
/* All N_RULES rules share one wildcard mask (hence one subtable) but
 * have distinct value patterns; inserts them all, then removes them
 * one by one, cross-checking after every step. */
1086 test_many_rules_in_one_table(struct ovs_cmdl_context *ctx OVS_UNUSED)
1090 for (iteration = 0; iteration < 50; iteration++) {
1091 enum { N_RULES = 20 };
1092 struct test_rule *rules[N_RULES];
1093 struct test_rule *tcls_rules[N_RULES];
1094 struct classifier cls;
1096 long long version = CLS_MIN_VERSION;
1097 size_t n_invisible_rules = 0;
1098 int value_pats[N_RULES];
/* Pick a random mask with enough exact-match bits to hold N_RULES
 * distinct value patterns. */
1104 wcf = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
1105 value_mask = ~wcf & ((1u << CLS_N_FIELDS) - 1);
1106 } while ((1 << count_ones(value_mask)) < N_RULES);
1108 classifier_init(&cls, flow_segment_u64s);
1109 set_prefix_fields(&cls);
1112 for (i = 0; i < N_RULES; i++) {
1113 int priority = random_range(INT_MAX);
/* Ensure value patterns are pairwise distinct. */
1116 value_pats[i] = random_uint32() & value_mask;
1117 } while (array_contains(value_pats, i, value_pats[i]));
1120 rules[i] = make_rule(wcf, priority, value_pats[i], version);
1121 tcls_rules[i] = tcls_insert(&tcls, rules[i]);
1123 classifier_insert(&cls, &rules[i]->cls_rule, NULL, 0);
1124 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1126 check_tables(&cls, 1, i + 1, 0, n_invisible_rules, version);
1129 for (i = 0; i < N_RULES; i++) {
1130 tcls_remove(&tcls, tcls_rules[i]);
1132 /* Mark the rule for removal after the current version. */
1133 cls_rule_make_invisible_in_version(&rules[i]->cls_rule,
1136 n_invisible_rules++;
1138 classifier_remove(&cls, &rules[i]->cls_rule);
1140 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1141 check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0,
1142 n_invisible_rules, version);
1144 ovsrcu_postpone(free_rule, rules[i]);
/* Versioned path: now actually remove the invisible rules. */
1149 for (i = 0; i < N_RULES; i++) {
1150 classifier_remove(&cls, &rules[i]->cls_rule);
1151 n_invisible_rules--;
1153 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1154 check_tables(&cls, 0, 0, 0, n_invisible_rules, version);
1155 ovsrcu_postpone(free_rule, rules[i]);
1159 classifier_destroy(&cls);
1160 tcls_destroy(&tcls);
1164 /* Tests classification with many rules at a time that fall into random lists
/* Stress test across 'n_tables' distinct wildcard masks: inserts up to
 * MAX_RULES random rules, then repeatedly deletes all rules matching a
 * randomly chosen target until the classifier is empty, verifying after
 * every operation. */
1167 test_many_rules_in_n_tables(int n_tables)
1169 enum { MAX_RULES = 50 };
1174 assert(n_tables < 10);
/* Choose n_tables pairwise-distinct wildcard masks. */
1175 for (i = 0; i < n_tables; i++) {
1177 wcfs[i] = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
1178 } while (array_contains(wcfs, i, wcfs[i]));
1181 for (iteration = 0; iteration < 30; iteration++) {
1182 int priorities[MAX_RULES];
1183 struct classifier cls;
1185 long long version = CLS_MIN_VERSION;
1186 size_t n_invisible_rules = 0;
1187 struct ovs_list list = OVS_LIST_INITIALIZER(&list);
/* Deterministic per-iteration seed for reproducibility. */
1189 random_set_seed(iteration + 1);
1190 for (i = 0; i < MAX_RULES; i++) {
1191 priorities[i] = (i * 129) & INT_MAX;
1193 shuffle(priorities, ARRAY_SIZE(priorities));
1195 classifier_init(&cls, flow_segment_u64s);
1196 set_prefix_fields(&cls);
1199 for (i = 0; i < MAX_RULES; i++) {
1200 struct test_rule *rule;
1201 int priority = priorities[i];
1202 int wcf = wcfs[random_range(n_tables)];
1203 int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
1204 rule = make_rule(wcf, priority, value_pat, version);
1205 tcls_insert(&tcls, rule);
1206 classifier_insert(&cls, &rule->cls_rule, NULL, 0);
1207 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1208 check_tables(&cls, -1, i + 1, -1, n_invisible_rules, version);
/* Delete-by-target loop; 'n_invisible_rules' tracks rules hidden by
 * versioning but not yet physically removed. */
1211 while (classifier_count(&cls) - n_invisible_rules > 0) {
1212 struct test_rule *target;
1213 struct test_rule *rule;
1214 size_t n_removable_rules = 0;
1216 target = clone_rule(tcls.rules[random_range(tcls.n_rules)]);
1218 CLS_FOR_EACH_TARGET (rule, cls_rule, &cls, &target->cls_rule) {
1220 /* Mark the rule for removal after the current version. */
1221 cls_rule_make_invisible_in_version(&rule->cls_rule,
1223 n_removable_rules++;
1224 compare_classifiers(&cls, n_invisible_rules, version,
1226 check_tables(&cls, -1, -1, -1, n_invisible_rules, version);
1228 list_push_back(&list, &rule->list_node);
1229 } else if (classifier_remove(&cls, &rule->cls_rule)) {
1230 ovsrcu_postpone(free_rule, rule);
1235 n_invisible_rules += n_removable_rules;
1237 tcls_delete_matches(&tcls, &target->cls_rule);
1240 compare_classifiers(&cls, n_invisible_rules, version, &tcls);
1241 check_tables(&cls, -1, -1, -1, n_invisible_rules, version);
1244 struct test_rule *rule;
1246 /* Remove rules that are no longer visible. */
1247 LIST_FOR_EACH_POP (rule, list_node, &list) {
1248 classifier_remove(&cls, &rule->cls_rule);
1249 n_invisible_rules--;
1251 compare_classifiers(&cls, n_invisible_rules, version,
1253 check_tables(&cls, -1, -1, -1, n_invisible_rules, version);
1257 destroy_classifier(&cls);
1258 tcls_destroy(&tcls);
/* Command wrappers at fixed table counts. */
1263 test_many_rules_in_two_tables(struct ovs_cmdl_context *ctx OVS_UNUSED)
1265 test_many_rules_in_n_tables(2);
1269 test_many_rules_in_five_tables(struct ovs_cmdl_context *ctx OVS_UNUSED)
1271 test_many_rules_in_n_tables(5);
1274 /* Miniflow tests. */
1279 static const uint32_t values[] =
1280 { 0xffffffff, 0xaaaaaaaa, 0x55555555, 0x80000000,
1281 0x00000001, 0xface0000, 0x00d00d1e, 0xdeadbeef };
1283 return values[random_range(ARRAY_SIZE(values))];
1287 choose(unsigned int n, unsigned int *idxp)
1297 #define FLOW_U32S (FLOW_U64S * 2)
1300 init_consecutive_values(int n_consecutive, struct flow *flow,
1303 uint32_t *flow_u32 = (uint32_t *) flow;
1305 if (choose(FLOW_U32S - n_consecutive + 1, idxp)) {
1308 for (i = 0; i < n_consecutive; i++) {
1309 flow_u32[*idxp + i] = random_value();
/* next_random_flow(): enumerates a deterministic family of test flows, one
 * per 'idx' value, covering several structurally distinct shapes: the
 * all-zero flow, flows with short (1..4 words) and long (FLOW_U32S-4 ..
 * FLOW_U32S words) runs of consecutive nonzero 32-bit words, flows with
 * exactly two nonconsecutive nonzero words, and finally 16 flows with at
 * least 3 randomly placed nonzero words.  Callers iterate idx = 0, 1, 2, ...
 * until the enumeration is exhausted.
 * (Several structural lines -- braces and return statements -- are elided
 * from this excerpt.) */
1318 next_random_flow(struct flow *flow, unsigned int idx)
1320 uint32_t *flow_u32 = (uint32_t *) flow;
/* Start from the all-zero flow; the selected case sets only some words. */
1323 memset(flow, 0, sizeof *flow);
/* Case: the single all-zero flow. */
1326 if (choose(1, &idx)) {
1330 /* All flows with a small number of consecutive nonzero values. */
1331 for (i = 1; i <= 4; i++) {
1332 if (init_consecutive_values(i, flow, &idx)) {
1337 /* All flows with a large number of consecutive nonzero values. */
1338 for (i = FLOW_U32S - 4; i <= FLOW_U32S; i++) {
1339 if (init_consecutive_values(i, flow, &idx)) {
1344 /* All flows with exactly two nonconsecutive nonzero values. */
1345 if (choose((FLOW_U32S - 1) * (FLOW_U32S - 2) / 2, &idx)) {
1348 for (ofs1 = 0; ofs1 < FLOW_U32S - 2; ofs1++) {
/* ofs2 starts at ofs1 + 2 so the two nonzero words are never adjacent. */
1351 for (ofs2 = ofs1 + 2; ofs2 < FLOW_U32S; ofs2++) {
1352 if (choose(1, &idx)) {
1353 flow_u32[ofs1] = random_value();
1354 flow_u32[ofs2] = random_value();
1362 /* 16 randomly chosen flows with N >= 3 nonzero values. */
1363 if (choose(16 * (FLOW_U32S - 4), &idx)) {
/* 16 sub-cases per n, so idx / 16 selects n in [3, FLOW_U32S - 2). */
1364 int n = idx / 16 + 3;
1367 for (i = 0; i < n; i++) {
1368 flow_u32[i] = random_value();
/* Scatter the n nonzero words to random positions. */
1370 shuffle_u32s(flow_u32, FLOW_U32S);
/* Overwrites '*flow' with a flow picked uniformly at random from the whole
 * sequence that next_random_flow() can enumerate.  'max' caches the length
 * of that sequence; being static, it is presumably computed only on the
 * first call (the guard around the counting loop is elided here -- confirm
 * against the full file). */
1379 any_random_flow(struct flow *flow)
1381 static unsigned int max;
/* Count how many distinct flows the enumeration yields. */
1383 while (next_random_flow(flow, max)) {
/* Then generate one of them, chosen uniformly. */
1388 next_random_flow(flow, random_range(max));
/* For each 32-bit word of '*flow' whose corresponding word in 'mask' is
 * nonzero, draws random single-bit values until one falls inside the mask
 * word; presumably that bit is then XORed into the flow word (the flip
 * statement itself is elided from this excerpt -- confirm against the full
 * file).  Net effect: the result differs from the original flow under
 * 'mask', which the miniflow tests rely on to check negative matches. */
1392 toggle_masked_flow_bits(struct flow *flow, const struct flow_wildcards *mask)
1394 const uint32_t *mask_u32 = (const uint32_t *) &mask->masks;
1395 uint32_t *flow_u32 = (uint32_t *) flow;
1398 for (i = 0; i < FLOW_U32S; i++) {
1399 if (mask_u32[i] != 0) {
/* Rejection-sample a single bit until it lands inside the mask word. */
1403 bit = 1u << random_range(32);
1404 } while (!(bit & mask_u32[i]));
/* Clears one randomly chosen set bit from each nonzero 32-bit word of
 * '*mask', making the mask strictly more wildcarded (it matches on strictly
 * fewer bits).  Used to construct a mask that the original provably "has
 * extra" wildcards relative to. */
1411 wildcard_extra_bits(struct flow_wildcards *mask)
1413 uint32_t *mask_u32 = (uint32_t *) &mask->masks;
1416 for (i = 0; i < FLOW_U32S; i++) {
1417 if (mask_u32[i] != 0) {
/* Rejection-sample a single bit until it lands inside the mask word. */
1421 bit = 1u << random_range(32);
1422 } while (!(bit & mask_u32[i]));
/* Drop that bit from the mask. */
1423 mask_u32[i] &= ~bit;
/* Command handler: tests miniflow (the compressed representation of struct
 * flow) against the uncompressed original, across the whole deterministic
 * next_random_flow() enumeration.  Covers round-trip conversion, equality,
 * cloning, hashing, and masked (minimask) comparison and hashing, including
 * negative cases built by flipping masked bits. */
1429 test_miniflow(struct ovs_cmdl_context *ctx OVS_UNUSED)
/* Fixed seed keeps the "random" values reproducible across runs. */
1434 random_set_seed(0xb3faca38);
1435 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1436 const uint64_t *flow_u64 = (const uint64_t *) &flow;
1437 struct miniflow miniflow, miniflow2, miniflow3;
1438 struct flow flow2, flow3;
1439 struct flow_wildcards mask;
1440 struct minimask minimask;
1443 /* Convert flow to miniflow. */
1444 miniflow_init(&miniflow, &flow);
1446 /* Check that the flow equals its miniflow. */
1447 assert(miniflow_get_vid(&miniflow) == vlan_tci_to_vid(flow.vlan_tci));
1448 for (i = 0; i < FLOW_U64S; i++) {
1449 assert(miniflow_get(&miniflow, i) == flow_u64[i]);
1452 /* Check that the miniflow equals itself. */
1453 assert(miniflow_equal(&miniflow, &miniflow));
1455 /* Convert miniflow back to flow and verify that it's the same. */
1456 miniflow_expand(&miniflow, &flow2);
1457 assert(flow_equal(&flow, &flow2));
1459 /* Check that copying a miniflow works properly. */
1460 miniflow_clone(&miniflow2, &miniflow);
1461 assert(miniflow_equal(&miniflow, &miniflow2));
1462 assert(miniflow_hash(&miniflow, 0) == miniflow_hash(&miniflow2, 0));
1463 miniflow_expand(&miniflow2, &flow3);
1464 assert(flow_equal(&flow, &flow3));
1466 /* Check that masked matches work as expected for identical flows and
/* Draw a non-catch-all random mask (the enclosing do/while header is
 * elided from this excerpt). */
1469 next_random_flow(&mask.masks, 1);
1470 } while (flow_wildcards_is_catchall(&mask));
1471 minimask_init(&minimask, &mask);
1472 assert(minimask_is_catchall(&minimask)
1473 == flow_wildcards_is_catchall(&mask));
1474 assert(miniflow_equal_in_minimask(&miniflow, &miniflow2, &minimask));
1475 assert(miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
/* Masked hash of the miniflow must agree with that of the full flow. */
1476 assert(miniflow_hash_in_minimask(&miniflow, &minimask, 0x12345678) ==
1477 flow_hash_in_minimask(&flow, &minimask, 0x12345678));
1479 /* Check that masked matches work as expected for differing flows and
1481 toggle_masked_flow_bits(&flow2, &mask);
1482 assert(!miniflow_equal_flow_in_minimask(&miniflow, &flow2, &minimask));
1483 miniflow_init(&miniflow3, &flow2);
1484 assert(!miniflow_equal_in_minimask(&miniflow, &miniflow3, &minimask));
/* Release per-iteration miniflow/minimask storage (keeps the test
 * leak-free under valgrind, per the file header's promise). */
1487 miniflow_destroy(&miniflow);
1488 miniflow_destroy(&miniflow2);
1489 miniflow_destroy(&miniflow3);
1490 minimask_destroy(&minimask);
/* Command handler: tests minimask_has_extra().  Judging from the assertions
 * below, minimask_has_extra(a, b) reports whether 'b' matches on at least
 * one bit that 'a' wildcards.  Checks that a mask never has extra relative
 * to itself, that every non-catch-all mask has extra relative to the
 * catch-all mask, and that dropping a bit from a mask (wildcard_extra_bits)
 * creates a strictly-more-wildcarded mask with the expected asymmetry. */
1495 test_minimask_has_extra(struct ovs_cmdl_context *ctx OVS_UNUSED)
1497 struct flow_wildcards catchall;
1498 struct minimask minicatchall;
/* The catch-all mask (matches-on-nothing) is the baseline for "extra". */
1502 flow_wildcards_init_catchall(&catchall);
1503 minimask_init(&minicatchall, &catchall);
1504 assert(minimask_is_catchall(&minicatchall));
/* Fixed seed keeps the "random" values reproducible across runs. */
1506 random_set_seed(0x2ec7905b);
1507 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1508 struct flow_wildcards mask;
1509 struct minimask minimask;
1512 minimask_init(&minimask, &mask);
/* A mask has no extra wildcards relative to itself. */
1513 assert(!minimask_has_extra(&minimask, &minimask));
1514 assert(minimask_has_extra(&minicatchall, &minimask)
1515 == !minimask_is_catchall(&minimask));
1516 if (!minimask_is_catchall(&minimask)) {
1517 struct minimask minimask2;
/* minimask2 wildcards strictly more bits than minimask. */
1519 wildcard_extra_bits(&mask);
1520 minimask_init(&minimask2, &mask);
1521 assert(minimask_has_extra(&minimask2, &minimask));
1522 assert(!minimask_has_extra(&minimask, &minimask2));
1523 minimask_destroy(&minimask2);
1526 minimask_destroy(&minimask);
1529 minimask_destroy(&minicatchall);
/* Command handler: tests minimask_combine().  The combination of two
 * minimasks must equal the bitwise AND of the corresponding expanded
 * flow_wildcards, and combining any mask with the catch-all mask must yield
 * a catch-all result.  'storage' provides the caller-supplied buffer that
 * minimask_combine() builds its result in. */
1533 test_minimask_combine(struct ovs_cmdl_context *ctx OVS_UNUSED)
1535 struct flow_wildcards catchall;
1536 struct minimask minicatchall;
1540 flow_wildcards_init_catchall(&catchall);
1541 minimask_init(&minicatchall, &catchall);
1542 assert(minimask_is_catchall(&minicatchall));
/* Fixed seed keeps the "random" values reproducible across runs. */
1544 random_set_seed(0x181bf0cd);
1545 for (idx = 0; next_random_flow(&flow, idx); idx++) {
1546 struct minimask minimask, minimask2, minicombined;
1547 struct flow_wildcards mask, mask2, combined, combined2;
1548 uint64_t storage[FLOW_U64S];
1552 minimask_init(&minimask, &mask);
/* Anything combined with catch-all must be catch-all. */
1554 minimask_combine(&minicombined, &minimask, &minicatchall, storage);
1555 assert(minimask_is_catchall(&minicombined));
/* Build a second, independent random mask. */
1557 any_random_flow(&flow2);
1558 mask2.masks = flow2;
1559 minimask_init(&minimask2, &mask2);
/* Combined minimask must expand to the AND of the two full masks. */
1561 minimask_combine(&minicombined, &minimask, &minimask2, storage);
1562 flow_wildcards_and(&combined, &mask, &mask2);
1563 minimask_expand(&minicombined, &combined2);
1564 assert(flow_wildcards_equal(&combined, &combined2));
1566 minimask_destroy(&minimask);
1567 minimask_destroy(&minimask2);
1570 minimask_destroy(&minicatchall);
/* Dispatch table mapping sub-command names to test handlers, terminated by
 * an all-NULL sentinel entry.  Per struct ovs_cmdl_command, the fields are
 * presumably {name, usage, min_args, max_args, handler} -- the NULL second
 * field means "no usage string", and the two integers bound the argument
 * count (only the many-rules-in-one-* tests accept an optional argument).
 * Confirm field order against ovs-cmdl.h. */
1573 static const struct ovs_cmdl_command commands[] = {
1574 /* Classifier tests. */
1575 {"empty", NULL, 0, 0, test_empty},
1576 {"destroy-null", NULL, 0, 0, test_destroy_null},
1577 {"single-rule", NULL, 0, 0, test_single_rule},
1578 {"rule-replacement", NULL, 0, 0, test_rule_replacement},
1579 {"many-rules-in-one-list", NULL, 0, 1, test_many_rules_in_one_list},
1580 {"many-rules-in-one-table", NULL, 0, 1, test_many_rules_in_one_table},
1581 {"many-rules-in-two-tables", NULL, 0, 0, test_many_rules_in_two_tables},
1582 {"many-rules-in-five-tables", NULL, 0, 0, test_many_rules_in_five_tables},
1584 /* Miniflow and minimask tests. */
1585 {"miniflow", NULL, 0, 0, test_miniflow},
1586 {"minimask_has_extra", NULL, 0, 0, test_minimask_has_extra},
1587 {"minimask_combine", NULL, 0, 0, test_minimask_combine},
/* Sentinel: marks the end of the table for ovs_cmdl_run_command(). */
1589 {NULL, NULL, 0, 0, NULL},
/* Entry point for the test-classifier sub-program: sets the program name,
 * consumes an optional leading "--versioned" flag (which presumably sets the
 * file-scope 'versioned' flag to enable multi-version rule testing -- the
 * assignment is elided from this excerpt), then dispatches the remaining
 * arguments through the 'commands' table. */
1593 test_classifier_main(int argc, char *argv[])
1595 struct ovs_cmdl_context ctx = {
1599 set_program_name(argv[0]);
1601 if (argc > 1 && !strcmp(argv[1], "--versioned")) {
1608 ovs_cmdl_run_command(&ctx, commands);
/* Registers this entry point under "test-classifier" with the ovstest
 * multi-test driver. */
1611 OVSTEST_REGISTER("test-classifier", test_classifier_main);