+ struct cls_aux *aux = aux_;
+ cls_version_t version = CLS_MIN_VERSION;
+ int hits = 0, old_hits;
+ int misses = 0, old_misses;
+ size_t i;
+
+ random_set_seed(1);
+
+ for (i = 0; i < n_lookups; i++) {
+ const struct cls_rule *cr;
+ struct flow_wildcards wc;
+ unsigned int x;
+
+ x = random_range(aux->n_lookup_flows);
+
+ if (aux->use_wc) {
+ flow_wildcards_init_catchall(&wc);
+ cr = classifier_lookup(aux->cls, version, &aux->lookup_flows[x],
+ &wc);
+ } else {
+ cr = classifier_lookup(aux->cls, version, &aux->lookup_flows[x],
+ NULL);
+ }
+ if (cr) {
+ hits++;
+ } else {
+ misses++;
+ }
+ }
+ atomic_add(&aux->hits, hits, &old_hits);
+ atomic_add(&aux->misses, misses, &old_misses);
+ return NULL;
+}
+
+/* Benchmark classification. */
+static void
+benchmark(bool use_wc)
+{
+ struct classifier cls;
+ cls_version_t version = CLS_MIN_VERSION;
+ struct cls_aux aux;
+ int *wcfs = xmalloc(n_tables * sizeof *wcfs);
+ int *priorities = xmalloc(n_priorities * sizeof *priorities);
+ struct timeval start;
+ pthread_t *threads;
+ int i;
+
+ fatal_signal_init();
+
+ random_set_seed(1);
+
+ for (i = 0; i < n_tables; i++) {
+ do {
+ wcfs[i] = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
+ } while (array_contains(wcfs, i, wcfs[i]));
+ }
+
+ for (i = 0; i < n_priorities; i++) {
+ priorities[i] = (i * 129) & INT_MAX;
+ }
+ shuffle(priorities, n_priorities);
+
+ classifier_init(&cls, flow_segment_u64s);
+ set_prefix_fields(&cls);
+
+ /* Create lookup flows. */
+ aux.use_wc = use_wc;
+ aux.cls = &cls;
+ aux.n_lookup_flows = 2 * N_FLOW_VALUES;
+ aux.lookup_flows = xzalloc(aux.n_lookup_flows * sizeof *aux.lookup_flows);
+ for (i = 0; i < aux.n_lookup_flows; i++) {
+ struct flow *flow = &aux.lookup_flows[i];
+ unsigned int x;
+
+ x = random_range(N_FLOW_VALUES);
+ flow->nw_src = nw_src_values[get_value(&x, N_NW_SRC_VALUES)];
+ flow->nw_dst = nw_dst_values[get_value(&x, N_NW_DST_VALUES)];
+ flow->tunnel.tun_id = tun_id_values[get_value(&x, N_TUN_ID_VALUES)];
+ flow->metadata = metadata_values[get_value(&x, N_METADATA_VALUES)];
+ flow->in_port.ofp_port = in_port_values[get_value(&x,
+ N_IN_PORT_VALUES)];
+ flow->vlan_tci = vlan_tci_values[get_value(&x, N_VLAN_TCI_VALUES)];
+ flow->dl_type = dl_type_values[get_value(&x, N_DL_TYPE_VALUES)];
+ flow->tp_src = tp_src_values[get_value(&x, N_TP_SRC_VALUES)];
+ flow->tp_dst = tp_dst_values[get_value(&x, N_TP_DST_VALUES)];
+ flow->dl_src = dl_src_values[get_value(&x, N_DL_SRC_VALUES)];
+ flow->dl_dst = dl_dst_values[get_value(&x, N_DL_DST_VALUES)];
+ flow->nw_proto = nw_proto_values[get_value(&x, N_NW_PROTO_VALUES)];
+ flow->nw_tos = nw_dscp_values[get_value(&x, N_NW_DSCP_VALUES)];
+ }
+ atomic_init(&aux.hits, 0);
+ atomic_init(&aux.misses, 0);
+
+ /* Rule insertion. */
+ for (i = 0; i < n_rules; i++) {
+ struct test_rule *rule;
+ const struct cls_rule *old_cr;
+
+ int priority = priorities[random_range(n_priorities)];
+ int wcf = wcfs[random_range(n_tables)];
+ int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
+
+ rule = make_rule(wcf, priority, value_pat);
+ old_cr = classifier_find_rule_exactly(&cls, &rule->cls_rule, version);
+ if (!old_cr) {
+ classifier_insert(&cls, &rule->cls_rule, version, NULL, 0);
+ } else {
+ free_rule(rule);
+ }
+ }
+
+ /* Lookup. */
+ xgettimeofday(&start);
+ threads = xmalloc(n_threads * sizeof *threads);
+ for (i = 0; i < n_threads; i++) {
+ threads[i] = ovs_thread_create("lookups", lookup_classifier, &aux);
+ }
+ for (i = 0; i < n_threads; i++) {
+ xpthread_join(threads[i], NULL);
+ }
+
+ int elapsed_msec = elapsed(&start);
+
+ free(threads);
+
+ int hits, misses;
+ atomic_read(&aux.hits, &hits);
+ atomic_read(&aux.misses, &misses);
+ printf("hits: %d, misses: %d\n", hits, misses);
+
+ printf("classifier lookups: %5d ms, %"PRId64" lookups/sec\n",
+ elapsed_msec,
+ (((uint64_t)hits + misses) * 1000) / elapsed_msec);
+
+ destroy_classifier(&cls);
+ free(aux.lookup_flows);
+ free(priorities);
+ free(wcfs);
+}
+\f
+/* Miniflow tests. */
+
/* Returns one of a fixed set of "interesting" 32-bit bit patterns, chosen
 * uniformly at random: all-ones, alternating bits, single extreme bits, and
 * a few arbitrary constants. */
static uint32_t
random_value(void)
{
    static const uint32_t interesting[] = {
        0xffffffff, 0xaaaaaaaa, 0x55555555, 0x80000000,
        0x00000001, 0xface0000, 0x00d00d1e, 0xdeadbeef,
    };
    unsigned int choice = random_range(ARRAY_SIZE(interesting));

    return interesting[choice];
}
+
/* Consumes part of the selector '*idxp': returns true, leaving '*idxp'
 * unchanged, if '*idxp' falls within the first 'n' choices; otherwise skips
 * past those choices by subtracting 'n' from '*idxp' and returns false.
 * This lets callers enumerate a sequence of choice groups with a single
 * running index. */
static bool
choose(unsigned int n, unsigned int *idxp)
{
    if (*idxp >= n) {
        *idxp -= n;
        return false;
    }
    return true;
}
+
+#define FLOW_U32S (FLOW_U64S * 2)
+
+static bool
+init_consecutive_values(int n_consecutive, struct flow *flow,
+ unsigned int *idxp)
+{
+ uint32_t *flow_u32 = (uint32_t *) flow;
+
+ if (choose(FLOW_U32S - n_consecutive + 1, idxp)) {
+ int i;
+
+ for (i = 0; i < n_consecutive; i++) {
+ flow_u32[*idxp + i] = random_value();
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
/* Initializes 'flow' to the 'idx'th flow in a deterministic enumeration of
 * test flows and returns true, or returns false when 'idx' is past the end
 * of the enumeration.  The enumeration covers, in order: the all-zero flow;
 * all flows with a short run (1..4) of consecutive nonzero u32 values; all
 * flows with a long run (FLOW_U32S-4..FLOW_U32S); all flows with exactly two
 * nonconsecutive nonzero values; and 16 random flows with >= 3 nonzero
 * values.  The choose() calls consume 'idx' group by group, so the order of
 * the checks below is significant. */
static bool
next_random_flow(struct flow *flow, unsigned int idx)
{
    uint32_t *flow_u32 = (uint32_t *) flow;
    int i;

    memset(flow, 0, sizeof *flow);

    /* Empty flow. */
    if (choose(1, &idx)) {
        return true;
    }

    /* All flows with a small number of consecutive nonzero values. */
    for (i = 1; i <= 4; i++) {
        if (init_consecutive_values(i, flow, &idx)) {
            return true;
        }
    }

    /* All flows with a large number of consecutive nonzero values. */
    for (i = FLOW_U32S - 4; i <= FLOW_U32S; i++) {
        if (init_consecutive_values(i, flow, &idx)) {
            return true;
        }
    }

    /* All flows with exactly two nonconsecutive nonzero values.  The group
     * size is the number of (ofs1, ofs2) pairs with ofs2 >= ofs1 + 2. */
    if (choose((FLOW_U32S - 1) * (FLOW_U32S - 2) / 2, &idx)) {
        int ofs1;

        for (ofs1 = 0; ofs1 < FLOW_U32S - 2; ofs1++) {
            int ofs2;

            for (ofs2 = ofs1 + 2; ofs2 < FLOW_U32S; ofs2++) {
                if (choose(1, &idx)) {
                    flow_u32[ofs1] = random_value();
                    flow_u32[ofs2] = random_value();
                    return true;
                }
            }
        }
        OVS_NOT_REACHED();
    }

    /* 16 randomly chosen flows with N >= 3 nonzero values. */
    if (choose(16 * (FLOW_U32S - 4), &idx)) {
        int n = idx / 16 + 3;
        int i;

        /* Set the first 'n' words, then shuffle so the nonzero values land
         * at random offsets. */
        for (i = 0; i < n; i++) {
            flow_u32[i] = random_value();
        }
        shuffle_u32s(flow_u32, FLOW_U32S);

        return true;
    }

    return false;
}
+
/* Initializes 'flow' to a flow chosen uniformly at random from the full
 * enumeration produced by next_random_flow(). */
static void
any_random_flow(struct flow *flow)
{
    static unsigned int n_flows;

    /* Lazily count how many flows next_random_flow() can enumerate; the
     * count is cached across calls. */
    if (!n_flows) {
        while (next_random_flow(flow, n_flows)) {
            n_flows++;
        }
    }

    next_random_flow(flow, random_range(n_flows));
}
+
+static void
+toggle_masked_flow_bits(struct flow *flow, const struct flow_wildcards *mask)
+{
+ const uint32_t *mask_u32 = (const uint32_t *) &mask->masks;
+ uint32_t *flow_u32 = (uint32_t *) flow;
+ int i;
+
+ for (i = 0; i < FLOW_U32S; i++) {
+ if (mask_u32[i] != 0) {
+ uint32_t bit;
+
+ do {
+ bit = 1u << random_range(32);
+ } while (!(bit & mask_u32[i]));
+ flow_u32[i] ^= bit;
+ }
+ }
+}
+
+static void
+wildcard_extra_bits(struct flow_wildcards *mask)
+{
+ uint32_t *mask_u32 = (uint32_t *) &mask->masks;
+ int i;
+
+ for (i = 0; i < FLOW_U32S; i++) {
+ if (mask_u32[i] != 0) {
+ uint32_t bit;
+
+ do {
+ bit = 1u << random_range(32);
+ } while (!(bit & mask_u32[i]));
+ mask_u32[i] &= ~bit;
+ }
+ }
+}
+
/* Returns a copy of 'src'.  The caller must eventually free the returned
 * miniflow with free(). */
static struct miniflow *
miniflow_clone__(const struct miniflow *src)
{
    struct miniflow *dst;
    size_t data_size;

    /* miniflow_alloc() sizes the allocation from 'src' and returns the data
     * size in bytes; miniflow_clone() wants the size in 64-bit units. */
    data_size = miniflow_alloc(&dst, 1, src);
    miniflow_clone(dst, src, data_size / sizeof(uint64_t));
    return dst;
}
+
/* Returns a hash value for 'flow', given 'basis'.
 *
 * Hashes only the nonzero values in 'flow' together with a map of which
 * indices held them, so miniflows that differ only in explicitly stored
 * zeros hash identically. */
static inline uint32_t
miniflow_hash__(const struct miniflow *flow, uint32_t basis)
{
    const uint64_t *p = miniflow_get_values(flow);
    size_t n_values = miniflow_n_values(flow);
    struct flowmap hash_map = FLOWMAP_EMPTY_INITIALIZER;
    uint32_t hash = basis;
    size_t idx;

    /* Mix in each nonzero value and record its index in 'hash_map'. */
    FLOWMAP_FOR_EACH_INDEX(idx, flow->map) {
        uint64_t value = *p++;

        if (value) {
            hash = hash_add64(hash, value);
            flowmap_set(&hash_map, idx, 1);
        }
    }
    /* Mix in the map of indices that contributed nonzero values. */
    map_t map;
    FLOWMAP_FOR_EACH_MAP (map, hash_map) {
        hash = hash_add64(hash, map);
    }

    return hash_finish(hash, n_values);
}
+
/* Tests miniflow round-tripping, cloning, hashing, and masked comparisons
 * against the equivalent full-flow operations, over the deterministic flow
 * enumeration from next_random_flow(). */
static void
test_miniflow(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct flow flow;
    unsigned int idx;

    random_set_seed(0xb3faca38);
    for (idx = 0; next_random_flow(&flow, idx); idx++) {
        const uint64_t *flow_u64 = (const uint64_t *) &flow;
        struct miniflow *miniflow, *miniflow2, *miniflow3;
        struct flow flow2, flow3;
        struct flow_wildcards mask;
        struct minimask *minimask;
        int i;

        /* Convert flow to miniflow. */
        miniflow = miniflow_create(&flow);

        /* Check that the flow equals its miniflow. */
        assert(miniflow_get_vid(miniflow) == vlan_tci_to_vid(flow.vlan_tci));
        for (i = 0; i < FLOW_U64S; i++) {
            assert(miniflow_get(miniflow, i) == flow_u64[i]);
        }

        /* Check that the miniflow equals itself. */
        assert(miniflow_equal(miniflow, miniflow));

        /* Convert miniflow back to flow and verify that it's the same. */
        miniflow_expand(miniflow, &flow2);
        assert(flow_equal(&flow, &flow2));

        /* Check that copying a miniflow works properly. */
        miniflow2 = miniflow_clone__(miniflow);
        assert(miniflow_equal(miniflow, miniflow2));
        assert(miniflow_hash__(miniflow, 0) == miniflow_hash__(miniflow2, 0));
        miniflow_expand(miniflow2, &flow3);
        assert(flow_equal(&flow, &flow3));

        /* Check that masked matches work as expected for identical flows and
         * miniflows.  The mask is drawn from the same enumeration, skipping
         * the catchall (all-wildcarded) mask since it matches trivially. */
        do {
            next_random_flow(&mask.masks, 1);
        } while (flow_wildcards_is_catchall(&mask));
        minimask = minimask_create(&mask);
        assert(minimask_is_catchall(minimask)
               == flow_wildcards_is_catchall(&mask));
        assert(miniflow_equal_in_minimask(miniflow, miniflow2, minimask));
        assert(miniflow_equal_flow_in_minimask(miniflow, &flow2, minimask));
        assert(miniflow_hash_in_minimask(miniflow, minimask, 0x12345678) ==
               flow_hash_in_minimask(&flow, minimask, 0x12345678));
        assert(minimask_hash(minimask, 0) ==
               miniflow_hash__(&minimask->masks, 0));

        /* Check that masked matches work as expected for differing flows and
         * miniflows: toggling a masked bit must break equality. */
        toggle_masked_flow_bits(&flow2, &mask);
        assert(!miniflow_equal_flow_in_minimask(miniflow, &flow2, minimask));
        miniflow3 = miniflow_create(&flow2);
        assert(!miniflow_equal_in_minimask(miniflow, miniflow3, minimask));

        /* Clean up. */
        free(miniflow);
        free(miniflow2);
        free(miniflow3);
        free(minimask);
    }
}
+
/* Tests minimask_has_extra(): a mask never has extra bits relative to
 * itself, the catchall mask has extra bits relative to any non-catchall
 * mask, and removing a bit from a mask makes the original have extra bits
 * relative to the weakened copy (but not vice versa). */
static void
test_minimask_has_extra(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct flow_wildcards catchall;
    struct minimask *minicatchall;
    struct flow flow;
    unsigned int idx;

    flow_wildcards_init_catchall(&catchall);
    minicatchall = minimask_create(&catchall);
    assert(minimask_is_catchall(minicatchall));

    random_set_seed(0x2ec7905b);
    for (idx = 0; next_random_flow(&flow, idx); idx++) {
        struct flow_wildcards mask;
        struct minimask *minimask;

        /* Reinterpret the enumerated flow's bits as a mask. */
        mask.masks = flow;
        minimask = minimask_create(&mask);
        assert(!minimask_has_extra(minimask, minimask));
        assert(minimask_has_extra(minicatchall, minimask)
               == !minimask_is_catchall(minimask));
        if (!minimask_is_catchall(minimask)) {
            struct minimask *minimask2;

            /* Weaken the mask by one bit per word and check asymmetry. */
            wildcard_extra_bits(&mask);
            minimask2 = minimask_create(&mask);
            assert(minimask_has_extra(minimask2, minimask));
            assert(!minimask_has_extra(minimask, minimask2));
            free(minimask2);
        }

        free(minimask);
    }

    free(minicatchall);
}
+
/* Tests minimask_combine(): combining with the catchall mask yields the
 * catchall, and combining two arbitrary masks agrees with the full-flow
 * flow_wildcards_and() of the same masks. */
static void
test_minimask_combine(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    struct flow_wildcards catchall;
    struct minimask *minicatchall;
    struct flow flow;
    unsigned int idx;

    flow_wildcards_init_catchall(&catchall);
    minicatchall = minimask_create(&catchall);
    assert(minimask_is_catchall(minicatchall));

    random_set_seed(0x181bf0cd);
    for (idx = 0; next_random_flow(&flow, idx); idx++) {
        struct minimask *minimask, *minimask2;
        struct flow_wildcards mask, mask2, combined, combined2;
        /* minimask_combine() writes the variable-length miniflow data into
         * caller-provided storage; this struct places worst-case storage
         * immediately after the minimask header. */
        struct {
            struct minimask minicombined;
            uint64_t storage[FLOW_U64S];
        } m;
        struct flow flow2;

        /* Reinterpret the enumerated flow's bits as a mask. */
        mask.masks = flow;
        minimask = minimask_create(&mask);

        /* Combining with the catchall mask must give the catchall. */
        minimask_combine(&m.minicombined, minimask, minicatchall, m.storage);
        assert(minimask_is_catchall(&m.minicombined));

        any_random_flow(&flow2);
        mask2.masks = flow2;
        minimask2 = minimask_create(&mask2);

        /* The minimask combination must match the full-flow AND. */
        minimask_combine(&m.minicombined, minimask, minimask2, m.storage);
        flow_wildcards_and(&combined, &mask, &mask2);
        minimask_expand(&m.minicombined, &combined2);
        assert(flow_wildcards_equal(&combined, &combined2));

        free(minimask);
        free(minimask2);
    }

    free(minicatchall);
}
+\f
+
+static void help(struct ovs_cmdl_context *ctx);
+
/* Command table mapping test names to handler functions.  The two integer
 * fields are presumably the minimum and maximum argument counts accepted by
 * each command (e.g. "benchmark" takes up to 5) -- NOTE(review): confirm
 * against struct ovs_cmdl_command's declaration. */
static const struct ovs_cmdl_command commands[] = {
    /* Classifier tests. */
    {"empty", NULL, 0, 0, test_empty},
    {"destroy-null", NULL, 0, 0, test_destroy_null},
    {"single-rule", NULL, 0, 0, test_single_rule},
    {"rule-replacement", NULL, 0, 0, test_rule_replacement},
    {"many-rules-in-one-list", NULL, 0, 1, test_many_rules_in_one_list},
    {"many-rules-in-one-table", NULL, 0, 1, test_many_rules_in_one_table},
    {"many-rules-in-two-tables", NULL, 0, 0, test_many_rules_in_two_tables},
    {"many-rules-in-five-tables", NULL, 0, 0, test_many_rules_in_five_tables},
    {"benchmark", NULL, 0, 5, run_benchmarks},

    /* Miniflow and minimask tests. */
    {"miniflow", NULL, 0, 0, test_miniflow},
    {"minimask_has_extra", NULL, 0, 0, test_minimask_has_extra},
    {"minimask_combine", NULL, 0, 0, test_minimask_combine},

    {"--help", NULL, 0, 0, help},
    {NULL, NULL, 0, 0, NULL},
};
+
/* Prints a usage message listing all test names from 'commands',
 * word-wrapped to roughly 'linesize' columns.  Commands whose names start
 * with '-' (internal entries such as "--help") are omitted. */
static void
help(struct ovs_cmdl_context *ctx OVS_UNUSED)
{
    const struct ovs_cmdl_command *p;
    struct ds test_names = DS_EMPTY_INITIALIZER;
    const int linesize = 80;

    printf("usage: ovstest %s TEST [TESTARGS]\n"
           "where TEST is one of the following:\n\n",
           program_name);

    for (p = commands; p->name != NULL; p++) {
        if (*p->name != '-') { /* Skip internal commands */
            /* If the next name would overflow the line, flush the buffered
             * names first; trimming one byte drops the trailing space of
             * the last ", " separator. */
            if (test_names.length > 1
                && test_names.length + strlen(p->name) + 1 >= linesize) {
                test_names.length -= 1;
                printf ("%s\n", ds_cstr(&test_names));
                ds_clear(&test_names);
            }
            ds_put_format(&test_names, "%s, ", p->name);
        }
    }
    /* Flush whatever remains, trimming the final ", " separator. */
    if (test_names.length > 2) {
        test_names.length -= 2;
        printf("%s\n", ds_cstr(&test_names));
    }
    ds_destroy(&test_names);
}
+
+static void
+test_classifier_main(int argc, char *argv[])
+{
+ struct ovs_cmdl_context ctx = {
+ .argc = argc - 1,
+ .argv = argv + 1,
+ };