X-Git-Url: http://git.cascardo.eti.br/?a=blobdiff_plain;f=lib%2Fclassifier.h;h=889dc1e58e47c4419c48fbf6bc1d154b5814b4b4;hb=e5c0f5a4452e368bbe4f24e9f5a591965399d0fa;hp=d0a408b4a57a0aa7ddb5d44793d4cbce9a075c61;hpb=afae68b16f01559df44e3fd62f1fc020faec5731;p=cascardo%2Fovs.git diff --git a/lib/classifier.h b/lib/classifier.h index d0a408b4a..889dc1e58 100644 --- a/lib/classifier.h +++ b/lib/classifier.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc. + * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -126,9 +126,12 @@ * cls_subtable", with the other almost-identical rules chained off a linked * list inside that highest-priority rule. * + * The following sub-sections describe various optimizations over this simple + * approach. + * * * Staged Lookup (Wildcard Optimization) - * ===================================== + * ------------------------------------- * * Subtable lookup is performed in ranges defined for struct flow, starting * from metadata (registers, in_port, etc.), then L2 header, L3, and finally @@ -141,7 +144,7 @@ * * * Prefix Tracking (Wildcard Optimization) - * ======================================= + * --------------------------------------- * * Classifier uses prefix trees ("tries") for tracking the used * address space, enabling skipping classifier tables containing @@ -171,7 +174,7 @@ * * * Partitioning (Lookup Time and Wildcard Optimization) - * ==================================================== + * ---------------------------------------------------- * * Suppose that a given classifier is being used to handle multiple stages in a * pipeline using "resubmit", with metadata (that is, the OpenFlow 1.1+ field @@ -207,156 +210,267 @@ * Each eliminated subtable lookup also reduces the amount of un-wildcarding. 
 *
 *
+ * Classifier Versioning
+ * =====================
+ *
+ * Classifier lookups are always done in a specific classifier version, where
+ * a version is defined to be a natural number.
+ *
+ * When a new rule is added to a classifier, it is set to become visible in a
+ * specific version.  If the version number used at insert time is larger than
+ * any version number currently used in lookups, the new rule is said to be
+ * invisible to lookups.  This means that lookups won't find the rule, but the
+ * rule is immediately available to classifier iterations.
+ *
+ * Similarly, a rule can be marked as to be deleted in a future version.  To
+ * delete a rule in a way to not remove the rule before all ongoing lookups are
+ * finished, the rule should be made invisible in a specific version number.
+ * Then, when all the lookups use a later version number, the rule can be
+ * actually removed from the classifier.
+ *
+ * Classifiers can hold duplicate rules (rules with the same match criteria and
+ * priority) when at most one of these duplicates is visible in any given
+ * lookup version.  The caller responsible for classifier modifications must
+ * maintain this invariant.
+ *
+ * The classifier supports versioning for two reasons:
+ *
+ *     1. Support for versioned modifications makes it possible to perform an
+ *        arbitrary series of classifier changes as one atomic transaction,
+ *        where intermediate versions of the classifier are not visible to any
+ *        lookups.  Also, when a rule is added for a future version, or marked
+ *        for removal after the current version, such modifications can be
+ *        reverted without any visible effects to any of the current lookups.
+ *
+ *     2. Performance: Adding (or deleting) a large set of rules can, in
+ *        pathological cases, have a cost proportional to the number of rules
+ *        already in the classifier. 
When multiple rules are being added (or
+ *        deleted) in one go, though, this pathological case cost can be
+ *        typically avoided, as long as it is OK for any new rules to be
+ *        invisible until the batch change is complete.
+ *
+ * Note that the classifier_replace() function replaces a rule immediately, and
+ * is therefore not safe to use with versioning.  It is still available for the
+ * users that do not use versioning.
+ *
+ *
+ * Deferred Publication
+ * ====================
+ *
+ * Removing a large number of rules from the classifier can be costly, as the
+ * supporting data structures are torn down, in many cases just to be
+ * re-instantiated right after.  In the worst case, as when each rule has a
+ * different match pattern (mask), the maintenance of the match patterns can
+ * have cost O(N^2), where N is the number of different match patterns.  To
+ * alleviate this, the classifier supports a "deferred mode", in which changes
+ * in internal data structures needed for future version lookups may not be
+ * fully computed yet.  The computation is finalized when the deferred mode is
+ * turned off.
+ *
+ * This feature can be used with versioning such that all changes to future
+ * versions are made in the deferred mode.  Then, right before making the new
+ * version visible to lookups, the deferred mode is turned off so that all the
+ * data structures are ready for lookups with the new version number.
+ *
+ * To use deferred publication, first call classifier_defer().  Then, modify
+ * the classifier via additions (classifier_insert() with a specific, future
+ * version number) and deletions (use cls_rule_make_removable_after_version()).
+ * Then call classifier_publish(), and after that, announce the new version
+ * number to be used in lookups.
+ *
+ *
  * Thread-safety
  * =============
  *
- * The classifier may safely be accessed by many reader threads concurrently or
- * by a single writer. 
*/
+ * The classifier may safely be accessed by many reader threads concurrently
+ * and by a single writer, or by multiple writers when they guarantee mutually
+ * exclusive access to classifier modifications.
+ *
+ * Since the classifier rules are RCU protected, the rule destruction after
+ * removal from the classifier must be RCU postponed.  Also, when versioning is
+ * used, the rule removal itself needs to be typically RCU postponed.  In this
+ * case the rule destruction is doubly RCU postponed, i.e., the second
+ * ovsrcu_postpone() call to destruct the rule is called from the first RCU
+ * callback that removes the rule.
+ *
+ * Rules that have never been visible to lookups are an exception to the above
+ * rule.  Such rules can be removed immediately, but their destruction must
+ * still be RCU postponed, as the rule's visibility attribute may be examined
+ * parallel to the rule's removal. */
 
 #include "cmap.h"
 #include "match.h"
 #include "meta-flow.h"
+#include "pvector.h"
+#include "rculist.h"
+#include "type-props.h"
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /* Classifier internal data structures. */
-struct cls_classifier;
 struct cls_subtable;
 struct cls_match;
 
+struct trie_node;
+typedef OVSRCU_TYPE(struct trie_node *) rcu_trie_ptr;
+
+/* Prefix trie for a 'field' */
+struct cls_trie {
+    const struct mf_field *field; /* Trie field, or NULL. */
+    rcu_trie_ptr root;            /* NULL if none. */
+};
+
+typedef uint64_t cls_version_t;
+
+#define CLS_MIN_VERSION 0                  /* Default version number to use. */
+#define CLS_MAX_VERSION (TYPE_MAXIMUM(cls_version_t) - 1)
+#define CLS_NOT_REMOVED_VERSION TYPE_MAXIMUM(cls_version_t)
+
 enum {
-    CLS_MAX_TRIES = 3   /* Maximum number of prefix trees per classifier. */
+    CLS_MAX_INDICES = 3,   /* Maximum number of lookup indices per subtable. */
+    CLS_MAX_TRIES = 3      /* Maximum number of prefix trees per classifier. */
 };
 
 /* A flow classifier. */
 struct classifier {
-    struct cls_classifier *cls;
+    int n_rules;                    /* Total number of rules. 
*/ + uint8_t n_flow_segments; + uint8_t flow_segments[CLS_MAX_INDICES]; /* Flow segment boundaries to use + * for staged lookup. */ + struct cmap subtables_map; /* Contains "struct cls_subtable"s. */ + struct pvector subtables; + struct cmap partitions; /* Contains "struct cls_partition"s. */ + struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */ + unsigned int n_tries; + bool publish; /* Make changes visible to lookups? */ +}; + +struct cls_conjunction { + uint32_t id; + uint8_t clause; + uint8_t n_clauses; }; /* A rule to be inserted to the classifier. */ struct cls_rule { - struct minimatch match; /* Matching rule. */ - unsigned int priority; /* Larger numbers are higher priorities. */ - struct cls_match *cls_match; /* NULL if rule is not in a classifier. */ + struct rculist node; /* In struct cls_subtable 'rules_list'. */ + const int priority; /* Larger numbers are higher priorities. */ + struct cls_match *cls_match; /* NULL if not in a classifier. */ + const struct minimatch match; /* Matching rule. 
*/ }; -void cls_rule_init(struct cls_rule *, const struct match *, - unsigned int priority); +void cls_rule_init(struct cls_rule *, const struct match *, int priority); void cls_rule_init_from_minimatch(struct cls_rule *, const struct minimatch *, - unsigned int priority); + int priority); void cls_rule_clone(struct cls_rule *, const struct cls_rule *); void cls_rule_move(struct cls_rule *dst, struct cls_rule *src); void cls_rule_destroy(struct cls_rule *); -bool cls_rule_equal(const struct cls_rule *, const struct cls_rule *); -uint32_t cls_rule_hash(const struct cls_rule *, uint32_t basis); +void cls_rule_set_conjunctions(struct cls_rule *, + const struct cls_conjunction *, size_t n); +bool cls_rule_equal(const struct cls_rule *, const struct cls_rule *); void cls_rule_format(const struct cls_rule *, struct ds *); - bool cls_rule_is_catchall(const struct cls_rule *); - bool cls_rule_is_loose_match(const struct cls_rule *rule, const struct minimatch *criteria); +bool cls_rule_visible_in_version(const struct cls_rule *, cls_version_t); +void cls_rule_make_invisible_in_version(const struct cls_rule *, + cls_version_t); +void cls_rule_restore_visibility(const struct cls_rule *); +/* Constructor/destructor. Must run single-threaded. */ void classifier_init(struct classifier *, const uint8_t *flow_segments); void classifier_destroy(struct classifier *); + +/* Modifiers. Caller MUST exclude concurrent calls from other threads. 
*/ bool classifier_set_prefix_fields(struct classifier *, const enum mf_field_id *trie_fields, unsigned int n_trie_fields); +void classifier_insert(struct classifier *, const struct cls_rule *, + cls_version_t, const struct cls_conjunction *, + size_t n_conjunctions); +const struct cls_rule *classifier_replace(struct classifier *, + const struct cls_rule *, + cls_version_t, + const struct cls_conjunction *, + size_t n_conjunctions); +const struct cls_rule *classifier_remove(struct classifier *, + const struct cls_rule *); +static inline void classifier_defer(struct classifier *); +static inline void classifier_publish(struct classifier *); +/* Lookups. These are RCU protected and may run concurrently with modifiers + * and each other. */ +const struct cls_rule *classifier_lookup(const struct classifier *, + cls_version_t, struct flow *, + struct flow_wildcards *); +bool classifier_rule_overlaps(const struct classifier *, + const struct cls_rule *, cls_version_t); +const struct cls_rule *classifier_find_rule_exactly(const struct classifier *, + const struct cls_rule *, + cls_version_t); +const struct cls_rule *classifier_find_match_exactly(const struct classifier *, + const struct match *, + int priority, + cls_version_t); bool classifier_is_empty(const struct classifier *); int classifier_count(const struct classifier *); -void classifier_insert(struct classifier *, struct cls_rule *); -struct cls_rule *classifier_replace(struct classifier *, struct cls_rule *); - -void classifier_remove(struct classifier *, struct cls_rule *); -struct cls_rule *classifier_lookup(const struct classifier *, - const struct flow *, - struct flow_wildcards *); -void classifier_lookup_miniflow_batch(const struct classifier *cls, - const struct miniflow **flows, - struct cls_rule **rules, size_t len); -bool classifier_rule_overlaps(const struct classifier *, - const struct cls_rule *); - -struct cls_rule *classifier_find_rule_exactly(const struct classifier *, - const struct cls_rule *); 
- -struct cls_rule *classifier_find_match_exactly(const struct classifier *, - const struct match *, - unsigned int priority); -/* Iteration. */ - +/* Iteration. + * + * Iteration is lockless and RCU-protected. Concurrent threads may perform all + * kinds of concurrent modifications without ruining the iteration. Obviously, + * any modifications may or may not be visible to the concurrent iterator, but + * all the rules not deleted are visited by the iteration. The iterating + * thread may also modify the classifier rules itself. + * + * 'TARGET' iteration only iterates rules matching the 'TARGET' criteria. + * Rather than looping through all the rules and skipping ones that can't + * match, 'TARGET' iteration skips whole subtables, if the 'TARGET' happens to + * be more specific than the subtable. */ struct cls_cursor { - const struct cls_classifier *cls; + const struct classifier *cls; const struct cls_subtable *subtable; const struct cls_rule *target; - struct cmap_cursor subtables; - struct cmap_cursor rules; - bool safe; + cls_version_t version; /* Version to iterate. */ + struct pvector_cursor subtables; + const struct cls_rule *rule; }; +struct cls_cursor cls_cursor_start(const struct classifier *, + const struct cls_rule *target, + cls_version_t); +void cls_cursor_advance(struct cls_cursor *); -/* Iteration requires mutual exclusion of writers. We do this by taking - * a mutex for the duration of the iteration, except for the - * 'SAFE' variant, where we release the mutex for the body of the loop. 
*/ -struct cls_cursor cls_cursor_init(const struct classifier *cls, - const struct cls_rule *target, - void **pnode, const void *offset, bool safe); - -struct cls_rule *cls_cursor_next(struct cls_cursor *cursor, - const struct cls_rule *); - -#define CLS_CURSOR_START(RULE, MEMBER, CLS, TARGET) \ - cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \ - OBJECT_CONTAINING(NULL, RULE, MEMBER), false) - -#define CLS_CURSOR_START_SAFE(RULE, MEMBER, CLS, TARGET) \ - cls_cursor_init(CLS, (TARGET), (void **)&(RULE), \ - OBJECT_CONTAINING(NULL, RULE, MEMBER), true) - -#define CLS_FOR_EACH(RULE, MEMBER, CLS) \ - for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \ - NULL); \ - RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \ - ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \ - MEMBER)) - -#define CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, TARGET) \ - for (struct cls_cursor cursor__ = CLS_CURSOR_START(RULE, MEMBER, CLS, \ - TARGET); \ - RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER); \ - ASSIGN_CONTAINER(RULE, cls_cursor_next(&cursor__, &(RULE)->MEMBER), \ - MEMBER)) - -/* This form allows classifier_remove() to be called within the loop. */ -#define CLS_FOR_EACH_SAFE(RULE, NEXT, MEMBER, CLS) \ - for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \ - CLS, NULL); \ - (RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \ - ? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \ - &(RULE)->MEMBER), \ - MEMBER), true \ +#define CLS_FOR_EACH(RULE, MEMBER, CLS) \ + CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, NULL, CLS_MAX_VERSION) +#define CLS_FOR_EACH_TARGET(RULE, MEMBER, CLS, TARGET, VERSION) \ + for (struct cls_cursor cursor__ = cls_cursor_start(CLS, TARGET, VERSION); \ + (cursor__.rule \ + ? (INIT_CONTAINER(RULE, cursor__.rule, MEMBER), \ + cls_cursor_advance(&cursor__), \ + true) \ : false); \ - (RULE) = (NEXT)) + ) -/* This form allows classifier_remove() to be called within the loop. 
*/ -#define CLS_FOR_EACH_TARGET_SAFE(RULE, NEXT, MEMBER, CLS, TARGET) \ - for (struct cls_cursor cursor__ = CLS_CURSOR_START_SAFE(RULE, MEMBER, \ - CLS, TARGET); \ - (RULE != OBJECT_CONTAINING(NULL, RULE, MEMBER) \ - ? ASSIGN_CONTAINER(NEXT, cls_cursor_next(&cursor__, \ - &(RULE)->MEMBER), \ - MEMBER), true \ - : false); \ - (RULE) = (NEXT)) + +static inline void +classifier_defer(struct classifier *cls) +{ + cls->publish = false; +} +static inline void +classifier_publish(struct classifier *cls) +{ + cls->publish = true; + pvector_publish(&cls->subtables); +} #ifdef __cplusplus } #endif - #endif /* classifier.h */