{
struct cls_classifier *cls = xmalloc(sizeof *cls);
- fat_rwlock_init(&cls_->rwlock);
ovs_mutex_init(&cls->mutex);
ovs_mutex_lock(&cls->mutex);
}
/* Destroys 'cls'. Rules within 'cls', if any, are not freed; this is the
- * caller's responsibility. */
+ * caller's responsibility.
+ * May only be called after all the readers have been terminated. */
void
classifier_destroy(struct classifier *cls_)
OVS_EXCLUDED(cls_->cls->mutex)
struct cls_subtable *subtable, *next_subtable;
int i;
- fat_rwlock_destroy(&cls_->rwlock);
if (!cls) {
return;
}
/* Returns the number of rules in 'cls'. */
int
classifier_count(const struct classifier *cls)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
+ /* n_rules is an int, so in the presence of concurrent writers this will
+ * return either the old or a new value. */
return cls->cls->n_rules;
}
return NULL;
}
+/*
+ * As the readers are operating concurrently with the modifications, a
+ * concurrent reader may or may not see the new rule, depending on how
+ * the concurrent events overlap with each other. This is no
+ * different from the former locked behavior, but there the visibility
+ * of the new rule only depended on the timing of the locking
+ * functions.
+ *
+ * The new rule is first added to the segment indices, so the readers
+ * may find the rule in the indices before the rule is visible in the
+ * subtables 'rules' map. This may result in us losing the
+ * opportunity to quit lookups earlier, resulting in sub-optimal
+ * wildcarding. This will be fixed by forthcoming revalidation always
+ * scheduled after flow table changes.
+ *
+ * Similar behavior may happen due to us removing the overlapping rule
+ * (if any) from the indices only after the new rule has been added.
+ *
+ * The subtable's max priority is updated only after the rule is
+ * inserted, so the concurrent readers may not see the rule, as the
+ * updated priority ordered subtable list will only be visible after
+ * the subtable's max priority is updated.
+ *
+ * Similarly, the classifier's partitions for new rules are updated by
+ * the caller after this function, so the readers may keep skipping
+ * the subtable until they see the updated partitions.
+ */
static struct cls_match *
insert_rule(struct cls_classifier *cls, struct cls_subtable *subtable,
struct cls_rule *new_rule)
* by a single writer. */
#include "cmap.h"
-#include "fat-rwlock.h"
#include "match.h"
#include "meta-flow.h"
extern "C" {
#endif
-/* Needed only for the lock annotation in struct classifier. */
-extern struct ovs_mutex ofproto_mutex;
-
/* Classifier internal data structures. */
struct cls_classifier;
struct cls_subtable;
-struct cls_partition;
struct cls_match;
enum {
/* A flow classifier. */
struct classifier {
- struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
struct cls_classifier *cls;
};
bool cls_rule_is_loose_match(const struct cls_rule *rule,
const struct minimatch *criteria);
-void classifier_init(struct classifier *cls, const uint8_t *flow_segments);
+void classifier_init(struct classifier *, const uint8_t *flow_segments);
void classifier_destroy(struct classifier *);
-bool classifier_set_prefix_fields(struct classifier *cls,
+bool classifier_set_prefix_fields(struct classifier *,
const enum mf_field_id *trie_fields,
- unsigned int n_trie_fields)
- OVS_REQ_WRLOCK(cls->rwlock);
+ unsigned int n_trie_fields);
+
+bool classifier_is_empty(const struct classifier *);
+int classifier_count(const struct classifier *);
+void classifier_insert(struct classifier *, struct cls_rule *);
+struct cls_rule *classifier_replace(struct classifier *, struct cls_rule *);
-bool classifier_is_empty(const struct classifier *cls);
-int classifier_count(const struct classifier *cls)
- OVS_REQ_RDLOCK(cls->rwlock);
-void classifier_insert(struct classifier *cls, struct cls_rule *)
- OVS_REQ_WRLOCK(cls->rwlock);
-struct cls_rule *classifier_replace(struct classifier *cls, struct cls_rule *)
- OVS_REQ_WRLOCK(cls->rwlock);
-void classifier_remove(struct classifier *cls, struct cls_rule *)
- OVS_REQ_WRLOCK(cls->rwlock);
-struct cls_rule *classifier_lookup(const struct classifier *cls,
+void classifier_remove(struct classifier *, struct cls_rule *);
+struct cls_rule *classifier_lookup(const struct classifier *,
const struct flow *,
- struct flow_wildcards *)
- OVS_REQ_RDLOCK(cls->rwlock);
+ struct flow_wildcards *);
void classifier_lookup_miniflow_batch(const struct classifier *cls,
const struct miniflow **flows,
- struct cls_rule **rules, size_t len)
- OVS_REQ_RDLOCK(cls->rwlock);
-bool classifier_rule_overlaps(const struct classifier *cls,
- const struct cls_rule *)
- OVS_REQ_RDLOCK(cls->rwlock);
+ struct cls_rule **rules, size_t len);
+bool classifier_rule_overlaps(const struct classifier *,
+ const struct cls_rule *);
-struct cls_rule *classifier_find_rule_exactly(const struct classifier *cls,
+struct cls_rule *classifier_find_rule_exactly(const struct classifier *,
const struct cls_rule *);
-struct cls_rule *classifier_find_match_exactly(const struct classifier *cls,
+struct cls_rule *classifier_find_match_exactly(const struct classifier *,
const struct match *,
unsigned int priority);
\f
#include "dpif-provider.h"
#include "dummy.h"
#include "dynamic-string.h"
+#include "fat-rwlock.h"
#include "flow.h"
#include "cmap.h"
#include "latch.h"
* dp_netdev_mutex (global)
* port_mutex
* flow_mutex
- * cls.rwlock
* queue_rwlock
*/
struct dp_netdev {
/* Flows.
*
- * Readers of 'cls' must take a 'cls->rwlock' read lock.
- *
- * Writers of 'flow_table' must take the 'flow_mutex'.
- *
- * Writers of 'cls' must take the 'flow_mutex' and then the 'cls->rwlock'
- * write lock. (The outer 'flow_mutex' allows writers to atomically
- * perform multiple operations on 'cls' and 'flow_table'.)
+ * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
+ * changes to 'cls' must be made while still holding the 'flow_mutex'.
*/
struct ovs_mutex flow_mutex;
- struct classifier cls; /* Classifier. Protected by cls.rwlock. */
+ struct classifier cls;
struct cmap flow_table OVS_GUARDED; /* Flow table. */
/* Queues.
static void
dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
- OVS_REQ_WRLOCK(dp->cls.rwlock)
OVS_REQUIRES(dp->flow_mutex)
{
struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
struct dp_netdev_flow *netdev_flow, *next;
ovs_mutex_lock(&dp->flow_mutex);
- fat_rwlock_wrlock(&dp->cls.rwlock);
CMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
- fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&dp->flow_mutex);
}
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
- OVS_REQ_RDLOCK(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
struct cls_rule *rule;
cmap_insert(&dp->flow_table,
CONST_CAST(struct cmap_node *, &netdev_flow->node),
flow_hash(flow, 0));
- fat_rwlock_wrlock(&dp->cls.rwlock);
classifier_insert(&dp->cls,
CONST_CAST(struct cls_rule *, &netdev_flow->cr));
- fat_rwlock_unlock(&dp->cls.rwlock);
return 0;
}
miniflow_init(&miniflow, &flow);
ovs_mutex_lock(&dp->flow_mutex);
- fat_rwlock_rdlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
- fat_rwlock_unlock(&dp->cls.rwlock);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
if (del->stats) {
get_dpif_flow_stats(netdev_flow, del->stats);
}
- fat_rwlock_wrlock(&dp->cls.rwlock);
dp_netdev_remove_flow(dp, netdev_flow);
- fat_rwlock_unlock(&dp->cls.rwlock);
} else {
error = ENOENT;
}
mfs[i] = &keys[i].flow;
}
- fat_rwlock_rdlock(&dp->cls.rwlock);
classifier_lookup_miniflow_batch(&dp->cls, mfs, rules, cnt);
- fat_rwlock_unlock(&dp->cls.rwlock);
n_batches = 0;
for (i = 0; i < cnt; i++) {
}
do {
- fat_rwlock_rdlock(&cls->rwlock);
cls_rule = classifier_lookup(cls, flow, wc);
- fat_rwlock_unlock(&cls->rwlock);
rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
* Thread-safety
* =============
*
- * A cls->rwlock read-lock holder prevents rules from being added or deleted.
+ * Adding or removing rules requires holding ofproto_mutex.
*
- * Adding or removing rules requires holding ofproto_mutex AND the cls->rwlock
- * write-lock.
+ * Rules in 'cls' are RCU protected. For extended access to a rule, try
+ * incrementing its ref_count with ofproto_rule_try_ref(), or
+ * ofproto_rule_ref(), if the rule is still known to be in 'cls'. A rule
+ * will be freed using ovsrcu_postpone() once its 'ref_count' reaches zero.
*
- * cls->rwlock should be held only briefly. For extended access to a rule,
- * increment its ref_count with ofproto_rule_ref(). A rule will not be freed
- * until its ref_count reaches zero.
+ * Modifying a rule requires the rule's own mutex.
*
- * Modifying a rule requires the rule's own mutex. Holding cls->rwlock (for
- * read or write) does not allow the holder to modify the rule.
- *
- * Freeing a rule requires ofproto_mutex and the cls->rwlock write-lock. After
- * removing the rule from the classifier, release a ref_count from the rule
- * ('cls''s reference to the rule).
+ * Freeing a rule requires ofproto_mutex. After removing the rule from the
+ * classifier, release a ref_count from the rule ('cls''s reference to the
+ * rule).
*
 * Refer to the thread-safety notes on struct rule for more information. */
struct oftable {
* Rules
* -----
*
- * A rule 'rule' may be accessed without a risk of being freed by code that
- * holds a read-lock or write-lock on 'cls->rwlock' or that owns a reference to
- * 'rule->ref_count' (or both). Code that needs to hold onto a rule for a
- * while should take 'cls->rwlock', find the rule it needs, increment
- * 'rule->ref_count' with ofproto_rule_ref(), and drop 'cls->rwlock'.
+ * A rule 'rule' may be accessed without a risk of being freed by a thread
+ * until the thread quiesces (i.e., rules are RCU protected and destructed
+ * using ovsrcu_postpone()). Code that needs to hold onto a rule for a
+ * while should increment 'rule->ref_count' either with ofproto_rule_ref()
+ * (if 'ofproto_mutex' is held), or with ofproto_rule_try_ref() (when some
+ * other thread might remove the rule from 'cls'). ofproto_rule_try_ref()
+ * will fail if the rule has already been scheduled for destruction.
*
* 'rule->ref_count' protects 'rule' from being freed. It doesn't protect the
- * rule from being deleted from 'cls' (that's 'cls->rwlock') and it doesn't
+ * rule from being deleted from 'cls' (that's 'ofproto_mutex') and it doesn't
* protect members of 'rule' from modification (that's 'rule->mutex').
*
* 'rule->mutex' protects the members of 'rule' from modification. It doesn't
- * protect the rule from being deleted from 'cls' (that's 'cls->rwlock') and it
- * doesn't prevent the rule from being freed (that's 'rule->ref_count').
+ * protect the rule from being deleted from 'cls' (that's 'ofproto_mutex') and
+ * it doesn't prevent the rule from being freed (that's 'rule->ref_count').
*
* Regarding thread safety, the members of a rule fall into the following
* categories:
* Thread-safety
* =============
*
- * A struct rule_actions may be accessed without a risk of being
- * freed by code that holds a read-lock or write-lock on 'rule->mutex' (where
- * 'rule' is the rule for which 'rule->actions == actions') or during the RCU
- * active period.
+ * A struct rule_actions may be accessed without a risk of being freed by
+ * code that holds 'rule->mutex' (where 'rule' is the rule for which
+ * 'rule->actions == actions') or during the RCU active period.
*
* All members are immutable: they do not change during the struct's
* lifetime. */
}
table->max_flows = s->max_flows;
- fat_rwlock_wrlock(&table->cls.rwlock);
+
if (classifier_set_prefix_fields(&table->cls,
s->prefix_fields, s->n_prefix_fields)) {
/* XXX: Trigger revalidation. */
}
- fat_rwlock_unlock(&table->cls.rwlock);
ovs_mutex_lock(&ofproto_mutex);
evict_rules_from_table(table, 0);
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- fat_rwlock_rdlock(&table->cls.rwlock);
n_rules += classifier_count(&table->cls);
- fat_rwlock_unlock(&table->cls.rwlock);
}
simap_increase(usage, "rules", n_rules);
/* First do a cheap check whether the rule we're looking for already exists
* with the actions that we want. If it does, then we're done. */
- fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, match, priority));
if (rule) {
} else {
must_add = true;
}
- fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
/* If there's no such rule or the rule doesn't have the actions we want,
 * fall back to executing a full flow mod. We can't optimize this at
struct rule *rule;
bool done = false;
- fat_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
&fm->match,
fm->priority));
}
ovs_mutex_unlock(&rule->mutex);
}
- fat_rwlock_unlock(&table->cls.rwlock);
if (done) {
return 0;
/* First do a cheap check whether the rule we're looking for has already
* been deleted. If so, then we're done. */
- fat_rwlock_rdlock(&cls->rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
priority));
- fat_rwlock_unlock(&cls->rwlock);
if (!rule) {
return;
}
ots[i].instructions = htonl(OFPIT11_ALL);
ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
- fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
- fat_rwlock_unlock(&p->tables[i].cls.rwlock);
}
p->ofproto_class->get_tables(p, ots);
FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
struct rule *rule;
- fat_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(
&table->cls, &criteria->cr));
- fat_rwlock_unlock(&table->cls.rwlock);
if (rule) {
collect_rule(rule, criteria, rules, &n_readonly);
}
cls_rule_init(&cr, &fm->match, fm->priority);
/* Transform "add" into "modify" if there's an existing identical flow. */
- fat_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
- fat_rwlock_unlock(&table->cls.rwlock);
if (rule) {
struct rule_collection rules;
/* Check for overlap, if requested. */
if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
- bool overlaps;
-
- fat_rwlock_rdlock(&table->cls.rwlock);
- overlaps = classifier_rule_overlaps(&table->cls, &cr);
- fat_rwlock_unlock(&table->cls.rwlock);
-
- if (overlaps) {
+ if (classifier_rule_overlaps(&table->cls, &cr)) {
cls_rule_destroy(&cr);
return OFPERR_OFPFMFC_OVERLAP;
}
meter_insert_rule(rule);
}
- fat_rwlock_wrlock(&table->cls.rwlock);
classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
- fat_rwlock_unlock(&table->cls.rwlock);
error = ofproto->ofproto_class->rule_insert(rule);
if (error) {
table->max_flows = UINT_MAX;
atomic_init(&table->config, (unsigned int)OFPROTO_TABLE_MISS_DEFAULT);
- fat_rwlock_wrlock(&table->cls.rwlock);
classifier_set_prefix_fields(&table->cls, default_prefix_fields,
ARRAY_SIZE(default_prefix_fields));
- fat_rwlock_unlock(&table->cls.rwlock);
atomic_init(&table->n_matched, 0);
atomic_init(&table->n_missed, 0);
static void
oftable_destroy(struct oftable *table)
{
- fat_rwlock_rdlock(&table->cls.rwlock);
ovs_assert(classifier_is_empty(&table->cls));
- fat_rwlock_unlock(&table->cls.rwlock);
oftable_disable_eviction(table);
classifier_destroy(&table->cls);
free(table->name);
{
struct classifier *cls = &ofproto->tables[rule->table_id].cls;
- fat_rwlock_wrlock(&cls->rwlock);
classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
- fat_rwlock_unlock(&cls->rwlock);
cookies_remove(ofproto, rule);
struct simap;
struct smap;
+/* Needed for the lock annotations. */
+extern struct ovs_mutex ofproto_mutex;
+
struct ofproto_controller_info {
bool is_connected;
enum ofp12_controller_role role;
static void
compare_classifiers(struct classifier *cls, struct tcls *tcls)
- OVS_REQ_RDLOCK(cls->rwlock)
{
static const int confidence = 500;
unsigned int i;
struct test_rule *rule, *next_rule;
CLS_FOR_EACH_SAFE (rule, next_rule, cls_rule, cls) {
- fat_rwlock_wrlock(&cls->rwlock);
classifier_remove(cls, &rule->cls_rule);
- fat_rwlock_unlock(&cls->rwlock);
free_rule(rule);
}
classifier_destroy(cls);
static void
check_tables(const struct classifier *cls, int n_tables, int n_rules,
- int n_dups) OVS_EXCLUDED(cls->rwlock)
+ int n_dups)
{
const struct cls_subtable *table;
struct test_rule *test_rule;
static void
set_prefix_fields(struct classifier *cls)
- OVS_REQ_WRLOCK(cls->rwlock)
{
verify_tries(cls);
classifier_set_prefix_fields(cls, trie_fields, ARRAY_SIZE(trie_fields));
struct tcls tcls;
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
tcls_init(&tcls);
assert(classifier_is_empty(&cls));
assert(tcls_is_empty(&tcls));
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
}
hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
tcls_init(&tcls);
tcls_rule = tcls_insert(&tcls, rule);
classifier_insert(&cls, &rule->cls_rule);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, 1, 1, 0);
- fat_rwlock_wrlock(&cls.rwlock);
classifier_remove(&cls, &rule->cls_rule);
tcls_remove(&tcls, tcls_rule);
assert(classifier_is_empty(&cls));
assert(tcls_is_empty(&tcls));
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
free_rule(rule);
classifier_destroy(&cls);
rule2->aux += 5;
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
tcls_init(&tcls);
tcls_insert(&tcls, rule1);
classifier_insert(&cls, &rule1->cls_rule);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, 1, 1, 0);
tcls_destroy(&tcls);
tcls_init(&tcls);
tcls_insert(&tcls, rule2);
- fat_rwlock_wrlock(&cls.rwlock);
assert(test_rule_from_cls_rule(
classifier_replace(&cls, &rule2->cls_rule)) == rule1);
free_rule(rule1);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, 1, 1, 0);
tcls_destroy(&tcls);
}
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
- fat_rwlock_unlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < ARRAY_SIZE(ops); i++) {
int j = ops[i];
int m, n;
- fat_rwlock_wrlock(&cls.rwlock);
if (!tcls_rules[j]) {
struct test_rule *displaced_rule;
pri_rules[pris[j]] = -1;
}
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
n = 0;
for (m = 0; m < N_RULES; m++) {
check_tables(&cls, n > 0, n, n - 1);
}
- fat_rwlock_wrlock(&cls.rwlock);
for (i = 0; i < N_RULES; i++) {
if (rules[i]->cls_rule.cls_match) {
classifier_remove(&cls, &rules[i]->cls_rule);
}
free_rule(rules[i]);
}
- fat_rwlock_unlock(&cls.rwlock);
classifier_destroy(&cls);
tcls_destroy(&tcls);
} while (next_permutation(ops, ARRAY_SIZE(ops)));
} while ((1 << count_ones(value_mask)) < N_RULES);
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
- fat_rwlock_unlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < N_RULES; i++) {
rules[i] = make_rule(wcf, priority, value_pats[i]);
tcls_rules[i] = tcls_insert(&tcls, rules[i]);
- fat_rwlock_wrlock(&cls.rwlock);
classifier_insert(&cls, &rules[i]->cls_rule);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, 1, i + 1, 0);
}
for (i = 0; i < N_RULES; i++) {
tcls_remove(&tcls, tcls_rules[i]);
- fat_rwlock_wrlock(&cls.rwlock);
classifier_remove(&cls, &rules[i]->cls_rule);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
free_rule(rules[i]);
check_tables(&cls, i < N_RULES - 1, N_RULES - (i + 1), 0);
shuffle(priorities, ARRAY_SIZE(priorities));
classifier_init(&cls, flow_segment_u32s);
- fat_rwlock_wrlock(&cls.rwlock);
set_prefix_fields(&cls);
- fat_rwlock_unlock(&cls.rwlock);
tcls_init(&tcls);
for (i = 0; i < MAX_RULES; i++) {
int value_pat = random_uint32() & ((1u << CLS_N_FIELDS) - 1);
rule = make_rule(wcf, priority, value_pat);
tcls_insert(&tcls, rule);
- fat_rwlock_wrlock(&cls.rwlock);
classifier_insert(&cls, &rule->cls_rule);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, -1, i + 1, -1);
}
CLS_FOR_EACH_TARGET_SAFE (rule, next_rule, cls_rule, &cls,
&target->cls_rule) {
- fat_rwlock_wrlock(&cls.rwlock);
classifier_remove(&cls, &rule->cls_rule);
- fat_rwlock_unlock(&cls.rwlock);
free_rule(rule);
}
tcls_delete_matches(&tcls, &target->cls_rule);
- fat_rwlock_rdlock(&cls.rwlock);
compare_classifiers(&cls, &tcls);
- fat_rwlock_unlock(&cls.rwlock);
check_tables(&cls, -1, -1, -1);
free_rule(target);
}
struct fte *fte, *next;
CLS_FOR_EACH_SAFE (fte, next, rule, cls) {
- fat_rwlock_wrlock(&cls->rwlock);
classifier_remove(cls, &fte->rule);
- fat_rwlock_unlock(&cls->rwlock);
fte_free(fte);
}
classifier_destroy(cls);
cls_rule_init(&fte->rule, match, priority);
fte->versions[index] = version;
- fat_rwlock_wrlock(&cls->rwlock);
old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
- fat_rwlock_unlock(&cls->rwlock);
if (old) {
fte_version_free(old->versions[index]);
fte->versions[!index] = old->versions[!index];