From: Ben Pfaff
Date: Mon, 13 Jan 2014 19:21:12 +0000 (-0800)
Subject: classifier: Use fat_rwlock instead of ovs_rwlock.
X-Git-Tag: v2.1.0~39
X-Git-Url: http://git.cascardo.eti.br/?p=cascardo%2Fovs.git;a=commitdiff_plain;h=cd7b9405a452a486f5b7d16849aa23805984c104

classifier: Use fat_rwlock instead of ovs_rwlock.

Jarno Rajahalme reported up to 40% performance gain on netperf TCP_CRR
with an earlier version of this patch in combination with a kernel NUMA
patch, together with a reduction in variance:
http://openvswitch.org/pipermail/dev/2014-January/035867.html

Signed-off-by: Ben Pfaff
Acked-by: Ethan Jackson
---

diff --git a/lib/classifier.c b/lib/classifier.c
index 1675283a9..30a91b755 100644
--- a/lib/classifier.c
+++ b/lib/classifier.c
@@ -176,7 +176,7 @@ classifier_init(struct classifier *cls, const uint8_t *flow_segments)
     hmap_init(&cls->subtables);
     list_init(&cls->subtables_priority);
     hmap_init(&cls->partitions);
-    ovs_rwlock_init(&cls->rwlock);
+    fat_rwlock_init(&cls->rwlock);
     cls->n_flow_segments = 0;
     if (flow_segments) {
         while (cls->n_flow_segments < CLS_MAX_INDICES
@@ -213,7 +213,7 @@ classifier_destroy(struct classifier *cls)
             free(partition);
         }
         hmap_destroy(&cls->partitions);
-        ovs_rwlock_destroy(&cls->rwlock);
+        fat_rwlock_destroy(&cls->rwlock);
     }
 }
diff --git a/lib/classifier.h b/lib/classifier.h
index b6b89a0c1..c3c1c3bd2 100644
--- a/lib/classifier.h
+++ b/lib/classifier.h
@@ -213,6 +213,7 @@
  * The classifier may safely be accessed by many reader threads concurrently or
  * by a single writer. */

+#include "fat-rwlock.h"
 #include "flow.h"
 #include "hindex.h"
 #include "hmap.h"
@@ -254,7 +255,7 @@ struct classifier {
     struct list subtables_priority; /* Subtables in descending priority order. */
     struct hmap partitions;         /* Contains "struct cls_partition"s. */
-    struct ovs_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
+    struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
     struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */
     unsigned int n_tries;
 };
diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 19fbdcf40..d32b1bdf1 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -638,9 +638,9 @@ dpif_netdev_get_max_ports(const struct dpif *dpif OVS_UNUSED)
 static void
 dp_netdev_free_flow(struct dp_netdev *dp, struct dp_netdev_flow *netdev_flow)
 {
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_remove(&dp->cls, &netdev_flow->cr);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     cls_rule_destroy(&netdev_flow->cr);

     hmap_remove(&dp->flow_table, &netdev_flow->node);
@@ -755,9 +755,9 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
 {
     struct cls_rule *cr;

-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     cr = classifier_lookup(&dp->cls, flow, NULL);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);
     return (cr ?
             CONTAINER_OF(cr, struct dp_netdev_flow, cr)
@@ -928,15 +928,15 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
     match_init(&match, flow, wc);
     cls_rule_init(&netdev_flow->cr, &match, NETDEV_RULE_PRIORITY);
-    ovs_rwlock_wrlock(&dp->cls.rwlock);
+    fat_rwlock_wrlock(&dp->cls.rwlock);
     classifier_insert(&dp->cls, &netdev_flow->cr);
-    ovs_rwlock_unlock(&dp->cls.rwlock);
+    fat_rwlock_unlock(&dp->cls.rwlock);

     error = set_flow_actions(netdev_flow, actions, actions_len);
     if (error) {
-        ovs_rwlock_wrlock(&dp->cls.rwlock);
+        fat_rwlock_wrlock(&dp->cls.rwlock);
         classifier_remove(&dp->cls, &netdev_flow->cr);
-        ovs_rwlock_unlock(&dp->cls.rwlock);
+        fat_rwlock_unlock(&dp->cls.rwlock);
         cls_rule_destroy(&netdev_flow->cr);
         free(netdev_flow);
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 59af46476..3493fedf3 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -1044,9 +1044,9 @@ destruct(struct ofproto *ofproto_)
     OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
         struct cls_cursor cursor;

-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
             ofproto_rule_delete(&ofproto->up, &rule->up);
         }
@@ -2925,7 +2925,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
     }

     cls = &ofproto->up.tables[table_id].cls;
-    ovs_rwlock_rdlock(&cls->rwlock);
+    fat_rwlock_rdlock(&cls->rwlock);
     frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
     if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
         /* We must pretend that transport ports are unavailable. */
@@ -2942,7 +2942,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
     *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
     rule_dpif_ref(*rule);
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);

     return *rule != NULL;
 }
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index 75461e2dd..b09ea2279 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -1168,7 +1168,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }

     table->max_flows = s->max_flows;
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
         /* 'table' contains more flows than allowed.  We might not be able to
@@ -1188,7 +1188,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     classifier_set_prefix_fields(&table->cls, s->prefix_fields,
                                  s->n_prefix_fields);

-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }

 bool
@@ -1263,9 +1263,9 @@ ofproto_flush__(struct ofproto *ofproto)
             continue;
         }

-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
         CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
             if (!rule->pending) {
                 ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
@@ -1454,7 +1454,7 @@ ofproto_run(struct ofproto *p)
                 heap_rebuild(&evg->rules);
             }

-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, NULL);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 if (!rule->eviction_group
@@ -1462,7 +1462,7 @@ ofproto_run(struct ofproto *p)
                     eviction_group_add_rule(rule);
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             ovs_mutex_unlock(&ofproto_mutex);
         }
     }
@@ -1612,9 +1612,9 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
     n_rules = 0;
     OFPROTO_FOR_EACH_TABLE (table, ofproto) {
-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         n_rules += classifier_count(&table->cls);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
     simap_increase(usage, "rules", n_rules);
@@ -1901,7 +1901,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     /* First do a cheap check whether the rule we're looking for already exists
      * with the actions that we want.  If it does, then we're done. */
-    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(
                                   &ofproto->tables[0].cls, match, priority));
     if (rule) {
@@ -1913,7 +1913,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
     } else {
         must_add = true;
     }
-    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
+    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);

     /* If there's no such rule or the rule doesn't have the actions we want,
      * fall back to a executing a full flow mod.  We can't optimize this at
@@ -1952,10 +1952,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
     /* First do a cheap check whether the rule we're looking for has already
      * been deleted.  If so, then we're done. */
-    ovs_rwlock_rdlock(&cls->rwlock);
+    fat_rwlock_rdlock(&cls->rwlock);
     rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
                                                             priority));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     if (!rule) {
         return true;
     }
@@ -3078,9 +3078,9 @@ handle_table_stats_request(struct ofconn *ofconn,
         ots[i].instructions = htonl(OFPIT11_ALL);
         ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
         ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
-        ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
         ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
-        ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
+        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
     }

     p->ofproto_class->get_tables(p, ots);
@@ -3445,7 +3445,7 @@ collect_rules_loose(struct ofproto *ofproto,
             struct cls_cursor cursor;
             struct rule *rule;

-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             cls_cursor_init(&cursor, &table->cls, &criteria->cr);
             CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
                 error = collect_rule(rule, criteria, rules);
@@ -3453,7 +3453,7 @@ collect_rules_loose(struct ofproto *ofproto,
                     break;
                 }
             }
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
         }
     }
@@ -3505,10 +3505,10 @@ collect_rules_strict(struct ofproto *ofproto,
         FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
             struct rule *rule;

-            ovs_rwlock_rdlock(&table->cls.rwlock);
+            fat_rwlock_rdlock(&table->cls.rwlock);
             rule = rule_from_cls_rule(classifier_find_rule_exactly(
                                           &table->cls, &criteria->cr));
-            ovs_rwlock_unlock(&table->cls.rwlock);
+            fat_rwlock_unlock(&table->cls.rwlock);
             if (rule) {
                 error = collect_rule(rule, criteria, rules);
                 if (error) {
@@ -3656,12 +3656,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
         struct cls_cursor cursor;
         struct rule *rule;

-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, NULL);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             flow_stats_ds(rule, results);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }
 }
@@ -3972,9 +3972,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     cls_rule_init(&cr, &fm->match, fm->priority);

     /* Transform "add" into "modify" if there's an existing identical flow. */
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     if (rule) {
         cls_rule_destroy(&cr);
         if (!rule_is_modifiable(rule)) {
@@ -4004,9 +4004,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
         bool overlaps;

-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         overlaps = classifier_rule_overlaps(&table->cls, &cr);
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);

         if (overlaps) {
             cls_rule_destroy(&cr);
@@ -4827,13 +4827,13 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
         struct cls_cursor cursor;
         struct rule *rule;

-        ovs_rwlock_rdlock(&table->cls.rwlock);
+        fat_rwlock_rdlock(&table->cls.rwlock);
         cls_cursor_init(&cursor, &table->cls, &target);
         CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
             ovs_assert(!rule->pending); /* XXX */
             ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
         }
-        ovs_rwlock_unlock(&table->cls.rwlock);
+        fat_rwlock_unlock(&table->cls.rwlock);
     }

     HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
@@ -6662,9 +6662,9 @@ oftable_init(struct oftable *table)
 static void
 oftable_destroy(struct oftable *table)
 {
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     ovs_assert(classifier_is_empty(&table->cls));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     oftable_disable_eviction(table);
     classifier_destroy(&table->cls);
     free(table->name);
@@ -6746,12 +6746,12 @@ oftable_enable_eviction(struct oftable *table,
     hmap_init(&table->eviction_groups_by_id);
     heap_init(&table->eviction_groups_by_size);
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    fat_rwlock_rdlock(&table->cls.rwlock);
     cls_cursor_init(&cursor, &table->cls, NULL);
     CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
         eviction_group_add_rule(rule);
     }
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
 }

 /* Removes 'rule' from the oftable that contains it. */
@@ -6761,9 +6761,9 @@ oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
 {
     struct classifier *cls = &ofproto->tables[rule->table_id].cls;

-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);

     cookies_remove(ofproto, rule);
@@ -6810,9 +6810,9 @@ oftable_insert_rule(struct rule *rule)
         struct meter *meter = ofproto->meters[meter_id];
         list_insert(&meter->rules, &rule->meter_list_node);
     }
-    ovs_rwlock_wrlock(&table->cls.rwlock);
+    fat_rwlock_wrlock(&table->cls.rwlock);
     classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
-    ovs_rwlock_unlock(&table->cls.rwlock);
+    fat_rwlock_unlock(&table->cls.rwlock);
     eviction_group_add_rule(rule);
 }
@@ -6881,7 +6881,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
     OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
         const struct cls_subtable *table;

-        ovs_rwlock_rdlock(&oftable->cls.rwlock);
+        fat_rwlock_rdlock(&oftable->cls.rwlock);
         HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
             if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
                 const struct cls_rule *rule;
@@ -6893,7 +6893,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
                 }
             }
         }
-        ovs_rwlock_unlock(&oftable->cls.rwlock);
+        fat_rwlock_unlock(&oftable->cls.rwlock);
     }
 }
diff --git a/tests/test-classifier.c b/tests/test-classifier.c
index 93a2dc1a8..4282fd4b5 100644
--- a/tests/test-classifier.c
+++ b/tests/test-classifier.c
@@ -449,13 +449,13 @@ destroy_classifier(struct classifier *cls)
     struct test_rule *rule, *next_rule;
     struct cls_cursor cursor;

-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     cls_cursor_init(&cursor, cls, NULL);
     CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
         classifier_remove(cls, &rule->cls_rule);
         free_rule(rule);
     }
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     classifier_destroy(cls);
 }
@@ -621,13 +621,13 @@ test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
     struct tcls tcls;

     classifier_init(&cls, flow_segment_u32s);
-    ovs_rwlock_wrlock(&cls.rwlock);
+    fat_rwlock_wrlock(&cls.rwlock);
     classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
     tcls_init(&tcls);
     assert(classifier_is_empty(&cls));
     assert(tcls_is_empty(&tcls));
     compare_classifiers(&cls, &tcls);
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);
     classifier_destroy(&cls);
     tcls_destroy(&tcls);
 }
@@ -654,7 +654,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                          hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);

         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));

         tcls_init(&tcls);
@@ -671,7 +671,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         compare_classifiers(&cls, &tcls);

         free_rule(rule);
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         classifier_destroy(&cls);
         tcls_destroy(&tcls);
     }
@@ -695,7 +695,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         rule2->aux += 5;

         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -713,7 +713,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         check_tables(&cls, 1, 1, 0);
         compare_classifiers(&cls, &tcls);
         tcls_destroy(&tcls);
-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         destroy_classifier(&cls);
     }
 }
@@ -809,7 +809,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             }

             classifier_init(&cls, flow_segment_u32s);
-            ovs_rwlock_wrlock(&cls.rwlock);
+            fat_rwlock_wrlock(&cls.rwlock);
             classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
             tcls_init(&tcls);
@@ -850,7 +850,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
                 compare_classifiers(&cls, &tcls);
             }

-            ovs_rwlock_unlock(&cls.rwlock);
+            fat_rwlock_unlock(&cls.rwlock);
             classifier_destroy(&cls);
             tcls_destroy(&tcls);
@@ -913,7 +913,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
         } while ((1 << count_ones(value_mask)) < N_RULES);

         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -942,7 +942,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
             compare_classifiers(&cls, &tcls);
         }

-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         classifier_destroy(&cls);
         tcls_destroy(&tcls);
     }
@@ -977,7 +977,7 @@ test_many_rules_in_n_tables(int n_tables)
         shuffle(priorities, ARRAY_SIZE(priorities));

         classifier_init(&cls, flow_segment_u32s);
-        ovs_rwlock_wrlock(&cls.rwlock);
+        fat_rwlock_wrlock(&cls.rwlock);
         classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
         tcls_init(&tcls);
@@ -1012,7 +1012,7 @@ test_many_rules_in_n_tables(int n_tables)
             free_rule(target);
         }

-        ovs_rwlock_unlock(&cls.rwlock);
+        fat_rwlock_unlock(&cls.rwlock);
         destroy_classifier(&cls);
         tcls_destroy(&tcls);
     }
diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c
index 9b02b25ca..e8453f303 100644
--- a/utilities/ovs-ofctl.c
+++ b/utilities/ovs-ofctl.c
@@ -2252,13 +2252,13 @@ fte_free_all(struct classifier *cls)
     struct cls_cursor cursor;
     struct fte *fte, *next;

-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     cls_cursor_init(&cursor, cls, NULL);
     CLS_CURSOR_FOR_EACH_SAFE (fte, next, rule, &cursor) {
         classifier_remove(cls, &fte->rule);
         fte_free(fte);
     }
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     classifier_destroy(cls);
 }
@@ -2277,9 +2277,9 @@ fte_insert(struct classifier *cls, const struct match *match,
     cls_rule_init(&fte->rule, match, priority);
     fte->versions[index] = version;

-    ovs_rwlock_wrlock(&cls->rwlock);
+    fat_rwlock_wrlock(&cls->rwlock);
     old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
-    ovs_rwlock_unlock(&cls->rwlock);
+    fat_rwlock_unlock(&cls->rwlock);
     if (old) {
         fte_version_free(old->versions[index]);
         fte->versions[!index] = old->versions[!index];
@@ -2490,7 +2490,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
     list_init(&requests);

     /* Delete flows that exist on the switch but not in the file. */
-    ovs_rwlock_rdlock(&cls.rwlock);
+    fat_rwlock_rdlock(&cls.rwlock);
     cls_cursor_init(&cursor, &cls, NULL);
     CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
         struct fte_version *file_ver = fte->versions[FILE_IDX];
@@ -2514,7 +2514,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
             fte_make_flow_mod(fte, FILE_IDX, OFPFC_ADD, protocol, &requests);
         }
     }
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);

     transact_multiple_noreply(vconn, &requests);
     vconn_close(vconn);
@@ -2556,7 +2556,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
     ds_init(&a_s);
     ds_init(&b_s);

-    ovs_rwlock_rdlock(&cls.rwlock);
+    fat_rwlock_rdlock(&cls.rwlock);
     cls_cursor_init(&cursor, &cls, NULL);
     CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
         struct fte_version *a = fte->versions[0];
@@ -2576,7 +2576,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
             }
         }
     }
-    ovs_rwlock_unlock(&cls.rwlock);
+    fat_rwlock_unlock(&cls.rwlock);

     ds_destroy(&a_s);
     ds_destroy(&b_s);
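
For reference, the caller-side locking pattern is unchanged by this patch: only the ovs_rwlock_* names become fat_rwlock_* names, and fat_rwlock_unlock() releases both read and write acquisitions, matching the classifier's "many concurrent readers or a single writer" rule quoted in classifier.h above. A minimal sketch of a hypothetical caller follows; it is not part of this patch and only uses the fat_rwlock_rdlock()/fat_rwlock_unlock() and classifier_count() calls that appear in the hunks above.

    #include "classifier.h"   /* after this patch, also pulls in "fat-rwlock.h" */

    /* Hypothetical helper (illustration only): counts the rules in 'cls'
     * while holding its fat_rwlock for reading, mirroring the
     * lock/count/unlock pattern used throughout the hunks above. */
    static int
    count_classifier_rules(struct classifier *cls)
    {
        int n;

        fat_rwlock_rdlock(&cls->rwlock);   /* shared (reader) acquisition */
        n = classifier_count(cls);
        fat_rwlock_unlock(&cls->rwlock);   /* same unlock call for readers and writers */

        return n;
    }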