COVERAGE_DEFINE(ofproto_reinit_ports);
COVERAGE_DEFINE(ofproto_update_port);
+/* Default fields to use for prefix tries in each flow table, unless something
+ * else is configured. */
+const enum mf_field_id default_prefix_fields[2] =
+ { MFF_IPV4_DST, MFF_IPV4_SRC };
+
enum ofproto_state {
S_OPENFLOW, /* Processing OpenFlow commands. */
S_EVICT, /* Evicting flows from over-limit tables. */
/* Opens and returns a netdev for 'ofproto_port' in 'ofproto', or a null
* pointer if the netdev cannot be opened. On success, also fills in
- * 'opp'. */
+ * '*pp'. */
static struct netdev *
ofport_open(struct ofproto *ofproto,
struct ofproto_port *ofproto_port,
}
/* Returns true if most fields of 'a' and 'b' are equal. Differences in name,
- * port number, and 'config' bits other than OFPUTIL_PS_LINK_DOWN are
+ * port number, and 'config' bits other than OFPUTIL_PC_PORT_DOWN are
* disregarded. */
static bool
ofport_equal(const struct ofputil_phy_port *a,
}
}
+void
+ofproto_group_ref(struct ofgroup *group)
+{
+ if (group) {
+ ovs_refcount_ref(&group->ref_count);
+ }
+}
+
+void
+ofproto_group_unref(struct ofgroup *group)
+{
+ if (group && ovs_refcount_unref(&group->ref_count) == 1) {
+ group->ofproto->ofproto_class->group_destruct(group);
+ ofputil_bucket_list_destroy(&group->buckets);
+ group->ofproto->ofproto_class->group_dealloc(group);
+ }
+}
+
static uint32_t get_provider_meter_id(const struct ofproto *,
uint32_t of_meter_id);
return error;
}
- error = ofputil_decode_port_mod(oh, &pm);
+ error = ofputil_decode_port_mod(oh, &pm, false);
if (error) {
return error;
}
ofputil_append_port_stat(replies, &ops);
}
-static enum ofperr
-handle_port_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_port_request(struct ofconn *ofconn,
+ const struct ofp_header *request, ofp_port_t port_no,
+ void (*cb)(struct ofport *, struct list *replies))
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofport *port;
struct list replies;
- ofp_port_t port_no;
- enum ofperr error;
-
- error = ofputil_decode_port_stats_request(request, &port_no);
- if (error) {
- return error;
- }
ofpmp_init(&replies, request);
if (port_no != OFPP_ANY) {
- port = ofproto_get_port(p, port_no);
+ port = ofproto_get_port(ofproto, port_no);
if (port) {
- append_port_stat(port, &replies);
+ cb(port, &replies);
}
} else {
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- append_port_stat(port, &replies);
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+ cb(port, &replies);
}
}
ofconn_send_replies(ofconn, &replies);
- return 0;
+}
+
+static enum ofperr
+handle_port_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ ofp_port_t port_no;
+ enum ofperr error;
+
+ error = ofputil_decode_port_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_stat);
+ }
+ return error;
+}
+
+static void
+append_port_desc(struct ofport *port, struct list *replies)
+{
+ ofputil_append_port_desc_stats_reply(&port->pp, replies);
}
static enum ofperr
handle_port_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
- enum ofp_version version;
- struct ofport *port;
- struct list replies;
-
- ofpmp_init(&replies, request);
+ ofp_port_t port_no;
+ enum ofperr error;
- version = ofputil_protocol_to_ofp_version(ofconn_get_protocol(ofconn));
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- ofputil_append_port_desc_stats_reply(version, &port->pp, &replies);
+ error = ofputil_decode_port_desc_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_desc);
}
-
- ofconn_send_replies(ofconn, &replies);
- return 0;
+ return error;
}
static uint32_t
return 0;
}
+/* If the group exists, this function increments the group's reference count.
+ *
+ * Make sure to call ofproto_group_unref() once the reference is no longer
+ * needed. */
bool
ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
struct ofgroup **group)
- OVS_TRY_RDLOCK(true, (*group)->rwlock)
{
ovs_rwlock_rdlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
hash_int(group_id, 0), &ofproto->groups) {
if ((*group)->group_id == group_id) {
- ovs_rwlock_rdlock(&(*group)->rwlock);
+ ofproto_group_ref(*group);
ovs_rwlock_unlock(&ofproto->groups_rwlock);
return true;
}
return false;
}
-void
-ofproto_group_release(struct ofgroup *group)
- OVS_RELEASES(group->rwlock)
-{
- ovs_rwlock_unlock(&group->rwlock);
-}
-
static bool
ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id,
struct ofgroup **group)
- OVS_TRY_WRLOCK(true, ofproto->groups_rwlock)
- OVS_TRY_WRLOCK(true, (*group)->rwlock)
+ OVS_ACQUIRES(ofproto->groups_rwlock)
{
ovs_rwlock_wrlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
hash_int(group_id, 0), &ofproto->groups) {
if ((*group)->group_id == group_id) {
- ovs_rwlock_wrlock(&(*group)->rwlock);
return true;
}
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
return false;
}
group_get_ref_count(struct ofgroup *group)
OVS_EXCLUDED(ofproto_mutex)
{
- struct ofproto *ofproto = group->ofproto;
+ struct ofproto *ofproto = CONST_CAST(struct ofproto *, group->ofproto);
struct rule_criteria criteria;
struct rule_collection rules;
struct match match;
static void
append_group_stats(struct ofgroup *group, struct list *replies)
- OVS_REQ_RDLOCK(group->rwlock)
{
struct ofputil_group_stats ogs;
- struct ofproto *ofproto = group->ofproto;
+ const struct ofproto *ofproto = group->ofproto;
long long int now = time_msec();
int error;
free(ogs.bucket_stats);
}
-static enum ofperr
-handle_group_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_group_request(struct ofconn *ofconn,
+ const struct ofp_header *request, uint32_t group_id,
+ void (*cb)(struct ofgroup *, struct list *replies))
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- enum ofperr error;
struct ofgroup *group;
- uint32_t group_id;
-
- error = ofputil_decode_group_stats_request(request, &group_id);
- if (error) {
- return error;
- }
+ struct list replies;
ofpmp_init(&replies, request);
-
if (group_id == OFPG_ALL) {
ovs_rwlock_rdlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- ovs_rwlock_rdlock(&group->rwlock);
- append_group_stats(group, &replies);
- ovs_rwlock_unlock(&group->rwlock);
+ cb(group, &replies);
}
ovs_rwlock_unlock(&ofproto->groups_rwlock);
} else {
if (ofproto_group_lookup(ofproto, group_id, &group)) {
- append_group_stats(group, &replies);
- ofproto_group_release(group);
+ cb(group, &replies);
+ ofproto_group_unref(group);
}
}
-
ofconn_send_replies(ofconn, &replies);
-
- return 0;
}
static enum ofperr
-handle_group_desc_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+handle_group_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
{
- struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- struct ofputil_group_desc gds;
- struct ofgroup *group;
-
- ofpmp_init(&replies, request);
+ uint32_t group_id;
+ enum ofperr error;
- ovs_rwlock_rdlock(&ofproto->groups_rwlock);
- HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- gds.group_id = group->group_id;
- gds.type = group->type;
- ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+ error = ofputil_decode_group_stats_request(request, &group_id);
+ if (error) {
+ return error;
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
- ofconn_send_replies(ofconn, &replies);
+ handle_group_request(ofconn, request, group_id, append_group_stats);
+ return 0;
+}
+static void
+append_group_desc(struct ofgroup *group, struct list *replies)
+{
+ struct ofputil_group_desc gds;
+
+ gds.group_id = group->group_id;
+ gds.type = group->type;
+ ofputil_append_group_desc_reply(&gds, &group->buckets, replies);
+}
+
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ handle_group_request(ofconn, request,
+ ofputil_decode_group_desc_request(request),
+ append_group_desc);
return 0;
}
return 0;
}
+static enum ofperr
+init_group(struct ofproto *ofproto, struct ofputil_group_mod *gm,
+ struct ofgroup **ofgroup)
+{
+ enum ofperr error;
+ const long long int now = time_msec();
+
+ if (gm->group_id > OFPG_MAX) {
+ return OFPERR_OFPGMFC_INVALID_GROUP;
+ }
+ if (gm->type > OFPGT11_FF) {
+ return OFPERR_OFPGMFC_BAD_TYPE;
+ }
+
+ *ofgroup = ofproto->ofproto_class->group_alloc();
+ if (!*ofgroup) {
+ VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
+ return OFPERR_OFPGMFC_OUT_OF_GROUPS;
+ }
+
+ (*ofgroup)->ofproto = ofproto;
+ *CONST_CAST(uint32_t *, &((*ofgroup)->group_id)) = gm->group_id;
+ *CONST_CAST(enum ofp11_group_type *, &(*ofgroup)->type) = gm->type;
+ *CONST_CAST(long long int *, &((*ofgroup)->created)) = now;
+ *CONST_CAST(long long int *, &((*ofgroup)->modified)) = now;
+ ovs_refcount_init(&(*ofgroup)->ref_count);
+
+ list_move(&(*ofgroup)->buckets, &gm->buckets);
+ *CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) =
+ list_size(&(*ofgroup)->buckets);
+
+ /* Construct called BEFORE any locks are held. */
+ error = ofproto->ofproto_class->group_construct(*ofgroup);
+ if (error) {
+ ofputil_bucket_list_destroy(&(*ofgroup)->buckets);
+ ofproto->ofproto_class->group_dealloc(*ofgroup);
+ }
+ return error;
+}
+
/* Implements OFPGC11_ADD
* in which no matching flow already exists in the flow table.
*
struct ofgroup *ofgroup;
enum ofperr error;
- if (gm->group_id > OFPG_MAX) {
- return OFPERR_OFPGMFC_INVALID_GROUP;
- }
- if (gm->type > OFPGT11_FF) {
- return OFPERR_OFPGMFC_BAD_TYPE;
- }
-
/* Allocate new group and initialize it. */
- ofgroup = ofproto->ofproto_class->group_alloc();
- if (!ofgroup) {
- VLOG_WARN_RL(&rl, "%s: failed to create group", ofproto->name);
- return OFPERR_OFPGMFC_OUT_OF_GROUPS;
- }
-
- ovs_rwlock_init(&ofgroup->rwlock);
- ofgroup->ofproto = ofproto;
- ofgroup->group_id = gm->group_id;
- ofgroup->type = gm->type;
- ofgroup->created = ofgroup->modified = time_msec();
-
- list_move(&ofgroup->buckets, &gm->buckets);
- ofgroup->n_buckets = list_size(&ofgroup->buckets);
-
- /* Construct called BEFORE any locks are held. */
- error = ofproto->ofproto_class->group_construct(ofgroup);
+ error = init_group(ofproto, gm, &ofgroup);
if (error) {
- goto free_out;
+ return error;
}
/* We wrlock as late as possible to minimize the time we jam any other
unlock_out:
ovs_rwlock_unlock(&ofproto->groups_rwlock);
ofproto->ofproto_class->group_destruct(ofgroup);
- free_out:
ofputil_bucket_list_destroy(&ofgroup->buckets);
ofproto->ofproto_class->group_dealloc(ofgroup);
/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on
* failure.
*
+ * Note that the group is re-created and then replaces the old group in
+ * ofproto's ofgroup hash map. Thus, the group is never altered while users of
+ * the xlate module hold a pointer to the group.
+ *
* 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
* if any. */
static enum ofperr
modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
{
- struct ofgroup *ofgroup;
- struct ofgroup *victim;
+ struct ofgroup *ofgroup, *new_ofgroup, *retiring;
enum ofperr error;
- if (gm->group_id > OFPG_MAX) {
- return OFPERR_OFPGMFC_INVALID_GROUP;
- }
-
- if (gm->type > OFPGT11_FF) {
- return OFPERR_OFPGMFC_BAD_TYPE;
+ error = init_group(ofproto, gm, &new_ofgroup);
+ if (error) {
+ return error;
}
- victim = ofproto->ofproto_class->group_alloc();
- if (!victim) {
- VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
- return OFPERR_OFPGMFC_OUT_OF_GROUPS;
- }
+ retiring = new_ofgroup;
if (!ofproto_group_write_lookup(ofproto, gm->group_id, &ofgroup)) {
error = OFPERR_OFPGMFC_UNKNOWN_GROUP;
- goto free_out;
+ goto out;
}
- /* Both group's and its container's write locks held now.
- * Also, n_groups[] is protected by ofproto->groups_rwlock. */
+
+ /* Ofproto's group write lock is held now. */
if (ofgroup->type != gm->type
&& ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
- goto unlock_out;
+ goto out;
}
- *victim = *ofgroup;
- list_move(&victim->buckets, &ofgroup->buckets);
+ /* The group creation time does not change during modification. */
+ *CONST_CAST(long long int *, &(new_ofgroup->created)) = ofgroup->created;
+ *CONST_CAST(long long int *, &(new_ofgroup->modified)) = time_msec();
- ofgroup->type = gm->type;
- list_move(&ofgroup->buckets, &gm->buckets);
- ofgroup->n_buckets = list_size(&ofgroup->buckets);
-
- error = ofproto->ofproto_class->group_modify(ofgroup, victim);
- if (!error) {
- ofputil_bucket_list_destroy(&victim->buckets);
- ofproto->n_groups[victim->type]--;
- ofproto->n_groups[ofgroup->type]++;
- ofgroup->modified = time_msec();
- } else {
- ofputil_bucket_list_destroy(&ofgroup->buckets);
+ error = ofproto->ofproto_class->group_modify(new_ofgroup);
+ if (error) {
+ goto out;
+ }
- *ofgroup = *victim;
- list_move(&ofgroup->buckets, &victim->buckets);
+ retiring = ofgroup;
+ /* Replace ofgroup in ofproto's groups hash map with new_ofgroup. */
+ hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
+ hmap_insert(&ofproto->groups, &new_ofgroup->hmap_node,
+ hash_int(new_ofgroup->group_id, 0));
+ if (ofgroup->type != new_ofgroup->type) {
+ ofproto->n_groups[ofgroup->type]--;
+ ofproto->n_groups[new_ofgroup->type]++;
}
- unlock_out:
- ovs_rwlock_unlock(&ofgroup->rwlock);
+out:
+ ofproto_group_unref(retiring);
ovs_rwlock_unlock(&ofproto->groups_rwlock);
- free_out:
- ofproto->ofproto_class->group_dealloc(victim);
return error;
}
fm.out_group = ofgroup->group_id;
handle_flow_mod__(ofproto, NULL, &fm, NULL);
- /* Must wait until existing readers are done,
- * while holding the container's write lock at the same time. */
- ovs_rwlock_wrlock(&ofgroup->rwlock);
hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
/* No-one can find this group any more. */
ofproto->n_groups[ofgroup->type]--;
ovs_rwlock_unlock(&ofproto->groups_rwlock);
-
- ofproto->ofproto_class->group_destruct(ofgroup);
- ofputil_bucket_list_destroy(&ofgroup->buckets);
- ovs_rwlock_unlock(&ofgroup->rwlock);
- ovs_rwlock_destroy(&ofgroup->rwlock);
- ofproto->ofproto_class->group_dealloc(ofgroup);
+ ofproto_group_unref(ofgroup);
}
/* Implements OFPGC_DELETE. */
classifier_init(&table->cls, flow_segment_u32s);
table->max_flows = UINT_MAX;
atomic_init(&table->config, (unsigned int)OFPROTO_TABLE_MISS_DEFAULT);
+
+ fat_rwlock_wrlock(&table->cls.rwlock);
+ classifier_set_prefix_fields(&table->cls, default_prefix_fields,
+ ARRAY_SIZE(default_prefix_fields));
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ atomic_init(&table->n_matched, 0);
+ atomic_init(&table->n_missed, 0);
}
/* Destroys 'table', including its classifier and eviction groups.