enum ofputil_protocol protocol; /* Current protocol variant. */
enum nx_packet_in_format packet_in_format; /* OFPT_PACKET_IN format. */
- /* Asynchronous flow table operation support. */
- struct list opgroups; /* Contains pending "ofopgroups", if any. */
- struct ofpbuf *blocked; /* Postponed OpenFlow message, if any. */
- bool retry; /* True if 'blocked' is ready to try again. */
-
/* OFPT_PACKET_IN related data. */
struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
#define N_SCHEDULERS 2
const struct ofproto_controller *);
static void ofconn_run(struct ofconn *,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg));
-static void ofconn_wait(struct ofconn *, bool handling_openflow);
+static void ofconn_wait(struct ofconn *);
static void ofconn_log_flow_mods(struct ofconn *);
free(mgr);
}
-/* Does all of the periodic maintenance required by 'mgr'.
- *
- * If 'handle_openflow' is nonnull, calls 'handle_openflow' for each message
- * received on an OpenFlow connection, passing along the OpenFlow connection
- * itself and the message that was sent. If 'handle_openflow' returns true,
- * the message is considered to be fully processed. If 'handle_openflow'
- * returns false, the message is considered not to have been processed at all;
- * it will be stored and re-presented to 'handle_openflow' following the next
- * call to connmgr_retry(). 'handle_openflow' must not modify or free the
- * message.
- *
- * If 'handle_openflow' is NULL, no OpenFlow messages will be processed and
- * other activities that could affect the flow table (in-band processing,
- * fail-open processing) are suppressed too. */
+/* Does all of the periodic maintenance required by 'mgr'. Calls
+ * 'handle_openflow' for each message received on an OpenFlow connection,
+ * passing along the OpenFlow connection itself and the message that was sent.
+ * 'handle_openflow' must not modify or free the message. */
void
connmgr_run(struct connmgr *mgr,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg))
OVS_EXCLUDED(ofproto_mutex)
{
struct ofservice *ofservice;
size_t i;
- if (handle_openflow && mgr->in_band) {
+ if (mgr->in_band) {
if (!in_band_run(mgr->in_band)) {
in_band_destroy(mgr->in_band);
mgr->in_band = NULL;
/* Fail-open maintenance. Do this after processing the ofconns since
* fail-open checks the status of the controller rconn. */
- if (handle_openflow && mgr->fail_open) {
+ if (mgr->fail_open) {
fail_open_run(mgr->fail_open);
}
}
}
-/* Causes the poll loop to wake up when connmgr_run() needs to run.
- *
- * If 'handling_openflow' is true, arriving OpenFlow messages and other
- * activities that affect the flow table will wake up the poll loop. If
- * 'handling_openflow' is false, they will not. */
+/* Causes the poll loop to wake up when connmgr_run() needs to run. */
void
-connmgr_wait(struct connmgr *mgr, bool handling_openflow)
+connmgr_wait(struct connmgr *mgr)
{
struct ofservice *ofservice;
struct ofconn *ofconn;
size_t i;
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- ofconn_wait(ofconn, handling_openflow);
+ ofconn_wait(ofconn);
}
ofmonitor_wait(mgr);
- if (handling_openflow && mgr->in_band) {
+ if (mgr->in_band) {
in_band_wait(mgr->in_band);
}
- if (handling_openflow && mgr->fail_open) {
+ if (mgr->fail_open) {
fail_open_wait(mgr->fail_open);
}
HMAP_FOR_EACH (ofservice, node, &mgr->services) {
{
return ofconn->connmgr->ofproto;
}
-
-/* If processing of OpenFlow messages was blocked on any 'mgr' ofconns by
- * returning false to the 'handle_openflow' callback to connmgr_run(), this
- * re-enables them. */
-void
-connmgr_retry(struct connmgr *mgr)
-{
- struct ofconn *ofconn;
-
- LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- ofconn->retry = true;
- }
-}
\f
/* OpenFlow configuration. */
ofconn->last_op = now;
}
-/* Returns true if 'ofconn' has any pending opgroups. */
-bool
-ofconn_has_pending_opgroups(const struct ofconn *ofconn)
-{
- return !list_is_empty(&ofconn->opgroups);
-}
-
-/* Adds 'ofconn_node' to 'ofconn''s list of pending opgroups.
- *
- * If 'ofconn' is destroyed or its connection drops, then 'ofconn' will remove
- * 'ofconn_node' from the list and re-initialize it with list_init(). The
- * client may, therefore, use list_is_empty(ofconn_node) to determine whether
- * 'ofconn_node' is still associated with an active ofconn.
- *
- * The client may also remove ofconn_node from the list itself, with
- * list_remove(). */
-void
-ofconn_add_opgroup(struct ofconn *ofconn, struct list *ofconn_node)
-{
- list_push_back(&ofconn->opgroups, ofconn_node);
-}
-
struct hmap *
ofconn_get_bundles(struct ofconn *ofconn)
{
ofconn->type = type;
ofconn->enable_async_msgs = enable_async_msgs;
- list_init(&ofconn->opgroups);
-
hmap_init(&ofconn->monitors);
list_init(&ofconn->updates);
ofconn_set_protocol(ofconn, OFPUTIL_P_NONE);
ofconn->packet_in_format = NXPIF_OPENFLOW10;
- /* Disassociate 'ofconn' from all of the ofopgroups that it initiated that
- * have not yet completed. (Those ofopgroups will still run to completion
- * in the usual way, but any errors that they run into will not be reported
- * on any OpenFlow channel.)
- *
- * Also discard any blocked operation on 'ofconn'. */
- while (!list_is_empty(&ofconn->opgroups)) {
- list_init(list_pop_front(&ofconn->opgroups));
- }
- ofpbuf_delete(ofconn->blocked);
- ofconn->blocked = NULL;
-
rconn_packet_counter_destroy(ofconn->packet_in_counter);
ofconn->packet_in_counter = rconn_packet_counter_create();
for (i = 0; i < N_SCHEDULERS; i++) {
ofconn_may_recv(const struct ofconn *ofconn)
{
int count = rconn_packet_counter_n_packets(ofconn->reply_counter);
- return (!ofconn->blocked || ofconn->retry) && count < OFCONN_REPLY_MAX;
+ return count < OFCONN_REPLY_MAX;
}
static void
ofconn_run(struct ofconn *ofconn,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg))
{
struct connmgr *mgr = ofconn->connmgr;
rconn_run(ofconn->rconn);
- if (handle_openflow) {
- /* Limit the number of iterations to avoid starving other tasks. */
- for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
- struct ofpbuf *of_msg;
-
- of_msg = (ofconn->blocked
- ? ofconn->blocked
- : rconn_recv(ofconn->rconn));
- if (!of_msg) {
- break;
- }
- if (mgr->fail_open) {
- fail_open_maybe_recover(mgr->fail_open);
- }
+ /* Limit the number of iterations to avoid starving other tasks. */
+ for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
+ struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
+ if (!of_msg) {
+ break;
+ }
- if (handle_openflow(ofconn, of_msg)) {
- ofpbuf_delete(of_msg);
- ofconn->blocked = NULL;
- } else {
- ofconn->blocked = of_msg;
- ofconn->retry = false;
- }
+ if (mgr->fail_open) {
+ fail_open_maybe_recover(mgr->fail_open);
}
- }
+ handle_openflow(ofconn, of_msg);
+ ofpbuf_delete(of_msg);
+ }
if (time_msec() >= ofconn->next_op_report) {
ofconn_log_flow_mods(ofconn);
}
static void
-ofconn_wait(struct ofconn *ofconn, bool handling_openflow)
+ofconn_wait(struct ofconn *ofconn)
{
int i;
pinsched_wait(ofconn->schedulers[i]);
}
rconn_run_wait(ofconn->rconn);
- if (handling_openflow && ofconn_may_recv(ofconn)) {
+ if (ofconn_may_recv(ofconn)) {
rconn_recv_wait(ofconn->rconn);
}
if (ofconn->next_op_report != LLONG_MAX) {
HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
if (m->flags & update
&& (m->table_id == 0xff || m->table_id == rule->table_id)
- && ofoperation_has_out_port(rule->pending, m->out_port)
&& cls_rule_is_loose_match(&rule->cr, &m->match)) {
flags |= m->flags;
}
* assigned to groups but will not have an associated ofconn. */
struct ofopgroup {
struct ofproto *ofproto; /* Owning ofproto. */
- struct list ofproto_node; /* In ofproto's "pending" list. */
struct list ops; /* List of "struct ofoperation"s. */
- int n_running; /* Number of ops still pending. */
/* Data needed to send OpenFlow reply on failure or to send a buffered
* packet on success.
uint32_t group_id)
OVS_EXCLUDED(ofproto->groups_rwlock);
static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
-static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
+static void handle_openflow(struct ofconn *, const struct ofpbuf *);
static enum ofperr handle_flow_mod__(struct ofproto *,
struct ofputil_flow_mod *,
const struct flow_mod_requester *)
hindex_init(&ofproto->cookies);
list_init(&ofproto->expirable);
ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
- ofproto->state = S_OPENFLOW;
- list_init(&ofproto->pending);
- ofproto->n_pending = 0;
- hmap_init(&ofproto->deletions);
guarded_list_init(&ofproto->rule_executes);
ofproto->vlan_bitmap = NULL;
ofproto->vlans_changed = false;
}
+/* Deletes 'rule' with OFPRR reason 'reason': wraps delete_flow__() in a
+ * fresh unattached ofopgroup and submits it.  The owning ofproto is now
+ * taken from rule->ofproto instead of being passed in, and the old
+ * "no operation already pending" assertion is gone along with
+ * rule->pending. */
static void
-ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule,
- uint8_t reason)
+ofproto_rule_delete__(struct rule *rule, uint8_t reason)
OVS_REQUIRES(ofproto_mutex)
{
struct ofopgroup *group;
- ovs_assert(!rule->pending);
-
- group = ofopgroup_create_unattached(ofproto);
+ group = ofopgroup_create_unattached(rule->ofproto);
delete_flow__(rule, group, reason);
ofopgroup_submit(group);
}
OVS_EXCLUDED(ofproto_mutex)
{
struct ofopgroup *group;
+ struct ofoperation *op;
ovs_mutex_lock(&ofproto_mutex);
- ovs_assert(!rule->pending);
-
group = ofopgroup_create_unattached(ofproto);
- ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
+ op = ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
oftable_remove_rule__(ofproto, rule);
ofproto->ofproto_class->rule_delete(rule);
+ ofoperation_complete(op, 0);
ofopgroup_submit(group);
ovs_mutex_unlock(&ofproto_mutex);
cls_cursor_init(&cursor, &table->cls, NULL);
fat_rwlock_unlock(&table->cls.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
- if (!rule->pending) {
- ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
- }
+ ofproto_rule_delete__(rule, OFPRR_DELETE);
}
}
ovs_mutex_unlock(&ofproto_mutex);
{
struct oftable *table;
- ovs_assert(list_is_empty(&ofproto->pending));
-
destroy_rule_executes(ofproto);
delete_group(ofproto, OFPG_ALL);
}
free(ofproto->tables);
- hmap_destroy(&ofproto->deletions);
-
ovs_assert(hindex_is_empty(&ofproto->cookies));
hindex_destroy(&ofproto->cookies);
}
}
-static bool
-any_pending_ops(const struct ofproto *p)
- OVS_EXCLUDED(ofproto_mutex)
-{
- bool b;
-
- ovs_mutex_lock(&ofproto_mutex);
- b = !list_is_empty(&p->pending);
- ovs_mutex_unlock(&ofproto_mutex);
-
- return b;
-}
-
int
ofproto_run(struct ofproto *p)
{
case S_EVICT:
connmgr_run(p->connmgr, NULL);
ofproto_evict(p);
- if (!any_pending_ops(p)) {
- p->state = S_OPENFLOW;
- }
+ p->state = S_OPENFLOW;
break;
case S_FLUSH:
connmgr_run(p->connmgr, NULL);
ofproto_flush__(p);
- if (!any_pending_ops(p)) {
- connmgr_flushed(p->connmgr);
- p->state = S_OPENFLOW;
- }
+ connmgr_flushed(p->connmgr);
+ p->state = S_OPENFLOW;
break;
default:
switch (p->state) {
case S_OPENFLOW:
- connmgr_wait(p->connmgr, true);
+ connmgr_wait(p->connmgr);
break;
case S_EVICT:
case S_FLUSH:
- connmgr_wait(p->connmgr, false);
- if (!any_pending_ops(p)) {
- poll_immediate_wake();
- }
+ connmgr_wait(p->connmgr);
+ poll_immediate_wake();
break;
}
}
simap_increase(usage, "ports", hmap_count(&ofproto->ports));
- ovs_mutex_lock(&ofproto_mutex);
- simap_increase(usage, "ops",
- ofproto->n_pending + hmap_count(&ofproto->deletions));
- ovs_mutex_unlock(&ofproto_mutex);
-
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
fat_rwlock_rdlock(&table->cls.rwlock);
* ofproto's table 0 and, if it finds one, deletes it.
*
* This is a helper function for in-band control and fail-open. */
-bool
+void
ofproto_delete_flow(struct ofproto *ofproto,
const struct match *target, unsigned int priority)
OVS_EXCLUDED(ofproto_mutex)
rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
priority));
fat_rwlock_unlock(&cls->rwlock);
- if (!rule) {
- return true;
+ if (rule) {
+ /* Execute a full flow mod. We can't optimize this at all because we
+ * didn't take enough locks above to ensure that the flow table didn't
+ * already change beneath us. */
+ simple_flow_mod(ofproto, target, priority, NULL, 0,
+ OFPFC_DELETE_STRICT);
}
-
- /* Fall back to a executing a full flow mod. We can't optimize this at all
- * because we didn't take enough locks above to ensure that the flow table
- * didn't already change beneath us. */
- return simple_flow_mod(ofproto, target, priority, NULL, 0,
- OFPFC_DELETE_STRICT) != OFPROTO_POSTPONE;
}
/* Starts the process of deleting all of the flows from all of ofproto's flow
}
}
-/* Returns true if a rule related to 'op' has an OpenFlow OFPAT_OUTPUT or
- * OFPAT_ENQUEUE action that outputs to 'out_port'. */
-bool
-ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (ofproto_rule_has_out_port(op->rule, out_port)) {
- return true;
- }
-
- switch (op->type) {
- case OFOPERATION_ADD:
- case OFOPERATION_DELETE:
- return false;
-
- case OFOPERATION_MODIFY:
- case OFOPERATION_REPLACE:
- return ofpacts_output_to_port(op->actions->ofpacts,
- op->actions->ofpacts_len, out_port);
- }
-
- OVS_NOT_REACHED();
-}
-
static void
rule_execute_destroy(struct rule_execute *e)
{
* check 'c->cr' itself.
*
* Increments '*n_readonly' if 'rule' wasn't added because it's read-only (and
- * 'c' only includes modifiable rules).
- *
- * Returns 0 ordinarily, but OFPROTO_POSTPONE if we would otherwise collect a
- * rule that has a pending operation. */
-static enum ofperr
+ * 'c' only includes modifiable rules). */
+static void
collect_rule(struct rule *rule, const struct rule_criteria *c,
struct rule_collection *rules, size_t *n_readonly)
OVS_REQUIRES(ofproto_mutex)
&& ofproto_rule_has_out_group(rule, c->out_group)
&& !((rule->flow_cookie ^ c->cookie) & c->cookie_mask)
&& (!rule_is_hidden(rule) || c->include_hidden)) {
- if (rule->pending) {
- return OFPROTO_POSTPONE;
- }
-
/* Rule matches all the criteria... */
if (rule_is_modifiable(rule, 0) || c->include_readonly) {
/* ...add it. */
++*n_readonly;
}
}
- return 0;
}
/* Searches 'ofproto' for rules that match the criteria in 'criteria'. Matches
hash_cookie(criteria->cookie),
&ofproto->cookies) {
if (cls_rule_is_loose_match(&rule->cr, &criteria->cr.match)) {
- error = collect_rule(rule, criteria, rules, &n_readonly);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
} else {
fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &criteria->cr);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
- error = collect_rule(rule, criteria, rules, &n_readonly);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
fat_rwlock_unlock(&table->cls.rwlock);
}
hash_cookie(criteria->cookie),
&ofproto->cookies) {
if (cls_rule_equal(&rule->cr, &criteria->cr)) {
- error = collect_rule(rule, criteria, rules, &n_readonly);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
} else {
&table->cls, &criteria->cr));
fat_rwlock_unlock(&table->cls.rwlock);
if (rule) {
- error = collect_rule(rule, criteria, rules, &n_readonly);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
}
exit:
+ if (!error && !rules->n && n_readonly) {
+ /* We didn't find any rules to modify. We did find some read-only
+ * rules that we're not allowed to modify, so report that. */
+ error = OFPERR_OFPBRC_EPERM;
+ }
if (error) {
rule_collection_destroy(rules);
}
return error;
}
-static bool
-is_flow_deletion_pending(const struct ofproto *ofproto,
- const struct cls_rule *cls_rule,
- uint8_t table_id)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (!hmap_is_empty(&ofproto->deletions)) {
- struct ofoperation *op;
-
- HMAP_FOR_EACH_WITH_HASH (op, hmap_node,
- cls_rule_hash(cls_rule, table_id),
- &ofproto->deletions) {
- if (op->rule->table_id == table_id
- && cls_rule_equal(cls_rule, &op->rule->cr)) {
- return true;
- }
- }
- }
-
- return false;
-}
-
static bool
should_evict_a_rule(struct oftable *table, unsigned int extra_space)
OVS_REQUIRES(ofproto_mutex)
if (!choose_rule_to_evict(table, &rule)) {
return OFPERR_OFPFMFC_TABLE_FULL;
- } else if (rule->pending) {
- return OFPROTO_POSTPONE;
} else {
struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
delete_flow__(rule, group, OFPRR_EVICTION);
{
const struct rule_actions *actions;
struct ofopgroup *group;
+ struct ofoperation *op;
struct oftable *table;
struct cls_rule cr;
struct rule *rule;
cls_rule_destroy(&cr);
if (!rule_is_modifiable(rule, fm->flags)) {
return OFPERR_OFPBRC_EPERM;
- } else if (rule->pending) {
- return OFPROTO_POSTPONE;
} else {
struct rule_collection rules;
}
}
- /* Serialize against pending deletion. */
- if (is_flow_deletion_pending(ofproto, &cr, table_id)) {
- cls_rule_destroy(&cr);
- return OFPROTO_POSTPONE;
- }
-
/* Check for overlap, if requested. */
if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
bool overlaps;
*CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), &cr);
ovs_refcount_init(&rule->ref_count);
- rule->pending = NULL;
rule->flow_cookie = fm->new_cookie;
rule->created = rule->modified = time_msec();
fat_rwlock_unlock(&table->cls.rwlock);
group = ofopgroup_create(ofproto, fm->buffer_id, req);
- ofoperation_create(group, rule, OFOPERATION_ADD, 0);
- ofproto->ofproto_class->rule_insert(rule);
+ op = ofoperation_create(group, rule, OFOPERATION_ADD, 0);
+ error = ofproto->ofproto_class->rule_insert(rule);
+ ofoperation_complete(op, error);
ofopgroup_submit(group);
return error;
ovsrcu_set(&rule->actions, new_actions);
ofproto->ofproto_class->rule_modify_actions(rule, reset_counters);
- } else {
- ofoperation_complete(op, 0);
}
+ ofoperation_complete(op, 0);
}
ofopgroup_submit(group);
ofproto_rule_expire(struct rule *rule, uint8_t reason)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofproto *ofproto = rule->ofproto;
-
ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT
|| reason == OFPRR_DELETE || reason == OFPRR_GROUP_DELETE);
- ofproto_rule_delete__(ofproto, rule, reason);
+ ofproto_rule_delete__(rule, reason);
}
/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
}
if (request.role != OFPCR12_ROLE_NOCHANGE) {
- if (ofconn_get_role(ofconn) != request.role
- && ofconn_has_pending_opgroups(ofconn)) {
- return OFPROTO_POSTPONE;
- }
-
if (request.have_generation_id
&& !ofconn_set_master_election_id(ofconn, request.generation_id)) {
return OFPERR_OFPRRFC_STALE;
cur = ofconn_get_protocol(ofconn);
next = ofputil_protocol_set_base(cur, next_base);
- if (cur != next && ofconn_has_pending_opgroups(ofconn)) {
- /* Avoid sending async messages in surprising protocol. */
- return OFPROTO_POSTPONE;
- }
-
ofconn_set_protocol(ofconn, next);
+
return 0;
}
return OFPERR_OFPBRC_EPERM;
}
- if (format != ofconn_get_packet_in_format(ofconn)
- && ofconn_has_pending_opgroups(ofconn)) {
- /* Avoid sending async message in surprsing packet in format. */
- return OFPROTO_POSTPONE;
- }
-
ofconn_set_packet_in_format(ofconn, format);
return 0;
}
{
struct ofpbuf *buf;
- if (ofconn_has_pending_opgroups(ofconn)) {
- return OFPROTO_POSTPONE;
- }
-
buf = ofpraw_alloc_reply((oh->version == OFP10_VERSION
? OFPRAW_OFPT10_BARRIER_REPLY
: OFPRAW_OFPT11_BARRIER_REPLY), oh, 0);
struct list *msgs)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofoperation *op = rule->pending;
const struct rule_actions *actions;
struct ofputil_flow_update fu;
struct match match;
- if (op && op->type == OFOPERATION_ADD) {
- /* We'll report the final flow when the operation completes. Reporting
- * it now would cause a duplicate report later. */
- return;
- }
-
fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD)
? NXFME_ADDED : NXFME_MODIFIED);
fu.reason = 0;
fu.match = &match;
fu.priority = rule->cr.priority;
- if (!(flags & NXFMF_ACTIONS)) {
- actions = NULL;
- } else if (!op) {
- actions = rule_get_actions(rule);
- } else {
- /* An operation is in progress. Use the previous version of the flow's
- * actions, so that when the operation commits we report the change. */
- switch (op->type) {
- case OFOPERATION_ADD:
- OVS_NOT_REACHED();
-
- case OFOPERATION_MODIFY:
- case OFOPERATION_REPLACE:
- actions = op->actions ? op->actions : rule_get_actions(rule);
- break;
-
- case OFOPERATION_DELETE:
- actions = rule_get_actions(rule);
- break;
-
- default:
- OVS_NOT_REACHED();
- }
- }
+ actions = flags & NXFMF_ACTIONS ? rule_get_actions(rule) : NULL;
fu.ofpacts = actions ? actions->ofpacts : NULL;
fu.ofpacts_len = actions ? actions->ofpacts_len : 0;
return;
}
- if (!(rule->pending
- ? ofoperation_has_out_port(rule->pending, m->out_port)
- : ofproto_rule_has_out_port(rule, m->out_port))) {
+ if (!ofproto_rule_has_out_port(rule, m->out_port)) {
return;
}
OVS_REQUIRES(ofproto_mutex)
{
const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn);
- const struct ofoperation *op;
const struct oftable *table;
struct cls_rule target;
fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
- ovs_assert(!rule->pending); /* XXX */
ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
}
fat_rwlock_unlock(&table->cls.rwlock);
}
-
- HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
- struct rule *rule = op->rule;
-
- if (((m->table_id == 0xff
- ? !(ofproto->tables[rule->table_id].flags & OFTABLE_HIDDEN)
- : m->table_id == rule->table_id))
- && cls_rule_is_loose_match(&rule->cr, &target.match)) {
- ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
- }
- }
cls_rule_destroy(&target);
}
struct rule *rule;
LIST_FOR_EACH (rule, meter_list_node, &meter->rules) {
- if (rule->pending) {
- error = OFPROTO_POSTPONE;
- goto exit;
- }
rule_collection_add(&rules, rule);
}
}
/* Delete the meters. */
meter_delete(ofproto, first, last);
-exit:
ovs_mutex_unlock(&ofproto_mutex);
rule_collection_destroy(&rules);
}
}
+/* Handles the OpenFlow message 'ofp_msg' received on 'ofconn', reporting
+ * any error from handle_openflow__() back to the controller via
+ * ofconn_send_error().  With OFPROTO_POSTPONE removed, every message is
+ * fully processed in a single call, so the old bool "was the message
+ * consumed?" return value is no longer needed. */
-static bool
+static void
handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg)
OVS_EXCLUDED(ofproto_mutex)
{
int error = handle_openflow__(ofconn, ofp_msg);
- if (error && error != OFPROTO_POSTPONE) {
+ if (error) {
ofconn_send_error(ofconn, ofpbuf_data(ofp_msg), error);
}
COVERAGE_INC(ofproto_recv_openflow);
- return error != OFPROTO_POSTPONE;
}
\f
/* Asynchronous operations. */
{
struct ofopgroup *group = xzalloc(sizeof *group);
group->ofproto = ofproto;
- list_init(&group->ofproto_node);
list_init(&group->ops);
list_init(&group->ofconn_node);
return group;
ovs_assert(ofconn_get_ofproto(req->ofconn) == ofproto);
- ofconn_add_opgroup(req->ofconn, &group->ofconn_node);
group->ofconn = req->ofconn;
group->request = xmemdup(req->request, MIN(request_len, 64));
group->buffer_id = buffer_id;
ofopgroup_submit(struct ofopgroup *group)
OVS_REQUIRES(ofproto_mutex)
{
- if (!group->n_running) {
- ofopgroup_complete(group);
- } else {
- list_push_back(&group->ofproto->pending, &group->ofproto_node);
- group->ofproto->n_pending++;
- }
+ ofopgroup_complete(group);
}
static void
struct ofoperation *op, *next_op;
int error;
- ovs_assert(!group->n_running);
-
error = 0;
LIST_FOR_EACH (op, group_node, &group->ops) {
if (op->error) {
op->reason, abbrev_ofconn, abbrev_xid);
}
- rule->pending = NULL;
-
ovs_assert(!op->error || op->type == OFOPERATION_ADD);
switch (op->type) {
case OFOPERATION_ADD:
ofmonitor_flush(ofproto->connmgr);
- if (!list_is_empty(&group->ofproto_node)) {
- ovs_assert(ofproto->n_pending > 0);
- ofproto->n_pending--;
- list_remove(&group->ofproto_node);
- }
- if (!list_is_empty(&group->ofconn_node)) {
- list_remove(&group->ofconn_node);
- if (error) {
- ofconn_send_error(group->ofconn, group->request, error);
- }
- connmgr_retry(ofproto->connmgr);
+ if (error) {
+ ofconn_send_error(group->ofconn, group->request, error);
}
free(group->request);
free(group);
enum ofp_flow_removed_reason reason)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofproto *ofproto = group->ofproto;
struct ofoperation *op;
- ovs_assert(!rule->pending);
-
- op = rule->pending = xzalloc(sizeof *op);
+ op = xzalloc(sizeof *op);
op->group = group;
list_push_back(&group->ops, &op->group_node);
op->rule = rule;
ovs_mutex_unlock(&rule->mutex);
op->flags = rule->flags;
- group->n_running++;
-
- if (type == OFOPERATION_DELETE) {
- hmap_insert(&ofproto->deletions, &op->hmap_node,
- cls_rule_hash(&rule->cr, rule->table_id));
- }
-
return op;
}
ofoperation_destroy(struct ofoperation *op)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofopgroup *group = op->group;
-
- if (op->rule) {
- op->rule->pending = NULL;
- }
- if (op->type == OFOPERATION_DELETE) {
- hmap_remove(&group->ofproto->deletions, &op->hmap_node);
- }
- list_remove(&op->group_node);
rule_actions_destroy(op->actions);
free(op);
}
+/* Records 'error' as the result of operation 'op'.  Only OFOPERATION_ADD
+ * may fail (asserted below).  The n_running countdown and the deferred
+ * ofopgroup_complete() call are gone: per the ofopgroup_submit() hunk in
+ * this patch, groups now complete synchronously at submit time. */
void
ofoperation_complete(struct ofoperation *op, enum ofperr error)
{
- struct ofopgroup *group = op->group;
-
- ovs_assert(group->n_running > 0);
ovs_assert(!error || op->type == OFOPERATION_ADD);
-
op->error = error;
- if (!--group->n_running && !list_is_empty(&group->ofproto_node)) {
- /* This function can be called from ->rule_construct(), in which case
- * ofproto_mutex is held, or it can be called from ->run(), in which
- * case ofproto_mutex is not held. But only in the latter case can we
- * arrive here, so we can safely take ofproto_mutex now. */
- ovs_mutex_lock(&ofproto_mutex);
- ovs_assert(op->rule->pending == op);
- ofopgroup_complete(group);
- ovs_mutex_unlock(&ofproto_mutex);
- }
}
\f
static uint64_t