#include "unaligned.h"
#include "unixctl.h"
#include "vlog.h"
+#include "bundles.h"
VLOG_DEFINE_THIS_MODULE(ofproto);
/* OFOPERATION_MODIFY, OFOPERATION_REPLACE: The old actions, if the actions
* are changing. */
- struct rule_actions *actions;
+ const struct rule_actions *actions;
/* OFOPERATION_DELETE. */
enum ofp_flow_removed_reason reason; /* Reason flow was removed. */
};
/* rule. */
-static void ofproto_rule_destroy__(struct rule *);
static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
static bool rule_is_modifiable(const struct rule *rule,
enum ofputil_flow_mod_flags flag);
ofproto->n_pending = 0;
hmap_init(&ofproto->deletions);
guarded_list_init(&ofproto->rule_executes);
- ofproto->n_add = ofproto->n_delete = ofproto->n_modify = 0;
- ofproto->first_op = ofproto->last_op = LLONG_MIN;
- ofproto->next_op_report = LLONG_MAX;
- ofproto->op_backoff = LLONG_MIN;
ofproto->vlan_bitmap = NULL;
ofproto->vlans_changed = false;
ofproto->min_mtu = INT_MAX;
ovs_rwlock_init(&ofproto->groups_rwlock);
hmap_init(&ofproto->groups);
ovs_mutex_unlock(&ofproto_mutex);
+ ofproto->ogf.types = 0xf;
ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS |
OFPGFC_SELECT_WEIGHT;
ofproto->ogf.max_groups[OFPGT11_ALL] = OFPG_MAX;
}
}
-/* Populates 'status' with key value pairs indicating the status of the BFD
- * session on 'ofp_port'. This information is intended to be populated in the
- * OVS database. Has no effect if 'ofp_port' is not na OpenFlow port in
- * 'ofproto'. */
+/* Populates 'status' with the status of BFD on 'ofport'. If 'force' is set to
+ * true, status will be returned even if there is no status change since last
+ * update.
+ *
+ * Returns 0 on success. Returns a negative number if there is no status change
+ * since last update and 'force' is set to false. Returns a positive errno
+ * otherwise. Has no effect if 'ofp_port' is not an OpenFlow port in 'ofproto'.
+ *
+ * The caller must provide and own '*status'. */
int
ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port,
- struct smap *status)
+ bool force, struct smap *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
return (ofport && ofproto->ofproto_class->get_bfd_status
- ? ofproto->ofproto_class->get_bfd_status(ofport, status)
+ ? ofproto->ofproto_class->get_bfd_status(ofport, force, status)
: EOPNOTSUPP);
}
ovs_assert(list_is_empty(&ofproto->pending));
destroy_rule_executes(ofproto);
- guarded_list_destroy(&ofproto->rule_executes);
-
delete_group(ofproto, OFPG_ALL);
+
+ guarded_list_destroy(&ofproto->rule_executes);
ovs_rwlock_destroy(&ofproto->groups_rwlock);
hmap_destroy(&ofproto->groups);
hmap_destroy(&ofproto->deletions);
+ ovs_assert(hindex_is_empty(&ofproto->cookies));
+ hindex_destroy(&ofproto->cookies);
+
free(ofproto->vlan_bitmap);
ofproto->ofproto_class->dealloc(ofproto);
}
p->ofproto_class->destruct(p);
- ofproto_destroy__(p);
+ /* Destroying rules is deferred, must have 'ofproto' around for them. */
+ ovsrcu_postpone(ofproto_destroy__, p);
}
/* Destroys the datapath with the respective 'name' and 'type'. With the Linux
ovs_mutex_lock(&ofproto_mutex);
fat_rwlock_rdlock(&table->cls.rwlock);
+
+ if (classifier_count(&table->cls) > 100000) {
+ static struct vlog_rate_limit count_rl =
+ VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_WARN_RL(&count_rl, "Table %"PRIuSIZE" has an excessive"
+ " number of rules: %d", i,
+ classifier_count(&table->cls));
+ }
+
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
if (rule->idle_timeout || rule->hard_timeout) {
OVS_NOT_REACHED();
}
- if (time_msec() >= p->next_op_report) {
- long long int ago = (time_msec() - p->first_op) / 1000;
- long long int interval = (p->last_op - p->first_op) / 1000;
- struct ds s;
-
- ds_init(&s);
- ds_put_format(&s, "%d flow_mods ",
- p->n_add + p->n_delete + p->n_modify);
- if (interval == ago) {
- ds_put_format(&s, "in the last %lld s", ago);
- } else if (interval) {
- ds_put_format(&s, "in the %lld s starting %lld s ago",
- interval, ago);
- } else {
- ds_put_format(&s, "%lld s ago", ago);
- }
-
- ds_put_cstr(&s, " (");
- if (p->n_add) {
- ds_put_format(&s, "%d adds, ", p->n_add);
- }
- if (p->n_delete) {
- ds_put_format(&s, "%d deletes, ", p->n_delete);
- }
- if (p->n_modify) {
- ds_put_format(&s, "%d modifications, ", p->n_modify);
- }
- s.length -= 2;
- ds_put_char(&s, ')');
-
- VLOG_INFO("%s: %s", p->name, ds_cstr(&s));
- ds_destroy(&s);
-
- p->n_add = p->n_delete = p->n_modify = 0;
- p->next_op_report = LLONG_MAX;
- }
-
return error;
}
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, match, priority));
if (rule) {
- struct rule_actions *actions = rule_get_actions(rule);
+ const struct rule_actions *actions = rule_get_actions(rule);
must_add = !ofpacts_equal(actions->ofpacts, actions->ofpacts_len,
ofpacts, ofpacts_len);
} else {
if (fm->command == OFPFC_MODIFY_STRICT && fm->table_id != OFPTT_ALL
&& !(fm->flags & OFPUTIL_FF_RESET_COUNTS)) {
struct oftable *table = &ofproto->tables[fm->table_id];
- struct cls_rule match_rule;
struct rule *rule;
bool done = false;
- cls_rule_init(&match_rule, &fm->match, fm->priority);
fat_rwlock_rdlock(&table->cls.rwlock);
- rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
- &match_rule));
+ rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
+ &fm->match,
+ fm->priority));
if (rule) {
/* Reading many of the rule fields and writing on 'modified'
* requires the rule->mutex. Also, rule->actions may change
return handle_flow_mod__(ofproto, NULL, fm, NULL);
}
-/* Resets the modified time for 'rule' or an equivalent rule. If 'rule' is not
- * in the classifier, but an equivalent rule is, unref 'rule' and ref the new
- * rule. Otherwise if 'rule' is no longer installed in the classifier,
- * reinstall it.
- *
- * Returns the rule whose modified time has been reset. */
-struct rule *
-ofproto_refresh_rule(struct rule *rule)
-{
- const struct oftable *table = &rule->ofproto->tables[rule->table_id];
- const struct cls_rule *cr = &rule->cr;
- struct rule *r;
-
- /* do_add_flow() requires that the rule is not installed. We lock the
- * ofproto_mutex here so that another thread cannot add the flow before
- * we get a chance to add it.*/
- ovs_mutex_lock(&ofproto_mutex);
-
- fat_rwlock_rdlock(&table->cls.rwlock);
- r = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, cr));
- if (r != rule) {
- ofproto_rule_ref(r);
- }
- fat_rwlock_unlock(&table->cls.rwlock);
-
- if (!r) {
- do_add_flow(rule->ofproto, NULL, NULL, 0, rule);
- } else if (r != rule) {
- ofproto_rule_unref(rule);
- rule = r;
- }
- ovs_mutex_unlock(&ofproto_mutex);
-
- /* Refresh the modified time for the rule. */
- ovs_mutex_lock(&rule->mutex);
- rule->modified = MAX(rule->modified, time_msec());
- ovs_mutex_unlock(&rule->mutex);
-
- return rule;
-}
-
/* Searches for a rule with matching criteria exactly equal to 'target' in
* ofproto's table 0 and, if it finds one, deletes it.
*
}
}
\f
+/* Frees the memory owned by 'rule': its classifier rule, its actions (freed
+ * after an RCU grace period via rule_actions_destroy()), and its mutex, then
+ * returns the rule itself to the provider's rule_dealloc.  The provider's
+ * rule_destruct must already have run (see rule_destroy_cb()). */
+static void
+ofproto_rule_destroy__(struct rule *rule)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+    cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
+    rule_actions_destroy(rule_get_actions(rule));
+    ovs_mutex_destroy(&rule->mutex);
+    rule->ofproto->ofproto_class->rule_dealloc(rule);
+}
+
+/* RCU callback: destructs 'rule' in the provider and then frees it.
+ * Scheduled from ofproto_rule_unref() via ovsrcu_postpone() once the
+ * rule's ref count reaches zero. */
+static void
+rule_destroy_cb(struct rule *rule)
+{
+    rule->ofproto->ofproto_class->rule_destruct(rule);
+    ofproto_rule_destroy__(rule);
+}
+
void
ofproto_rule_ref(struct rule *rule)
{
}
}
+/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
+ * ref_count reaches 0.
+ *
+ * Use of RCU allows short term use (between RCU quiescent periods) without
+ * keeping a reference. A reference must be taken if the rule needs to
+ * stay around across the RCU quiescent periods. */
void
ofproto_rule_unref(struct rule *rule)
{
if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
- rule->ofproto->ofproto_class->rule_destruct(rule);
- ofproto_rule_destroy__(rule);
+ ovsrcu_postpone(rule_destroy_cb, rule);
}
}
-static void
-ofproto_rule_destroy__(struct rule *rule)
- OVS_NO_THREAD_SAFETY_ANALYSIS
-{
- cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
- rule_actions_destroy(rule_get_actions(rule));
- ovs_mutex_destroy(&rule->mutex);
- rule->ofproto->ofproto_class->rule_dealloc(rule);
-}
-
static uint32_t get_provider_meter_id(const struct ofproto *,
uint32_t of_meter_id);
-/* Creates and returns a new 'struct rule_actions', with a ref_count of 1,
- * whose actions are a copy of from the 'ofpacts_len' bytes of 'ofpacts'. */
-struct rule_actions *
+/* Creates and returns a new 'struct rule_actions', whose actions are a copy
+ * of from the 'ofpacts_len' bytes of 'ofpacts'. */
+const struct rule_actions *
rule_actions_create(const struct ofproto *ofproto,
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct rule_actions *actions;
- actions = xmalloc(sizeof *actions);
- actions->ofpacts = xmemdup(ofpacts, ofpacts_len);
+ actions = xmalloc(sizeof *actions + ofpacts_len);
actions->ofpacts_len = ofpacts_len;
actions->provider_meter_id
= get_provider_meter_id(ofproto,
ofpacts_get_meter(ofpacts, ofpacts_len));
+ memcpy(actions->ofpacts, ofpacts, ofpacts_len);
return actions;
}
-static void
-rule_actions_destroy_cb(struct rule_actions *actions)
-{
- free(actions->ofpacts);
- free(actions);
-}
-
-/* Decrements 'actions''s ref_count and frees 'actions' if the ref_count
- * reaches 0. */
+/* Free the actions after the RCU quiescent period is reached. */
void
-rule_actions_destroy(struct rule_actions *actions)
+rule_actions_destroy(const struct rule_actions *actions)
{
if (actions) {
- ovsrcu_postpone(rule_actions_destroy_cb, actions);
+ ovsrcu_postpone(free, CONST_CAST(struct rule_actions *, actions));
}
}
return error;
}
- error = ofputil_decode_port_mod(oh, &pm);
+ error = ofputil_decode_port_mod(oh, &pm, false);
if (error) {
return error;
}
ofputil_append_port_stat(replies, &ops);
}
-static enum ofperr
-handle_port_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_port_request(struct ofconn *ofconn,
+ const struct ofp_header *request, ofp_port_t port_no,
+ void (*cb)(struct ofport *, struct list *replies))
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofport *port;
struct list replies;
- ofp_port_t port_no;
- enum ofperr error;
-
- error = ofputil_decode_port_stats_request(request, &port_no);
- if (error) {
- return error;
- }
ofpmp_init(&replies, request);
if (port_no != OFPP_ANY) {
- port = ofproto_get_port(p, port_no);
+ port = ofproto_get_port(ofproto, port_no);
if (port) {
- append_port_stat(port, &replies);
+ cb(port, &replies);
}
} else {
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- append_port_stat(port, &replies);
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+ cb(port, &replies);
}
}
ofconn_send_replies(ofconn, &replies);
- return 0;
+}
+
+/* Handles an OpenFlow port-stats request on 'ofconn': decodes the requested
+ * port number from 'request' and replies with statistics for that port (or
+ * for every port, if the request names OFPP_ANY).
+ *
+ * Returns 0 on success, otherwise the decode error. */
+static enum ofperr
+handle_port_stats_request(struct ofconn *ofconn,
+                          const struct ofp_header *request)
+{
+    ofp_port_t port_no;
+    enum ofperr error;
+
+    error = ofputil_decode_port_stats_request(request, &port_no);
+    if (!error) {
+        handle_port_request(ofconn, request, port_no, append_port_stat);
+    }
+    return error;
+}
+
+/* Callback for handle_port_request(): appends an OpenFlow port description
+ * for 'port' to the multipart reply list 'replies'. */
+static void
+append_port_desc(struct ofport *port, struct list *replies)
+{
+    ofputil_append_port_desc_stats_reply(&port->pp, replies);
+}
static enum ofperr
handle_port_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
- enum ofp_version version;
- struct ofport *port;
- struct list replies;
-
- ofpmp_init(&replies, request);
+ ofp_port_t port_no;
+ enum ofperr error;
- version = ofputil_protocol_to_ofp_version(ofconn_get_protocol(ofconn));
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- ofputil_append_port_desc_stats_reply(version, &port->pp, &replies);
+ error = ofputil_decode_port_desc_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_desc);
}
-
- ofconn_send_replies(ofconn, &replies);
- return 0;
+ return error;
}
static uint32_t
long long int now = time_msec();
struct ofputil_flow_stats fs;
long long int created, used, modified;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
enum ofputil_flow_mod_flags flags;
ovs_mutex_lock(&rule->mutex);
flow_stats_ds(struct rule *rule, struct ds *results)
{
uint64_t packet_count, byte_count;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
long long int created, used;
rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
}
-/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'. Returns
- * true if the port's CFM status was successfully stored into '*status'.
- * Returns false if the port did not have CFM configured, in which case
- * '*status' is indeterminate.
+/* Checks the status of CFM configured on 'ofp_port' within 'ofproto' and stores
+ * the port's CFM status in '*status'. If 'force' is set to true, status will
+ * be returned even if there is no status change since last update.
*
- * The caller must provide and owns '*status', and must free 'status->rmps'. */
-bool
+ * Returns 0 on success. Returns a negative number if there is no status
+ * change since last update and 'force' is set to false. Returns positive errno
+ * if the port did not have CFM configured.
+ *
+ * The caller must provide and own '*status', and must free 'status->rmps'.
+ * '*status' is indeterminate if the return value is non-zero. */
+int
ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
- struct ofproto_cfm_status *status)
+ bool force, struct ofproto_cfm_status *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
- return (ofport
- && ofproto->ofproto_class->get_cfm_status
- && ofproto->ofproto_class->get_cfm_status(ofport, status));
+ return (ofport && ofproto->ofproto_class->get_cfm_status
+ ? ofproto->ofproto_class->get_cfm_status(ofport, force, status)
+ : EOPNOTSUPP);
}
static enum ofperr
HMAP_FOR_EACH_WITH_HASH (op, hmap_node,
cls_rule_hash(cls_rule, table_id),
&ofproto->deletions) {
- if (cls_rule_equal(cls_rule, &op->rule->cr)) {
+ if (op->rule->table_id == table_id
+ && cls_rule_equal(cls_rule, &op->rule->cr)) {
return true;
}
}
reset_counters = (fm->flags & OFPUTIL_FF_RESET_COUNTS) != 0;
if (actions_changed || reset_counters) {
- struct rule_actions *new_actions;
+ const struct rule_actions *new_actions;
op->actions = rule_get_actions(rule);
new_actions = rule_actions_create(ofproto,
uint64_t ofpacts_stub[1024 / 8];
struct ofpbuf ofpacts;
enum ofperr error;
- long long int now;
error = reject_slave_controller(ofconn);
if (error) {
goto exit_free_ofpacts;
}
- /* Record the operation for logging a summary report. */
- switch (fm.command) {
- case OFPFC_ADD:
- ofproto->n_add++;
- break;
-
- case OFPFC_MODIFY:
- case OFPFC_MODIFY_STRICT:
- ofproto->n_modify++;
- break;
-
- case OFPFC_DELETE:
- case OFPFC_DELETE_STRICT:
- ofproto->n_delete++;
- break;
- }
-
- now = time_msec();
- if (ofproto->next_op_report == LLONG_MAX) {
- ofproto->first_op = now;
- ofproto->next_op_report = MAX(now + 10 * 1000,
- ofproto->op_backoff);
- ofproto->op_backoff = ofproto->next_op_report + 60 * 1000;
- }
- ofproto->last_op = now;
+ ofconn_report_flow_mod(ofconn, fm.command);
exit_free_ofpacts:
ofpbuf_uninit(&ofpacts);
free(ogs.bucket_stats);
}
-static enum ofperr
-handle_group_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_group_request(struct ofconn *ofconn,
+ const struct ofp_header *request, uint32_t group_id,
+ void (*cb)(struct ofgroup *, struct list *replies))
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- enum ofperr error;
struct ofgroup *group;
- uint32_t group_id;
-
- error = ofputil_decode_group_stats_request(request, &group_id);
- if (error) {
- return error;
- }
+ struct list replies;
ofpmp_init(&replies, request);
-
if (group_id == OFPG_ALL) {
ovs_rwlock_rdlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
ovs_rwlock_rdlock(&group->rwlock);
- append_group_stats(group, &replies);
+ cb(group, &replies);
ovs_rwlock_unlock(&group->rwlock);
}
ovs_rwlock_unlock(&ofproto->groups_rwlock);
} else {
if (ofproto_group_lookup(ofproto, group_id, &group)) {
- append_group_stats(group, &replies);
+ cb(group, &replies);
ofproto_group_release(group);
}
}
-
ofconn_send_replies(ofconn, &replies);
-
- return 0;
}
static enum ofperr
-handle_group_desc_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+handle_group_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
{
- struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- struct ofputil_group_desc gds;
- struct ofgroup *group;
-
- ofpmp_init(&replies, request);
+ uint32_t group_id;
+ enum ofperr error;
- ovs_rwlock_rdlock(&ofproto->groups_rwlock);
- HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- gds.group_id = group->group_id;
- gds.type = group->type;
- ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+ error = ofputil_decode_group_stats_request(request, &group_id);
+ if (error) {
+ return error;
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
- ofconn_send_replies(ofconn, &replies);
+ handle_group_request(ofconn, request, group_id, append_group_stats);
+ return 0;
+}
+/* Callback for handle_group_request(): appends a description of 'group'
+ * (its id, type, and buckets) to the multipart reply list 'replies'. */
+static void
+append_group_desc(struct ofgroup *group, struct list *replies)
+{
+    struct ofputil_group_desc gds = {
+        .group_id = group->group_id,
+        .type = group->type,
+    };
+
+    ofputil_append_group_desc_reply(&gds, &group->buckets, replies);
+}
+
+/* Handles an OpenFlow group-description request on 'ofconn', replying with
+ * the description of the requested group (or of all groups, for OFPG_ALL).
+ * ofputil_decode_group_desc_request() yields the requested group id directly,
+ * so there is no decode-error path here.  Always returns 0. */
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+                                const struct ofp_header *request)
+{
+    handle_group_request(ofconn, request,
+                         ofputil_decode_group_desc_request(request),
+                         append_group_desc);
+    return 0;
+}
return table_mod(ofproto, &tm);
}
+/* Handles an OFPT_BUNDLE_CONTROL message on 'ofconn': opens, closes,
+ * commits, or discards the bundle named in the request and, on success,
+ * sends the matching *_REPLY back to the controller.  Receiving a *_REPLY
+ * type from a controller is itself an error.
+ *
+ * Returns 0 on success, otherwise an OpenFlow error code. */
+static enum ofperr
+handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    enum ofperr error;
+    struct ofputil_bundle_ctrl_msg bctrl;
+    struct ofpbuf *buf;
+    struct ofputil_bundle_ctrl_msg reply;
+
+    error = ofputil_decode_bundle_ctrl(oh, &bctrl);
+    if (error) {
+        return error;
+    }
+    reply.flags = 0;
+    reply.bundle_id = bctrl.bundle_id;
+
+    switch (bctrl.type) {
+    case OFPBCT_OPEN_REQUEST:
+        error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_OPEN_REPLY;
+        break;
+    case OFPBCT_CLOSE_REQUEST:
+        error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_CLOSE_REPLY;
+        break;
+    case OFPBCT_COMMIT_REQUEST:
+        error = ofp_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_COMMIT_REPLY;
+        break;
+    case OFPBCT_DISCARD_REQUEST:
+        error = ofp_bundle_discard(ofconn, bctrl.bundle_id);
+        reply.type = OFPBCT_DISCARD_REPLY;
+        break;
+
+    case OFPBCT_OPEN_REPLY:
+    case OFPBCT_CLOSE_REPLY:
+    case OFPBCT_COMMIT_REPLY:
+    case OFPBCT_DISCARD_REPLY:
+    default:
+        /* Replies are controller-to-switch only; anything else is unknown. */
+        return OFPERR_OFPBFC_BAD_TYPE;
+    }
+
+    if (!error) {
+        buf = ofputil_encode_bundle_ctrl_reply(oh, &reply);
+        ofconn_send_reply(ofconn, buf);
+    }
+    return error;
+}
+
+
+/* Handles an OFPT_BUNDLE_ADD_MESSAGE on 'ofconn': decodes the inner message
+ * and records it in the named bundle for later commit.
+ *
+ * Returns 0 on success, otherwise an OpenFlow error code. */
+static enum ofperr
+handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    struct ofputil_bundle_add_msg badd;
+    enum ofperr error = ofputil_decode_bundle_add(oh, &badd);
+
+    return error ? error : ofp_bundle_add_message(ofconn, &badd);
+}
+
static enum ofperr
handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
OVS_EXCLUDED(ofproto_mutex)
case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
return handle_queue_get_config_request(ofconn, oh);
+ case OFPTYPE_BUNDLE_CONTROL:
+ return handle_bundle_control(ofconn, oh);
+
+ case OFPTYPE_BUNDLE_ADD_MESSAGE:
+ return handle_bundle_add(ofconn, oh);
+
case OFPTYPE_HELLO:
case OFPTYPE_ERROR:
case OFPTYPE_FEATURES_REPLY:
if (!(op->error
|| ofproto_rule_is_hidden(rule)
|| (op->type == OFOPERATION_MODIFY
- && op->actions
+ && !op->actions
&& rule->flow_cookie == op->flow_cookie))) {
/* Check that we can just cast from ofoperation_type to
* nx_flow_update_event. */
rule->hard_timeout = op->hard_timeout;
ovs_mutex_unlock(&rule->mutex);
if (op->actions) {
- struct rule_actions *old_actions;
+ const struct rule_actions *old_actions;
ovs_mutex_lock(&rule->mutex);
old_actions = rule_get_actions(rule);
{
struct ofproto *ofproto = rule->ofproto;
struct oftable *table = &ofproto->tables[rule->table_id];
- struct rule_actions *actions;
+ const struct rule_actions *actions;
bool may_expire;
ovs_mutex_lock(&rule->mutex);
void
ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
{
+ struct match match;
+ struct cls_rule target;
const struct oftable *oftable;
+ match_init_catchall(&match);
+ match_set_vlan_vid_masked(&match, htons(VLAN_CFI), htons(VLAN_CFI));
+ cls_rule_init(&target, &match, 0);
+
free(ofproto->vlan_bitmap);
ofproto->vlan_bitmap = bitmap_allocate(4096);
ofproto->vlans_changed = false;
OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
- const struct cls_subtable *table;
+ struct cls_cursor cursor;
+ struct rule *rule;
fat_rwlock_rdlock(&oftable->cls.rwlock);
- HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
- if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
- const struct cls_rule *rule;
-
- HMAP_FOR_EACH (rule, hmap_node, &table->rules) {
- uint16_t vid = miniflow_get_vid(&rule->match.flow);
- bitmap_set1(vlan_bitmap, vid);
- bitmap_set1(ofproto->vlan_bitmap, vid);
- }
+ cls_cursor_init(&cursor, &oftable->cls, &target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
+ uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
+
+ bitmap_set1(vlan_bitmap, vid);
+ bitmap_set1(ofproto->vlan_bitmap, vid);
}
}
fat_rwlock_unlock(&oftable->cls.rwlock);