#include "unaligned.h"
#include "unixctl.h"
#include "vlog.h"
+#include "bundles.h"
VLOG_DEFINE_THIS_MODULE(ofproto);
ofproto->n_pending = 0;
hmap_init(&ofproto->deletions);
guarded_list_init(&ofproto->rule_executes);
- ofproto->n_add = ofproto->n_delete = ofproto->n_modify = 0;
- ofproto->first_op = ofproto->last_op = LLONG_MIN;
- ofproto->next_op_report = LLONG_MAX;
- ofproto->op_backoff = LLONG_MIN;
ofproto->vlan_bitmap = NULL;
ofproto->vlans_changed = false;
ofproto->min_mtu = INT_MAX;
}
}
-/* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on
- * success. Returns a negative number if there is no status change since
- * last update. Returns a positive errno otherwise. Has no effect if
- * 'ofp_port' is not an OpenFlow port in 'ofproto'.
+/* Populates 'status' with the status of BFD on 'ofport'. If 'force' is set to
+ * true, status will be returned even if there is no status change since last
+ * update.
+ *
+ * Returns 0 on success. Returns a negative number if there is no status change
+ * since last update and 'force' is set to false. Returns a positive errno
+ * otherwise. Has no effect if 'ofp_port' is not an OpenFlow port in 'ofproto'.
*
* The caller must provide and own '*status'. */
int
ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port,
- struct smap *status)
+ bool force, struct smap *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
return (ofport && ofproto->ofproto_class->get_bfd_status
- ? ofproto->ofproto_class->get_bfd_status(ofport, status)
+ ? ofproto->ofproto_class->get_bfd_status(ofport, force, status)
: EOPNOTSUPP);
}
hmap_destroy(&ofproto->deletions);
+ ovs_assert(hindex_is_empty(&ofproto->cookies));
+ hindex_destroy(&ofproto->cookies);
+
free(ofproto->vlan_bitmap);
ofproto->ofproto_class->dealloc(ofproto);
OVS_NOT_REACHED();
}
- if (time_msec() >= p->next_op_report) {
- long long int ago = (time_msec() - p->first_op) / 1000;
- long long int interval = (p->last_op - p->first_op) / 1000;
- struct ds s;
-
- ds_init(&s);
- ds_put_format(&s, "%d flow_mods ",
- p->n_add + p->n_delete + p->n_modify);
- if (interval == ago) {
- ds_put_format(&s, "in the last %lld s", ago);
- } else if (interval) {
- ds_put_format(&s, "in the %lld s starting %lld s ago",
- interval, ago);
- } else {
- ds_put_format(&s, "%lld s ago", ago);
- }
-
- ds_put_cstr(&s, " (");
- if (p->n_add) {
- ds_put_format(&s, "%d adds, ", p->n_add);
- }
- if (p->n_delete) {
- ds_put_format(&s, "%d deletes, ", p->n_delete);
- }
- if (p->n_modify) {
- ds_put_format(&s, "%d modifications, ", p->n_modify);
- }
- s.length -= 2;
- ds_put_char(&s, ')');
-
- VLOG_INFO("%s: %s", p->name, ds_cstr(&s));
- ds_destroy(&s);
-
- p->n_add = p->n_delete = p->n_modify = 0;
- p->next_op_report = LLONG_MAX;
- }
-
return error;
}
if (fm->command == OFPFC_MODIFY_STRICT && fm->table_id != OFPTT_ALL
&& !(fm->flags & OFPUTIL_FF_RESET_COUNTS)) {
struct oftable *table = &ofproto->tables[fm->table_id];
- struct cls_rule match_rule;
struct rule *rule;
bool done = false;
- cls_rule_init(&match_rule, &fm->match, fm->priority);
fat_rwlock_rdlock(&table->cls.rwlock);
- rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls,
- &match_rule));
+ rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
+ &fm->match,
+ fm->priority));
if (rule) {
/* Reading many of the rule fields and writing on 'modified'
* requires the rule->mutex. Also, rule->actions may change
return handle_flow_mod__(ofproto, NULL, fm, NULL);
}
-/* Resets the modified time for 'rule' or an equivalent rule. If 'rule' is not
- * in the classifier, but an equivalent rule is, unref 'rule' and ref the new
- * rule. Otherwise if 'rule' is no longer installed in the classifier,
- * reinstall it.
- *
- * Returns the rule whose modified time has been reset. */
-struct rule *
-ofproto_refresh_rule(struct rule *rule)
-{
- const struct oftable *table = &rule->ofproto->tables[rule->table_id];
- const struct cls_rule *cr = &rule->cr;
- struct rule *r;
-
- /* do_add_flow() requires that the rule is not installed. We lock the
- * ofproto_mutex here so that another thread cannot add the flow before
- * we get a chance to add it.*/
- ovs_mutex_lock(&ofproto_mutex);
-
- fat_rwlock_rdlock(&table->cls.rwlock);
- r = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, cr));
- if (r != rule) {
- ofproto_rule_ref(r);
- }
- fat_rwlock_unlock(&table->cls.rwlock);
-
- if (!r) {
- do_add_flow(rule->ofproto, NULL, NULL, 0, rule);
- } else if (r != rule) {
- ofproto_rule_unref(rule);
- rule = r;
- }
- ovs_mutex_unlock(&ofproto_mutex);
-
- /* Refresh the modified time for the rule. */
- ovs_mutex_lock(&rule->mutex);
- rule->modified = MAX(rule->modified, time_msec());
- ovs_mutex_unlock(&rule->mutex);
-
- return rule;
-}
-
/* Searches for a rule with matching criteria exactly equal to 'target' in
* ofproto's table 0 and, if it finds one, deletes it.
*
return error;
}
- error = ofputil_decode_port_mod(oh, &pm);
+ error = ofputil_decode_port_mod(oh, &pm, false);
if (error) {
return error;
}
ofputil_append_port_stat(replies, &ops);
}
-static enum ofperr
-handle_port_stats_request(struct ofconn *ofconn,
-                          const struct ofp_header *request)
+/* Common code for handling port stats and port desc requests: invokes 'cb'
+ * to append one reply for the port numbered 'port_no', or for every port in
+ * the ofproto when 'port_no' is OFPP_ANY, then sends the accumulated
+ * multipart replies to 'ofconn'. */
+static void
+handle_port_request(struct ofconn *ofconn,
+                    const struct ofp_header *request, ofp_port_t port_no,
+                    void (*cb)(struct ofport *, struct list *replies))
 {
-    struct ofproto *p = ofconn_get_ofproto(ofconn);
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
     struct ofport *port;
     struct list replies;
-    ofp_port_t port_no;
-    enum ofperr error;
-
-    error = ofputil_decode_port_stats_request(request, &port_no);
-    if (error) {
-        return error;
-    }
     ofpmp_init(&replies, request);
     if (port_no != OFPP_ANY) {
-        port = ofproto_get_port(p, port_no);
+        port = ofproto_get_port(ofproto, port_no);
         if (port) {
-            append_port_stat(port, &replies);
+            cb(port, &replies);
         }
     } else {
-        HMAP_FOR_EACH (port, hmap_node, &p->ports) {
-            append_port_stat(port, &replies);
+        HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+            cb(port, &replies);
         }
     }
     ofconn_send_replies(ofconn, &replies);
-    return 0;
+}
+
+/* Handles a port stats request: decodes the requested port number and, on
+ * success, replies with per-port statistics via handle_port_request(). */
+static enum ofperr
+handle_port_stats_request(struct ofconn *ofconn,
+                          const struct ofp_header *request)
+{
+    ofp_port_t port_no;
+    enum ofperr error;
+
+    error = ofputil_decode_port_stats_request(request, &port_no);
+    if (!error) {
+        handle_port_request(ofconn, request, port_no, append_port_stat);
+    }
+    return error;
+}
+
+/* Callback for handle_port_request(): appends a port description reply for
+ * 'port' to 'replies'. */
+static void
+append_port_desc(struct ofport *port, struct list *replies)
+{
+    ofputil_append_port_desc_stats_reply(&port->pp, replies);
+}
static enum ofperr
handle_port_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
- enum ofp_version version;
- struct ofport *port;
- struct list replies;
-
- ofpmp_init(&replies, request);
+ ofp_port_t port_no;
+ enum ofperr error;
- version = ofputil_protocol_to_ofp_version(ofconn_get_protocol(ofconn));
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- ofputil_append_port_desc_stats_reply(version, &port->pp, &replies);
+ error = ofputil_decode_port_desc_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_desc);
}
-
- ofconn_send_replies(ofconn, &replies);
- return 0;
+ return error;
}
static uint32_t
ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
}
-/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'.
- * Returns 0 if the port's CFM status was successfully stored into
- * '*status'. Returns positive errno if the port did not have CFM
- * configured. Returns negative number if there is no status change
- * since last update.
+/* Checks the status of CFM configured on 'ofp_port' within 'ofproto' and stores
+ * the port's CFM status in '*status'. If 'force' is set to true, status will
+ * be returned even if there is no status change since last update.
+ *
+ * Returns 0 on success. Returns a negative number if there is no status
+ * change since last update and 'force' is set to false. Returns positive errno
+ * if the port did not have CFM configured.
*
* The caller must provide and own '*status', and must free 'status->rmps'.
* '*status' is indeterminate if the return value is non-zero. */
int
ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
- struct ofproto_cfm_status *status)
+ bool force, struct ofproto_cfm_status *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
return (ofport && ofproto->ofproto_class->get_cfm_status
- ? ofproto->ofproto_class->get_cfm_status(ofport, status)
+ ? ofproto->ofproto_class->get_cfm_status(ofport, force, status)
: EOPNOTSUPP);
}
HMAP_FOR_EACH_WITH_HASH (op, hmap_node,
cls_rule_hash(cls_rule, table_id),
&ofproto->deletions) {
- if (cls_rule_equal(cls_rule, &op->rule->cr)) {
+ if (op->rule->table_id == table_id
+ && cls_rule_equal(cls_rule, &op->rule->cr)) {
return true;
}
}
uint64_t ofpacts_stub[1024 / 8];
struct ofpbuf ofpacts;
enum ofperr error;
- long long int now;
error = reject_slave_controller(ofconn);
if (error) {
goto exit_free_ofpacts;
}
- /* Record the operation for logging a summary report. */
- switch (fm.command) {
- case OFPFC_ADD:
- ofproto->n_add++;
- break;
-
- case OFPFC_MODIFY:
- case OFPFC_MODIFY_STRICT:
- ofproto->n_modify++;
- break;
-
- case OFPFC_DELETE:
- case OFPFC_DELETE_STRICT:
- ofproto->n_delete++;
- break;
- }
-
- now = time_msec();
- if (ofproto->next_op_report == LLONG_MAX) {
- ofproto->first_op = now;
- ofproto->next_op_report = MAX(now + 10 * 1000,
- ofproto->op_backoff);
- ofproto->op_backoff = ofproto->next_op_report + 60 * 1000;
- }
- ofproto->last_op = now;
+ ofconn_report_flow_mod(ofconn, fm.command);
exit_free_ofpacts:
ofpbuf_uninit(&ofpacts);
free(ogs.bucket_stats);
}
-static enum ofperr
-handle_group_stats_request(struct ofconn *ofconn,
-                           const struct ofp_header *request)
+/* Common code for handling group stats and group desc requests: invokes 'cb'
+ * once for the group numbered 'group_id', or once per group in the ofproto
+ * when 'group_id' is OFPG_ALL, then sends the accumulated multipart replies
+ * to 'ofconn'.  In the OFPG_ALL case each group's rwlock is read-locked
+ * around the 'cb' call. */
+static void
+handle_group_request(struct ofconn *ofconn,
+                     const struct ofp_header *request, uint32_t group_id,
+                     void (*cb)(struct ofgroup *, struct list *replies))
 {
     struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
-    struct list replies;
-    enum ofperr error;
     struct ofgroup *group;
-    uint32_t group_id;
-
-    error = ofputil_decode_group_stats_request(request, &group_id);
-    if (error) {
-        return error;
-    }
+    struct list replies;
     ofpmp_init(&replies, request);
-
     if (group_id == OFPG_ALL) {
         ovs_rwlock_rdlock(&ofproto->groups_rwlock);
         HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
             ovs_rwlock_rdlock(&group->rwlock);
-            append_group_stats(group, &replies);
+            cb(group, &replies);
             ovs_rwlock_unlock(&group->rwlock);
         }
         ovs_rwlock_unlock(&ofproto->groups_rwlock);
     } else {
         if (ofproto_group_lookup(ofproto, group_id, &group)) {
-            append_group_stats(group, &replies);
+            cb(group, &replies);
             ofproto_group_release(group);
         }
     }
-
     ofconn_send_replies(ofconn, &replies);
-
-    return 0;
 }
static enum ofperr
-handle_group_desc_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+handle_group_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
{
- struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- struct ofputil_group_desc gds;
- struct ofgroup *group;
-
- ofpmp_init(&replies, request);
+ uint32_t group_id;
+ enum ofperr error;
- ovs_rwlock_rdlock(&ofproto->groups_rwlock);
- HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- gds.group_id = group->group_id;
- gds.type = group->type;
- ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+ error = ofputil_decode_group_stats_request(request, &group_id);
+ if (error) {
+ return error;
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
- ofconn_send_replies(ofconn, &replies);
+ handle_group_request(ofconn, request, group_id, append_group_stats);
+ return 0;
+}
+
+/* Callback for handle_group_request(): appends a group description reply for
+ * 'group' to 'replies'. */
+static void
+append_group_desc(struct ofgroup *group, struct list *replies)
+{
+    struct ofputil_group_desc gds;
+
+    gds.group_id = group->group_id;
+    gds.type = group->type;
+    ofputil_append_group_desc_reply(&gds, &group->buckets, replies);
+}
+
+/* Handles a group desc stats request by replying with a description of every
+ * group.  NOTE(review): the decoded group id is passed straight through with
+ * no error check -- confirm ofputil_decode_group_desc_request() cannot
+ * fail. */
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+                                const struct ofp_header *request)
+{
+    handle_group_request(ofconn, request,
+                         ofputil_decode_group_desc_request(request),
+                         append_group_desc);
+    return 0;
+}
return table_mod(ofproto, &tm);
}
+/* Handles an OFPT_BUNDLE_CONTROL request: opens, closes, commits, or
+ * discards the connection's message bundle and, on success, sends the
+ * corresponding *_REPLY back to the controller.  Reply-type messages from a
+ * controller are rejected with OFPBFC_BAD_TYPE, since only the switch may
+ * send them. */
+static enum ofperr
+handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    enum ofperr error;
+    struct ofputil_bundle_ctrl_msg bctrl;
+    struct ofpbuf *buf;
+    struct ofputil_bundle_ctrl_msg reply;
+
+    error = ofputil_decode_bundle_ctrl(oh, &bctrl);
+    if (error) {
+        return error;
+    }
+    reply.flags = 0;
+    reply.bundle_id = bctrl.bundle_id;
+
+    switch (bctrl.type) {
+    case OFPBCT_OPEN_REQUEST:
+        error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_OPEN_REPLY;
+        break;
+    case OFPBCT_CLOSE_REQUEST:
+        error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_CLOSE_REPLY;
+        break;
+    case OFPBCT_COMMIT_REQUEST:
+        error = ofp_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_COMMIT_REPLY;
+        break;
+    case OFPBCT_DISCARD_REQUEST:
+        error = ofp_bundle_discard(ofconn, bctrl.bundle_id);
+        reply.type = OFPBCT_DISCARD_REPLY;
+        break;
+
+    case OFPBCT_OPEN_REPLY:
+    case OFPBCT_CLOSE_REPLY:
+    case OFPBCT_COMMIT_REPLY:
+    case OFPBCT_DISCARD_REPLY:
+        return OFPERR_OFPBFC_BAD_TYPE;
+    }
+
+    if (!error) {
+        buf = ofputil_encode_bundle_ctrl_reply(oh, &reply);
+        ofconn_send_reply(ofconn, buf);
+    }
+    return error;
+}
+
+/* Handles an OFPT_BUNDLE_ADD_MESSAGE: decodes the bundled inner message and
+ * appends it to the connection's bundle via ofp_bundle_add_message(). */
+static enum ofperr
+handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    enum ofperr error;
+    struct ofputil_bundle_add_msg badd;
+
+    error = ofputil_decode_bundle_add(oh, &badd);
+    if (error) {
+        return error;
+    }
+
+    return ofp_bundle_add_message(ofconn, &badd);
+}
+
static enum ofperr
handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
OVS_EXCLUDED(ofproto_mutex)
case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
return handle_queue_get_config_request(ofconn, oh);
+ case OFPTYPE_BUNDLE_CONTROL:
+ return handle_bundle_control(ofconn, oh);
+
+ case OFPTYPE_BUNDLE_ADD_MESSAGE:
+ return handle_bundle_add(ofconn, oh);
+
case OFPTYPE_HELLO:
case OFPTYPE_ERROR:
case OFPTYPE_FEATURES_REPLY:
if (!(op->error
|| ofproto_rule_is_hidden(rule)
|| (op->type == OFOPERATION_MODIFY
- && op->actions
+ && !op->actions
&& rule->flow_cookie == op->flow_cookie))) {
/* Check that we can just cast from ofoperation_type to
* nx_flow_update_event. */
void
ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
{
+ struct match match;
+ struct cls_rule target;
const struct oftable *oftable;
+ match_init_catchall(&match);
+ match_set_vlan_vid_masked(&match, htons(VLAN_CFI), htons(VLAN_CFI));
+ cls_rule_init(&target, &match, 0);
+
free(ofproto->vlan_bitmap);
ofproto->vlan_bitmap = bitmap_allocate(4096);
ofproto->vlans_changed = false;
OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
- const struct cls_subtable *table;
+ struct cls_cursor cursor;
+ struct rule *rule;
fat_rwlock_rdlock(&oftable->cls.rwlock);
- HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
- if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
- const struct cls_rule *rule;
-
- HMAP_FOR_EACH (rule, hmap_node, &table->rules) {
- uint16_t vid = miniflow_get_vid(&rule->match.flow);
- bitmap_set1(vlan_bitmap, vid);
- bitmap_set1(ofproto->vlan_bitmap, vid);
- }
+ cls_cursor_init(&cursor, &oftable->cls, &target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
+ uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
+
+ bitmap_set1(vlan_bitmap, vid);
+ bitmap_set1(ofproto->vlan_bitmap, vid);
}
}
fat_rwlock_unlock(&oftable->cls.rwlock);