/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <stdlib.h>
#include "coverage.h"
+#include "dynamic-string.h"
#include "fail-open.h"
#include "in-band.h"
#include "odp-util.h"
#include "simap.h"
#include "stream.h"
#include "timeval.h"
-#include "vconn.h"
-#include "vlog.h"
+#include "openvswitch/vconn.h"
+#include "openvswitch/vlog.h"
+
+#include "bundles.h"
VLOG_DEFINE_THIS_MODULE(connmgr);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-/* An OpenFlow connection. */
+/* An OpenFlow connection.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * 'ofproto_mutex' must be held whenever an ofconn is created or destroyed or,
+ * more or less equivalently, whenever an ofconn is added to or removed from a
+ * connmgr. 'ofproto_mutex' doesn't protect the data inside the ofconn, except
+ * as specifically noted below. */
struct ofconn {
/* Configuration that persists from one connection to the next. */
- struct list node; /* In struct connmgr's "all_conns" list. */
+ struct ovs_list node; /* In struct connmgr's "all_conns" list. */
struct hmap_node hmap_node; /* In struct connmgr's "controllers" map. */
struct connmgr *connmgr; /* Connection's manager. */
enum ofputil_protocol protocol; /* Current protocol variant. */
enum nx_packet_in_format packet_in_format; /* OFPT_PACKET_IN format. */
- /* Asynchronous flow table operation support. */
- struct list opgroups; /* Contains pending "ofopgroups", if any. */
- struct ofpbuf *blocked; /* Postponed OpenFlow message, if any. */
- bool retry; /* True if 'blocked' is ready to try again. */
-
/* OFPT_PACKET_IN related data. */
struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */
#define N_SCHEDULERS 2
#define OFCONN_REPLY_MAX 100
struct rconn_packet_counter *reply_counter;
- /* Asynchronous message configuration in each possible roles.
+ /* Asynchronous message configuration in each possible role.
*
* A 1-bit enables sending an asynchronous message for one possible reason
* that the message might be generated, a 0-bit disables it. */
- uint32_t master_async_config[OAM_N_TYPES]; /* master, other */
- uint32_t slave_async_config[OAM_N_TYPES]; /* slave */
-
- /* Flow monitors. */
- struct hmap monitors; /* Contains "struct ofmonitor"s. */
- struct list updates; /* List of "struct ofpbuf"s. */
- bool sent_abbrev_update; /* Does 'updates' contain NXFME_ABBREV? */
- struct rconn_packet_counter *monitor_counter;
- uint64_t monitor_paused;
+ struct ofputil_async_cfg *async_cfg;
+
+ /* Flow table operation logging. */
+ int n_add, n_delete, n_modify; /* Number of unreported ops of each kind. */
+ long long int first_op, last_op; /* Range of times for unreported ops. */
+ long long int next_op_report; /* Time to report ops, or LLONG_MAX. */
+ long long int op_backoff; /* Earliest time to report ops again. */
+
+/* Flow monitors (e.g. NXST_FLOW_MONITOR). */
+
+ /* Configuration. Contains "struct ofmonitor"s. */
+ struct hmap monitors OVS_GUARDED_BY(ofproto_mutex);
+
+ /* Flow control.
+ *
+ * When too many flow monitor notifications back up in the transmit buffer,
+ * we pause the transmission of further notifications. These members track
+ * the flow control state.
+ *
+ * When notifications are flowing, 'monitor_paused' is 0. When
+ * notifications are paused, 'monitor_paused' is the value of
+ * 'monitor_seqno' at the point we paused.
+ *
+ * 'monitor_counter' counts the OpenFlow messages and bytes currently in
+ * flight. This value growing too large triggers pausing. */
+ uint64_t monitor_paused OVS_GUARDED_BY(ofproto_mutex);
+ struct rconn_packet_counter *monitor_counter OVS_GUARDED_BY(ofproto_mutex);
+
+ /* State of monitors for a single ongoing flow_mod.
+ *
+ * 'updates' is a list of "struct ofpbuf"s that contain
+ * NXST_FLOW_MONITOR_REPLY messages representing the changes made by the
+ * current flow_mod.
+ *
+ * When 'updates' is nonempty, 'sent_abbrev_update' is true if 'updates'
+ * contains an update event of type NXFME_ABBREV and false otherwise. */
+ struct ovs_list updates OVS_GUARDED_BY(ofproto_mutex);
+ bool sent_abbrev_update OVS_GUARDED_BY(ofproto_mutex);
+
+ /* Active bundles. Contains "struct ofp_bundle"s. */
+ struct hmap bundles;
};
static struct ofconn *ofconn_create(struct connmgr *, struct rconn *,
- enum ofconn_type, bool enable_async_msgs);
-static void ofconn_destroy(struct ofconn *);
-static void ofconn_flush(struct ofconn *);
+ enum ofconn_type, bool enable_async_msgs)
+ OVS_REQUIRES(ofproto_mutex);
+static void ofconn_destroy(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
+static void ofconn_flush(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
static void ofconn_reconfigure(struct ofconn *,
const struct ofproto_controller *);
static void ofconn_run(struct ofconn *,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg));
-static void ofconn_wait(struct ofconn *, bool handling_openflow);
+static void ofconn_wait(struct ofconn *);
+
+static void ofconn_log_flow_mods(struct ofconn *);
static const char *ofconn_get_target(const struct ofconn *);
static char *ofconn_make_name(const struct connmgr *, const char *target);
static void ofconn_send(const struct ofconn *, struct ofpbuf *,
struct rconn_packet_counter *);
-static void do_send_packet_in(struct ofpbuf *, void *ofconn_);
+static void do_send_packet_ins(struct ofconn *, struct ovs_list *txq);
/* A listener for incoming OpenFlow "service" connections. */
struct ofservice {
char *local_port_name;
/* OpenFlow connections. */
- struct hmap controllers; /* Controller "struct ofconn"s. */
- struct list all_conns; /* Contains "struct ofconn"s. */
+ struct hmap controllers; /* All OFCONN_PRIMARY controllers. */
+ struct ovs_list all_conns; /* All controllers. */
uint64_t master_election_id; /* monotonically increasing sequence number
* for master election */
bool master_election_id_defined;
return;
}
+ ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
ofconn_destroy(ofconn);
}
+ ovs_mutex_unlock(&ofproto_mutex);
+
hmap_destroy(&mgr->controllers);
HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
free(mgr);
}
-/* Does all of the periodic maintenance required by 'mgr'.
- *
- * If 'handle_openflow' is nonnull, calls 'handle_openflow' for each message
- * received on an OpenFlow connection, passing along the OpenFlow connection
- * itself and the message that was sent. If 'handle_openflow' returns true,
- * the message is considered to be fully processed. If 'handle_openflow'
- * returns false, the message is considered not to have been processed at all;
- * it will be stored and re-presented to 'handle_openflow' following the next
- * call to connmgr_retry(). 'handle_openflow' must not modify or free the
- * message.
- *
- * If 'handle_openflow' is NULL, no OpenFlow messages will be processed and
- * other activities that could affect the flow table (in-band processing,
- * fail-open processing) are suppressed too. */
+/* Does all of the periodic maintenance required by 'mgr'. Calls
+ * 'handle_openflow' for each message received on an OpenFlow connection,
+ * passing along the OpenFlow connection itself and the message that was sent.
+ * 'handle_openflow' must not modify or free the message. */
void
connmgr_run(struct connmgr *mgr,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg))
+ OVS_EXCLUDED(ofproto_mutex)
{
struct ofconn *ofconn, *next_ofconn;
struct ofservice *ofservice;
size_t i;
- if (handle_openflow && mgr->in_band) {
+ if (mgr->in_band) {
if (!in_band_run(mgr->in_band)) {
in_band_destroy(mgr->in_band);
mgr->in_band = NULL;
/* Fail-open maintenance. Do this after processing the ofconns since
* fail-open checks the status of the controller rconn. */
- if (handle_openflow && mgr->fail_open) {
+ if (mgr->fail_open) {
fail_open_run(mgr->fail_open);
}
rconn_connect_unreliably(rconn, vconn, name);
free(name);
+ ovs_mutex_lock(&ofproto_mutex);
ofconn = ofconn_create(mgr, rconn, OFCONN_SERVICE,
ofservice->enable_async_msgs);
+ ovs_mutex_unlock(&ofproto_mutex);
+
ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
ofservice->burst_limit);
} else if (retval != EAGAIN) {
}
}
-/* Causes the poll loop to wake up when connmgr_run() needs to run.
- *
- * If 'handling_openflow' is true, arriving OpenFlow messages and other
- * activities that affect the flow table will wake up the poll loop. If
- * 'handling_openflow' is false, they will not. */
+/* Causes the poll loop to wake up when connmgr_run() needs to run. */
void
-connmgr_wait(struct connmgr *mgr, bool handling_openflow)
+connmgr_wait(struct connmgr *mgr)
{
struct ofservice *ofservice;
struct ofconn *ofconn;
size_t i;
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- ofconn_wait(ofconn, handling_openflow);
+ ofconn_wait(ofconn);
}
ofmonitor_wait(mgr);
- if (handling_openflow && mgr->in_band) {
+ if (mgr->in_band) {
in_band_wait(mgr->in_band);
}
- if (handling_openflow && mgr->fail_open) {
+ if (mgr->fail_open) {
fail_open_wait(mgr->fail_open);
}
HMAP_FOR_EACH (ofservice, node, &mgr->services) {
packets += rconn_count_txqlen(ofconn->rconn);
for (i = 0; i < N_SCHEDULERS; i++) {
- packets += pinsched_count_txqlen(ofconn->schedulers[i]);
+ struct pinsched_stats stats;
+
+ pinsched_get_stats(ofconn->schedulers[i], &stats);
+ packets += stats.n_queued;
}
packets += pktbuf_count_packets(ofconn->pktbuf);
}
{
return ofconn->connmgr->ofproto;
}
-
-/* If processing of OpenFlow messages was blocked on any 'mgr' ofconns by
- * returning false to the 'handle_openflow' callback to connmgr_run(), this
- * re-enables them. */
-void
-connmgr_retry(struct connmgr *mgr)
-{
- struct ofconn *ofconn;
-
- LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- ofconn->retry = true;
- }
-}
\f
/* OpenFlow configuration. */
static void add_controller(struct connmgr *, const char *target, uint8_t dscp,
- uint32_t allowed_versions);
+ uint32_t allowed_versions)
+ OVS_REQUIRES(ofproto_mutex);
static struct ofconn *find_controller_by_target(struct connmgr *,
const char *target);
-static void update_fail_open(struct connmgr *);
+static void update_fail_open(struct connmgr *) OVS_EXCLUDED(ofproto_mutex);
static int set_pvconns(struct pvconn ***pvconnsp, size_t *n_pvconnsp,
const struct sset *);
time_t last_connection = rconn_get_last_connection(rconn);
time_t last_disconnect = rconn_get_last_disconnect(rconn);
int last_error = rconn_get_last_error(rconn);
+ int i;
shash_add(info, target, cinfo);
cinfo->is_connected = rconn_is_connected(rconn);
cinfo->role = ofconn->role;
- cinfo->pairs.n = 0;
-
+ smap_init(&cinfo->pairs);
if (last_error) {
- cinfo->pairs.keys[cinfo->pairs.n] = "last_error";
- cinfo->pairs.values[cinfo->pairs.n++]
- = xstrdup(ovs_retval_to_string(last_error));
+ smap_add(&cinfo->pairs, "last_error",
+ ovs_retval_to_string(last_error));
}
- cinfo->pairs.keys[cinfo->pairs.n] = "state";
- cinfo->pairs.values[cinfo->pairs.n++]
- = xstrdup(rconn_get_state(rconn));
+ smap_add(&cinfo->pairs, "state", rconn_get_state(rconn));
if (last_connection != TIME_MIN) {
- cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect";
- cinfo->pairs.values[cinfo->pairs.n++]
- = xasprintf("%ld", (long int) (now - last_connection));
+ smap_add_format(&cinfo->pairs, "sec_since_connect",
+ "%ld", (long int) (now - last_connection));
}
if (last_disconnect != TIME_MIN) {
- cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect";
- cinfo->pairs.values[cinfo->pairs.n++]
- = xasprintf("%ld", (long int) (now - last_disconnect));
+ smap_add_format(&cinfo->pairs, "sec_since_disconnect",
+ "%ld", (long int) (now - last_disconnect));
+ }
+
+ for (i = 0; i < N_SCHEDULERS; i++) {
+ if (ofconn->schedulers[i]) {
+ const char *name = i ? "miss" : "action";
+ struct pinsched_stats stats;
+
+ pinsched_get_stats(ofconn->schedulers[i], &stats);
+ smap_add_nocopy(&cinfo->pairs,
+ xasprintf("packet-in-%s-backlog", name),
+ xasprintf("%u", stats.n_queued));
+ smap_add_nocopy(&cinfo->pairs,
+ xasprintf("packet-in-%s-bypassed", name),
+ xasprintf("%llu", stats.n_normal));
+ smap_add_nocopy(&cinfo->pairs,
+ xasprintf("packet-in-%s-queued", name),
+ xasprintf("%llu", stats.n_limited));
+ smap_add_nocopy(&cinfo->pairs,
+ xasprintf("packet-in-%s-dropped", name),
+ xasprintf("%llu", stats.n_queue_dropped));
+ }
}
}
}
SHASH_FOR_EACH (node, info) {
struct ofproto_controller_info *cinfo = node->data;
- while (cinfo->pairs.n) {
- free(CONST_CAST(char *, cinfo->pairs.values[--cinfo->pairs.n]));
- }
+ smap_destroy(&cinfo->pairs);
free(cinfo);
}
shash_destroy(info);
connmgr_set_controllers(struct connmgr *mgr,
const struct ofproto_controller *controllers,
size_t n_controllers, uint32_t allowed_versions)
+ OVS_EXCLUDED(ofproto_mutex)
{
bool had_controllers = connmgr_has_controllers(mgr);
struct shash new_controllers;
struct ofservice *ofservice, *next_ofservice;
size_t i;
+ /* Required to add and remove ofconns. This could probably be narrowed to
+ * cover a smaller amount of code, if that yielded some benefit. */
+ ovs_mutex_lock(&ofproto_mutex);
+
/* Create newly configured controllers and services.
* Create a name to ofproto_controller mapping in 'new_controllers'. */
shash_init(&new_controllers);
shash_destroy(&new_controllers);
+ ovs_mutex_unlock(&ofproto_mutex);
+
update_in_band_remotes(mgr);
update_fail_open(mgr);
if (had_controllers != connmgr_has_controllers(mgr)) {
static void
add_controller(struct connmgr *mgr, const char *target, uint8_t dscp,
uint32_t allowed_versions)
+ OVS_REQUIRES(ofproto_mutex)
{
char *name = ofconn_make_name(mgr, target);
struct ofconn *ofconn;
/* Add all the remotes. */
HMAP_FOR_EACH (ofconn, hmap_node, &mgr->controllers) {
- struct sockaddr_in *sin = &addrs[n_addrs];
const char *target = rconn_get_target(ofconn->rconn);
-
- if (ofconn->band == OFPROTO_OUT_OF_BAND) {
- continue;
- }
-
- if (stream_parse_target_with_default_ports(target,
- OFP_TCP_PORT,
- OFP_SSL_PORT,
- sin)) {
- n_addrs++;
+ union {
+ struct sockaddr_storage ss;
+ struct sockaddr_in in;
+ } sa;
+
+ if (ofconn->band == OFPROTO_IN_BAND
+ && stream_parse_target_with_default_port(target, OFP_PORT, &sa.ss)
+ && sa.ss.ss_family == AF_INET) {
+ addrs[n_addrs++] = sa.in;
}
}
for (i = 0; i < mgr->n_extra_remotes; i++) {
static void
update_fail_open(struct connmgr *mgr)
+ OVS_EXCLUDED(ofproto_mutex)
{
if (connmgr_has_controllers(mgr)
&& mgr->fail_mode == OFPROTO_FAIL_STANDALONE) {
return ofconn->role;
}
+/* Sends an OFPT_ROLE_STATUS message to 'ofconn', reporting that its role has
+ * become 'role' for the reason given in 'reason' (an OFPCRR_* value), along
+ * with the current master election generation ID.
+ *
+ * Does nothing when ofputil_encode_role_status() returns NULL -- presumably
+ * the negotiated protocol cannot express role status; TODO confirm. */
+void
+ofconn_send_role_status(struct ofconn *ofconn, uint32_t role, uint8_t reason)
+{
+    struct ofputil_role_status status;
+    struct ofpbuf *buf;
+
+    status.reason = reason;
+    status.role = role;
+    ofconn_get_master_election_id(ofconn, &status.generation_id);
+
+    buf = ofputil_encode_role_status(&status, ofconn_get_protocol(ofconn));
+    if (buf) {
+        ofconn_send(ofconn, buf, NULL);
+    }
+}
+
/* Changes 'ofconn''s role to 'role'. If 'role' is OFPCR12_ROLE_MASTER then
* any existing master is demoted to a slave. */
void
ofconn_set_role(struct ofconn *ofconn, enum ofp12_controller_role role)
{
- if (role == OFPCR12_ROLE_MASTER) {
+ if (role != ofconn->role && role == OFPCR12_ROLE_MASTER) {
struct ofconn *other;
- HMAP_FOR_EACH (other, hmap_node, &ofconn->connmgr->controllers) {
+ LIST_FOR_EACH (other, node, &ofconn->connmgr->all_conns) {
if (other->role == OFPCR12_ROLE_MASTER) {
other->role = OFPCR12_ROLE_SLAVE;
+ ofconn_send_role_status(other, OFPCR12_ROLE_SLAVE, OFPCRR_MASTER_REQUEST);
}
}
}
+/* Enables (if 'enable' is true) or disables sending OFPR_INVALID_TTL
+ * "packet-in" messages to 'ofconn' in its master/other-role async message
+ * configuration. */
void
ofconn_set_invalid_ttl_to_controller(struct ofconn *ofconn, bool enable)
{
+    struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
    uint32_t bit = 1u << OFPR_INVALID_TTL;
    if (enable) {
-        ofconn->master_async_config[OAM_PACKET_IN] |= bit;
+        ac.master[OAM_PACKET_IN] |= bit;
    } else {
-        ofconn->master_async_config[OAM_PACKET_IN] &= ~bit;
+        ac.master[OAM_PACKET_IN] &= ~bit;
    }
+    /* Write the modified configuration back: the getter returns a copy. */
+    ofconn_set_async_config(ofconn, &ac);
}
+/* Returns true if OFPR_INVALID_TTL "packet-in" messages are enabled in
+ * 'ofconn''s master/other-role async message configuration, false
+ * otherwise. */
bool
ofconn_get_invalid_ttl_to_controller(struct ofconn *ofconn)
{
+    struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
    uint32_t bit = 1u << OFPR_INVALID_TTL;
-    return (ofconn->master_async_config[OAM_PACKET_IN] & bit) != 0;
+    return (ac.master[OAM_PACKET_IN] & bit) != 0;
}
/* Returns the currently configured protocol for 'ofconn', one of OFPUTIL_P_*.
/* Returns the currently configured packet in format for 'ofconn', one of
* NXPIF_*.
*
- * The default, if no other format has been set, is NXPIF_OPENFLOW10. */
+ * The default, if no other format has been set, is NXPIF_STANDARD. */
enum nx_packet_in_format
ofconn_get_packet_in_format(struct ofconn *ofconn)
{
+/* Sets 'ofconn''s asynchronous message configuration to a copy of 'ac'. */
void
ofconn_set_async_config(struct ofconn *ofconn,
-                        const uint32_t master_masks[OAM_N_TYPES],
-                        const uint32_t slave_masks[OAM_N_TYPES])
+                        const struct ofputil_async_cfg *ac)
{
-    size_t size = sizeof ofconn->master_async_config;
-    memcpy(ofconn->master_async_config, master_masks, size);
-    memcpy(ofconn->slave_async_config, slave_masks, size);
+    /* 'async_cfg' is allocated lazily: it stays NULL until a configuration
+     * is explicitly set. */
+    if (!ofconn->async_cfg) {
+        ofconn->async_cfg = xmalloc(sizeof *ofconn->async_cfg);
+    }
+    *ofconn->async_cfg = *ac;
+}
+
+/* Returns 'ofconn''s current asynchronous message configuration.
+ *
+ * If a configuration was explicitly set (see ofconn_set_async_config()),
+ * returns a copy of it.  Otherwise returns a default: everything disabled
+ * when async messages are disabled for this connection or when no OpenFlow
+ * version is available, else the per-version default configuration. */
+struct ofputil_async_cfg
+ofconn_get_async_config(const struct ofconn *ofconn)
+{
+    if (ofconn->async_cfg) {
+        return *ofconn->async_cfg;
+    }
+
+    /* A negative version presumably means version negotiation has not
+     * completed yet -- TODO confirm against rconn_get_version(). */
+    int version = rconn_get_version(ofconn->rconn);
+    return (version < 0 || !ofconn->enable_async_msgs
+            ? OFPUTIL_ASYNC_CFG_INIT
+            : ofputil_async_cfg_default(version));
+}
/* Sends 'msg' on 'ofconn', accounting it as a reply. (If there is a
/* Sends each of the messages in list 'replies' on 'ofconn' in order,
* accounting them as replies. */
void
-ofconn_send_replies(const struct ofconn *ofconn, struct list *replies)
+ofconn_send_replies(const struct ofconn *ofconn, struct ovs_list *replies)
{
- struct ofpbuf *reply, *next;
+ struct ofpbuf *reply;
- LIST_FOR_EACH_SAFE (reply, next, list_node, replies) {
- list_remove(&reply->list_node);
+ LIST_FOR_EACH_POP (reply, list_node, replies) {
ofconn_send_reply(ofconn, reply);
}
}
/* Same as pktbuf_retrieve(), using the pktbuf owned by 'ofconn'. */
enum ofperr
ofconn_pktbuf_retrieve(struct ofconn *ofconn, uint32_t id,
- struct ofpbuf **bufferp, ofp_port_t *in_port)
+ struct dp_packet **bufferp, ofp_port_t *in_port)
{
return pktbuf_retrieve(ofconn->pktbuf, id, bufferp, in_port);
}
-/* Returns true if 'ofconn' has any pending opgroups. */
-bool
-ofconn_has_pending_opgroups(const struct ofconn *ofconn)
+/* Reports that a flow_mod operation of the type specified by 'command' was
+ * successfully executed by 'ofconn', so that the connmgr can log it. */
+void
+ofconn_report_flow_mod(struct ofconn *ofconn,
+                       enum ofp_flow_mod_command command)
{
-    return !list_is_empty(&ofconn->opgroups);
+    long long int now;
+
+    switch (command) {
+    case OFPFC_ADD:
+        ofconn->n_add++;
+        break;
+
+    case OFPFC_MODIFY:
+    case OFPFC_MODIFY_STRICT:
+        ofconn->n_modify++;
+        break;
+
+    case OFPFC_DELETE:
+    case OFPFC_DELETE_STRICT:
+        ofconn->n_delete++;
+        break;
+    }
+
+    now = time_msec();
+    /* If no summary report is scheduled yet (next_op_report == LLONG_MAX),
+     * schedule one for 10 s after this first unreported op, but no earlier
+     * than the backoff time, and push the backoff for the report after that
+     * a further 60 s out. */
+    if (ofconn->next_op_report == LLONG_MAX) {
+        ofconn->first_op = now;
+        ofconn->next_op_report = MAX(now + 10 * 1000, ofconn->op_backoff);
+        ofconn->op_backoff = ofconn->next_op_report + 60 * 1000;
+    }
+    ofconn->last_op = now;
}
+\f
+/* OpenFlow 1.4 bundles. */
-/* Adds 'ofconn_node' to 'ofconn''s list of pending opgroups.
- *
- * If 'ofconn' is destroyed or its connection drops, then 'ofconn' will remove
- * 'ofconn_node' from the list and re-initialize it with list_init(). The
- * client may, therefore, use list_is_empty(ofconn_node) to determine whether
- * 'ofconn_node' is still associated with an active ofconn.
- *
- * The client may also remove ofconn_node from the list itself, with
- * list_remove(). */
-void
-ofconn_add_opgroup(struct ofconn *ofconn, struct list *ofconn_node)
+/* Hash function for OpenFlow bundle IDs, used to index 'ofconn->bundles'. */
+static inline uint32_t
+bundle_hash(uint32_t id)
+{
+    return hash_int(id, 0);
+}
+
+/* Returns the open bundle with the given 'id' on 'ofconn', or NULL if there
+ * is none. */
+struct ofp_bundle *
+ofconn_get_bundle(struct ofconn *ofconn, uint32_t id)
+{
+    struct ofp_bundle *bundle;
+
+    HMAP_FOR_EACH_IN_BUCKET(bundle, node, bundle_hash(id), &ofconn->bundles) {
+        if (bundle->id == id) {
+            return bundle;
+        }
+    }
+
+    return NULL;
+}
+
+/* Adds 'bundle' to 'ofconn''s set of open bundles.  Always succeeds and
+ * returns 0; callers are responsible for not inserting a second bundle with
+ * the same ID (see ofconn_get_bundle()). */
+enum ofperr
+ofconn_insert_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
+{
+    /* XXX: Check the limit of open bundles */
+
+    hmap_insert(&ofconn->bundles, &bundle->node, bundle_hash(bundle->id));
+
+    return 0;
+}
+
+/* Removes 'bundle' from 'ofconn''s set of open bundles.  Does not free
+ * 'bundle' itself.  Always succeeds and returns 0. */
+enum ofperr
+ofconn_remove_bundle(struct ofconn *ofconn, struct ofp_bundle *bundle)
{
-    list_push_back(&ofconn->opgroups, ofconn_node);
+    hmap_remove(&ofconn->bundles, &bundle->node);
+
+    return 0;
+}
+
+/* Removes every open bundle on 'ofconn', via ofp_bundle_remove__() with a
+ * 'false' final argument -- presumably "do not commit"; ownership/freeing of
+ * each bundle is handled by that helper; TODO confirm its contract. */
+static void
+bundle_remove_all(struct ofconn *ofconn)
+{
+    struct ofp_bundle *b, *next;
+
+    HMAP_FOR_EACH_SAFE (b, next, node, &ofconn->bundles) {
+        ofp_bundle_remove__(ofconn, b, false);
+    }
+}
\f
/* Private ofconn functions. */
ofconn->type = type;
ofconn->enable_async_msgs = enable_async_msgs;
- list_init(&ofconn->opgroups);
-
hmap_init(&ofconn->monitors);
list_init(&ofconn->updates);
+ hmap_init(&ofconn->bundles);
+
ofconn_flush(ofconn);
return ofconn;
* connection to the next. */
static void
ofconn_flush(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *monitor, *next_monitor;
int i;
+ ofconn_log_flow_mods(ofconn);
+
ofconn->role = OFPCR12_ROLE_EQUAL;
ofconn_set_protocol(ofconn, OFPUTIL_P_NONE);
- ofconn->packet_in_format = NXPIF_OPENFLOW10;
-
- /* Disassociate 'ofconn' from all of the ofopgroups that it initiated that
- * have not yet completed. (Those ofopgroups will still run to completion
- * in the usual way, but any errors that they run into will not be reported
- * on any OpenFlow channel.)
- *
- * Also discard any blocked operation on 'ofconn'. */
- while (!list_is_empty(&ofconn->opgroups)) {
- list_init(list_pop_front(&ofconn->opgroups));
- }
- ofpbuf_delete(ofconn->blocked);
- ofconn->blocked = NULL;
+ ofconn->packet_in_format = NXPIF_STANDARD;
rconn_packet_counter_destroy(ofconn->packet_in_counter);
ofconn->packet_in_counter = rconn_packet_counter_create();
rconn_packet_counter_destroy(ofconn->reply_counter);
ofconn->reply_counter = rconn_packet_counter_create();
- if (ofconn->enable_async_msgs) {
- uint32_t *master = ofconn->master_async_config;
- uint32_t *slave = ofconn->slave_async_config;
-
- /* "master" and "other" roles get all asynchronous messages by default,
- * except that the controller needs to enable nonstandard "packet-in"
- * reasons itself. */
- master[OAM_PACKET_IN] = (1u << OFPR_NO_MATCH) | (1u << OFPR_ACTION);
- master[OAM_PORT_STATUS] = ((1u << OFPPR_ADD)
- | (1u << OFPPR_DELETE)
- | (1u << OFPPR_MODIFY));
- master[OAM_FLOW_REMOVED] = ((1u << OFPRR_IDLE_TIMEOUT)
- | (1u << OFPRR_HARD_TIMEOUT)
- | (1u << OFPRR_DELETE));
-
- /* "slave" role gets port status updates by default. */
- slave[OAM_PACKET_IN] = 0;
- slave[OAM_PORT_STATUS] = ((1u << OFPPR_ADD)
- | (1u << OFPPR_DELETE)
- | (1u << OFPPR_MODIFY));
- slave[OAM_FLOW_REMOVED] = 0;
- } else {
- memset(ofconn->master_async_config, 0,
- sizeof ofconn->master_async_config);
- memset(ofconn->slave_async_config, 0,
- sizeof ofconn->slave_async_config);
- }
+ free(ofconn->async_cfg);
+ ofconn->async_cfg = NULL;
+
+ ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
+ ofconn->first_op = ofconn->last_op = LLONG_MIN;
+ ofconn->next_op_report = LLONG_MAX;
+ ofconn->op_backoff = LLONG_MIN;
HMAP_FOR_EACH_SAFE (monitor, next_monitor, ofconn_node,
&ofconn->monitors) {
static void
ofconn_destroy(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
ofconn_flush(ofconn);
hmap_remove(&ofconn->connmgr->controllers, &ofconn->hmap_node);
}
+ bundle_remove_all(ofconn);
+ hmap_destroy(&ofconn->bundles);
+
hmap_destroy(&ofconn->monitors);
list_remove(&ofconn->node);
rconn_destroy(ofconn->rconn);
+/* Returns true if 'ofconn' may receive and process another OpenFlow message,
+ * that is, if fewer than OFCONN_REPLY_MAX replies are still queued for
+ * transmission on this connection. */
static bool
ofconn_may_recv(const struct ofconn *ofconn)
{
-    int count = ofconn->reply_counter->n_packets;
-    return (!ofconn->blocked || ofconn->retry) && count < OFCONN_REPLY_MAX;
+    int count = rconn_packet_counter_n_packets(ofconn->reply_counter);
+    return count < OFCONN_REPLY_MAX;
}
static void
ofconn_run(struct ofconn *ofconn,
- bool (*handle_openflow)(struct ofconn *,
+ void (*handle_openflow)(struct ofconn *,
const struct ofpbuf *ofp_msg))
{
struct connmgr *mgr = ofconn->connmgr;
size_t i;
for (i = 0; i < N_SCHEDULERS; i++) {
- pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn);
+ struct ovs_list txq;
+
+ pinsched_run(ofconn->schedulers[i], &txq);
+ do_send_packet_ins(ofconn, &txq);
}
rconn_run(ofconn->rconn);
- if (handle_openflow) {
- /* Limit the number of iterations to avoid starving other tasks. */
- for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
- struct ofpbuf *of_msg;
-
- of_msg = (ofconn->blocked
- ? ofconn->blocked
- : rconn_recv(ofconn->rconn));
- if (!of_msg) {
- break;
- }
- if (mgr->fail_open) {
- fail_open_maybe_recover(mgr->fail_open);
- }
+ /* Limit the number of iterations to avoid starving other tasks. */
+ for (i = 0; i < 50 && ofconn_may_recv(ofconn); i++) {
+ struct ofpbuf *of_msg = rconn_recv(ofconn->rconn);
+ if (!of_msg) {
+ break;
+ }
- if (handle_openflow(ofconn, of_msg)) {
- ofpbuf_delete(of_msg);
- ofconn->blocked = NULL;
- } else {
- ofconn->blocked = of_msg;
- ofconn->retry = false;
- }
+ if (mgr->fail_open) {
+ fail_open_maybe_recover(mgr->fail_open);
}
+
+ handle_openflow(ofconn, of_msg);
+ ofpbuf_delete(of_msg);
}
+ if (time_msec() >= ofconn->next_op_report) {
+ ofconn_log_flow_mods(ofconn);
+ }
+
+ ovs_mutex_lock(&ofproto_mutex);
if (!rconn_is_alive(ofconn->rconn)) {
ofconn_destroy(ofconn);
} else if (!rconn_is_connected(ofconn->rconn)) {
ofconn_flush(ofconn);
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
static void
-ofconn_wait(struct ofconn *ofconn, bool handling_openflow)
+ofconn_wait(struct ofconn *ofconn)
{
int i;
pinsched_wait(ofconn->schedulers[i]);
}
rconn_run_wait(ofconn->rconn);
- if (handling_openflow && ofconn_may_recv(ofconn)) {
+ if (ofconn_may_recv(ofconn)) {
rconn_recv_wait(ofconn->rconn);
}
+ if (ofconn->next_op_report != LLONG_MAX) {
+ poll_timer_wait_until(ofconn->next_op_report);
+ }
+}
+
+/* If 'ofconn' has flow_mods not yet reported to the log, emits a single
+ * INFO-level summary of them (total count, the time range over which they
+ * occurred, and a breakdown into adds/deletes/modifications), then resets
+ * the counters.  Always cancels the next scheduled report by setting
+ * 'next_op_report' to LLONG_MAX. */
+static void
+ofconn_log_flow_mods(struct ofconn *ofconn)
+{
+    int n_flow_mods = ofconn->n_add + ofconn->n_delete + ofconn->n_modify;
+    if (n_flow_mods) {
+        long long int ago = (time_msec() - ofconn->first_op) / 1000;
+        long long int interval = (ofconn->last_op - ofconn->first_op) / 1000;
+        struct ds s;
+
+        ds_init(&s);
+        ds_put_format(&s, "%d flow_mods ", n_flow_mods);
+        if (interval == ago) {
+            ds_put_format(&s, "in the last %lld s", ago);
+        } else if (interval) {
+            ds_put_format(&s, "in the %lld s starting %lld s ago",
+                          interval, ago);
+        } else {
+            ds_put_format(&s, "%lld s ago", ago);
+        }
+
+        ds_put_cstr(&s, " (");
+        if (ofconn->n_add) {
+            ds_put_format(&s, "%d adds, ", ofconn->n_add);
+        }
+        if (ofconn->n_delete) {
+            ds_put_format(&s, "%d deletes, ", ofconn->n_delete);
+        }
+        if (ofconn->n_modify) {
+            ds_put_format(&s, "%d modifications, ", ofconn->n_modify);
+        }
+        /* Trim the trailing ", " left by the last counted item above. */
+        s.length -= 2;
+        ds_put_char(&s, ')');
+
+        VLOG_INFO("%s: %s", rconn_get_name(ofconn->rconn), ds_cstr(&s));
+        ds_destroy(&s);
+
+        ofconn->n_add = ofconn->n_delete = ofconn->n_modify = 0;
+    }
+    ofconn->next_op_report = LLONG_MAX;
+}
/* Returns true if 'ofconn' should receive asynchronous messages of the given
* 'ofconn'. */
static bool
ofconn_receives_async_msg(const struct ofconn *ofconn,
- enum ofconn_async_msg_type type,
+ enum ofputil_async_msg_type type,
unsigned int reason)
{
- const uint32_t *async_config;
-
ovs_assert(reason < 32);
ovs_assert((unsigned int) type < OAM_N_TYPES);
- if (ofconn_get_protocol(ofconn) == OFPUTIL_P_NONE
- || !rconn_is_connected(ofconn->rconn)) {
- return false;
- }
-
/* Keep the following code in sync with the documentation in the
* "Asynchronous Messages" section in DESIGN. */
return false;
}
- async_config = (ofconn->role == OFPCR12_ROLE_SLAVE
- ? ofconn->slave_async_config
- : ofconn->master_async_config);
- if (!(async_config[type] & (1u << reason))) {
- return false;
+ struct ofputil_async_cfg ac = ofconn_get_async_config(ofconn);
+ uint32_t *masks = (ofconn->role == OFPCR12_ROLE_SLAVE
+ ? ac.slave
+ : ac.master);
+ return (masks[type] & (1u << reason)) != 0;
+}
+
+/* The default "table-miss" behaviour for OpenFlow1.3+ is to drop the
+ * packet rather than to send the packet to the controller.
+ *
+ * This function returns true to indicate that a packet_in message
+ * for a "table-miss" should be sent to at least one controller.
+ * That is there is at least one controller with controller_id 0
+ * which connected using an OpenFlow version earlier than OpenFlow1.3.
+ *
+ * False otherwise.
+ *
+ * This logic assumes that "table-miss" packet_in messages
+ * are always sent to controller_id 0. */
+bool
+connmgr_wants_packet_in_on_miss(struct connmgr *mgr) OVS_EXCLUDED(ofproto_mutex)
+{
+    struct ofconn *ofconn;
+
+    ovs_mutex_lock(&ofproto_mutex);
+    LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
+        enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
+
+        /* OFPUTIL_P_NONE means no protocol has been negotiated on this
+         * connection; it also counts as wanting table-miss packet-ins. */
+        if (ofconn->controller_id == 0 &&
+            (protocol == OFPUTIL_P_NONE ||
+             ofputil_protocol_to_ofp_version(protocol) < OFP13_VERSION)) {
+            ovs_mutex_unlock(&ofproto_mutex);
+            return true;
+        }
    }
+    ovs_mutex_unlock(&ofproto_mutex);
-    return true;
+    return false;
}
/* Returns a human-readable name for an OpenFlow connection between 'mgr' and
\f
/* Sending asynchronous messages. */
-static void schedule_packet_in(struct ofconn *, struct ofputil_packet_in);
-
/* Sends an OFPT_PORT_STATUS message with 'opp' and 'reason' to appropriate
- * controllers managed by 'mgr'. */
+ * controllers managed by 'mgr'. For messages caused by a controller
+ * OFPT_PORT_MOD, specify 'source' as the controller connection that sent the
+ * request; otherwise, specify 'source' as NULL. */
void
-connmgr_send_port_status(struct connmgr *mgr,
+connmgr_send_port_status(struct connmgr *mgr, struct ofconn *source,
const struct ofputil_phy_port *pp, uint8_t reason)
{
/* XXX Should limit the number of queued port status change messages. */
if (ofconn_receives_async_msg(ofconn, OAM_PORT_STATUS, reason)) {
struct ofpbuf *msg;
+ /* Before 1.5, OpenFlow specified that OFPT_PORT_MOD should not
+ * generate OFPT_PORT_STATUS messages. That requirement was a
+ * relic of how OpenFlow originally supported a single controller,
+ * so that one could expect the controller to already know the
+ * changes it had made.
+ *
+ * EXT-338 changes OpenFlow 1.5 OFPT_PORT_MOD to send
+ * OFPT_PORT_STATUS messages to every controller. This is
+ * obviously more useful in the multi-controller case. We could
+ * always implement it that way in OVS, but that would risk
+ * confusing controllers that are intended for single-controller
+ * use only. (Imagine a controller that generates an OFPT_PORT_MOD
+ * in response to any OFPT_PORT_STATUS!)
+ *
+ * So this compromises: for OpenFlow 1.4 and earlier, it generates
+ * OFPT_PORT_STATUS for OFPT_PORT_MOD, but not back to the
+ * originating controller. In a single-controller environment, in
+ * particular, this means that it will never generate
+ * OFPT_PORT_STATUS for OFPT_PORT_MOD at all. */
+ if (ofconn == source
+ && rconn_get_version(ofconn->rconn) < OFP15_VERSION) {
+ continue;
+ }
+
msg = ofputil_encode_port_status(&ps, ofconn_get_protocol(ofconn));
ofconn_send(ofconn, msg, NULL);
}
}
}
+/* Sends an OFPT_REQUESTFORWARD message with 'request' and 'reason' to
+ * appropriate controllers managed by 'mgr'.  For messages caused by a
+ * controller OFPT_GROUP_MOD and OFPT_METER_MOD, specify 'source' as the
+ * controller connection that sent the request; otherwise, specify 'source'
+ * as NULL. */
+void
+connmgr_send_requestforward(struct connmgr *mgr, const struct ofconn *source,
+                            const struct ofputil_requestforward *rf)
+{
+    struct ofconn *ofconn;
+
+    LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
+        /* Requestforward is OpenFlow 1.4+ only, so skip connections using
+         * older versions, and never echo the request back to its source. */
+        if (ofconn_receives_async_msg(ofconn, OAM_REQUESTFORWARD, rf->reason)
+            && rconn_get_version(ofconn->rconn) >= OFP14_VERSION
+            && ofconn != source) {
+            enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
+            ofconn_send(ofconn, ofputil_encode_requestforward(rf, protocol),
+                        NULL);
+        }
+    }
+}
+
/* Sends an OFPT_FLOW_REMOVED or NXT_FLOW_REMOVED message based on 'fr' to
* appropriate controllers managed by 'mgr'. */
void
}
}
-/* Given 'pin', sends an OFPT_PACKET_IN message to each OpenFlow controller as
- * necessary according to their individual configurations.
- *
- * The caller doesn't need to fill in pin->buffer_id or pin->total_len. */
+/* Sends an OFPT_TABLE_STATUS message with 'reason' to appropriate controllers
+ * managed by 'mgr'.  When the table state changes, the controller needs to be
+ * informed with the OFPT_TABLE_STATUS message.  The reason values
+ * OFPTR_VACANCY_DOWN and OFPTR_VACANCY_UP identify a vacancy message.  The
+ * vacancy events are generated when the remaining space in the flow table
+ * changes and crosses one of the vacancy thresholds specified by
+ * OFPT_TABLE_MOD. */
 void
-connmgr_send_packet_in(struct connmgr *mgr,
-                       const struct ofputil_packet_in *pin)
+connmgr_send_table_status(struct connmgr *mgr,
+                          const struct ofputil_table_desc *td,
+                          uint8_t reason)
 {
+    struct ofputil_table_status ts;
     struct ofconn *ofconn;
+    ts.reason = reason;
+    ts.desc = *td;
+
     LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
-        if (ofconn_receives_async_msg(ofconn, OAM_PACKET_IN, pin->reason)
-            && ofconn->controller_id == pin->controller_id) {
-            schedule_packet_in(ofconn, *pin);
+        if (ofconn_receives_async_msg(ofconn, OAM_TABLE_STATUS, reason)) {
+            struct ofpbuf *msg;
+
+            /* The encoder may return NULL when the connection's negotiated
+             * protocol cannot express OFPT_TABLE_STATUS (presumably
+             * pre-OF1.4 connections -- confirm against
+             * ofputil_encode_table_status()); such controllers are simply
+             * not informed. */
+            msg = ofputil_encode_table_status(&ts,
+                                              ofconn_get_protocol(ofconn));
+            if (msg) {
+                ofconn_send(ofconn, msg, NULL);
+            }
         }
     }
 }
-/* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */
-static void
-do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_)
+/* Given 'pin', sends an OFPT_PACKET_IN message to each OpenFlow controller as
+ * necessary according to their individual configurations. */
+void
+connmgr_send_async_msg(struct connmgr *mgr,
+                       const struct ofproto_async_msg *am)
 {
-    struct ofconn *ofconn = ofconn_;
+    struct ofconn *ofconn;
-    rconn_send_with_limit(ofconn->rconn, ofp_packet_in,
-                          ofconn->packet_in_counter, 100);
+    LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
+        enum ofputil_protocol protocol = ofconn_get_protocol(ofconn);
+        /* Skip connections that cannot or should not receive this message:
+         * no OpenFlow version negotiated yet, not currently connected,
+         * addressed to a different controller ID, or filtered out by the
+         * controller's async-message configuration. */
+        if (protocol == OFPUTIL_P_NONE || !rconn_is_connected(ofconn->rconn)
+            || ofconn->controller_id != am->controller_id
+            || !ofconn_receives_async_msg(ofconn, am->oam,
+                                          am->pin.up.public.reason)) {
+            continue;
+        }
+
+        /* A negative 'max_len' means no explicit limit was requested, so
+         * fall back to this connection's configured 'miss_send_len'. */
+        struct ofpbuf *msg = ofputil_encode_packet_in_private(
+            &am->pin.up, protocol, ofconn->packet_in_format,
+            am->pin.max_len >= 0 ? am->pin.max_len : ofconn->miss_send_len,
+            ofconn->pktbuf);
+
+        struct ovs_list txq;
+        /* The three miss-type reasons share one pinsched queue and all
+         * other reasons (e.g. OFPR_ACTION) share the other.
+         * NOTE(review): this indexes misses as schedulers[1], the opposite
+         * of the removed code's "reason == OFPR_NO_MATCH ? 0 : 1" -- verify
+         * that the scheduler-creation code agrees with the new mapping. */
+        bool is_miss = (am->pin.up.public.reason == OFPR_NO_MATCH ||
+                        am->pin.up.public.reason == OFPR_EXPLICIT_MISS ||
+                        am->pin.up.public.reason == OFPR_IMPLICIT_MISS);
+        pinsched_send(ofconn->schedulers[is_miss],
+                      am->pin.up.public.flow_metadata.flow.in_port.ofp_port,
+                      msg, &txq);
+        do_send_packet_ins(ofconn, &txq);
+    }
 }
-/* Takes 'pin', composes an OpenFlow packet-in message from it, and passes it
- * to 'ofconn''s packet scheduler for sending. */
+/* Sends each encoded packet-in message on the 'txq' list (as produced by
+ * pinsched_send()) to 'ofconn', dropping and logging any message that would
+ * exceed the per-connection queued packet-in limit. */
 static void
-schedule_packet_in(struct ofconn *ofconn, struct ofputil_packet_in pin)
+do_send_packet_ins(struct ofconn *ofconn, struct ovs_list *txq)
 {
-    struct connmgr *mgr = ofconn->connmgr;
-
-    pin.total_len = pin.packet_len;
+    struct ofpbuf *pin;
-    /* Get OpenFlow buffer_id. */
-    if (pin.reason == OFPR_ACTION) {
-        pin.buffer_id = UINT32_MAX;
-    } else if (mgr->fail_open && fail_open_is_active(mgr->fail_open)) {
-        pin.buffer_id = pktbuf_get_null();
-    } else if (!ofconn->pktbuf) {
-        pin.buffer_id = UINT32_MAX;
-    } else {
-        pin.buffer_id = pktbuf_save(ofconn->pktbuf, pin.packet, pin.packet_len,
-                                    pin.fmd.in_port);
-    }
+    LIST_FOR_EACH_POP (pin, list_node, txq) {
+        /* EAGAIN indicates that 'packet_in_counter' already holds the
+         * 100-packet limit's worth of undelivered messages; the message is
+         * then dropped rather than queued further. */
+        if (rconn_send_with_limit(ofconn->rconn, pin,
+                                  ofconn->packet_in_counter, 100) == EAGAIN) {
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
-    /* Figure out how much of the packet to send. */
-    if (pin.reason == OFPR_NO_MATCH) {
-        pin.send_len = pin.packet_len;
-    } else {
-        /* Caller should have initialized 'send_len' to 'max_len' specified in
-         * output action. */
-    }
-    if (pin.buffer_id != UINT32_MAX) {
-        pin.send_len = MIN(pin.send_len, ofconn->miss_send_len);
+            VLOG_INFO_RL(&rl, "%s: dropping packet-in due to queue overflow",
+                         rconn_get_name(ofconn->rconn));
+        }
     }
-
-    /* Make OFPT_PACKET_IN and hand over to packet scheduler.  It might
-     * immediately call into do_send_packet_in() or it might buffer it for a
-     * while (until a later call to pinsched_run()). */
-    pinsched_send(ofconn->schedulers[pin.reason == OFPR_NO_MATCH ? 0 : 1],
-                  pin.fmd.in_port,
-                  ofputil_encode_packet_in(&pin, ofconn_get_protocol(ofconn),
-                                           ofconn->packet_in_format),
-                  do_send_packet_in, ofconn);
 }
\f
/* Fail-open settings. */
* In-band control has more sophisticated code that manages flows itself. */
void
connmgr_flushed(struct connmgr *mgr)
+ OVS_EXCLUDED(ofproto_mutex)
{
if (mgr->fail_open) {
fail_open_flushed(mgr->fail_open);
ofpbuf_init(&ofpacts, OFPACT_OUTPUT_SIZE);
ofpact_put_OUTPUT(&ofpacts)->port = OFPP_NORMAL;
- ofpact_pad(&ofpacts);
match_init_catchall(&match);
- ofproto_add_flow(mgr->ofproto, &match, 0, ofpacts.data, ofpacts.size);
+ ofproto_add_flow(mgr->ofproto, &match, 0, ofpacts.data,
+ ofpacts.size);
ofpbuf_uninit(&ofpacts);
}
}
+
+/* Returns the number of hidden rules created by the in-band and fail-open
+ * implementations in table 0.  (Subtracting this count from the number of
+ * rules in the table 0 classifier, as maintained in struct oftable, yields
+ * the number of flows that OVS should report via OpenFlow for table 0.) */
+int
+connmgr_count_hidden_rules(const struct connmgr *mgr)
+{
+    int n_hidden = 0;
+    /* Each feature installs hidden rules only while it is enabled, so a
+     * NULL pointer means it contributes nothing. */
+    if (mgr->in_band) {
+        n_hidden += in_band_count_rules(mgr->in_band);
+    }
+    if (mgr->fail_open) {
+        n_hidden += fail_open_count_rules(mgr->fail_open);
+    }
+    return n_hidden;
+}
\f
/* Creates a new ofservice for 'target' in 'mgr'. Returns 0 if successful,
* otherwise a positive errno value.
enum ofperr
ofmonitor_create(const struct ofputil_flow_monitor_request *request,
struct ofconn *ofconn, struct ofmonitor **monitorp)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *m;
m = ofmonitor_lookup(ofconn, request->id);
if (m) {
- return OFPERR_NXBRC_FM_DUPLICATE_ID;
+ return OFPERR_OFPMOFC_MONITOR_EXISTS;
}
m = xmalloc(sizeof *m);
struct ofmonitor *
ofmonitor_lookup(struct ofconn *ofconn, uint32_t id)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *m;
void
ofmonitor_destroy(struct ofmonitor *m)
+ OVS_REQUIRES(ofproto_mutex)
{
if (m) {
minimatch_destroy(&m->match);
ofmonitor_report(struct connmgr *mgr, struct rule *rule,
enum nx_flow_update_event event,
enum ofp_flow_removed_reason reason,
- const struct ofconn *abbrev_ofconn, ovs_be32 abbrev_xid)
+ const struct ofconn *abbrev_ofconn, ovs_be32 abbrev_xid,
+ const struct rule_actions *old_actions)
+ OVS_REQUIRES(ofproto_mutex)
{
enum nx_flow_monitor_flags update;
struct ofconn *ofconn;
+ if (rule_is_hidden(rule)) {
+ return;
+ }
+
switch (event) {
case NXFME_ADDED:
update = NXFMF_ADD;
default:
case NXFME_ABBREV:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
if (m->flags & update
&& (m->table_id == 0xff || m->table_id == rule->table_id)
- && ofoperation_has_out_port(rule->pending, m->out_port)
+ && (ofproto_rule_has_out_port(rule, m->out_port)
+ || (old_actions
+ && ofpacts_output_to_port(old_actions->ofpacts,
+ old_actions->ofpacts_len,
+ m->out_port)))
&& cls_rule_is_loose_match(&rule->cr, &m->match)) {
flags |= m->flags;
}
ofconn->sent_abbrev_update = false;
}
- if (ofconn != abbrev_ofconn || ofconn->monitor_paused) {
+ if (flags & NXFMF_OWN || ofconn != abbrev_ofconn
+ || ofconn->monitor_paused) {
struct ofputil_flow_update fu;
struct match match;
fu.event = event;
fu.reason = event == NXFME_DELETED ? reason : 0;
- fu.idle_timeout = rule->idle_timeout;
- fu.hard_timeout = rule->hard_timeout;
fu.table_id = rule->table_id;
fu.cookie = rule->flow_cookie;
minimatch_expand(&rule->cr.match, &match);
fu.match = &match;
fu.priority = rule->cr.priority;
+
+ ovs_mutex_lock(&rule->mutex);
+ fu.idle_timeout = rule->idle_timeout;
+ fu.hard_timeout = rule->hard_timeout;
+ ovs_mutex_unlock(&rule->mutex);
+
if (flags & NXFMF_ACTIONS) {
- fu.ofpacts = rule->ofpacts;
- fu.ofpacts_len = rule->ofpacts_len;
+ const struct rule_actions *actions = rule_get_actions(rule);
+ fu.ofpacts = actions->ofpacts;
+ fu.ofpacts_len = actions->ofpacts_len;
} else {
fu.ofpacts = NULL;
fu.ofpacts_len = 0;
void
ofmonitor_flush(struct connmgr *mgr)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofconn *ofconn;
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- struct ofpbuf *msg, *next;
+ struct ofpbuf *msg;
+
+ LIST_FOR_EACH_POP (msg, list_node, &ofconn->updates) {
+ unsigned int n_bytes;
- LIST_FOR_EACH_SAFE (msg, next, list_node, &ofconn->updates) {
- list_remove(&msg->list_node);
ofconn_send(ofconn, msg, ofconn->monitor_counter);
- if (!ofconn->monitor_paused
- && ofconn->monitor_counter->n_bytes > 128 * 1024) {
+ n_bytes = rconn_packet_counter_n_bytes(ofconn->monitor_counter);
+ if (!ofconn->monitor_paused && n_bytes > 128 * 1024) {
struct ofpbuf *pause;
COVERAGE_INC(ofmonitor_pause);
static void
ofmonitor_resume(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
+ struct rule_collection rules;
struct ofpbuf *resumed;
struct ofmonitor *m;
- struct list rules;
- struct list msgs;
+ struct ovs_list msgs;
- list_init(&rules);
+ rule_collection_init(&rules);
HMAP_FOR_EACH (m, ofconn_node, &ofconn->monitors) {
ofmonitor_collect_resume_rules(m, ofconn->monitor_paused, &rules);
}
ofconn->monitor_paused = 0;
}
+/* Returns true if 'ofconn' is currently paused for flow monitoring and every
+ * previously queued monitor update has drained from its connection (the
+ * monitor counter reports zero packets outstanding), i.e. it is now safe to
+ * resume it. */
+static bool
+ofmonitor_may_resume(const struct ofconn *ofconn)
+    OVS_REQUIRES(ofproto_mutex)
+{
+    return (ofconn->monitor_paused != 0
+            && !rconn_packet_counter_n_packets(ofconn->monitor_counter));
+}
+
 static void
 ofmonitor_run(struct connmgr *mgr)
 {
     struct ofconn *ofconn;
+    /* Both ofmonitor_may_resume() and ofmonitor_resume() are annotated
+     * OVS_REQUIRES(ofproto_mutex), so hold the mutex across the scan. */
+    ovs_mutex_lock(&ofproto_mutex);
     LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
-        if (ofconn->monitor_paused && !ofconn->monitor_counter->n_packets) {
+        if (ofmonitor_may_resume(ofconn)) {
             COVERAGE_INC(ofmonitor_resume);
             ofmonitor_resume(ofconn);
         }
     }
+    ovs_mutex_unlock(&ofproto_mutex);
 }
static void
{
struct ofconn *ofconn;
+ ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- if (ofconn->monitor_paused && !ofconn->monitor_counter->n_packets) {
+ if (ofmonitor_may_resume(ofconn)) {
poll_immediate_wake();
}
}
+ ovs_mutex_unlock(&ofproto_mutex);
+}
+
+/* Frees 'am' together with every malloc'ed buffer it owns: the packet data
+ * and userdata plus the saved stack, actions, and action set of the embedded
+ * packet-in.  free() tolerates NULL, so members that were never populated
+ * are harmless. */
+void
+ofproto_async_msg_free(struct ofproto_async_msg *am)
+{
+    free(am->pin.up.public.packet);
+    free(am->pin.up.public.userdata);
+    free(am->pin.up.stack);
+    free(am->pin.up.actions);
+    free(am->pin.up.action_set);
+    free(am);
+}