COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
COVERAGE_DEFINE(subfacet_install_fail);
+COVERAGE_DEFINE(packet_in_overflow);
COVERAGE_DEFINE(flow_mod_overflow);
/* Number of implemented OpenFlow tables. */
struct ovs_mutex flow_mod_mutex;
struct list flow_mods OVS_GUARDED;
size_t n_flow_mods OVS_GUARDED;
+
+ /* Queue of "packet-in"s awaiting delivery from run_fast(); all three
+  * members are guarded by 'pin_mutex'. */
+ struct ovs_mutex pin_mutex;
+ struct list pins OVS_GUARDED;
+ size_t n_pins OVS_GUARDED;
};
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
struct ofputil_packet_in *pin)
{
- connmgr_send_packet_in(ofproto->up.connmgr, pin);
+ /* Takes ownership of 'pin' and 'pin->packet': both are freed here when
+  * the queue overflows, or after delivery in run_fast(). */
+ ovs_mutex_lock(&ofproto->pin_mutex);
+ if (ofproto->n_pins > 1024) {
+ ovs_mutex_unlock(&ofproto->pin_mutex);
+ /* Same overflow policy as the 'flow_mods' queue: drop and count. */
+ COVERAGE_INC(packet_in_overflow);
+ free(CONST_CAST(void *, pin->packet));
+ free(pin);
+ return;
+ }
+
+ list_push_back(&ofproto->pins, &pin->list_node);
+ ofproto->n_pins++;
+ ovs_mutex_unlock(&ofproto->pin_mutex);
}
\f
/* Factory functions. */
}
xlate_ofproto_set(ofproto, ofproto->up.name,
- ofproto->backer->dpif, ofproto->ml,
+ ofproto->backer->dpif, ofproto->miss_rule,
+ ofproto->no_packet_in_rule, ofproto->ml,
ofproto->stp, ofproto->mbridge,
ofproto->sflow, ofproto->ipfix,
ofproto->up.frag_handling,
ofproto->n_flow_mods = 0;
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
+ ovs_mutex_init(&ofproto->pin_mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_lock(&ofproto->pin_mutex);
+ list_init(&ofproto->pins);
+ ofproto->n_pins = 0;
+ ovs_mutex_unlock(&ofproto->pin_mutex);
+
ofproto_dpif_unixctl_init();
hmap_init(&ofproto->vlandev_map);
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
+ struct ofputil_packet_in *pin, *next_pin;
struct ofputil_flow_mod *fm, *next_fm;
struct oftable *table;
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
ovs_mutex_destroy(&ofproto->flow_mod_mutex);
+ /* Discard any undelivered packet-ins.  The 'pins' list holds
+  * 'struct ofputil_packet_in' elements (see
+  * ofproto_dpif_send_packet_in()), each owning its 'packet' buffer. */
+ ovs_mutex_lock(&ofproto->pin_mutex);
+ LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) {
+ list_remove(&pin->list_node);
+ ofproto->n_pins--;
+ free(CONST_CAST(void *, pin->packet));
+ free(pin);
+ }
+ ovs_mutex_unlock(&ofproto->pin_mutex);
+ ovs_mutex_destroy(&ofproto->pin_mutex);
+
mbridge_unref(ofproto->mbridge);
netflow_destroy(ofproto->netflow);
run_fast(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofputil_flow_mod *fm, *next;
+ struct ofputil_packet_in *pin, *next_pin;
+ struct ofputil_flow_mod *fm, *next_fm;
+ struct list flow_mods, pins;
struct ofport_dpif *ofport;
- struct list flow_mods;
/* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
}
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
- LIST_FOR_EACH_SAFE (fm, next, list_node, &flow_mods) {
+ LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &flow_mods) {
int error = ofproto_flow_mod(&ofproto->up, fm);
if (error && !VLOG_DROP_WARN(&rl)) {
VLOG_WARN("learning action failed to modify flow table (%s)",
free(fm);
}
+ /* Detach the queued packet-ins while holding 'pin_mutex', then deliver
+  * them below after dropping the mutex so connmgr work runs unlocked. */
+ ovs_mutex_lock(&ofproto->pin_mutex);
+ if (ofproto->n_pins) {
+ pins = ofproto->pins;
+ /* Repair the copied list header's self-pointers after the struct
+  * assignment above. */
+ list_moved(&pins);
+ list_init(&ofproto->pins);
+ ofproto->n_pins = 0;
+ } else {
+ list_init(&pins);
+ }
+ ovs_mutex_unlock(&ofproto->pin_mutex);
+
+ LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
+ connmgr_send_packet_in(ofproto->up.connmgr, pin);
+ list_remove(&pin->list_node);
+ free(CONST_CAST(void *, pin->packet));
+ free(pin);
+ }
+
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run_fast(ofport);
}
/* Expire OpenFlow flows whose idle_timeout or hard_timeout
* has passed. */
+ /* NOTE(review): 'expirable_mutex' presumably guards the 'expirable'
+  * list — confirm against its OVS_GUARDED annotation in ofproto.h. */
+ ovs_mutex_lock(&ofproto->up.expirable_mutex);
LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
&ofproto->up.expirable) {
rule_expire(rule_dpif_cast(rule));
}
+ ovs_mutex_unlock(&ofproto->up.expirable_mutex);
/* All outstanding data in existing flows has been accounted, so it's a
* good time to do bond rebalancing. */
static void
rule_expire(struct rule_dpif *rule)
{
+ uint16_t idle_timeout, hard_timeout;
long long int now;
uint8_t reason;
return;
}
+ /* Snapshot both timeouts under 'timeout_mutex' so the expiry checks
+  * below use a consistent pair even if another thread modifies them. */
+ ovs_mutex_lock(&rule->up.timeout_mutex);
+ hard_timeout = rule->up.hard_timeout;
+ idle_timeout = rule->up.idle_timeout;
+ ovs_mutex_unlock(&rule->up.timeout_mutex);
+
/* Has 'rule' expired? */
now = time_msec();
- if (rule->up.hard_timeout
- && now > rule->up.modified + rule->up.hard_timeout * 1000) {
+ if (hard_timeout && now > rule->up.modified + hard_timeout * 1000) {
reason = OFPRR_HARD_TIMEOUT;
- } else if (rule->up.idle_timeout
- && now > rule->up.used + rule->up.idle_timeout * 1000) {
+ } else if (idle_timeout && now > rule->up.used + idle_timeout * 1000) {
reason = OFPRR_IDLE_TIMEOUT;
} else {
return;
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
struct flow_wildcards *wc)
{
+ struct ofport_dpif *port;
struct rule_dpif *rule;
rule = rule_dpif_lookup_in_table(ofproto, flow, wc, 0);
if (rule) {
return rule;
}
+ /* Table miss: select miss_rule or no_packet_in_rule from the ingress
+  * port's config; an unknown port gets config 0 (i.e. miss_rule). */
+ port = get_ofp_port(ofproto, flow->in_port.ofp_port);
+ if (!port) {
+ VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
+ flow->in_port.ofp_port);
+ }
- return rule_dpif_miss_rule(ofproto, flow);
+ return choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule,
+ ofproto->no_packet_in_rule);
struct rule_dpif *
return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
+/* Given a port configuration (specified as zero if there's no port), chooses
+ * which of 'miss_rule' and 'no_packet_in_rule' should be used in case of a
+ * flow table miss. */
struct rule_dpif *
-rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
+choose_miss_rule(enum ofputil_port_config config, struct rule_dpif *miss_rule,
+ struct rule_dpif *no_packet_in_rule)
{
- struct ofport_dpif *port;
-
- port = get_ofp_port(ofproto, flow->in_port.ofp_port);
- if (!port) {
- VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
- flow->in_port.ofp_port);
- return ofproto->miss_rule;
- }
-
- if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
- return ofproto->no_packet_in_rule;
- }
- return ofproto->miss_rule;
+ return config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule;
}
static void