free(fmb);
}
+/* Discards any flow miss batches queued up in 'udpif' for 'ofproto' (because
+ * 'ofproto' is being destroyed).
+ *
+ * 'ofproto''s xports must already have been removed, otherwise new flow miss
+ * batches could still end up getting queued. */
+void
+flow_miss_batch_ofproto_destroyed(struct udpif *udpif,
+                                  const struct ofproto_dpif *ofproto)
+{
+    struct flow_miss_batch *fmb, *next_fmb;
+
+    /* Hold 'fmb_mutex' across the whole sweep so no batch is concurrently
+     * dequeued while we are pruning it. */
+    ovs_mutex_lock(&udpif->fmb_mutex);
+    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
+        struct flow_miss *miss, *next_miss;
+
+        /* Drop every miss in this batch that belongs to the dying ofproto. */
+        HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &fmb->misses) {
+            if (miss->ofproto == ofproto) {
+                hmap_remove(&fmb->misses, &miss->hmap_node);
+                miss_destroy(miss);
+            }
+        }
+
+        /* If pruning emptied the batch, there is nothing left to process, so
+         * unlink and destroy the batch itself and keep the count accurate. */
+        if (hmap_is_empty(&fmb->misses)) {
+            list_remove(&fmb->list_node);
+            flow_miss_batch_destroy(fmb);
+            udpif->n_fmbs--;
+        }
+    }
+    ovs_mutex_unlock(&udpif->fmb_mutex);
+}
+
/* Retreives the next drop key which ofproto-dpif needs to process. The caller
* is responsible for destroying it with drop_key_destroy(). */
struct drop_key *
flow_wildcards_init_catchall(&wc);
rule_dpif_lookup(ofproto, &miss->flow, &wc, &rule);
- rule_credit_stats(rule, &miss->stats);
+ rule_dpif_credit_stats(rule, &miss->stats);
xlate_in_init(&xin, ofproto, &miss->flow, rule, miss->stats.tcp_flags,
NULL);
xin.may_learn = true;
xlate_actions(&xin, &miss->xout);
flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);
- if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
+ if (rule_dpif_fail_open(rule)) {
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct ofputil_packet_in *pin;
xlate_actions_for_side_effects(&xin);
}
}
- rule_release(rule);
+ rule_dpif_release(rule);
if (miss->xout.odp_actions.size) {
LIST_FOR_EACH (packet, list_node, &miss->packets) {