From 8354bbc9ce36aec09b11ca5d55219c5c41c421ad Mon Sep 17 00:00:00 2001
From: Ben Pfaff
Date: Thu, 12 Sep 2013 17:42:23 -0700
Subject: [PATCH] guarded-list: New data structure for thread-safe queue.

We already had queues that were suitable for replacement by this data
structure, and I intend to add another one later on.

flow_miss_batch_ofproto_destroyed() did not work well with the
guarded-list structure (it required either adding a lot more functions
or breaking the abstraction) so I changed the caller to just use
udpif_revalidate().  Checking reval_seq at the end of
handle_miss_upcalls() also didn't work well with the abstraction, so I
decided that since this was a corner case anyway it would be acceptable
to just drop those in flow_miss_batch_next().

Signed-off-by: Ben Pfaff
Acked-by: Ethan Jackson
---
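A usage sketch of the new API, for reviewers (not part of the commit;
'struct frob', frob_enqueue(), and frob_dequeue() are hypothetical, and
the snippet assumes OVS's "guarded-list.h", "list.h", and CONTAINER_OF
from "util.h"):

    /* A queued work item; the embedded 'list_node' makes it linkable. */
    struct frob {
        struct list list_node;     /* Owned by 'queue' while enqueued. */
        int data;
    };

    static struct guarded_list queue;  /* guarded_list_init() at startup. */

    /* Producer: bounded push.  On overflow the caller keeps ownership of
     * the element and must free it itself. */
    static void
    frob_enqueue(struct frob *f)
    {
        if (!guarded_list_push_back(&queue, &f->list_node, 1024)) {
            free(f);               /* Queue full: drop the item. */
        }
    }

    /* Consumer: pop one element; returns NULL when the queue is empty. */
    static struct frob *
    frob_dequeue(void)
    {
        struct list *node = guarded_list_pop_front(&queue);
        return node ? CONTAINER_OF(node, struct frob, list_node) : NULL;
    }
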
 lib/automake.mk               |   2 +
 lib/guarded-list.c            |  97 ++++++++++++++++
 lib/guarded-list.h            |  41 +++++++
 ofproto/ofproto-dpif-upcall.c | 208 +++++++++++-----------------------
 ofproto/ofproto-dpif-upcall.h |   6 +-
 ofproto/ofproto-dpif.c        |  77 ++++---------
 6 files changed, 227 insertions(+), 204 deletions(-)
 create mode 100644 lib/guarded-list.c
 create mode 100644 lib/guarded-list.h

diff --git a/lib/automake.mk b/lib/automake.mk
index da1896a2e..92cfc13e0 100644
--- a/lib/automake.mk
+++ b/lib/automake.mk
@@ -58,6 +58,8 @@ lib_libopenvswitch_a_SOURCES = \
 	lib/fatal-signal.h \
 	lib/flow.c \
 	lib/flow.h \
+	lib/guarded-list.c \
+	lib/guarded-list.h \
 	lib/hash.c \
 	lib/hash.h \
 	lib/hindex.c \
diff --git a/lib/guarded-list.c b/lib/guarded-list.c
new file mode 100644
index 000000000..cbb203021
--- /dev/null
+++ b/lib/guarded-list.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "guarded-list.h"
+
+void
+guarded_list_init(struct guarded_list *list)
+{
+    ovs_mutex_init(&list->mutex);
+    list_init(&list->list);
+    list->n = 0;
+}
+
+void
+guarded_list_destroy(struct guarded_list *list)
+{
+    ovs_mutex_destroy(&list->mutex);
+}
+
+bool
+guarded_list_is_empty(const struct guarded_list *list)
+{
+    bool empty;
+
+    ovs_mutex_lock(&list->mutex);
+    empty = list->n == 0;
+    ovs_mutex_unlock(&list->mutex);
+
+    return empty;
+}
+
+/* If 'list' has fewer than 'max' elements, adds 'node' at the end of the list
+ * and returns the number of elements now on the list.
+ *
+ * If 'list' already has at least 'max' elements, returns 0 without modifying
+ * the list. */
+size_t
+guarded_list_push_back(struct guarded_list *list,
+                       struct list *node, size_t max)
+{
+    size_t retval = 0;
+
+    ovs_mutex_lock(&list->mutex);
+    if (list->n < max) {
+        list_push_back(&list->list, node);
+        retval = ++list->n;
+    }
+    ovs_mutex_unlock(&list->mutex);
+
+    return retval;
+}
+
+struct list *
+guarded_list_pop_front(struct guarded_list *list)
+{
+    struct list *node = NULL;
+
+    ovs_mutex_lock(&list->mutex);
+    if (list->n) {
+        node = list_pop_front(&list->list);
+        list->n--;
+    }
+    ovs_mutex_unlock(&list->mutex);
+
+    return node;
+}
+
+size_t
+guarded_list_pop_all(struct guarded_list *list, struct list *elements)
+{
+    size_t n;
+
+    ovs_mutex_lock(&list->mutex);
+    list_move(elements, &list->list);
+    n = list->n;
+
+    list_init(&list->list);
+    list->n = 0;
+    ovs_mutex_unlock(&list->mutex);
+
+    return n;
+}
diff --git a/lib/guarded-list.h b/lib/guarded-list.h
new file mode 100644
index 000000000..625865d42
--- /dev/null
+++ b/lib/guarded-list.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GUARDED_LIST_H
+#define GUARDED_LIST_H 1
+
+#include <stddef.h>
+#include "compiler.h"
+#include "list.h"
+#include "ovs-thread.h"
+
+struct guarded_list {
+    struct ovs_mutex mutex;
+    struct list list;
+    size_t n;
+};
+
+void guarded_list_init(struct guarded_list *);
+void guarded_list_destroy(struct guarded_list *);
+
+bool guarded_list_is_empty(const struct guarded_list *);
+
+size_t guarded_list_push_back(struct guarded_list *, struct list *,
+                              size_t max);
+struct list *guarded_list_pop_front(struct guarded_list *);
+size_t guarded_list_pop_all(struct guarded_list *, struct list *);
+
+#endif /* guarded-list.h */
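A note on guarded_list_pop_all(): it drains the whole queue in O(1) by
splicing the list into the caller's 'elements' with list_move(), so the
mutex is held only for the splice and the caller can then walk the batch
without blocking producers.  The intended drain pattern, as a sketch
(frob_process() and 'queue' are hypothetical, carried over from the
example above):

    struct list batch;
    struct frob *f, *next;

    guarded_list_pop_all(&queue, &batch);   /* Brief critical section. */
    LIST_FOR_EACH_SAFE (f, next, list_node, &batch) {
        list_remove(&f->list_node);         /* Unlink before handing off. */
        frob_process(f);                    /* Runs outside the lock. */
    }

destruct() and run_fast() in ofproto-dpif.c below drain 'flow_mods' and
'pins' in exactly this way.
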
diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index ae856a4e5..bc1e88419 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -23,6 +23,7 @@
 #include "dynamic-string.h"
 #include "dpif.h"
 #include "fail-open.h"
+#include "guarded-list.h"
 #include "latch.h"
 #include "seq.h"
 #include "list.h"
@@ -41,6 +42,7 @@ COVERAGE_DEFINE(upcall_queue_overflow);
 COVERAGE_DEFINE(drop_queue_overflow);
 COVERAGE_DEFINE(miss_queue_overflow);
 COVERAGE_DEFINE(fmb_queue_overflow);
+COVERAGE_DEFINE(fmb_queue_revalidated);
 
 /* A thread that processes each upcall handed to it by the dispatcher thread,
  * forwards the upcall's packet, and then queues it to the main ofproto_dpif
@@ -79,26 +81,15 @@ struct udpif {
     struct handler *handlers;          /* Miss handlers. */
     size_t n_handlers;
 
-    /* Atomic queue of unprocessed drop keys. */
-    struct ovs_mutex drop_key_mutex;
-    struct list drop_keys OVS_GUARDED;
-    size_t n_drop_keys OVS_GUARDED;
-
-    /* Atomic queue of special upcalls for ofproto-dpif to process. */
-    struct ovs_mutex upcall_mutex;
-    struct list upcalls OVS_GUARDED;
-    size_t n_upcalls OVS_GUARDED;
-
-    /* Atomic queue of flow_miss_batches. */
-    struct ovs_mutex fmb_mutex;
-    struct list fmbs OVS_GUARDED;
-    size_t n_fmbs OVS_GUARDED;
+    /* Queues to pass up to ofproto-dpif. */
+    struct guarded_list drop_keys; /* "struct drop key"s. */
+    struct guarded_list upcalls;   /* "struct upcall"s. */
+    struct guarded_list fmbs;      /* "struct flow_miss_batch"es. */
 
     /* Number of times udpif_revalidate() has been called. */
     atomic_uint reval_seq;
 
     struct seq *wait_seq;
-    uint64_t last_seq;
 
     struct latch exit_latch;           /* Tells child threads to exit. */
 };
@@ -121,13 +112,10 @@ udpif_create(struct dpif_backer *backer, struct dpif *dpif)
     udpif->secret = random_uint32();
     udpif->wait_seq = seq_create();
     latch_init(&udpif->exit_latch);
-    list_init(&udpif->drop_keys);
-    list_init(&udpif->upcalls);
-    list_init(&udpif->fmbs);
+    guarded_list_init(&udpif->drop_keys);
+    guarded_list_init(&udpif->upcalls);
+    guarded_list_init(&udpif->fmbs);
     atomic_init(&udpif->reval_seq, 0);
-    ovs_mutex_init(&udpif->drop_key_mutex);
-    ovs_mutex_init(&udpif->upcall_mutex);
-    ovs_mutex_init(&udpif->fmb_mutex);
 
     return udpif;
 }
@@ -153,9 +141,9 @@ udpif_destroy(struct udpif *udpif)
         flow_miss_batch_destroy(fmb);
     }
 
-    ovs_mutex_destroy(&udpif->drop_key_mutex);
-    ovs_mutex_destroy(&udpif->upcall_mutex);
-    ovs_mutex_destroy(&udpif->fmb_mutex);
+    guarded_list_destroy(&udpif->drop_keys);
+    guarded_list_destroy(&udpif->upcalls);
+    guarded_list_destroy(&udpif->fmbs);
     latch_destroy(&udpif->exit_latch);
     seq_destroy(udpif->wait_seq);
     free(udpif);
@@ -228,34 +216,17 @@ udpif_recv_set(struct udpif *udpif, size_t n_handlers, bool enable)
     }
 }
 
-void
-udpif_run(struct udpif *udpif)
-{
-    udpif->last_seq = seq_read(udpif->wait_seq);
-}
-
 void
 udpif_wait(struct udpif *udpif)
 {
-    ovs_mutex_lock(&udpif->drop_key_mutex);
-    if (udpif->n_drop_keys) {
-        poll_immediate_wake();
-    }
-    ovs_mutex_unlock(&udpif->drop_key_mutex);
-
-    ovs_mutex_lock(&udpif->upcall_mutex);
-    if (udpif->n_upcalls) {
-        poll_immediate_wake();
-    }
-    ovs_mutex_unlock(&udpif->upcall_mutex);
-
-    ovs_mutex_lock(&udpif->fmb_mutex);
-    if (udpif->n_fmbs) {
+    uint64_t seq = seq_read(udpif->wait_seq);
+    if (!guarded_list_is_empty(&udpif->drop_keys) ||
+        !guarded_list_is_empty(&udpif->upcalls) ||
+        !guarded_list_is_empty(&udpif->fmbs)) {
         poll_immediate_wake();
+    } else {
+        seq_wait(udpif->wait_seq, seq);
    }
-    ovs_mutex_unlock(&udpif->fmb_mutex);
-
-    seq_wait(udpif->wait_seq, udpif->last_seq);
 }
 
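The rewritten udpif_wait() above also follows the usual OVS seq protocol,
which is why udpif_run() and 'last_seq' could be deleted: snapshot the
sequence number first, re-check state, and only then seq_wait().  In
outline (work_pending() is a hypothetical stand-in for the three
emptiness checks):

    uint64_t seq = seq_read(wait_seq);   /* 1. Snapshot. */
    if (work_pending()) {                /* 2. Re-check state. */
        poll_immediate_wake();
    } else {
        seq_wait(wait_seq, seq);         /* 3. Sleep only if 'seq' is current. */
    }

If a producer calls seq_change() between steps 1 and 3, the snapshot is
stale and seq_wait() schedules an immediate wakeup, so no notification is
lost.
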
 /* Notifies 'udpif' that something changed which may render previous
@@ -265,20 +236,21 @@ udpif_revalidate(struct udpif *udpif)
 {
     struct flow_miss_batch *fmb, *next_fmb;
     unsigned int junk;
+    struct list fmbs;
 
     /* Since we remove each miss on revalidation, their statistics won't be
      * accounted to the appropriate 'facet's in the upper layer. In most
      * cases, this is alright because we've already pushed the stats to the
      * relevant rules. However, NetFlow requires absolute packet counts on
      * 'facet's which could now be incorrect. */
-    ovs_mutex_lock(&udpif->fmb_mutex);
     atomic_add(&udpif->reval_seq, 1, &junk);
-    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
+
+    guarded_list_pop_all(&udpif->fmbs, &fmbs);
+    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &fmbs) {
         list_remove(&fmb->list_node);
         flow_miss_batch_destroy(fmb);
-        udpif->n_fmbs--;
     }
-    ovs_mutex_unlock(&udpif->fmb_mutex);
+
     udpif_drop_key_clear(udpif);
 }
@@ -288,16 +260,8 @@ struct upcall *
 upcall_next(struct udpif *udpif)
 {
-    struct upcall *next = NULL;
-
-    ovs_mutex_lock(&udpif->upcall_mutex);
-    if (udpif->n_upcalls) {
-        udpif->n_upcalls--;
-        next = CONTAINER_OF(list_pop_front(&udpif->upcalls), struct upcall,
-                            list_node);
-    }
-    ovs_mutex_unlock(&udpif->upcall_mutex);
-    return next;
+    struct list *next = guarded_list_pop_front(&udpif->upcalls);
+    return next ? CONTAINER_OF(next, struct upcall, list_node) : NULL;
 }
 
 /* Destroys and deallocates 'upcall'. */
@@ -316,16 +280,28 @@ upcall_destroy(struct upcall *upcall)
 struct flow_miss_batch *
 flow_miss_batch_next(struct udpif *udpif)
 {
-    struct flow_miss_batch *next = NULL;
+    int i;
+
+    for (i = 0; i < 50; i++) {
+        struct flow_miss_batch *next;
+        unsigned int reval_seq;
+        struct list *next_node;
+
+        next_node = guarded_list_pop_front(&udpif->fmbs);
+        if (!next_node) {
+            break;
+        }
 
-    ovs_mutex_lock(&udpif->fmb_mutex);
-    if (udpif->n_fmbs) {
-        udpif->n_fmbs--;
-        next = CONTAINER_OF(list_pop_front(&udpif->fmbs),
-                            struct flow_miss_batch, list_node);
+        next = CONTAINER_OF(next_node, struct flow_miss_batch, list_node);
+        atomic_read(&udpif->reval_seq, &reval_seq);
+        if (next->reval_seq == reval_seq) {
+            return next;
+        }
+
+        flow_miss_batch_destroy(next);
     }
-    ovs_mutex_unlock(&udpif->fmb_mutex);
-    return next;
+
+    return NULL;
 }
 
 /* Destroys and deallocates 'fmb'. */
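Here is the revalidation handshake that replaces the old end-of-function
reval_seq check: each batch is stamped with the 'reval_seq' current when
it was built, udpif_revalidate() atomically bumps the counter, and
flow_miss_batch_next() above discards any queued batch whose stamp no
longer matches.  The cap of 50 discards per call bounds the work done in
one pass.  The check itself, condensed from the hunk above:

    atomic_read(&udpif->reval_seq, &reval_seq);
    if (next->reval_seq == reval_seq) {
        return next;                   /* Batch is still valid. */
    }
    flow_miss_batch_destroy(next);     /* Stale: built before the bump. */

This is the "just drop those in flow_miss_batch_next()" corner case that
the commit message mentions.
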
@@ -347,52 +323,13 @@ flow_miss_batch_destroy(struct flow_miss_batch *fmb)
     free(fmb);
 }
 
-/* Discards any flow miss batches queued up in 'udpif' for 'ofproto' (because
- * 'ofproto' is being destroyed).
- *
- * 'ofproto''s xports must already have been removed, otherwise new flow miss
- * batches could still end up getting queued. */
-void
-flow_miss_batch_ofproto_destroyed(struct udpif *udpif,
-                                  const struct ofproto_dpif *ofproto)
-{
-    struct flow_miss_batch *fmb, *next_fmb;
-
-    ovs_mutex_lock(&udpif->fmb_mutex);
-    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &udpif->fmbs) {
-        struct flow_miss *miss, *next_miss;
-
-        HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &fmb->misses) {
-            if (miss->ofproto == ofproto) {
-                hmap_remove(&fmb->misses, &miss->hmap_node);
-                miss_destroy(miss);
-            }
-        }
-
-        if (hmap_is_empty(&fmb->misses)) {
-            list_remove(&fmb->list_node);
-            flow_miss_batch_destroy(fmb);
-            udpif->n_fmbs--;
-        }
-    }
-    ovs_mutex_unlock(&udpif->fmb_mutex);
-}
-
 /* Retreives the next drop key which ofproto-dpif needs to process. The caller
  * is responsible for destroying it with drop_key_destroy(). */
 struct drop_key *
 drop_key_next(struct udpif *udpif)
 {
-    struct drop_key *next = NULL;
-
-    ovs_mutex_lock(&udpif->drop_key_mutex);
-    if (udpif->n_drop_keys) {
-        udpif->n_drop_keys--;
-        next = CONTAINER_OF(list_pop_front(&udpif->drop_keys), struct drop_key,
-                            list_node);
-    }
-    ovs_mutex_unlock(&udpif->drop_key_mutex);
-    return next;
+    struct list *next = guarded_list_pop_front(&udpif->drop_keys);
+    return next ? CONTAINER_OF(next, struct drop_key, list_node) : NULL;
 }
 
 /* Destorys and deallocates 'drop_key'. */
@@ -410,14 +347,13 @@ void
 udpif_drop_key_clear(struct udpif *udpif)
 {
     struct drop_key *drop_key, *next;
+    struct list list;
 
-    ovs_mutex_lock(&udpif->drop_key_mutex);
-    LIST_FOR_EACH_SAFE (drop_key, next, list_node, &udpif->drop_keys) {
+    guarded_list_pop_all(&udpif->drop_keys, &list);
+    LIST_FOR_EACH_SAFE (drop_key, next, list_node, &list) {
         list_remove(&drop_key->list_node);
         drop_key_destroy(drop_key);
-        udpif->n_drop_keys--;
     }
-    ovs_mutex_unlock(&udpif->drop_key_mutex);
 }
 
 /* The dispatcher thread is responsible for receving upcalls from the kernel,
@@ -618,17 +554,16 @@ recv_upcalls(struct udpif *udpif)
                 upcall_destroy(upcall);
             }
         } else {
-            ovs_mutex_lock(&udpif->upcall_mutex);
-            if (udpif->n_upcalls < MAX_QUEUE_LENGTH) {
-                n_udpif_new_upcalls = ++udpif->n_upcalls;
-                list_push_back(&udpif->upcalls, &upcall->list_node);
-                ovs_mutex_unlock(&udpif->upcall_mutex);
+            size_t len;
+            len = guarded_list_push_back(&udpif->upcalls, &upcall->list_node,
+                                         MAX_QUEUE_LENGTH);
+            if (len > 0) {
+                n_udpif_new_upcalls = len;
                 if (n_udpif_new_upcalls >= FLOW_MISS_MAX_BATCH) {
                     seq_change(udpif->wait_seq);
                 }
             } else {
-                ovs_mutex_unlock(&udpif->upcall_mutex);
                 COVERAGE_INC(upcall_queue_overflow);
                 upcall_destroy(upcall);
             }
@@ -762,13 +697,11 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
 {
     struct dpif_op *opsp[FLOW_MISS_MAX_BATCH];
     struct dpif_op ops[FLOW_MISS_MAX_BATCH];
-    unsigned int old_reval_seq, new_reval_seq;
     struct upcall *upcall, *next;
     struct flow_miss_batch *fmb;
     size_t n_upcalls, n_ops, i;
     struct flow_miss *miss;
-
-    atomic_read(&udpif->reval_seq, &old_reval_seq);
+    unsigned int reval_seq;
 
     /* Construct the to-do list.
      *
@@ -776,6 +709,7 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
      * the packets that have the same flow in the same "flow_miss" structure so
      * that we can process them together. */
     fmb = xmalloc(sizeof *fmb);
+    atomic_read(&udpif->reval_seq, &fmb->reval_seq);
     hmap_init(&fmb->misses);
     n_upcalls = 0;
     LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
@@ -808,14 +742,10 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
             drop_key->key = xmemdup(dupcall->key, dupcall->key_len);
             drop_key->key_len = dupcall->key_len;
 
-            ovs_mutex_lock(&udpif->drop_key_mutex);
-            if (udpif->n_drop_keys < MAX_QUEUE_LENGTH) {
-                udpif->n_drop_keys++;
-                list_push_back(&udpif->drop_keys, &drop_key->list_node);
-                ovs_mutex_unlock(&udpif->drop_key_mutex);
+            if (guarded_list_push_back(&udpif->drop_keys, &drop_key->list_node,
+                                       MAX_QUEUE_LENGTH)) {
                 seq_change(udpif->wait_seq);
             } else {
-                ovs_mutex_unlock(&udpif->drop_key_mutex);
                 COVERAGE_INC(drop_queue_overflow);
                 drop_key_destroy(drop_key);
             }
@@ -868,21 +798,15 @@ handle_miss_upcalls(struct udpif *udpif, struct list *upcalls)
     }
     dpif_operate(udpif->dpif, opsp, n_ops);
 
-    ovs_mutex_lock(&udpif->fmb_mutex);
-    atomic_read(&udpif->reval_seq, &new_reval_seq);
-    if (old_reval_seq != new_reval_seq) {
-        /* udpif_revalidate() was called as we were calculating the actions.
-         * To be safe, we need to assume all the misses need revalidation. */
-        ovs_mutex_unlock(&udpif->fmb_mutex);
+    atomic_read(&udpif->reval_seq, &reval_seq);
+    if (reval_seq != fmb->reval_seq) {
+        COVERAGE_INC(fmb_queue_revalidated);
         flow_miss_batch_destroy(fmb);
-    } else if (udpif->n_fmbs < MAX_QUEUE_LENGTH) {
-        udpif->n_fmbs++;
-        list_push_back(&udpif->fmbs, &fmb->list_node);
-        ovs_mutex_unlock(&udpif->fmb_mutex);
-        seq_change(udpif->wait_seq);
-    } else {
+    } else if (!guarded_list_push_back(&udpif->fmbs, &fmb->list_node,
+                                       MAX_QUEUE_LENGTH)) {
         COVERAGE_INC(fmb_queue_overflow);
-        ovs_mutex_unlock(&udpif->fmb_mutex);
         flow_miss_batch_destroy(fmb);
+    } else {
+        seq_change(udpif->wait_seq);
     }
 }
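Note that throughout these call sites the bounded push doubles as
backpressure: guarded_list_push_back() returns the new queue length on
success and 0 on overflow, and on 0 the element still belongs to the
caller, which bumps a coverage counter and frees it.  Condensed from the
fmb case above:

    if (guarded_list_push_back(&udpif->fmbs, &fmb->list_node,
                               MAX_QUEUE_LENGTH)) {
        seq_change(udpif->wait_seq);      /* Wake the consumer. */
    } else {
        COVERAGE_INC(fmb_queue_overflow);
        flow_miss_batch_destroy(fmb);     /* Overflow: caller still owns it. */
    }
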
diff --git a/ofproto/ofproto-dpif-upcall.h b/ofproto/ofproto-dpif-upcall.h
index 8e8264e2e..57d462ded 100644
--- a/ofproto/ofproto-dpif-upcall.h
+++ b/ofproto/ofproto-dpif-upcall.h
@@ -36,7 +36,6 @@ struct udpif *udpif_create(struct dpif_backer *, struct dpif *);
 void udpif_recv_set(struct udpif *, size_t n_workers, bool enable);
 void udpif_destroy(struct udpif *);
-void udpif_run(struct udpif *);
 void udpif_wait(struct udpif *);
 
 void udpif_revalidate(struct udpif *);
@@ -105,13 +104,12 @@ struct flow_miss_batch {
     struct flow_miss miss_buf[FLOW_MISS_MAX_BATCH];
 
     struct hmap misses;
+
+    unsigned int reval_seq;
 };
 
 struct flow_miss_batch *flow_miss_batch_next(struct udpif *);
 void flow_miss_batch_destroy(struct flow_miss_batch *);
-
-void flow_miss_batch_ofproto_destroyed(struct udpif *,
-                                       const struct ofproto_dpif *);
 
 /* Drop keys are odp flow keys which have drop flows installed in the kernel.
  * These are datapath flows which have no associated ofproto, if they did we
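As elsewhere in OVS, these queues are intrusive: each element type embeds
a 'struct list list_node', guarded_list hands back bare 'struct list *'
pointers, and CONTAINER_OF() recovers the enclosing structure with no
per-element allocation.  A sketch of the conversion, mirroring
upcall_next() in the previous file:

    struct list *node = guarded_list_pop_front(&udpif->upcalls);
    struct upcall *upcall
        = node ? CONTAINER_OF(node, struct upcall, list_node) : NULL;
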
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index bf4fe597b..96cbd7bfd 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -31,6 +31,7 @@
 #include "dpif.h"
 #include "dynamic-string.h"
 #include "fail-open.h"
+#include "guarded-list.h"
 #include "hmapx.h"
 #include "lacp.h"
 #include "learn.h"
@@ -508,13 +509,8 @@ struct ofproto_dpif {
     uint64_t n_missed;
 
     /* Work queues. */
-    struct ovs_mutex flow_mod_mutex;
-    struct list flow_mods OVS_GUARDED;
-    size_t n_flow_mods OVS_GUARDED;
-
-    struct ovs_mutex pin_mutex;
-    struct list pins OVS_GUARDED;
-    size_t n_pins OVS_GUARDED;
+    struct guarded_list flow_mods; /* Contains "struct flow_mod"s. */
+    struct guarded_list pins;      /* Contains "struct ofputil_packet_in"s. */
 };
 
 /* By default, flows in the datapath are wildcarded (megaflows). They
@@ -561,18 +557,11 @@ void
 ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
                       struct ofputil_flow_mod *fm)
 {
-    ovs_mutex_lock(&ofproto->flow_mod_mutex);
-    if (ofproto->n_flow_mods > 1024) {
-        ovs_mutex_unlock(&ofproto->flow_mod_mutex);
+    if (!guarded_list_push_back(&ofproto->flow_mods, &fm->list_node, 1024)) {
         COVERAGE_INC(flow_mod_overflow);
         free(fm->ofpacts);
         free(fm);
-        return;
     }
-
-    list_push_back(&ofproto->flow_mods, &fm->list_node);
-    ofproto->n_flow_mods++;
-    ovs_mutex_unlock(&ofproto->flow_mod_mutex);
 }
 
 /* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
@@ -581,18 +570,11 @@ void
 ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
                             struct ofputil_packet_in *pin)
 {
-    ovs_mutex_lock(&ofproto->pin_mutex);
-    if (ofproto->n_pins > 1024) {
-        ovs_mutex_unlock(&ofproto->pin_mutex);
+    if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
         COVERAGE_INC(packet_in_overflow);
         free(CONST_CAST(void *, pin->packet));
         free(pin);
-        return;
     }
-
-    list_push_back(&ofproto->pins, &pin->list_node);
-    ofproto->n_pins++;
-    ovs_mutex_unlock(&ofproto->pin_mutex);
 }
 
 /* Factory functions. */
@@ -1025,7 +1007,6 @@ process_dpif_port_error(struct dpif_backer *backer, int error)
 static int
 dpif_backer_run_fast(struct dpif_backer *backer)
 {
-    udpif_run(backer->udpif);
     handle_upcalls(backer);
 
     return 0;
@@ -1287,17 +1268,8 @@ construct(struct ofproto *ofproto_)
     classifier_init(&ofproto->facets);
     ofproto->consistency_rl = LLONG_MIN;
 
-    ovs_mutex_init(&ofproto->flow_mod_mutex);
-    ovs_mutex_lock(&ofproto->flow_mod_mutex);
-    list_init(&ofproto->flow_mods);
-    ofproto->n_flow_mods = 0;
-    ovs_mutex_unlock(&ofproto->flow_mod_mutex);
-
-    ovs_mutex_init(&ofproto->pin_mutex);
-    ovs_mutex_lock(&ofproto->pin_mutex);
-    list_init(&ofproto->pins);
-    ofproto->n_pins = 0;
-    ovs_mutex_unlock(&ofproto->pin_mutex);
+    guarded_list_init(&ofproto->flow_mods);
+    guarded_list_init(&ofproto->pins);
 
     ofproto_dpif_unixctl_init();
 
@@ -1423,6 +1395,7 @@ destruct(struct ofproto *ofproto_)
     struct ofputil_packet_in *pin, *next_pin;
     struct ofputil_flow_mod *fm, *next_fm;
     struct facet *facet, *next_facet;
+    struct list flow_mods, pins;
     struct cls_cursor cursor;
     struct oftable *table;
@@ -1438,7 +1411,9 @@ destruct(struct ofproto *ofproto_)
     xlate_remove_ofproto(ofproto);
     ovs_rwlock_unlock(&xlate_rwlock);
 
-    flow_miss_batch_ofproto_destroyed(ofproto->backer->udpif, ofproto);
+    /* Discard any flow_miss_batches queued up for 'ofproto', avoiding a
+     * use-after-free error. */
+    udpif_revalidate(ofproto->backer->udpif);
 
     hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
@@ -1453,25 +1428,21 @@ destruct(struct ofproto *ofproto_)
         ovs_rwlock_unlock(&table->cls.rwlock);
     }
 
-    ovs_mutex_lock(&ofproto->flow_mod_mutex);
-    LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &ofproto->flow_mods) {
+    guarded_list_pop_all(&ofproto->flow_mods, &flow_mods);
+    LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &flow_mods) {
         list_remove(&fm->list_node);
-        ofproto->n_flow_mods--;
         free(fm->ofpacts);
         free(fm);
     }
-    ovs_mutex_unlock(&ofproto->flow_mod_mutex);
-    ovs_mutex_destroy(&ofproto->flow_mod_mutex);
+    guarded_list_destroy(&ofproto->flow_mods);
 
-    ovs_mutex_lock(&ofproto->pin_mutex);
-    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) {
+    guarded_list_pop_all(&ofproto->pins, &pins);
+    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
         list_remove(&pin->list_node);
-        ofproto->n_pins--;
         free(CONST_CAST(void *, pin->packet));
         free(pin);
     }
-    ovs_mutex_unlock(&ofproto->pin_mutex);
-    ovs_mutex_destroy(&ofproto->pin_mutex);
+    guarded_list_destroy(&ofproto->pins);
 
     mbridge_unref(ofproto->mbridge);
@@ -1509,12 +1480,7 @@ run_fast(struct ofproto *ofproto_)
         return 0;
     }
 
-    ovs_mutex_lock(&ofproto->flow_mod_mutex);
-    list_move(&flow_mods, &ofproto->flow_mods);
-    list_init(&ofproto->flow_mods);
-    ofproto->n_flow_mods = 0;
-    ovs_mutex_unlock(&ofproto->flow_mod_mutex);
-
+    guarded_list_pop_all(&ofproto->flow_mods, &flow_mods);
     LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &flow_mods) {
         int error = ofproto_flow_mod(&ofproto->up, fm);
         if (error && !VLOG_DROP_WARN(&rl)) {
@@ -1527,12 +1493,7 @@ run_fast(struct ofproto *ofproto_)
         free(fm);
     }
 
-    ovs_mutex_lock(&ofproto->pin_mutex);
-    list_move(&pins, &ofproto->pins);
-    list_init(&ofproto->pins);
-    ofproto->n_pins = 0;
-    ovs_mutex_unlock(&ofproto->pin_mutex);
-
+    guarded_list_pop_all(&ofproto->pins, &pins);
    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
         connmgr_send_packet_in(ofproto->up.connmgr, pin);
         list_remove(&pin->list_node);
-- 
2.20.1