1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
16 #include "ofproto-dpif-upcall.h"
26 #include "dynamic-string.h"
27 #include "fail-open.h"
28 #include "guarded-list.h"
33 #include "ofproto-dpif-ipfix.h"
34 #include "ofproto-dpif-sflow.h"
35 #include "ofproto-dpif-xlate.h"
38 #include "poll-loop.h"
41 #include "openvswitch/vlog.h"
43 #define MAX_QUEUE_LENGTH 512
44 #define UPCALL_MAX_BATCH 64
45 #define REVALIDATE_MAX_BATCH 50
47 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
49 COVERAGE_DEFINE(dumped_duplicate_flow);
50 COVERAGE_DEFINE(dumped_new_flow);
51 COVERAGE_DEFINE(handler_duplicate_upcall);
52 COVERAGE_DEFINE(upcall_ukey_contention);
53 COVERAGE_DEFINE(revalidate_missed_dp_flow);
55 /* A thread that reads upcalls from dpif, forwards each upcall's packet,
56 * and possibly sets up a kernel flow as a cache. */
58 struct udpif *udpif; /* Parent udpif. */
59 pthread_t thread; /* Thread ID. */
60 uint32_t handler_id; /* Handler id. */
63 /* In the absence of a multiple-writer multiple-reader data structure for
64 * storing ukeys, we use a large number of cmaps, each with its own lock for writing. */
66 #define N_UMAPS 512 /* per udpif. */
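/* Illustrative sketch, not part of the build: a ukey's umap is chosen by
 * taking its UFID-derived hash modulo N_UMAPS, as ukey_lookup() and
 * ukey_install_start() do further down in this file:
 *
 *     struct umap *umap = &udpif->ukeys[get_ufid_hash(ufid) % N_UMAPS];
 *     ovs_mutex_lock(&umap->mutex);
 *     ...insert into or remove from umap->cmap...
 *     ovs_mutex_unlock(&umap->mutex);
 *
 * Readers traverse umap->cmap without the mutex; only writers take it. */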
68 struct ovs_mutex mutex; /* Take for writing to the following. */
69 struct cmap cmap; /* Datapath flow keys. */
72 /* A thread that processes datapath flows, updates OpenFlow statistics, and
73 * updates or removes the flows if necessary. */
75 struct udpif *udpif; /* Parent udpif. */
76 pthread_t thread; /* Thread ID. */
77 unsigned int id; /* ovsthread_id_self(). */
80 /* An upcall handler for ofproto_dpif.
82 * udpif keeps records of two kinds of logically separate units:
87 * - An array of 'struct handler's for upcall handling and flow installation.
93 * - Revalidation threads which read the datapath flow table and maintain them.
97 struct ovs_list list_node; /* In all_udpifs list. */
99 struct dpif *dpif; /* Datapath handle. */
100 struct dpif_backer *backer; /* Opaque dpif_backer pointer. */
102 struct handler *handlers; /* Upcall handlers. */
105 struct revalidator *revalidators; /* Flow revalidators. */
106 size_t n_revalidators;
108 struct latch exit_latch; /* Tells child threads to exit. */
111 struct seq *reval_seq; /* Incremented to force revalidation. */
112 bool reval_exit; /* Set by leader on 'exit_latch'. */
113 struct ovs_barrier reval_barrier; /* Barrier used by revalidators. */
114 struct dpif_flow_dump *dump; /* DPIF flow dump state. */
115 long long int dump_duration; /* Duration of the last flow dump. */
116 struct seq *dump_seq; /* Increments each dump iteration. */
117 atomic_bool enable_ufid; /* If true, skip dumping flow attrs. */
119 /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
121 * During the flow dump phase, revalidators insert into these with a random
122 * distribution. During the garbage collection phase, each revalidator
123 * takes care of garbage collecting a slice of these maps. */
126 /* Datapath flow statistics. */
127 unsigned int max_n_flows;
128 unsigned int avg_n_flows;
130 /* Following fields are accessed and modified by different threads. */
131 atomic_uint flow_limit; /* Datapath flow hard limit. */
133 /* n_flows_mutex prevents multiple threads updating these concurrently. */
134 atomic_uint n_flows; /* Number of flows in the datapath. */
135 atomic_llong n_flows_timestamp; /* Last time n_flows was updated. */
136 struct ovs_mutex n_flows_mutex;
138 /* Following fields are accessed and modified only from the main thread. */
139 struct unixctl_conn **conns; /* Connections waiting on dump_seq. */
140 uint64_t conn_seq; /* Corresponds to 'dump_seq' when
141 conns[n_conns-1] was stored. */
142 size_t n_conns; /* Number of connections waiting. */
146 BAD_UPCALL, /* Some kind of bug somewhere. */
147 MISS_UPCALL, /* A flow miss. */
148 SFLOW_UPCALL, /* sFlow sample. */
149 FLOW_SAMPLE_UPCALL, /* Per-flow sampling. */
150 IPFIX_UPCALL /* Per-bridge sampling. */
154 struct ofproto_dpif *ofproto; /* Parent ofproto. */
155 const struct recirc_id_node *recirc; /* Recirculation context. */
156 bool have_recirc_ref; /* Reference held on recirc ctx? */
158 /* The flow and packet are only required to be constant when using
159 * dpif-netdev. If a modification is absolutely necessary, a const cast
160 * may be used with other datapaths. */
161 const struct flow *flow; /* Parsed representation of the packet. */
162 const ovs_u128 *ufid; /* Unique identifier for 'flow'. */
163 unsigned pmd_id; /* Datapath poll mode driver id. */
164 const struct dp_packet *packet; /* Packet associated with this upcall. */
165 ofp_port_t in_port; /* OpenFlow in port, or OFPP_NONE. */
167 enum dpif_upcall_type type; /* Datapath type of the upcall. */
168 const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */
170 bool xout_initialized; /* True if 'xout' must be passed to xlate_out_uninit(). */
171 struct xlate_out xout; /* Result of xlate_actions(). */
172 struct ofpbuf put_actions; /* Actions 'put' in the fastpath. */
174 struct dpif_ipfix *ipfix; /* IPFIX pointer or NULL. */
175 struct dpif_sflow *sflow; /* SFlow pointer or NULL. */
177 bool vsp_adjusted; /* 'packet' and 'flow' were adjusted for
178 VLAN splinters if true. */
180 struct udpif_key *ukey; /* Revalidator flow cache. */
181 bool ukey_persists; /* Set true to keep 'ukey' beyond the
182 lifetime of this upcall. */
184 uint64_t dump_seq; /* udpif->dump_seq at translation time. */
185 uint64_t reval_seq; /* udpif->reval_seq at translation time. */
187 /* Not used by the upcall callback interface. */
188 const struct nlattr *key; /* Datapath flow key. */
189 size_t key_len; /* Datapath flow key length. */
190 const struct nlattr *out_tun_key; /* Datapath output tunnel key. */
193 /* 'udpif_key's are responsible for tracking the little bit of state udpif
194 * needs to do flow expiration which can't be pulled directly from the
195 * datapath. They may be created by any handler or revalidator thread at any
196 * time, and read by any revalidator during the dump phase. They are however
197 * each owned by a single revalidator which takes care of destroying them
198 * during the garbage-collection phase.
200 * The mutex within the ukey protects some members of the ukey. The ukey
201 * itself is protected by RCU and is held within a umap in the parent udpif.
202 * Adding or removing a ukey from a umap is only safe when holding the
203 * corresponding umap lock. */
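/* Illustrative sketch, not part of the build, of the locking rules above.
 * Insertion happens under the umap mutex with the new ukey locked, and
 * removal defers the actual free until after an RCU grace period:
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     ovs_mutex_lock(&ukey->mutex);
 *     cmap_insert(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovs_mutex_unlock(&umap->mutex);
 *     ...install the datapath flow, then release ukey->mutex...
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovsrcu_postpone(ukey_delete__, ukey);
 *     ovs_mutex_unlock(&umap->mutex);
 *
 * This mirrors ukey_install_start() and ukey_delete() below. */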
205 struct cmap_node cmap_node; /* In parent revalidator 'ukeys' map. */
207 /* These elements are read-only once created, and therefore aren't
208 * protected by a mutex. */
209 const struct nlattr *key; /* Datapath flow key. */
210 size_t key_len; /* Length of 'key'. */
211 const struct nlattr *mask; /* Datapath flow mask. */
212 size_t mask_len; /* Length of 'mask'. */
213 struct ofpbuf *actions; /* Datapath flow actions as nlattrs. */
214 ovs_u128 ufid; /* Unique flow identifier. */
215 bool ufid_present; /* True if 'ufid' is in datapath. */
216 uint32_t hash; /* Pre-computed hash for 'key'. */
217 unsigned pmd_id; /* Datapath poll mode driver id. */
219 struct ovs_mutex mutex; /* Guards the following. */
220 struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
221 long long int created OVS_GUARDED; /* Estimate of creation time. */
222 uint64_t dump_seq OVS_GUARDED; /* Tracks udpif->dump_seq. */
223 uint64_t reval_seq OVS_GUARDED; /* Tracks udpif->reval_seq. */
224 bool flow_exists OVS_GUARDED; /* Ensures flows are only deleted once. */
227 struct xlate_cache *xcache OVS_GUARDED; /* Cache for xlate entries that
228 * are affected by this ukey.
229 * Used for stats and learning. */
231 struct odputil_keybuf buf;
235 /* Recirculation IDs with references held by the ukey. */
237 uint32_t recircs[]; /* 'n_recircs' ids for which references are held. */
240 /* Datapath operation with optional ukey attached. */
242 struct udpif_key *ukey;
243 struct dpif_flow_stats stats; /* Stats for 'op'. */
244 struct dpif_op dop; /* Flow operation. */
247 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
248 static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);
250 static size_t recv_upcalls(struct handler *);
251 static int process_upcall(struct udpif *, struct upcall *,
252 struct ofpbuf *odp_actions);
253 static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
254 static void udpif_stop_threads(struct udpif *);
255 static void udpif_start_threads(struct udpif *, size_t n_handlers,
256 size_t n_revalidators);
257 static void *udpif_upcall_handler(void *);
258 static void *udpif_revalidator(void *);
259 static unsigned long udpif_get_n_flows(struct udpif *);
260 static void revalidate(struct revalidator *);
261 static void revalidator_sweep(struct revalidator *);
262 static void revalidator_purge(struct revalidator *);
263 static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
264 const char *argv[], void *aux);
265 static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
266 const char *argv[], void *aux);
267 static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
268 const char *argv[], void *aux);
269 static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
270 const char *argv[], void *aux);
271 static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
272 const char *argv[], void *aux);
273 static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
274 const char *argv[], void *aux);
275 static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
276 const char *argv[], void *aux);
277 static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
278 const char *argv[], void *aux);
280 static struct udpif_key *ukey_create_from_upcall(struct upcall *);
281 static int ukey_create_from_dpif_flow(const struct udpif *,
282 const struct dpif_flow *,
283 struct udpif_key **);
284 static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
285 static bool ukey_install_finish(struct udpif_key *ukey, int error);
286 static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
287 static struct udpif_key *ukey_lookup(struct udpif *udpif,
288 const ovs_u128 *ufid);
289 static int ukey_acquire(struct udpif *, const struct dpif_flow *,
290 struct udpif_key **result, int *error);
291 static void ukey_delete__(struct udpif_key *);
292 static void ukey_delete(struct umap *, struct udpif_key *);
293 static enum upcall_type classify_upcall(enum dpif_upcall_type type,
294 const struct nlattr *userdata);
296 static int upcall_receive(struct upcall *, const struct dpif_backer *,
297 const struct dp_packet *packet, enum dpif_upcall_type,
298 const struct nlattr *userdata, const struct flow *,
299 const ovs_u128 *ufid, const unsigned pmd_id);
300 static void upcall_uninit(struct upcall *);
302 static upcall_callback upcall_cb;
304 static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
305 static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);
310 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
311 if (ovsthread_once_start(&once)) {
312 unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
314 unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
315 upcall_unixctl_disable_megaflows, NULL);
316 unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
317 upcall_unixctl_enable_megaflows, NULL);
318 unixctl_command_register("upcall/disable-ufid", "", 0, 0,
319 upcall_unixctl_disable_ufid, NULL);
320 unixctl_command_register("upcall/enable-ufid", "", 0, 0,
321 upcall_unixctl_enable_ufid, NULL);
322 unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
323 upcall_unixctl_set_flow_limit, NULL);
324 unixctl_command_register("revalidator/wait", "", 0, 0,
325 upcall_unixctl_dump_wait, NULL);
326 unixctl_command_register("revalidator/purge", "", 0, 0,
327 upcall_unixctl_purge, NULL);
328 ovsthread_once_done(&once);
333 udpif_create(struct dpif_backer *backer, struct dpif *dpif)
335 struct udpif *udpif = xzalloc(sizeof *udpif);
338 udpif->backer = backer;
339 atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
340 udpif->reval_seq = seq_create();
341 udpif->dump_seq = seq_create();
342 latch_init(&udpif->exit_latch);
343 list_push_back(&all_udpifs, &udpif->list_node);
344 atomic_init(&udpif->enable_ufid, false);
345 atomic_init(&udpif->n_flows, 0);
346 atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
347 ovs_mutex_init(&udpif->n_flows_mutex);
348 udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
349 for (int i = 0; i < N_UMAPS; i++) {
350 cmap_init(&udpif->ukeys[i].cmap);
351 ovs_mutex_init(&udpif->ukeys[i].mutex);
354 dpif_register_upcall_cb(dpif, upcall_cb, udpif);
360 udpif_run(struct udpif *udpif)
362 if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
365 for (i = 0; i < udpif->n_conns; i++) {
366 unixctl_command_reply(udpif->conns[i], NULL);
375 udpif_destroy(struct udpif *udpif)
377 udpif_stop_threads(udpif);
379 for (int i = 0; i < N_UMAPS; i++) {
380 cmap_destroy(&udpif->ukeys[i].cmap);
381 ovs_mutex_destroy(&udpif->ukeys[i].mutex);
386 list_remove(&udpif->list_node);
387 latch_destroy(&udpif->exit_latch);
388 seq_destroy(udpif->reval_seq);
389 seq_destroy(udpif->dump_seq);
390 ovs_mutex_destroy(&udpif->n_flows_mutex);
394 /* Stops the handler and revalidator threads. Must be called within an
395 * ovsrcu quiescent state, except when destroying udpif. */
397 udpif_stop_threads(struct udpif *udpif)
399 if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
402 latch_set(&udpif->exit_latch);
404 for (i = 0; i < udpif->n_handlers; i++) {
405 struct handler *handler = &udpif->handlers[i];
407 xpthread_join(handler->thread, NULL);
410 for (i = 0; i < udpif->n_revalidators; i++) {
411 xpthread_join(udpif->revalidators[i].thread, NULL);
414 dpif_disable_upcall(udpif->dpif);
416 for (i = 0; i < udpif->n_revalidators; i++) {
417 struct revalidator *revalidator = &udpif->revalidators[i];
419 /* Delete ukeys, and delete all flows from the datapath to prevent
420 * double-counting stats. */
421 revalidator_purge(revalidator);
424 latch_poll(&udpif->exit_latch);
426 ovs_barrier_destroy(&udpif->reval_barrier);
428 free(udpif->revalidators);
429 udpif->revalidators = NULL;
430 udpif->n_revalidators = 0;
432 free(udpif->handlers);
433 udpif->handlers = NULL;
434 udpif->n_handlers = 0;
438 /* Starts the handler and revalidator threads. Must be called within an
439 * ovsrcu quiescent state. */
441 udpif_start_threads(struct udpif *udpif, size_t n_handlers,
442 size_t n_revalidators)
444 if (udpif && n_handlers && n_revalidators) {
448 udpif->n_handlers = n_handlers;
449 udpif->n_revalidators = n_revalidators;
451 udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
452 for (i = 0; i < udpif->n_handlers; i++) {
453 struct handler *handler = &udpif->handlers[i];
455 handler->udpif = udpif;
456 handler->handler_id = i;
457 handler->thread = ovs_thread_create(
458 "handler", udpif_upcall_handler, handler);
461 enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
462 atomic_init(&udpif->enable_ufid, enable_ufid);
463 dpif_enable_upcall(udpif->dpif);
465 ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
466 udpif->reval_exit = false;
467 udpif->revalidators = xzalloc(udpif->n_revalidators
468 * sizeof *udpif->revalidators);
469 for (i = 0; i < udpif->n_revalidators; i++) {
470 struct revalidator *revalidator = &udpif->revalidators[i];
472 revalidator->udpif = udpif;
473 revalidator->thread = ovs_thread_create(
474 "revalidator", udpif_revalidator, revalidator);
479 /* Tells 'udpif' how many threads it should use to handle upcalls.
480 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
481 * datapath handle must have packet reception enabled before starting threads. */
484 udpif_set_threads(struct udpif *udpif, size_t n_handlers,
485 size_t n_revalidators)
488 ovs_assert(n_handlers && n_revalidators);
490 ovsrcu_quiesce_start();
491 if (udpif->n_handlers != n_handlers
492 || udpif->n_revalidators != n_revalidators) {
493 udpif_stop_threads(udpif);
496 if (!udpif->handlers && !udpif->revalidators) {
499 error = dpif_handlers_set(udpif->dpif, n_handlers);
501 VLOG_ERR("failed to configure handlers in dpif %s: %s",
502 dpif_name(udpif->dpif), ovs_strerror(error));
506 udpif_start_threads(udpif, n_handlers, n_revalidators);
508 ovsrcu_quiesce_end();
511 /* Waits for all ongoing upcall translations to complete. This ensures that
512 * there are no transient references to any removed ofprotos (or other
513 * objects). In particular, this should be called after an ofproto is removed
514 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
516 udpif_synchronize(struct udpif *udpif)
518 /* This is stronger than necessary. It would be sufficient to ensure
519 * (somehow) that each handler and revalidator thread had passed through
520 * its main loop once. */
521 size_t n_handlers = udpif->n_handlers;
522 size_t n_revalidators = udpif->n_revalidators;
524 ovsrcu_quiesce_start();
525 udpif_stop_threads(udpif);
526 udpif_start_threads(udpif, n_handlers, n_revalidators);
527 ovsrcu_quiesce_end();
530 /* Notifies 'udpif' that something changed which may render previous
531 * xlate_actions() results invalid. */
533 udpif_revalidate(struct udpif *udpif)
535 seq_change(udpif->reval_seq);
538 /* Returns a seq which increments every time 'udpif' pulls stats from the
539 * datapath. Callers can use this to get a sense of when might be a good time
540 * to do periodic work which relies on relatively up-to-date statistics. */
542 udpif_dump_seq(struct udpif *udpif)
544 return udpif->dump_seq;
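    /* Illustrative usage sketch, not part of the build: a caller that wants
     * to run periodic work once per completed flow dump can remember the last
     * value it saw and wait for the seq to change:
     *
     *     uint64_t last = seq_read(udpif_dump_seq(udpif));
     *     ...
     *     if (seq_read(udpif_dump_seq(udpif)) != last) {
     *         last = seq_read(udpif_dump_seq(udpif));
     *         ...do work that relies on fresh datapath statistics...
     *     }
     *     seq_wait(udpif_dump_seq(udpif), last);
     *
     * udpif_run() and upcall_unixctl_dump_wait() use 'dump_seq' this way. */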
548 udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
552 simap_increase(usage, "handlers", udpif->n_handlers);
554 simap_increase(usage, "revalidators", udpif->n_revalidators);
555 for (i = 0; i < N_UMAPS; i++) {
556 simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
560 /* Removes all flows from a single datapath. */
562 udpif_flush(struct udpif *udpif)
564 size_t n_handlers, n_revalidators;
566 n_handlers = udpif->n_handlers;
567 n_revalidators = udpif->n_revalidators;
569 ovsrcu_quiesce_start();
571 udpif_stop_threads(udpif);
572 dpif_flow_flush(udpif->dpif);
573 udpif_start_threads(udpif, n_handlers, n_revalidators);
575 ovsrcu_quiesce_end();
578 /* Removes all flows from all datapaths. */
580 udpif_flush_all_datapaths(void)
584 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
590 udpif_use_ufid(struct udpif *udpif)
594 atomic_read_relaxed(&enable_ufid, &enable);
595 return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
600 udpif_get_n_flows(struct udpif *udpif)
602 long long int time, now;
603 unsigned long flow_count;
606 atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
607 if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
608 struct dpif_dp_stats stats;
610 atomic_store_relaxed(&udpif->n_flows_timestamp, now);
611 dpif_get_dp_stats(udpif->dpif, &stats);
612 flow_count = stats.n_flows;
613 atomic_store_relaxed(&udpif->n_flows, flow_count);
614 ovs_mutex_unlock(&udpif->n_flows_mutex);
616 atomic_read_relaxed(&udpif->n_flows, &flow_count);
621 /* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
622 * upcalls from dpif, processes the batch and installs corresponding flows in dpif. */
625 udpif_upcall_handler(void *arg)
627 struct handler *handler = arg;
628 struct udpif *udpif = handler->udpif;
630 while (!latch_is_set(&handler->udpif->exit_latch)) {
631 if (recv_upcalls(handler)) {
632 poll_immediate_wake();
634 dpif_recv_wait(udpif->dpif, handler->handler_id);
635 latch_wait(&udpif->exit_latch);
644 recv_upcalls(struct handler *handler)
646 struct udpif *udpif = handler->udpif;
647 uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
648 struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
649 struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
650 struct upcall upcalls[UPCALL_MAX_BATCH];
651 struct flow flows[UPCALL_MAX_BATCH];
655 while (n_upcalls < UPCALL_MAX_BATCH) {
656 struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
657 struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
658 struct upcall *upcall = &upcalls[n_upcalls];
659 struct flow *flow = &flows[n_upcalls];
662 ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
663 sizeof recv_stubs[n_upcalls]);
664 if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
665 ofpbuf_uninit(recv_buf);
669 if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
674 error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
675 dupcall->type, dupcall->userdata, flow,
676 &dupcall->ufid, PMD_ID_NULL);
678 if (error == ENODEV) {
679 /* Received packet on datapath port for which we couldn't
680 * associate an ofproto. This can happen if a port is removed
681 * while traffic is being received. Print a rate-limited
682 * message in case it happens frequently. */
683 dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
684 dupcall->key_len, NULL, 0, NULL, 0,
685 &dupcall->ufid, PMD_ID_NULL, NULL);
686 VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
687 "port %"PRIu32, flow->in_port.odp_port);
692 upcall->key = dupcall->key;
693 upcall->key_len = dupcall->key_len;
694 upcall->ufid = &dupcall->ufid;
696 upcall->out_tun_key = dupcall->out_tun_key;
698 if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
699 upcall->vsp_adjusted = true;
702 pkt_metadata_from_flow(&dupcall->packet.md, flow);
703 flow_extract(&dupcall->packet, flow);
705 error = process_upcall(udpif, upcall, NULL);
714 upcall_uninit(upcall);
716 dp_packet_uninit(&dupcall->packet);
717 ofpbuf_uninit(recv_buf);
721 handle_upcalls(handler->udpif, upcalls, n_upcalls);
722 for (i = 0; i < n_upcalls; i++) {
723 dp_packet_uninit(&dupcalls[i].packet);
724 ofpbuf_uninit(&recv_bufs[i]);
725 upcall_uninit(&upcalls[i]);
733 udpif_revalidator(void *arg)
735 /* Used by all revalidators. */
736 struct revalidator *revalidator = arg;
737 struct udpif *udpif = revalidator->udpif;
738 bool leader = revalidator == &udpif->revalidators[0];
740 /* Used only by the leader. */
741 long long int start_time = 0;
742 uint64_t last_reval_seq = 0;
745 revalidator->id = ovsthread_id_self();
750 recirc_run(); /* Recirculation cleanup. */
752 reval_seq = seq_read(udpif->reval_seq);
753 last_reval_seq = reval_seq;
755 n_flows = udpif_get_n_flows(udpif);
756 udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
757 udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
759 /* Only the leader checks the exit latch to prevent a race where
760 * some threads think it's true and exit and others think it's
761 * false and block indefinitely on the reval_barrier. */
762 udpif->reval_exit = latch_is_set(&udpif->exit_latch);
764 start_time = time_msec();
765 if (!udpif->reval_exit) {
768 terse_dump = udpif_use_ufid(udpif);
769 udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
773 /* Wait for the leader to start the flow dump. */
774 ovs_barrier_block(&udpif->reval_barrier);
775 if (udpif->reval_exit) {
778 revalidate(revalidator);
780 /* Wait for all flows to have been dumped before we garbage collect. */
781 ovs_barrier_block(&udpif->reval_barrier);
782 revalidator_sweep(revalidator);
784 /* Wait for all revalidators to finish garbage collection. */
785 ovs_barrier_block(&udpif->reval_barrier);
788 unsigned int flow_limit;
789 long long int duration;
791 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
793 dpif_flow_dump_destroy(udpif->dump);
794 seq_change(udpif->dump_seq);
796 duration = MAX(time_msec() - start_time, 1);
797 udpif->dump_duration = duration;
798 if (duration > 2000) {
799 flow_limit /= duration / 1000;
800 } else if (duration > 1300) {
801 flow_limit = flow_limit * 3 / 4;
802 } else if (duration < 1000 && n_flows > 2000
803 && flow_limit < n_flows * 1000 / duration) {
806 flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
807 atomic_store_relaxed(&udpif->flow_limit, flow_limit);
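        /* Worked example of the adjustment above (illustrative): a 3000 ms
         * dump divides flow_limit by 3, a 1500 ms dump reduces it to 3/4 of
         * its previous value, and the result is always clamped to the range
         * [1000, ofproto_flow_limit]. */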
809 if (duration > 2000) {
810 VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
814 poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
815 seq_wait(udpif->reval_seq, last_reval_seq);
816 latch_wait(&udpif->exit_latch);
824 static enum upcall_type
825 classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
827 union user_action_cookie cookie;
830 /* First look at the upcall type. */
838 case DPIF_N_UC_TYPES:
840 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
844 /* "action" upcalls need a closer look. */
846 VLOG_WARN_RL(&rl, "action upcall missing cookie");
849 userdata_len = nl_attr_get_size(userdata);
850 if (userdata_len < sizeof cookie.type
851 || userdata_len > sizeof cookie) {
852 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
856 memset(&cookie, 0, sizeof cookie);
857 memcpy(&cookie, nl_attr_get(userdata), userdata_len);
858 if (userdata_len == MAX(8, sizeof cookie.sflow)
859 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
861 } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
862 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
864 } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
865 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
866 return FLOW_SAMPLE_UPCALL;
867 } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
868 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
871 VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
872 " and size %"PRIuSIZE, cookie.type, userdata_len);
877 /* Calculates slow path actions for 'xout'. 'buf' must be statically
878 * initialized with at least 128 bytes of space. */
880 compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
881 const struct flow *flow, odp_port_t odp_in_port,
884 union user_action_cookie cookie;
888 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
889 cookie.slow_path.unused = 0;
890 cookie.slow_path.reason = xout->slow;
892 port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
895 pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
896 odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, ODPP_NONE,
900 /* If there is no error, the upcall must be destroyed with upcall_uninit()
901 * before quiescing, as the referred objects are guaranteed to exist only
902 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
903 * since the 'upcall->put_actions' remains uninitialized. */
905 upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
906 const struct dp_packet *packet, enum dpif_upcall_type type,
907 const struct nlattr *userdata, const struct flow *flow,
908 const ovs_u128 *ufid, const unsigned pmd_id)
912 error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
913 &upcall->sflow, NULL, &upcall->in_port);
918 upcall->recirc = NULL;
919 upcall->have_recirc_ref = false;
921 upcall->packet = packet;
923 upcall->pmd_id = pmd_id;
925 upcall->userdata = userdata;
926 ofpbuf_init(&upcall->put_actions, 0);
928 upcall->xout_initialized = false;
929 upcall->vsp_adjusted = false;
930 upcall->ukey_persists = false;
936 upcall->out_tun_key = NULL;
942 upcall_xlate(struct udpif *udpif, struct upcall *upcall,
943 struct ofpbuf *odp_actions)
945 struct dpif_flow_stats stats;
949 stats.n_bytes = dp_packet_size(upcall->packet);
950 stats.used = time_msec();
951 stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
953 xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
954 stats.tcp_flags, upcall->packet);
955 xin.odp_actions = odp_actions;
957 if (upcall->type == DPIF_UC_MISS) {
958 xin.resubmit_stats = &stats;
961 /* We may install a datapath flow only if we get a reference to the
962 * recirculation context (otherwise we could have recirculation
963 * upcalls using recirculation ID for which no context can be
964 * found). We may still execute the flow's actions even if we
965 * don't install the flow. */
966 upcall->recirc = xin.recirc;
967 upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(xin.recirc);
970 /* For non-miss upcalls, we are either executing actions (one of which
971 * is a userspace action) for an upcall, in which case the stats have
972 * already been taken care of, or there's a flow in the datapath which
973 * this packet was accounted to. Presumably the revalidators will deal
974 * with pushing its stats eventually. */
977 upcall->dump_seq = seq_read(udpif->dump_seq);
978 upcall->reval_seq = seq_read(udpif->reval_seq);
979 xlate_actions(&xin, &upcall->xout);
980 upcall->xout_initialized = true;
982 /* Special case for fail-open mode.
984 * If we are in fail-open mode, but we are connected to a controller too,
985 * then we should send the packet up to the controller in the hope that it
986 * will try to set up a flow and thereby allow us to exit fail-open.
988 * See the top-level comment in fail-open.c for more information.
990 * Copy packets before they are modified by execution. */
991 if (upcall->xout.fail_open) {
992 const struct dp_packet *packet = upcall->packet;
993 struct ofproto_packet_in *pin;
995 pin = xmalloc(sizeof *pin);
996 pin->up.packet = xmemdup(dp_packet_data(packet), dp_packet_size(packet));
997 pin->up.packet_len = dp_packet_size(packet);
998 pin->up.reason = OFPR_NO_MATCH;
999 pin->up.table_id = 0;
1000 pin->up.cookie = OVS_BE64_MAX;
1001 flow_get_metadata(upcall->flow, &pin->up.flow_metadata);
1002 pin->send_len = 0; /* Not used for flow table misses. */
1003 pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
1004 ofproto_dpif_send_packet_in(upcall->ofproto, pin);
1007 if (!upcall->xout.slow) {
1008 ofpbuf_use_const(&upcall->put_actions,
1009 upcall->xout.odp_actions->data,
1010 upcall->xout.odp_actions->size);
1012 ofpbuf_init(&upcall->put_actions, 0);
1013 compose_slow_path(udpif, &upcall->xout, upcall->flow,
1014 upcall->flow->in_port.odp_port,
1015 &upcall->put_actions);
1018 /* This function is also called for slow-pathed flows. As we are only
1019 * going to create new datapath flows for actual datapath misses, there is
1020 * no point in creating a ukey otherwise. */
1021 if (upcall->type == DPIF_UC_MISS) {
1022 upcall->ukey = ukey_create_from_upcall(upcall);
1027 upcall_uninit(struct upcall *upcall)
1030 if (upcall->xout_initialized) {
1031 xlate_out_uninit(&upcall->xout);
1033 ofpbuf_uninit(&upcall->put_actions);
1035 if (!upcall->ukey_persists) {
1036 ukey_delete__(upcall->ukey);
1038 } else if (upcall->have_recirc_ref) {
1039 /* The reference was transferred to the ukey if one was created. */
1040 recirc_id_node_unref(upcall->recirc);
1046 upcall_cb(const struct dp_packet *packet, const struct flow *flow, ovs_u128 *ufid,
1047 unsigned pmd_id, enum dpif_upcall_type type,
1048 const struct nlattr *userdata, struct ofpbuf *actions,
1049 struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
1051 struct udpif *udpif = aux;
1052 unsigned int flow_limit;
1053 struct upcall upcall;
1057 atomic_read_relaxed(&enable_megaflows, &megaflow);
1058 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1060 error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
1061 flow, ufid, pmd_id);
1066 error = process_upcall(udpif, &upcall, actions);
1071 if (upcall.xout.slow && put_actions) {
1072 ofpbuf_put(put_actions, upcall.put_actions.data,
1073 upcall.put_actions.size);
1076 if (OVS_LIKELY(wc)) {
1078 /* XXX: This could be avoided with sufficient API changes. */
1079 *wc = upcall.xout.wc;
1081 flow_wildcards_init_for_packet(wc, flow);
1085 if (udpif_get_n_flows(udpif) >= flow_limit) {
1090 /* Prevent miss flow installation if the key has a recirculation ID but we
1091 * were not able to get a reference on it. */
1092 if (type == DPIF_UC_MISS && upcall.recirc && !upcall.have_recirc_ref) {
1097 if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
1102 upcall.ukey_persists = true;
1104 upcall_uninit(&upcall);
1109 process_upcall(struct udpif *udpif, struct upcall *upcall,
1110 struct ofpbuf *odp_actions)
1112 const struct nlattr *userdata = upcall->userdata;
1113 const struct dp_packet *packet = upcall->packet;
1114 const struct flow *flow = upcall->flow;
1116 switch (classify_upcall(upcall->type, userdata)) {
1118 upcall_xlate(udpif, upcall, odp_actions);
1122 if (upcall->sflow) {
1123 union user_action_cookie cookie;
1125 memset(&cookie, 0, sizeof cookie);
1126 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
1127 dpif_sflow_received(upcall->sflow, packet, flow,
1128 flow->in_port.odp_port, &cookie);
1133 if (upcall->ipfix) {
1134 union user_action_cookie cookie;
1135 struct flow_tnl output_tunnel_key;
1137 memset(&cookie, 0, sizeof cookie);
1138 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);
1140 if (upcall->out_tun_key) {
1141 memset(&output_tunnel_key, 0, sizeof output_tunnel_key);
1142 odp_tun_key_from_attr(upcall->out_tun_key,
1143 &output_tunnel_key);
1145 dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
1146 flow->in_port.odp_port,
1147 cookie.ipfix.output_odp_port,
1148 upcall->out_tun_key ?
1149 &output_tunnel_key : NULL);
1153 case FLOW_SAMPLE_UPCALL:
1154 if (upcall->ipfix) {
1155 union user_action_cookie cookie;
1157 memset(&cookie, 0, sizeof cookie);
1158 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);
1160 /* The flow reflects exactly the contents of the packet.
1161 * Sample the packet using it. */
1162 dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
1163 cookie.flow_sample.collector_set_id,
1164 cookie.flow_sample.probability,
1165 cookie.flow_sample.obs_domain_id,
1166 cookie.flow_sample.obs_point_id);
1178 handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
1181 struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
1182 struct ukey_op ops[UPCALL_MAX_BATCH * 2];
1183 unsigned int flow_limit;
1184 size_t n_ops, n_opsp, i;
1188 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1189 atomic_read_relaxed(&enable_megaflows, &megaflow);
1191 may_put = udpif_get_n_flows(udpif) < flow_limit;
1193 /* Handle the packets individually in order of arrival.
1195 * - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
1196 * processes received packets for these protocols.
1198 * - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow controller.
1201 * The loop fills 'ops' with an array of operations to execute in the datapath. */
1204 for (i = 0; i < n_upcalls; i++) {
1205 struct upcall *upcall = &upcalls[i];
1206 const struct dp_packet *packet = upcall->packet;
1209 if (upcall->vsp_adjusted) {
1210 /* This packet was received on a VLAN splinter port. We added a
1211 * VLAN to the packet to make the packet resemble the flow, but the
1212 * actions were composed assuming that the packet contained no
1213 * VLAN. So, we must remove the VLAN header from the packet before
1214 * trying to execute the actions. */
1215 if (upcall->xout.odp_actions->size) {
1216 eth_pop_vlan(CONST_CAST(struct dp_packet *, upcall->packet));
1219 /* Remove the flow vlan tags inserted by vlan splinter logic
1220 * to ensure megaflow masks generated match the data path flow. */
1221 CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
1224 /* Do not install a flow into the datapath if:
1226 * - The datapath already has too many flows.
1228 * - We received this packet via some flow installed in the kernel already.
1231 * - Upcall was a recirculation but we do not have a reference to
1232 * the recirculation ID. */
1233 if (may_put && upcall->type == DPIF_UC_MISS &&
1234 (!upcall->recirc || upcall->have_recirc_ref)) {
1235 struct udpif_key *ukey = upcall->ukey;
1237 upcall->ukey_persists = true;
1241 op->dop.type = DPIF_OP_FLOW_PUT;
1242 op->dop.u.flow_put.flags = DPIF_FP_CREATE;
1243 op->dop.u.flow_put.key = ukey->key;
1244 op->dop.u.flow_put.key_len = ukey->key_len;
1245 op->dop.u.flow_put.mask = ukey->mask;
1246 op->dop.u.flow_put.mask_len = ukey->mask_len;
1247 op->dop.u.flow_put.ufid = upcall->ufid;
1248 op->dop.u.flow_put.stats = NULL;
1249 op->dop.u.flow_put.actions = ukey->actions->data;
1250 op->dop.u.flow_put.actions_len = ukey->actions->size;
1253 if (upcall->xout.odp_actions->size) {
1256 op->dop.type = DPIF_OP_EXECUTE;
1257 op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet);
1258 odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
1259 &op->dop.u.execute.packet->md);
1260 op->dop.u.execute.actions = upcall->xout.odp_actions->data;
1261 op->dop.u.execute.actions_len = upcall->xout.odp_actions->size;
1262 op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
1263 op->dop.u.execute.probe = false;
1269 * We install ukeys before installing the flows, locking them for exclusive
1270 * access by this thread for the period of installation. This ensures that
1271 * other threads won't attempt to delete the flows as we are creating them.
1274 for (i = 0; i < n_ops; i++) {
1275 struct udpif_key *ukey = ops[i].ukey;
1278 /* If we can't install the ukey, don't install the flow. */
1279 if (!ukey_install_start(udpif, ukey)) {
1280 ukey_delete__(ukey);
1285 opsp[n_opsp++] = &ops[i].dop;
1287 dpif_operate(udpif->dpif, opsp, n_opsp);
1288 for (i = 0; i < n_ops; i++) {
1290 ukey_install_finish(ops[i].ukey, ops[i].dop.error);
1296 get_ufid_hash(const ovs_u128 *ufid)
1298 return ufid->u32[0];
1301 static struct udpif_key *
1302 ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
1304 struct udpif_key *ukey;
1305 int idx = get_ufid_hash(ufid) % N_UMAPS;
1306 struct cmap *cmap = &udpif->ukeys[idx].cmap;
1308 CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
1309 if (ovs_u128_equals(&ukey->ufid, ufid)) {
1316 static struct udpif_key *
1317 ukey_create__(const struct nlattr *key, size_t key_len,
1318 const struct nlattr *mask, size_t mask_len,
1319 bool ufid_present, const ovs_u128 *ufid,
1320 const unsigned pmd_id, const struct ofpbuf *actions,
1321 uint64_t dump_seq, uint64_t reval_seq, long long int used,
1322 const struct recirc_id_node *key_recirc, struct xlate_out *xout)
1323 OVS_NO_THREAD_SAFETY_ANALYSIS
1325 unsigned n_recircs = (key_recirc ? 1 : 0) + (xout ? xout->n_recircs : 0);
1326 struct udpif_key *ukey = xmalloc(sizeof *ukey +
1327 n_recircs * sizeof *ukey->recircs);
1329 memcpy(&ukey->keybuf, key, key_len);
1330 ukey->key = &ukey->keybuf.nla;
1331 ukey->key_len = key_len;
1332 memcpy(&ukey->maskbuf, mask, mask_len);
1333 ukey->mask = &ukey->maskbuf.nla;
1334 ukey->mask_len = mask_len;
1335 ukey->ufid_present = ufid_present;
1337 ukey->pmd_id = pmd_id;
1338 ukey->hash = get_ufid_hash(&ukey->ufid);
1339 ukey->actions = ofpbuf_clone(actions);
1341 ovs_mutex_init(&ukey->mutex);
1342 ukey->dump_seq = dump_seq;
1343 ukey->reval_seq = reval_seq;
1344 ukey->flow_exists = false;
1345 ukey->created = time_msec();
1346 memset(&ukey->stats, 0, sizeof ukey->stats);
1347 ukey->stats.used = used;
1348 ukey->xcache = NULL;
1350 ukey->n_recircs = n_recircs;
1352 ukey->recircs[0] = key_recirc->id;
1354 if (xout && xout->n_recircs) {
1355 const uint32_t *act_recircs = xlate_out_get_recircs(xout);
1357 memcpy(ukey->recircs + (key_recirc ? 1 : 0), act_recircs,
1358 xout->n_recircs * sizeof *ukey->recircs);
1359 xlate_out_take_recircs(xout);
1364 static struct udpif_key *
1365 ukey_create_from_upcall(struct upcall *upcall)
1367 struct odputil_keybuf keystub, maskstub;
1368 struct ofpbuf keybuf, maskbuf;
1369 bool recirc, megaflow;
1371 if (upcall->key_len) {
1372 ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
1374 /* dpif-netdev doesn't provide a netlink-formatted flow key in the
1375 * upcall, so convert the upcall's flow here. */
1376 ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
1377 odp_flow_key_from_flow(&keybuf, upcall->flow, &upcall->xout.wc.masks,
1378 upcall->flow->in_port.odp_port, true);
1381 atomic_read_relaxed(&enable_megaflows, &megaflow);
1382 recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
1383 ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
1387 max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
1388 odp_flow_key_from_mask(&maskbuf, &upcall->xout.wc.masks, upcall->flow,
1389 UINT32_MAX, max_mpls, recirc);
1392 return ukey_create__(keybuf.data, keybuf.size, maskbuf.data, maskbuf.size,
1393 true, upcall->ufid, upcall->pmd_id,
1394 &upcall->put_actions, upcall->dump_seq,
1395 upcall->reval_seq, 0,
1396 upcall->have_recirc_ref ? upcall->recirc : NULL,
1401 ukey_create_from_dpif_flow(const struct udpif *udpif,
1402 const struct dpif_flow *flow,
1403 struct udpif_key **ukey)
1405 struct dpif_flow full_flow;
1406 struct ofpbuf actions;
1407 uint64_t dump_seq, reval_seq;
1408 uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
1409 const struct nlattr *a;
1412 if (!flow->key_len || !flow->actions_len) {
1416 /* If the key or actions were not provided by the datapath, fetch the full flow. */
1418 ofpbuf_use_stack(&buf, &stub, sizeof stub);
1419 err = dpif_flow_get(udpif->dpif, NULL, 0, &flow->ufid,
1420 flow->pmd_id, &buf, &full_flow);
1427 /* Check the flow actions for a recirculation action. As recirculation
1428 * relies on OVS userspace internal state, we need to delete all old
1429 * datapath flows with recirculation upon OVS restart. */
1430 NL_ATTR_FOR_EACH_UNSAFE (a, left, flow->actions, flow->actions_len) {
1431 if (nl_attr_type(a) == OVS_ACTION_ATTR_RECIRC) {
1436 dump_seq = seq_read(udpif->dump_seq);
1437 reval_seq = seq_read(udpif->reval_seq);
1438 ofpbuf_use_const(&actions, &flow->actions, flow->actions_len);
1439 *ukey = ukey_create__(flow->key, flow->key_len,
1440 flow->mask, flow->mask_len, flow->ufid_present,
1441 &flow->ufid, flow->pmd_id, &actions, dump_seq,
1442 reval_seq, flow->stats.used, NULL, NULL);
1447 /* Attempts to insert a ukey into the shared ukey maps.
1449 * On success, returns true, installs the ukey and returns it in a locked
1450 * state. Otherwise, returns false. */
1452 ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
1453 OVS_TRY_LOCK(true, new_ukey->mutex)
1456 struct udpif_key *old_ukey;
1458 bool locked = false;
1460 idx = new_ukey->hash % N_UMAPS;
1461 umap = &udpif->ukeys[idx];
1462 ovs_mutex_lock(&umap->mutex);
1463 old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
1465 /* Uncommon case: A ukey is already installed with the same UFID. */
1466 if (old_ukey->key_len == new_ukey->key_len
1467 && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
1468 COVERAGE_INC(handler_duplicate_upcall);
1470 struct ds ds = DS_EMPTY_INITIALIZER;
1472 odp_format_ufid(&old_ukey->ufid, &ds);
1473 ds_put_cstr(&ds, " ");
1474 odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
1475 ds_put_cstr(&ds, "\n");
1476 odp_format_ufid(&new_ukey->ufid, &ds);
1477 ds_put_cstr(&ds, " ");
1478 odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);
1480 VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
1484 ovs_mutex_lock(&new_ukey->mutex);
1485 cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
1488 ovs_mutex_unlock(&umap->mutex);
1494 ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
1496 ukey->flow_exists = true;
1500 ukey_install_finish(struct udpif_key *ukey, int error)
1501 OVS_RELEASES(ukey->mutex)
1504 ukey_install_finish__(ukey);
1506 ovs_mutex_unlock(&ukey->mutex);
1512 ukey_install(struct udpif *udpif, struct udpif_key *ukey)
1514 /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
1515 * to call ukey_install_start(), install the corresponding datapath flow,
1516 * then call ukey_install_finish(). The netdev interface using upcall_cb()
1517 * doesn't provide a function to separately finish the flow installation,
1518 * so we perform the operations together here.
1520 * This is fine currently, as revalidator threads will only delete this
1521 * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
1522 * It is unlikely for a revalidator thread to advance dump_seq and reach
1523 * the next GC phase between ukey creation and flow installation. */
1524 return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
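/* Illustrative sketch, not part of the build: handler threads that install
 * the datapath flow themselves use the two-step variant instead, as
 * handle_upcalls() does above:
 *
 *     if (ukey_install_start(udpif, ukey)) {
 *         ...queue the DPIF_OP_FLOW_PUT for this ukey...
 *     } else {
 *         ukey_delete__(ukey);
 *     }
 *     dpif_operate(udpif->dpif, opsp, n_opsp);
 *     ukey_install_finish(ukey, op->dop.error);
 */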
1527 /* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
1528 * lock the ukey. If the ukey does not exist, create it.
1530 * Returns 0 on success, setting *result to the matching ukey and returning it
1531 * in a locked state. Otherwise, returns an errno and clears *result. EBUSY
1532 * indicates that another thread is handling this flow. Other errors indicate
1533 * an unexpected condition while creating a new ukey.
1535 * *error is an output parameter provided to appease the threadsafety analyser,
1536 * and its value matches the return value. */
1538 ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
1539 struct udpif_key **result, int *error)
1540 OVS_TRY_LOCK(0, (*result)->mutex)
1542 struct udpif_key *ukey;
1545 ukey = ukey_lookup(udpif, &flow->ufid);
1547 retval = ovs_mutex_trylock(&ukey->mutex);
1549 /* Usually we try to avoid installing flows from revalidator threads,
1550 * because locking on a umap may cause handler threads to block.
1551 * However there are certain cases, like when ovs-vswitchd is
1552 * restarted, where it is desirable to handle flows that exist in the
1553 * datapath gracefully (i.e., don't just clear the datapath). */
1556 retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
1560 install = ukey_install_start(udpif, ukey);
1562 ukey_install_finish__(ukey);
1565 ukey_delete__(ukey);
1581 ukey_delete__(struct udpif_key *ukey)
1582 OVS_NO_THREAD_SAFETY_ANALYSIS
1585 for (int i = 0; i < ukey->n_recircs; i++) {
1586 recirc_free_id(ukey->recircs[i]);
1588 xlate_cache_delete(ukey->xcache);
1589 ofpbuf_delete(ukey->actions);
1590 ovs_mutex_destroy(&ukey->mutex);
1596 ukey_delete(struct umap *umap, struct udpif_key *ukey)
1597 OVS_REQUIRES(umap->mutex)
1599 cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
1600 ovsrcu_postpone(ukey_delete__, ukey);
1604 should_revalidate(const struct udpif *udpif, uint64_t packets,
1607 long long int metric, now, duration;
1609 if (udpif->dump_duration < 200) {
1610 /* We are likely to handle full revalidation for the flows. */
1614 /* Calculate the mean time between seeing these packets. If this
1615 * exceeds the threshold, then delete the flow rather than performing
1616 * costly revalidation for flows that aren't being hit frequently.
1618 * This is targeted at situations where the dump_duration is high (~1s),
1619 * and revalidation is triggered by a call to udpif_revalidate(). In
1620 * these situations, revalidation of all flows causes fluctuations in the
1621 * flow_limit due to the interaction with the dump_duration and max_idle.
1622 * This tends to result in deletion of low-throughput flows anyway, so
1623 * skip the revalidation and just delete those flows. */
1624 packets = MAX(packets, 1);
1625 now = MAX(used, time_msec());
1626 duration = now - used;
1627 metric = duration / packets;
1630 /* The flow is receiving more than ~5pps, so keep it. */
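        /* Worked example (illustrative): a flow that saw 50 packets over the
         * last 5 seconds has a mean inter-packet gap of 100 ms (10 pps) and
         * is kept for revalidation; one that saw 2 packets in that time has a
         * gap of 2500 ms (0.4 pps) and is deleted instead of revalidated. */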
1637 revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
1638 const struct dpif_flow_stats *stats, uint64_t reval_seq)
1639 OVS_REQUIRES(ukey->mutex)
1641 uint64_t slow_path_buf[128 / 8];
1642 struct xlate_out xout, *xoutp;
1643 struct netflow *netflow;
1644 struct ofproto_dpif *ofproto;
1645 struct dpif_flow_stats push;
1646 struct ofpbuf xout_actions;
1647 struct flow flow, dp_mask;
1648 uint64_t *dp64, *xout64;
1649 ofp_port_t ofp_in_port;
1650 struct xlate_in xin;
1651 long long int last_used;
1655 bool need_revalidate;
1661 need_revalidate = (ukey->reval_seq != reval_seq);
1662 last_used = ukey->stats.used;
1663 push.used = stats->used;
1664 push.tcp_flags = stats->tcp_flags;
1665 push.n_packets = (stats->n_packets > ukey->stats.n_packets
1666 ? stats->n_packets - ukey->stats.n_packets
1668 push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
1669 ? stats->n_bytes - ukey->stats.n_bytes
1672 if (need_revalidate && last_used
1673 && !should_revalidate(udpif, push.n_packets, last_used)) {
1678 /* We will push the stats, so update the ukey stats cache. */
1679 ukey->stats = *stats;
1680 if (!push.n_packets && !need_revalidate) {
1685 if (ukey->xcache && !need_revalidate) {
1686 xlate_push_stats(ukey->xcache, &push);
1691 if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
1696 error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
1702 if (need_revalidate) {
1703 xlate_cache_clear(ukey->xcache);
1705 if (!ukey->xcache) {
1706 ukey->xcache = xlate_cache_new();
1709 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
1711 if (push.n_packets) {
1712 xin.resubmit_stats = &push;
1713 xin.may_learn = true;
1715 xin.xcache = ukey->xcache;
1716 xin.skip_wildcards = !need_revalidate;
1717 xlate_actions(&xin, &xout);
1720 if (!need_revalidate) {
1726 ofpbuf_use_const(&xout_actions, xout.odp_actions->data,
1727 xout.odp_actions->size);
1729 ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
1730 compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
1734 if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
1738 if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &flow)
1743 /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
1744 * directly check that the masks are the same. Instead we check that the
1745 * mask in the kernel is more specific, i.e. less wildcarded, than what
1746 * we've calculated here. This guarantees we don't catch any packets we
1747 * shouldn't with the megaflow. */
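    /* Worked example (illustrative): if, for some 64-bit word, the datapath
     * mask is 0xffff and the mask computed here is 0x00ff, then
     * (dp | xout) == dp and the datapath flow is at least as specific as
     * required, so it may be kept.  If the computed mask had a bit that the
     * datapath mask lacks, the flow would match too broadly and is deleted. */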
1748 dp64 = (uint64_t *) &dp_mask;
1749 xout64 = (uint64_t *) &xout.wc.masks;
1750 for (i = 0; i < FLOW_U64S; i++) {
1751 if ((dp64[i] | xout64[i]) != dp64[i]) {
1760 ukey->reval_seq = reval_seq;
1762 if (netflow && !ok) {
1763 netflow_flow_clear(netflow, &flow);
1765 xlate_out_uninit(xoutp);
1770 delete_op_init__(struct udpif *udpif, struct ukey_op *op,
1771 const struct dpif_flow *flow)
1774 op->dop.type = DPIF_OP_FLOW_DEL;
1775 op->dop.u.flow_del.key = flow->key;
1776 op->dop.u.flow_del.key_len = flow->key_len;
1777 op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
1778 op->dop.u.flow_del.pmd_id = flow->pmd_id;
1779 op->dop.u.flow_del.stats = &op->stats;
1780 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1784 delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
1787 op->dop.type = DPIF_OP_FLOW_DEL;
1788 op->dop.u.flow_del.key = ukey->key;
1789 op->dop.u.flow_del.key_len = ukey->key_len;
1790 op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
1791 op->dop.u.flow_del.pmd_id = ukey->pmd_id;
1792 op->dop.u.flow_del.stats = &op->stats;
1793 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1797 push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
1799 struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
1802 ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
1803 for (i = 0; i < n_ops; i++) {
1804 opsp[i] = &ops[i].dop;
1806 dpif_operate(udpif->dpif, opsp, n_ops);
1808 for (i = 0; i < n_ops; i++) {
1809 struct ukey_op *op = &ops[i];
1810 struct dpif_flow_stats *push, *stats, push_buf;
1812 stats = op->dop.u.flow_del.stats;
1816 ovs_mutex_lock(&op->ukey->mutex);
1817 push->used = MAX(stats->used, op->ukey->stats.used);
1818 push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
1819 push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
1820 push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
1821 ovs_mutex_unlock(&op->ukey->mutex);
1826 if (push->n_packets || netflow_exists()) {
1827 const struct nlattr *key = op->dop.u.flow_del.key;
1828 size_t key_len = op->dop.u.flow_del.key_len;
1829 struct ofproto_dpif *ofproto;
1830 struct netflow *netflow;
1831 ofp_port_t ofp_in_port;
1836 ovs_mutex_lock(&op->ukey->mutex);
1837 if (op->ukey->xcache) {
1838 xlate_push_stats(op->ukey->xcache, push);
1839 ovs_mutex_unlock(&op->ukey->mutex);
1842 ovs_mutex_unlock(&op->ukey->mutex);
1843 key = op->ukey->key;
1844 key_len = op->ukey->key_len;
1847 if (odp_flow_key_to_flow(key, key_len, &flow)
1852 error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL,
1853 &netflow, &ofp_in_port);
1855 struct xlate_in xin;
1857 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
1858 push->tcp_flags, NULL);
1859 xin.resubmit_stats = push->n_packets ? push : NULL;
1860 xin.may_learn = push->n_packets > 0;
1861 xin.skip_wildcards = true;
1862 xlate_actions_for_side_effects(&xin);
1865 netflow_flow_clear(netflow, &flow);
1873 push_ukey_ops(struct udpif *udpif, struct umap *umap,
1874 struct ukey_op *ops, size_t n_ops)
1878 push_ukey_ops__(udpif, ops, n_ops);
1879 ovs_mutex_lock(&umap->mutex);
1880 for (i = 0; i < n_ops; i++) {
1881 ukey_delete(umap, ops[i].ukey);
1883 ovs_mutex_unlock(&umap->mutex);
1887 log_unexpected_flow(const struct dpif_flow *flow, int error)
1889 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
1890 struct ds ds = DS_EMPTY_INITIALIZER;
1892 ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
1893 "unexpected flow (%s): ", ovs_strerror(error));
1894 odp_format_ufid(&flow->ufid, &ds);
1895 VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
1899 revalidate(struct revalidator *revalidator)
1901 struct udpif *udpif = revalidator->udpif;
1902 struct dpif_flow_dump_thread *dump_thread;
1903 uint64_t dump_seq, reval_seq;
1904 unsigned int flow_limit;
1906 dump_seq = seq_read(udpif->dump_seq);
1907 reval_seq = seq_read(udpif->reval_seq);
1908 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1909 dump_thread = dpif_flow_dump_thread_create(udpif->dump);
1911 struct ukey_op ops[REVALIDATE_MAX_BATCH];
1914 struct dpif_flow flows[REVALIDATE_MAX_BATCH];
1915 const struct dpif_flow *f;
1918 long long int max_idle;
1923 n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
1930 /* In normal operation we want to keep flows around until they have
1931 * been idle for 'ofproto_max_idle' milliseconds. However:
1933 * - If the number of datapath flows climbs above 'flow_limit',
1934 * drop that down to 100 ms to try to bring the flows down to the limit.
1937 * - If the number of datapath flows climbs above twice
1938 * 'flow_limit', delete all the datapath flows as an emergency
1939 * measure. (We reassess this condition for the next batch of
1940 * datapath flows, so we will recover before all the flows are deleted.) */
1942 n_dp_flows = udpif_get_n_flows(udpif);
1943 kill_them_all = n_dp_flows > flow_limit * 2;
1944 max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
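        /* Worked example (illustrative): with flow_limit at 10000, dumping
         * 12000 datapath flows drops max_idle to 100 ms so idle flows expire
         * aggressively, while 25000 flows sets kill_them_all and every dumped
         * flow is deleted regardless of how recently it was used. */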
1946 for (f = flows; f < &flows[n_dumped]; f++) {
1947 long long int used = f->stats.used;
1948 struct udpif_key *ukey;
1949 bool already_dumped, keep;
1952 if (ukey_acquire(udpif, f, &ukey, &error)) {
1953 if (error == EBUSY) {
1954 /* Another thread is processing this flow, so don't bother processing it. */
1956 COVERAGE_INC(upcall_ukey_contention);
1958 log_unexpected_flow(f, error);
1959 if (error != ENOENT) {
1960 delete_op_init__(udpif, &ops[n_ops++], f);
1966 already_dumped = ukey->dump_seq == dump_seq;
1967 if (already_dumped) {
1968 /* The flow has already been handled during this flow dump
1969 * operation. Skip it. */
1971 COVERAGE_INC(dumped_duplicate_flow);
1973 COVERAGE_INC(dumped_new_flow);
1975 ovs_mutex_unlock(&ukey->mutex);
1980 used = ukey->created;
1982 if (kill_them_all || (used && used < now - max_idle)) {
1985 keep = revalidate_ukey(udpif, ukey, &f->stats, reval_seq);
1987 ukey->dump_seq = dump_seq;
1988 ukey->flow_exists = keep;
1991 delete_op_init(udpif, &ops[n_ops++], ukey);
1993 ovs_mutex_unlock(&ukey->mutex);
1997 push_ukey_ops__(udpif, ops, n_ops);
2001 dpif_flow_dump_thread_destroy(dump_thread);
2005 handle_missed_revalidation(struct udpif *udpif, uint64_t reval_seq,
2006 struct udpif_key *ukey)
2008 struct dpif_flow_stats stats;
2011 COVERAGE_INC(revalidate_missed_dp_flow);
2013 memset(&stats, 0, sizeof stats);
2014 ovs_mutex_lock(&ukey->mutex);
2015 keep = revalidate_ukey(udpif, ukey, &stats, reval_seq);
2016 ovs_mutex_unlock(&ukey->mutex);
2022 revalidator_sweep__(struct revalidator *revalidator, bool purge)
2024 struct udpif *udpif;
2025 uint64_t dump_seq, reval_seq;
2028 udpif = revalidator->udpif;
2029 dump_seq = seq_read(udpif->dump_seq);
2030 reval_seq = seq_read(udpif->reval_seq);
2031 slice = revalidator - udpif->revalidators;
2032 ovs_assert(slice < udpif->n_revalidators);
2034 for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
2035 struct ukey_op ops[REVALIDATE_MAX_BATCH];
2036 struct udpif_key *ukey;
2037 struct umap *umap = &udpif->ukeys[i];
2040 CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
2041 bool flow_exists, seq_mismatch;
2043 /* Handler threads could be holding a ukey lock while installing a
2044 * new flow, so don't hang around waiting for access to it. */
2045 if (ovs_mutex_trylock(&ukey->mutex)) {
2048 flow_exists = ukey->flow_exists;
2049 seq_mismatch = (ukey->dump_seq != dump_seq
2050 && ukey->reval_seq != reval_seq);
2051 ovs_mutex_unlock(&ukey->mutex);
2056 && !handle_missed_revalidation(udpif, reval_seq,
2058 struct ukey_op *op = &ops[n_ops++];
2060 delete_op_init(udpif, op, ukey);
2061 if (n_ops == REVALIDATE_MAX_BATCH) {
2062 push_ukey_ops(udpif, umap, ops, n_ops);
2065 } else if (!flow_exists) {
2066 ovs_mutex_lock(&umap->mutex);
2067 ukey_delete(umap, ukey);
2068 ovs_mutex_unlock(&umap->mutex);
2073 push_ukey_ops(udpif, umap, ops, n_ops);
2080 revalidator_sweep(struct revalidator *revalidator)
2082 revalidator_sweep__(revalidator, false);
2086 revalidator_purge(struct revalidator *revalidator)
2088 revalidator_sweep__(revalidator, true);
2092 upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
2093 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2095 struct ds ds = DS_EMPTY_INITIALIZER;
2096 struct udpif *udpif;
2098 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2099 unsigned int flow_limit;
2103 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
2104 ufid_enabled = udpif_use_ufid(udpif);
2106 ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
2107 ds_put_format(&ds, "\tflows : (current %lu)"
2108 " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
2109 udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
2110 ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
2111 ds_put_format(&ds, "\tufid enabled : ");
2113 ds_put_format(&ds, "true\n");
2115 ds_put_format(&ds, "false\n");
2117 ds_put_char(&ds, '\n');
2119 for (i = 0; i < n_revalidators; i++) {
2120 struct revalidator *revalidator = &udpif->revalidators[i];
2121 int j, elements = 0;
2123 for (j = i; j < N_UMAPS; j += n_revalidators) {
2124 elements += cmap_count(&udpif->ukeys[j].cmap);
2126 ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
2130 unixctl_command_reply(conn, ds_cstr(&ds));
2134 /* Disable using the megaflows.
2136 * This command is only needed for advanced debugging, so it's not
2137 * documented in the man page. */
2139 upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
2140 int argc OVS_UNUSED,
2141 const char *argv[] OVS_UNUSED,
2142 void *aux OVS_UNUSED)
2144 atomic_store_relaxed(&enable_megaflows, false);
2145 udpif_flush_all_datapaths();
2146 unixctl_command_reply(conn, "megaflows disabled");
2149 /* Re-enable using megaflows.
2151 * This command is only needed for advanced debugging, so it's not
2152 * documented in the man page. */
2154 upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
2155 int argc OVS_UNUSED,
2156 const char *argv[] OVS_UNUSED,
2157 void *aux OVS_UNUSED)
2159 atomic_store_relaxed(&enable_megaflows, true);
2160 udpif_flush_all_datapaths();
2161 unixctl_command_reply(conn, "megaflows enabled");
2164 /* Disable skipping flow attributes during flow dump.
2166 * This command is only needed for advanced debugging, so it's not
2167 * documented in the man page. */
2169 upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2170 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2172 atomic_store_relaxed(&enable_ufid, false);
2173 unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
2176 /* Re-enable skipping flow attributes during flow dump.
2178 * This command is only needed for advanced debugging, so it's not documented
2179 * in the man page. */
2181 upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2182 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2184 atomic_store_relaxed(&enable_ufid, true);
2185 unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
2186 "for supported datapaths");
2189 /* Set the flow limit.
2191 * This command is only needed for advanced debugging, so it's not
2192 * documented in the man page. */
2194 upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
2195 int argc OVS_UNUSED,
2196 const char *argv[] OVS_UNUSED,
2197 void *aux OVS_UNUSED)
2199 struct ds ds = DS_EMPTY_INITIALIZER;
2200 struct udpif *udpif;
2201 unsigned int flow_limit = atoi(argv[1]);
2203 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2204 atomic_store_relaxed(&udpif->flow_limit, flow_limit);
2206 ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
2207 unixctl_command_reply(conn, ds_cstr(&ds));
2212 upcall_unixctl_dump_wait(struct unixctl_conn *conn,
2213 int argc OVS_UNUSED,
2214 const char *argv[] OVS_UNUSED,
2215 void *aux OVS_UNUSED)
2217 if (list_is_singleton(&all_udpifs)) {
2218 struct udpif *udpif = NULL;
2221 udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
2222 len = (udpif->n_conns + 1) * sizeof *udpif->conns;
2223 udpif->conn_seq = seq_read(udpif->dump_seq);
2224 udpif->conns = xrealloc(udpif->conns, len);
2225 udpif->conns[udpif->n_conns++] = conn;
2227 unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
2232 upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
2233 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2235 struct udpif *udpif;
2237 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2240 for (n = 0; n < udpif->n_revalidators; n++) {
2241 revalidator_purge(&udpif->revalidators[n]);
2244 unixctl_command_reply(conn, "");