/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include "ofproto-dpif-upcall.h"

#include "dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
#include "openvswitch/vlog.h"
#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
COVERAGE_DEFINE(handler_duplicate_upcall);
COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(revalidate_missed_dp_flow);
/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};
/* In the absence of a multiple-writer multiple-reader data structure for
 * storing ukeys, we use a large number of cmaps, each with its own lock for
 * writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
    struct ovs_mutex mutex;            /* Take for writing to the following. */
    struct cmap cmap;                  /* Datapath flow keys. */
};
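
/* For example, ukey_lookup() and ukey_install_start() below both pick a
 * shard as '&udpif->ukeys[get_ufid_hash(ufid) % N_UMAPS]', so writers for
 * different flows rarely contend on the same 'mutex'. */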
/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes them if necessary. */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
};
/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 *    - Revalidation threads which read the datapath flow table and maintain
 *      them. */
struct udpif {
    struct ovs_list list_node;         /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */
    atomic_bool enable_ufid;           /* If true, skip dumping flow attrs. */

    /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution. During the garbage collection phase, each revalidator
     * takes care of garbage collecting a slice of these maps. */
    struct umap *ukeys;

    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_uint n_flows;               /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */
};
enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};
struct upcall {
    struct ofproto_dpif *ofproto;      /* Parent ofproto. */
    const struct recirc_id_node *recirc; /* Recirculation context. */
    bool have_recirc_ref;              /* Reference held on recirc ctx? */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev. If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;           /* Parsed representation of the packet. */
    const ovs_u128 *ufid;              /* Unique identifier for 'flow'. */
    unsigned pmd_id;                   /* Datapath poll mode driver id. */
    const struct dp_packet *packet;    /* Packet associated with this upcall. */
    ofp_port_t in_port;                /* OpenFlow in port, or OFPP_NONE. */

    enum dpif_upcall_type type;        /* Datapath type of the upcall. */
    const struct nlattr *userdata;     /* Userdata for DPIF_UC_ACTION upcalls. */
    const struct nlattr *actions;      /* Flow actions in DPIF_UC_ACTION upcalls. */

    bool xout_initialized;             /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;             /* Result of xlate_actions(). */
    struct ofpbuf put_actions;         /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;          /* IPFIX pointer or NULL. */
    struct dpif_sflow *sflow;          /* sFlow pointer or NULL. */

    bool vsp_adjusted;                 /* 'packet' and 'flow' were adjusted for
                                          VLAN splinters if true. */

    struct udpif_key *ukey;            /* Revalidator flow cache. */
    bool ukey_persists;                /* Set true to keep 'ukey' beyond the
                                          lifetime of this upcall. */

    uint64_t dump_seq;                 /* udpif->dump_seq at translation time. */
    uint64_t reval_seq;                /* udpif->reval_seq at translation time. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;          /* Datapath flow key. */
    size_t key_len;                    /* Datapath flow key length. */
    const struct nlattr *out_tun_key;  /* Datapath output tunnel key. */
};
/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath. They may be created by any handler or revalidator thread at any
 * time, and read by any revalidator during the dump phase. They are however
 * each owned by a single revalidator which takes care of destroying them
 * during the garbage-collection phase.
 *
 * The mutex within the ukey protects some members of the ukey. The ukey
 * itself is protected by RCU and is held within a umap in the parent udpif.
 * Adding or removing a ukey from a umap is only safe when holding the
 * corresponding umap lock. */
struct udpif_key {
    struct cmap_node cmap_node;     /* In parent udpif 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;       /* Datapath flow key. */
    size_t key_len;                 /* Length of 'key'. */
    const struct nlattr *mask;      /* Datapath flow mask. */
    size_t mask_len;                /* Length of 'mask'. */
    struct ofpbuf *actions;         /* Datapath flow actions as nlattrs. */
    ovs_u128 ufid;                  /* Unique flow identifier. */
    bool ufid_present;              /* True if 'ufid' is in datapath. */
    uint32_t hash;                  /* Pre-computed hash for 'key'. */
    unsigned pmd_id;                /* Datapath poll mode driver id. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
    bool flow_exists OVS_GUARDED;             /* Ensures flows are only deleted
                                                 once. */
    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning.*/
    union {
        struct odputil_keybuf buf;
        struct nlattr nla;
    } keybuf, maskbuf;

    /* Recirculation IDs with references held by the ukey. */
    unsigned n_recircs;
    uint32_t recircs[];             /* 'n_recircs' id's for which references
                                       are held. */
};
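
/* A minimal sketch of the removal protocol described above, as implemented
 * by ukey_delete() and revalidator_sweep__() below: the umap lock covers the
 * cmap unlink, while RCU defers the actual free until concurrent readers
 * from the dump phase have quiesced:
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovsrcu_postpone(ukey_delete__, ukey);
 *     ovs_mutex_unlock(&umap->mutex);
 */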
/* Datapath operation with optional ukey attached. */
struct ukey_op {
    struct udpif_key *ukey;
    struct dpif_flow_stats stats;      /* Stats for 'op'. */
    struct dpif_op dop;                /* Flow operation. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);

static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
                          struct ofpbuf *odp_actions);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
static void udpif_stop_threads(struct udpif *);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
                                size_t n_revalidators);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
                                             const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
                                            const char *argv[], void *aux);
static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
                                        const char *argv[], void *aux);
static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
                                       const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
                                          const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux);
static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
                                 const char *argv[], void *aux);

static struct udpif_key *ukey_create_from_upcall(struct upcall *);
static int ukey_create_from_dpif_flow(const struct udpif *,
                                      const struct dpif_flow *,
                                      struct udpif_key **);
static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
static bool ukey_install_finish(struct udpif_key *ukey, int error);
static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
                                     const ovs_u128 *ufid);
static int ukey_acquire(struct udpif *, const struct dpif_flow *,
                        struct udpif_key **result, int *error);
static void ukey_delete__(struct udpif_key *);
static void ukey_delete(struct umap *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
                                        const struct nlattr *userdata);

static int upcall_receive(struct upcall *, const struct dpif_backer *,
                          const struct dp_packet *packet, enum dpif_upcall_type,
                          const struct nlattr *userdata, const struct flow *,
                          const ovs_u128 *ufid, const unsigned pmd_id);
static void upcall_uninit(struct upcall *);

static upcall_callback upcall_cb;

static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/disable-ufid", "", 0, 0,
                                 upcall_unixctl_disable_ufid, NULL);
        unixctl_command_register("upcall/enable-ufid", "", 0, 0,
                                 upcall_unixctl_enable_ufid, NULL);
        unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
                                 upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        unixctl_command_register("revalidator/purge", "", 0, 0,
                                 upcall_unixctl_purge, NULL);
        ovsthread_once_done(&once);
    }
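
/* The commands registered above are reached through ovs-appctl, e.g.:
 *
 *     ovs-appctl upcall/show
 *     ovs-appctl upcall/set-flow-limit 10000
 *
 * Most of them are debugging aids that are deliberately not documented in
 * the man page (see the comments on their implementations below). */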
struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    struct udpif *udpif = xzalloc(sizeof *udpif);

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->enable_ufid, false);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);
    udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
    for (int i = 0; i < N_UMAPS; i++) {
        cmap_init(&udpif->ukeys[i].cmap);
        ovs_mutex_init(&udpif->ukeys[i].mutex);
    }

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);

    return udpif;
}
void
udpif_run(struct udpif *udpif)
{
    if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
        size_t i;

        for (i = 0; i < udpif->n_conns; i++) {
            unixctl_command_reply(udpif->conns[i], NULL);
        }
        free(udpif->conns);
        udpif->conns = NULL;
        udpif->n_conns = 0;
    }
}
void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif);

    for (int i = 0; i < N_UMAPS; i++) {
        cmap_destroy(&udpif->ukeys[i].cmap);
        ovs_mutex_destroy(&udpif->ukeys[i].mutex);
    }
    free(udpif->ukeys);
    udpif->ukeys = NULL;

    list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}
/* Stops the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state, except when destroying udpif. */
static void
udpif_stop_threads(struct udpif *udpif)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        latch_set(&udpif->exit_latch);

        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            xpthread_join(handler->thread, NULL);
        }

        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }

        dpif_disable_upcall(udpif->dpif);

        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            /* Delete ukeys, and delete all flows from the datapath to prevent
             * double-counting stats. */
            revalidator_purge(revalidator);
        }

        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }
}
/* Starts the handler and revalidator threads. Must be enclosed in an ovsrcu
 * quiescent state. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
                    size_t n_revalidators)
{
    if (udpif && n_handlers && n_revalidators) {
        size_t i;
        bool enable_ufid;

        udpif->n_handlers = n_handlers;
        udpif->n_revalidators = n_revalidators;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
        atomic_init(&udpif->enable_ufid, enable_ufid);
        dpif_enable_upcall(udpif->dpif);

        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        udpif->reval_exit = false;
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
    }
}
/* Tells 'udpif' how many threads it should use to handle upcalls.
 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
 * datapath handle must have packet reception enabled before starting
 * threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers,
                  size_t n_revalidators)
{
    ovs_assert(n_handlers && n_revalidators);

    ovsrcu_quiesce_start();
    if (udpif->n_handlers != n_handlers
        || udpif->n_revalidators != n_revalidators) {
        udpif_stop_threads(udpif);
    }

    if (!udpif->handlers && !udpif->revalidators) {
        int error;

        error = dpif_handlers_set(udpif->dpif, n_handlers);
        if (error) {
            VLOG_ERR("failed to configure handlers in dpif %s: %s",
                     dpif_name(udpif->dpif), ovs_strerror(error));
            return;
        }

        udpif_start_threads(udpif, n_handlers, n_revalidators);
    }
    ovsrcu_quiesce_end();
}
/* Waits for all ongoing upcall translations to complete. This ensures that
 * there are no transient references to any removed ofprotos (or other
 * objects). In particular, this should be called after an ofproto is removed
 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
void
udpif_synchronize(struct udpif *udpif)
{
    /* This is stronger than necessary. It would be sufficient to ensure
     * (somehow) that each handler and revalidator thread had passed through
     * its main loop once. */
    size_t n_handlers = udpif->n_handlers;
    size_t n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();
    udpif_stop_threads(udpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);
    ovsrcu_quiesce_end();
}
/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}
/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath. Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up-to-date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}
void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "handlers", udpif->n_handlers);
    simap_increase(usage, "revalidators", udpif->n_revalidators);
    for (i = 0; i < N_UMAPS; i++) {
        simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
    }
}
/* Remove flows from a single datapath. */
void
udpif_flush(struct udpif *udpif)
{
    size_t n_handlers, n_revalidators;

    n_handlers = udpif->n_handlers;
    n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();

    udpif_stop_threads(udpif);
    dpif_flow_flush(udpif->dpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);

    ovsrcu_quiesce_end();
}
/* Removes all flows from all datapaths. */
static void
udpif_flush_all_datapaths(void)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        udpif_flush(udpif);
    }
}
static bool
udpif_use_ufid(struct udpif *udpif)
{
    bool enable;

    atomic_read_relaxed(&enable_ufid, &enable);
    return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
}
static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
    long long int time, now;
    unsigned long flow_count;

    now = time_msec();
    atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
    if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
        struct dpif_dp_stats stats;

        atomic_store_relaxed(&udpif->n_flows_timestamp, now);
        dpif_get_dp_stats(udpif->dpif, &stats);
        flow_count = stats.n_flows;
        atomic_store_relaxed(&udpif->n_flows, flow_count);
        ovs_mutex_unlock(&udpif->n_flows_mutex);
    } else {
        atomic_read_relaxed(&udpif->n_flows, &flow_count);
    }
    return flow_count;
}
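
/* Note on the pattern above: the datapath is queried for stats at most once
 * per 100 ms, and ovs_mutex_trylock() ensures that a single thread refreshes
 * the cached count while all other callers just read 'udpif->n_flows'. */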
/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
 * upcalls from dpif, processes the batch and installs corresponding flows
 * in the datapath. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;
    struct udpif *udpif = handler->udpif;

    while (!latch_is_set(&handler->udpif->exit_latch)) {
        if (recv_upcalls(handler)) {
            poll_immediate_wake();
        } else {
            dpif_recv_wait(udpif->dpif, handler->handler_id);
            latch_wait(&udpif->exit_latch);
        }
        poll_block();
    }

    return NULL;
}
static size_t
recv_upcalls(struct handler *handler)
{
    struct udpif *udpif = handler->udpif;
    uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
    struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
    struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
    struct upcall upcalls[UPCALL_MAX_BATCH];
    struct flow flows[UPCALL_MAX_BATCH];
    size_t n_upcalls, i;

    n_upcalls = 0;
    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
        struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
        struct upcall *upcall = &upcalls[n_upcalls];
        struct flow *flow = &flows[n_upcalls];
        int error;

        ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
                        sizeof recv_stubs[n_upcalls]);
        if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
            ofpbuf_uninit(recv_buf);
            break;
        }

        if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
            == ODP_FIT_ERROR) {
            goto free_dupcall;
        }

        error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
                               dupcall->type, dupcall->userdata, flow,
                               &dupcall->ufid, PMD_ID_NULL);
        if (error) {
            if (error == ENODEV) {
                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto. This can happen if a port is removed
                 * while traffic is being received. Print a rate-limited
                 * message in case it happens frequently. */
                dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
                              dupcall->key_len, NULL, 0, NULL, 0,
                              &dupcall->ufid, PMD_ID_NULL, NULL);
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, flow->in_port.odp_port);
            }
            goto free_dupcall;
        }

        upcall->key = dupcall->key;
        upcall->key_len = dupcall->key_len;
        upcall->ufid = &dupcall->ufid;

        upcall->out_tun_key = dupcall->out_tun_key;
        upcall->actions = dupcall->actions;

        if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
            upcall->vsp_adjusted = true;
        }

        pkt_metadata_from_flow(&dupcall->packet.md, flow);
        flow_extract(&dupcall->packet, flow);

        error = process_upcall(udpif, upcall, NULL);
        if (error) {
            goto cleanup;
        }

        n_upcalls++;
        continue;

cleanup:
        upcall_uninit(upcall);
free_dupcall:
        dp_packet_uninit(&dupcall->packet);
        ofpbuf_uninit(recv_buf);
    }

    if (n_upcalls) {
        handle_upcalls(handler->udpif, upcalls, n_upcalls);
        for (i = 0; i < n_upcalls; i++) {
            dp_packet_uninit(&dupcalls[i].packet);
            ofpbuf_uninit(&recv_bufs[i]);
            upcall_uninit(&upcalls[i]);
        }
    }

    return n_upcalls;
}
static void *
udpif_revalidator(void *arg)
{
    /* Used by all revalidators. */
    struct revalidator *revalidator = arg;
    struct udpif *udpif = revalidator->udpif;
    bool leader = revalidator == &udpif->revalidators[0];

    /* Used only by the leader. */
    long long int start_time = 0;
    uint64_t last_reval_seq = 0;

    revalidator->id = ovsthread_id_self();
    for (;;) {
        if (leader) {
            uint64_t reval_seq;

            recirc_run(); /* Recirculation cleanup. */

            reval_seq = seq_read(udpif->reval_seq);
            last_reval_seq = reval_seq;

            n_flows = udpif_get_n_flows(udpif);
            udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
            udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;

            /* Only the leader checks the exit latch to prevent a race where
             * some threads think it's true and exit and others think it's
             * false and block indefinitely on the reval_barrier. */
            udpif->reval_exit = latch_is_set(&udpif->exit_latch);

            start_time = time_msec();
            if (!udpif->reval_exit) {
                bool terse_dump;

                terse_dump = udpif_use_ufid(udpif);
                udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
            }
        }

        /* Wait for the leader to start the flow dump. */
        ovs_barrier_block(&udpif->reval_barrier);
        if (udpif->reval_exit) {
            break;
        }
        revalidate(revalidator);

        /* Wait for all flows to have been dumped before we garbage collect. */
        ovs_barrier_block(&udpif->reval_barrier);
        revalidator_sweep(revalidator);

        /* Wait for all revalidators to finish garbage collection. */
        ovs_barrier_block(&udpif->reval_barrier);

        if (leader) {
            unsigned int flow_limit;
            long long int duration;

            atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

            dpif_flow_dump_destroy(udpif->dump);
            seq_change(udpif->dump_seq);

            duration = MAX(time_msec() - start_time, 1);
            udpif->dump_duration = duration;
            if (duration > 2000) {
                flow_limit /= duration / 1000;
            } else if (duration > 1300) {
                flow_limit = flow_limit * 3 / 4;
            } else if (duration < 1000 && n_flows > 2000
                       && flow_limit < n_flows * 1000 / duration) {
                flow_limit += 1000;
            }
            flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
            atomic_store_relaxed(&udpif->flow_limit, flow_limit);
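
            /* Worked example of the adjustment above: a 4000 ms dump divides
             * flow_limit by 4, a 1500 ms dump trims it to 3/4, and a fast
             * dump (under 1000 ms) with enough flows grows it by 1000, always
             * clamped to the range [1000, ofproto_flow_limit]. */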
            if (duration > 2000) {
                VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
                          duration);
            }
        }

        poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
        seq_wait(udpif->reval_seq, last_reval_seq);
        latch_wait(&udpif->exit_latch);
        poll_block();
    }

    return NULL;
}
static enum upcall_type
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
{
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(userdata), userdata_len);
    if (userdata_len == MAX(8, sizeof cookie.sflow)
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}
/* Calculates slow path actions for 'xout'. 'buf' must be statically
 * initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
                  const struct flow *flow, odp_port_t odp_in_port,
                  struct ofpbuf *buf)
{
    union user_action_cookie cookie;
    odp_port_t port;
    uint32_t pid;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = xout->slow;

    port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
           ? ODPP_NONE
           : odp_in_port;
    pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
    odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path,
                             ODPP_NONE, false, buf);
}
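
/* The 128-byte requirement above is relied upon by revalidate_ukey(), which
 * hands this function its stack buffer 'slow_path_buf[128 / 8]'. */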
/* If there is no error, the upcall must be destroyed with upcall_uninit()
 * before quiescing, as the referred objects are guaranteed to exist only
 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
 * since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
               const struct dp_packet *packet, enum dpif_upcall_type type,
               const struct nlattr *userdata, const struct flow *flow,
               const ovs_u128 *ufid, const unsigned pmd_id)
{
    int error;

    error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
                         &upcall->sflow, NULL, &upcall->in_port);
    if (error) {
        return error;
    }

    upcall->recirc = NULL;
    upcall->have_recirc_ref = false;
    upcall->flow = flow;
    upcall->packet = packet;
    upcall->ufid = ufid;
    upcall->pmd_id = pmd_id;
    upcall->type = type;
    upcall->userdata = userdata;
    ofpbuf_init(&upcall->put_actions, 0);

    upcall->xout_initialized = false;
    upcall->vsp_adjusted = false;
    upcall->ukey_persists = false;

    upcall->ukey = NULL;
    upcall->key = NULL;
    upcall->key_len = 0;

    upcall->out_tun_key = NULL;
    upcall->actions = NULL;

    return 0;
}
static void
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
             struct ofpbuf *odp_actions)
{
    struct dpif_flow_stats stats;
    struct xlate_in xin;

    stats.n_packets = 1;
    stats.n_bytes = dp_packet_size(upcall->packet);
    stats.used = time_msec();
    stats.tcp_flags = ntohs(upcall->flow->tcp_flags);

    xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
                  stats.tcp_flags, upcall->packet);
    xin.odp_actions = odp_actions;

    if (upcall->type == DPIF_UC_MISS) {
        xin.resubmit_stats = &stats;

        /* We may install a datapath flow only if we get a reference to the
         * recirculation context (otherwise we could have recirculation
         * upcalls using recirculation ID for which no context can be
         * found). We may still execute the flow's actions even if we
         * don't install the flow. */
        upcall->recirc = xin.recirc;
        upcall->have_recirc_ref = recirc_id_node_try_ref_rcu(xin.recirc);
    } else {
        /* For non-miss upcalls, we are either executing actions (one of which
         * is a userspace action) for an upcall, in which case the stats have
         * already been taken care of, or there's a flow in the datapath which
         * this packet was accounted to. Presumably the revalidators will deal
         * with pushing its stats eventually. */
    }

    upcall->dump_seq = seq_read(udpif->dump_seq);
    upcall->reval_seq = seq_read(udpif->reval_seq);
    xlate_actions(&xin, &upcall->xout);
    upcall->xout_initialized = true;

    /* Special case for fail-open mode.
     *
     * If we are in fail-open mode, but we are connected to a controller too,
     * then we should send the packet up to the controller in the hope that it
     * will try to set up a flow and thereby allow us to exit fail-open.
     *
     * See the top-level comment in fail-open.c for more information.
     *
     * Copy packets before they are modified by execution. */
    if (upcall->xout.fail_open) {
        const struct dp_packet *packet = upcall->packet;
        struct ofproto_packet_in *pin;

        pin = xmalloc(sizeof *pin);
        pin->up.packet = xmemdup(dp_packet_data(packet),
                                 dp_packet_size(packet));
        pin->up.packet_len = dp_packet_size(packet);
        pin->up.reason = OFPR_NO_MATCH;
        pin->up.table_id = 0;
        pin->up.cookie = OVS_BE64_MAX;
        flow_get_metadata(upcall->flow, &pin->up.flow_metadata);
        pin->send_len = 0; /* Not used for flow table misses. */
        pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
        ofproto_dpif_send_packet_in(upcall->ofproto, pin);
    }

    if (!upcall->xout.slow) {
        ofpbuf_use_const(&upcall->put_actions,
                         upcall->xout.odp_actions->data,
                         upcall->xout.odp_actions->size);
    } else {
        ofpbuf_init(&upcall->put_actions, 0);
        compose_slow_path(udpif, &upcall->xout, upcall->flow,
                          upcall->flow->in_port.odp_port,
                          &upcall->put_actions);
    }

    /* This function is also called for slow-pathed flows. As we are only
     * going to create new datapath flows for actual datapath misses, there is
     * no point in creating a ukey otherwise. */
    if (upcall->type == DPIF_UC_MISS) {
        upcall->ukey = ukey_create_from_upcall(upcall);
    }
}
static void
upcall_uninit(struct upcall *upcall)
{
    if (upcall) {
        if (upcall->xout_initialized) {
            xlate_out_uninit(&upcall->xout);
        }
        ofpbuf_uninit(&upcall->put_actions);
        if (upcall->ukey) {
            if (!upcall->ukey_persists) {
                ukey_delete__(upcall->ukey);
            }
        } else if (upcall->have_recirc_ref) {
            /* The reference was transferred to the ukey if one was created. */
            recirc_id_node_unref(upcall->recirc);
        }
    }
}
static int
upcall_cb(const struct dp_packet *packet, const struct flow *flow,
          ovs_u128 *ufid, unsigned pmd_id, enum dpif_upcall_type type,
          const struct nlattr *userdata, struct ofpbuf *actions,
          struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
{
    struct udpif *udpif = aux;
    unsigned int flow_limit;
    struct upcall upcall;
    bool megaflow;
    int error;

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

    error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
                           flow, ufid, pmd_id);
    if (error) {
        return error;
    }

    error = process_upcall(udpif, &upcall, actions);
    if (error) {
        goto out;
    }

    if (upcall.xout.slow && put_actions) {
        ofpbuf_put(put_actions, upcall.put_actions.data,
                   upcall.put_actions.size);
    }

    if (OVS_LIKELY(wc)) {
        if (megaflow) {
            /* XXX: This could be avoided with sufficient API changes. */
            *wc = upcall.xout.wc;
        } else {
            flow_wildcards_init_for_packet(wc, flow);
        }
    }

    if (udpif_get_n_flows(udpif) >= flow_limit) {
        error = ENOSPC;
        goto out;
    }

    /* Prevent miss flow installation if the key has recirculation ID but we
     * were not able to get a reference on it. */
    if (type == DPIF_UC_MISS && upcall.recirc && !upcall.have_recirc_ref) {
        error = ENOSPC;
        goto out;
    }

    if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
        error = ENOSPC;
    }
out:
    if (!error) {
        upcall.ukey_persists = true;
    }
    upcall_uninit(&upcall);
    return error;
}
static int
process_upcall(struct udpif *udpif, struct upcall *upcall,
               struct ofpbuf *odp_actions)
{
    const struct nlattr *userdata = upcall->userdata;
    const struct dp_packet *packet = upcall->packet;
    const struct flow *flow = upcall->flow;

    switch (classify_upcall(upcall->type, userdata)) {
    case MISS_UPCALL:
        upcall_xlate(udpif, upcall, odp_actions);
        return 0;

    case SFLOW_UPCALL:
        if (upcall->sflow) {
            union user_action_cookie cookie;
            const struct nlattr *actions;
            int actions_len = 0;
            struct dpif_sflow_actions sflow_actions;
            memset(&sflow_actions, 0, sizeof sflow_actions);
            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
            if (upcall->actions) {
                /* Actions were passed up from datapath. */
                actions = nl_attr_get(upcall->actions);
                actions_len = nl_attr_get_size(upcall->actions);
                if (actions && actions_len) {
                    dpif_sflow_read_actions(flow, actions, actions_len,
                                            &sflow_actions);
                }
            }
            if (actions_len == 0) {
                /* Lookup actions in userspace cache. */
                struct udpif_key *ukey = ukey_lookup(udpif, upcall->ufid);
                if (ukey) {
                    actions = ukey->actions->data;
                    actions_len = ukey->actions->size;
                    dpif_sflow_read_actions(flow, actions, actions_len,
                                            &sflow_actions);
                }
            }
            dpif_sflow_received(upcall->sflow, packet, flow,
                                flow->in_port.odp_port, &cookie,
                                actions_len > 0 ? &sflow_actions : NULL);
        }
        break;

    case IPFIX_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;
            struct flow_tnl output_tunnel_key;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);

            if (upcall->out_tun_key) {
                odp_tun_key_from_attr(upcall->out_tun_key,
                                      &output_tunnel_key);
            }
            dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
                                     flow->in_port.odp_port,
                                     cookie.ipfix.output_odp_port,
                                     upcall->out_tun_key ?
                                         &output_tunnel_key : NULL);
        }
        break;

    case FLOW_SAMPLE_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);

            /* The flow reflects exactly the contents of the packet.
             * Sample the packet using it. */
            dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
                                   cookie.flow_sample.collector_set_id,
                                   cookie.flow_sample.probability,
                                   cookie.flow_sample.obs_domain_id,
                                   cookie.flow_sample.obs_point_id);
        }
        break;

    case BAD_UPCALL:
        break;
    }

    return EAGAIN;
}
static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct ukey_op ops[UPCALL_MAX_BATCH * 2];
    unsigned int flow_limit;
    size_t n_ops, n_opsp, i;
    bool may_put;
    bool megaflow;

    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    atomic_read_relaxed(&enable_megaflows, &megaflow);

    may_put = udpif_get_n_flows(udpif) < flow_limit;

    /* Handle the packets individually in order of arrival.
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *     processes received packets for these protocols.
     *
     *   - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *     controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct dp_packet *packet = upcall->packet;
        struct ukey_op *op;

        if (upcall->vsp_adjusted) {
            /* This packet was received on a VLAN splinter port. We added a
             * VLAN to the packet to make the packet resemble the flow, but the
             * actions were composed assuming that the packet contained no
             * VLAN. So, we must remove the VLAN header from the packet before
             * trying to execute the actions. */
            if (upcall->xout.odp_actions->size) {
                eth_pop_vlan(CONST_CAST(struct dp_packet *, upcall->packet));
            }

            /* Remove the flow vlan tags inserted by vlan splinter logic
             * to ensure megaflow masks generated match the data path flow. */
            CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
        }

        /* Do not install a flow into the datapath if:
         *
         *    - The datapath already has too many flows.
         *
         *    - We received this packet via some flow installed in the kernel
         *      already.
         *
         *    - Upcall was a recirculation but we do not have a reference to
         *      the recirculation ID. */
        if (may_put && upcall->type == DPIF_UC_MISS &&
            (!upcall->recirc || upcall->have_recirc_ref)) {
            struct udpif_key *ukey = upcall->ukey;

            upcall->ukey_persists = true;
            op = &ops[n_ops++];

            op->ukey = ukey;
            op->dop.type = DPIF_OP_FLOW_PUT;
            op->dop.u.flow_put.flags = DPIF_FP_CREATE;
            op->dop.u.flow_put.key = ukey->key;
            op->dop.u.flow_put.key_len = ukey->key_len;
            op->dop.u.flow_put.mask = ukey->mask;
            op->dop.u.flow_put.mask_len = ukey->mask_len;
            op->dop.u.flow_put.ufid = upcall->ufid;
            op->dop.u.flow_put.stats = NULL;
            op->dop.u.flow_put.actions = ukey->actions->data;
            op->dop.u.flow_put.actions_len = ukey->actions->size;
        }

        if (upcall->xout.odp_actions->size) {
            op = &ops[n_ops++];
            op->ukey = NULL;
            op->dop.type = DPIF_OP_EXECUTE;
            op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet);
            odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
                                    &op->dop.u.execute.packet->md);
            op->dop.u.execute.actions = upcall->xout.odp_actions->data;
            op->dop.u.execute.actions_len = upcall->xout.odp_actions->size;
            op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
            op->dop.u.execute.probe = false;
        }
    }

    /* Execute batch.
     *
     * We install ukeys before installing the flows, locking them for exclusive
     * access by this thread for the period of installation. This ensures that
     * other threads won't attempt to delete the flows as we are creating them.
     */
    n_opsp = 0;
    for (i = 0; i < n_ops; i++) {
        struct udpif_key *ukey = ops[i].ukey;

        if (ukey) {
            /* If we can't install the ukey, don't install the flow. */
            if (!ukey_install_start(udpif, ukey)) {
                ukey_delete__(ukey);
                ops[i].ukey = NULL;
                continue;
            }
        }
        opsp[n_opsp++] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_opsp);
    for (i = 0; i < n_ops; i++) {
        if (ops[i].ukey) {
            ukey_install_finish(ops[i].ukey, ops[i].dop.error);
        }
    }
}
static uint32_t
get_ufid_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}
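
/* The UFID is expected to be uniformly distributed (it is itself derived
 * from a flow-key hash when the datapath does not supply one), so its first
 * 32 bits can serve directly as the cmap hash and, modulo N_UMAPS, as the
 * umap shard index. */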
static struct udpif_key *
ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
{
    struct udpif_key *ukey;
    int idx = get_ufid_hash(ufid) % N_UMAPS;
    struct cmap *cmap = &udpif->ukeys[idx].cmap;

    CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
        if (ovs_u128_equals(&ukey->ufid, ufid)) {
            return ukey;
        }
    }
    return NULL;
}
static struct udpif_key *
ukey_create__(const struct nlattr *key, size_t key_len,
              const struct nlattr *mask, size_t mask_len,
              bool ufid_present, const ovs_u128 *ufid,
              const unsigned pmd_id, const struct ofpbuf *actions,
              uint64_t dump_seq, uint64_t reval_seq, long long int used,
              const struct recirc_id_node *key_recirc, struct xlate_out *xout)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    unsigned n_recircs = (key_recirc ? 1 : 0) + (xout ? xout->n_recircs : 0);
    struct udpif_key *ukey = xmalloc(sizeof *ukey +
                                     n_recircs * sizeof *ukey->recircs);

    memcpy(&ukey->keybuf, key, key_len);
    ukey->key = &ukey->keybuf.nla;
    ukey->key_len = key_len;
    memcpy(&ukey->maskbuf, mask, mask_len);
    ukey->mask = &ukey->maskbuf.nla;
    ukey->mask_len = mask_len;
    ukey->ufid_present = ufid_present;
    ukey->ufid = *ufid;
    ukey->pmd_id = pmd_id;
    ukey->hash = get_ufid_hash(&ukey->ufid);
    ukey->actions = ofpbuf_clone(actions);

    ovs_mutex_init(&ukey->mutex);
    ukey->dump_seq = dump_seq;
    ukey->reval_seq = reval_seq;
    ukey->flow_exists = false;
    ukey->created = time_msec();
    memset(&ukey->stats, 0, sizeof ukey->stats);
    ukey->stats.used = used;
    ukey->xcache = NULL;

    ukey->n_recircs = n_recircs;
    if (key_recirc) {
        ukey->recircs[0] = key_recirc->id;
    }
    if (xout && xout->n_recircs) {
        const uint32_t *act_recircs = xlate_out_get_recircs(xout);

        memcpy(ukey->recircs + (key_recirc ? 1 : 0), act_recircs,
               xout->n_recircs * sizeof *ukey->recircs);
        xlate_out_take_recircs(xout);
    }
    return ukey;
}
static struct udpif_key *
ukey_create_from_upcall(struct upcall *upcall)
{
    struct odputil_keybuf keystub, maskstub;
    struct ofpbuf keybuf, maskbuf;
    bool megaflow;
    struct odp_flow_key_parms odp_parms = {
        .flow = upcall->flow,
        .mask = &upcall->xout.wc.masks,
    };

    odp_parms.support = ofproto_dpif_get_support(upcall->ofproto)->odp;
    if (upcall->key_len) {
        ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
    } else {
        /* dpif-netdev doesn't provide a netlink-formatted flow key in the
         * upcall, so convert the upcall's flow here. */
        ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
        odp_parms.odp_in_port = upcall->flow->in_port.odp_port;
        odp_flow_key_from_flow(&odp_parms, &keybuf);
    }

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
    if (megaflow) {
        odp_parms.odp_in_port = ODPP_NONE;
        odp_parms.key_buf = &keybuf;

        odp_flow_key_from_mask(&odp_parms, &maskbuf);
    }

    return ukey_create__(keybuf.data, keybuf.size, maskbuf.data, maskbuf.size,
                         true, upcall->ufid, upcall->pmd_id,
                         &upcall->put_actions, upcall->dump_seq,
                         upcall->reval_seq, 0,
                         upcall->have_recirc_ref ? upcall->recirc : NULL,
                         &upcall->xout);
}
static int
ukey_create_from_dpif_flow(const struct udpif *udpif,
                           const struct dpif_flow *flow,
                           struct udpif_key **ukey)
{
    struct dpif_flow full_flow;
    struct ofpbuf actions;
    uint64_t dump_seq, reval_seq;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
    const struct nlattr *a;
    unsigned int left;

    if (!flow->key_len || !flow->actions_len) {
        struct ofpbuf buf;
        int err;

        /* If the key or actions were not provided by the datapath, fetch the
         * full flow. */
        ofpbuf_use_stack(&buf, &stub, sizeof stub);
        err = dpif_flow_get(udpif->dpif, NULL, 0, &flow->ufid,
                            flow->pmd_id, &buf, &full_flow);
        if (err) {
            return err;
        }
        flow = &full_flow;
    }

    /* Check the flow actions for recirculation action. As recirculation
     * relies on OVS userspace internal state, we need to delete all old
     * datapath flows with recirculation upon OVS restart. */
    NL_ATTR_FOR_EACH_UNSAFE (a, left, flow->actions, flow->actions_len) {
        if (nl_attr_type(a) == OVS_ACTION_ATTR_RECIRC) {
            return EINVAL;
        }
    }

    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
    *ukey = ukey_create__(flow->key, flow->key_len,
                          flow->mask, flow->mask_len, flow->ufid_present,
                          &flow->ufid, flow->pmd_id, &actions, dump_seq,
                          reval_seq, flow->stats.used, NULL, NULL);

    return 0;
}
/* Attempts to insert a ukey into the shared ukey maps.
 *
 * On success, returns true, installs the ukey and returns it in a locked
 * state. Otherwise, returns false. */
static bool
ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
    OVS_TRY_LOCK(true, new_ukey->mutex)
{
    struct umap *umap;
    struct udpif_key *old_ukey;
    uint32_t idx;
    bool locked = false;

    idx = new_ukey->hash % N_UMAPS;
    umap = &udpif->ukeys[idx];
    ovs_mutex_lock(&umap->mutex);
    old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
    if (old_ukey) {
        /* Uncommon case: A ukey is already installed with the same UFID. */
        if (old_ukey->key_len == new_ukey->key_len
            && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
            COVERAGE_INC(handler_duplicate_upcall);
        } else {
            struct ds ds = DS_EMPTY_INITIALIZER;

            odp_format_ufid(&old_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
            ds_put_cstr(&ds, "\n");
            odp_format_ufid(&new_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);

            VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
            ds_destroy(&ds);
        }
    } else {
        ovs_mutex_lock(&new_ukey->mutex);
        cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
        locked = true;
    }
    ovs_mutex_unlock(&umap->mutex);

    return locked;
}
static void
ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
{
    ukey->flow_exists = true;
}

static bool
ukey_install_finish(struct udpif_key *ukey, int error)
    OVS_RELEASES(ukey->mutex)
{
    if (!error) {
        ukey_install_finish__(ukey);
    }
    ovs_mutex_unlock(&ukey->mutex);
    return !error;
}
static bool
ukey_install(struct udpif *udpif, struct udpif_key *ukey)
{
    /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
     * to call ukey_install_start(), install the corresponding datapath flow,
     * then call ukey_install_finish(). The netdev interface using upcall_cb()
     * doesn't provide a function to separately finish the flow installation,
     * so we perform the operations together here.
     *
     * This is fine currently, as revalidator threads will only delete this
     * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
     * It is unlikely for a revalidator thread to advance dump_seq and reach
     * the next GC phase between ukey creation and flow installation. */
    return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
}
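
/* By contrast, handle_upcalls() does split the two steps: it calls
 * ukey_install_start() for each new flow, batches the datapath operations
 * through dpif_operate(), and only then calls ukey_install_finish() with
 * each operation's error code. */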
/* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
 * lock the ukey. If the ukey does not exist, create it.
 *
 * Returns 0 on success, setting *result to the matching ukey and returning it
 * in a locked state. Otherwise, returns an errno and clears *result. EBUSY
 * indicates that another thread is handling this flow. Other errors indicate
 * an unexpected condition creating a new ukey.
 *
 * *error is an output parameter provided to appease the thread-safety
 * analyser, and its value matches the return value. */
static int
ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
             struct udpif_key **result, int *error)
    OVS_TRY_LOCK(0, (*result)->mutex)
{
    struct udpif_key *ukey;
    int retval;

    ukey = ukey_lookup(udpif, &flow->ufid);
    if (ukey) {
        retval = ovs_mutex_trylock(&ukey->mutex);
    } else {
        /* Usually we try to avoid installing flows from revalidator threads,
         * because locking on a umap may cause handler threads to block.
         * However there are certain cases, like when ovs-vswitchd is
         * restarted, where it is desirable to handle flows that exist in the
         * datapath gracefully (ie, don't just clear the datapath). */
        bool install;

        retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
        if (retval) {
            goto done;
        }
        install = ukey_install_start(udpif, ukey);
        if (install) {
            ukey_install_finish__(ukey);
            retval = 0;
        } else {
            ukey_delete__(ukey);
            retval = EBUSY;
        }
    }

done:
    *error = retval;
    *result = retval ? NULL : ukey;
    return retval;
}
static void
ukey_delete__(struct udpif_key *ukey)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (ukey) {
        for (int i = 0; i < ukey->n_recircs; i++) {
            recirc_free_id(ukey->recircs[i]);
        }
        xlate_cache_delete(ukey->xcache);
        ofpbuf_delete(ukey->actions);
        ovs_mutex_destroy(&ukey->mutex);
        free(ukey);
    }
}

static void
ukey_delete(struct umap *umap, struct udpif_key *ukey)
    OVS_REQUIRES(umap->mutex)
{
    cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
    ovsrcu_postpone(ukey_delete__, ukey);
}
static bool
should_revalidate(const struct udpif *udpif, uint64_t packets,
                  long long int used)
{
    long long int metric, now, duration;

    if (udpif->dump_duration < 200) {
        /* We are likely to handle full revalidation for the flows. */
        return true;
    }

    /* Calculate the mean time between seeing these packets. If this
     * exceeds the threshold, then delete the flow rather than performing
     * costly revalidation for flows that aren't being hit frequently.
     *
     * This is targeted at situations where the dump_duration is high (~1s),
     * and revalidation is triggered by a call to udpif_revalidate(). In
     * these situations, revalidation of all flows causes fluctuations in the
     * flow_limit due to the interaction with the dump_duration and max_idle.
     * This tends to result in deletion of low-throughput flows anyway, so
     * skip the revalidation and just delete those flows. */
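
    /* Worked example: a flow that saw 100 packets over the last 10 seconds
     * has a metric of 100 ms per packet and is kept, while a single packet
     * over the same window yields 10000 ms per packet and the flow is
     * deleted instead of being revalidated. */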
    packets = MAX(packets, 1);
    now = MAX(used, time_msec());
    duration = now - used;
    metric = duration / packets;

    if (metric < 200) {
        /* The flow is receiving more than ~5pps, so keep it. */
        return true;
    }
    return false;
}
static bool
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
                const struct dpif_flow_stats *stats, uint64_t reval_seq)
    OVS_REQUIRES(ukey->mutex)
{
    uint64_t slow_path_buf[128 / 8];
    struct xlate_out xout, *xoutp;
    struct netflow *netflow;
    struct ofproto_dpif *ofproto;
    struct dpif_flow_stats push;
    struct ofpbuf xout_actions;
    struct flow flow, dp_mask;
    uint64_t *dp64, *xout64;
    ofp_port_t ofp_in_port;
    struct xlate_in xin;
    long long int last_used;
    int error;
    size_t i;
    bool ok;
    bool need_revalidate;

    ok = false;
    xoutp = NULL;
    netflow = NULL;

    need_revalidate = (ukey->reval_seq != reval_seq);
    last_used = ukey->stats.used;
    push.used = stats->used;
    push.tcp_flags = stats->tcp_flags;
    push.n_packets = (stats->n_packets > ukey->stats.n_packets
                      ? stats->n_packets - ukey->stats.n_packets
                      : 0);
    push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
                    ? stats->n_bytes - ukey->stats.n_bytes
                    : 0);

    if (need_revalidate && last_used
        && !should_revalidate(udpif, push.n_packets, last_used)) {
        goto exit;
    }

    /* We will push the stats, so update the ukey stats cache. */
    ukey->stats = *stats;
    if (!push.n_packets && !need_revalidate) {
        ok = true;
        goto exit;
    }

    if (ukey->xcache && !need_revalidate) {
        xlate_push_stats(ukey->xcache, &push);
        ok = true;
        goto exit;
    }

    if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
                         &ofp_in_port);
    if (error) {
        goto exit;
    }

    if (need_revalidate) {
        xlate_cache_clear(ukey->xcache);
    }
    if (!ukey->xcache) {
        ukey->xcache = xlate_cache_new();
    }

    xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
                  NULL);
    if (push.n_packets) {
        xin.resubmit_stats = &push;
        xin.may_learn = true;
    }
    xin.xcache = ukey->xcache;
    xin.skip_wildcards = !need_revalidate;
    xlate_actions(&xin, &xout);
    xoutp = &xout;

    if (!need_revalidate) {
        ok = true;
        goto exit;
    }

    if (!xout.slow) {
        ofpbuf_use_const(&xout_actions, xout.odp_actions->data,
                         xout.odp_actions->size);
    } else {
        ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
        compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
                          &xout_actions);
    }

    if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
        goto exit;
    }

    if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, ukey->key,
                             ukey->key_len, &dp_mask, &flow) == ODP_FIT_ERROR) {
        goto exit;
    }

    /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
     * directly check that the masks are the same. Instead we check that the
     * mask in the kernel is more specific, i.e. less wildcarded, than what
     * we've calculated here. This guarantees we don't catch any packets we
     * shouldn't with the megaflow. */
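
    /* For example, a datapath mask byte of 0xff against a computed mask byte
     * of 0x0f passes the check below (the datapath matches more exactly than
     * required), whereas 0x0f against 0xff fails it and the flow is flagged
     * for deletion. */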
    dp64 = (uint64_t *) &dp_mask;
    xout64 = (uint64_t *) &xout.wc.masks;
    for (i = 0; i < FLOW_U64S; i++) {
        if ((dp64[i] | xout64[i]) != dp64[i]) {
            goto exit;
        }
    }

    ok = true;

exit:
    if (ok) {
        ukey->reval_seq = reval_seq;
    }
    if (netflow && !ok) {
        netflow_flow_clear(netflow, &flow);
    }
    xlate_out_uninit(xoutp);
    return ok;
}
static void
delete_op_init__(struct udpif *udpif, struct ukey_op *op,
                 const struct dpif_flow *flow)
{
    op->ukey = NULL;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.u.flow_del.key = flow->key;
    op->dop.u.flow_del.key_len = flow->key_len;
    op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
    op->dop.u.flow_del.pmd_id = flow->pmd_id;
    op->dop.u.flow_del.stats = &op->stats;
    op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
}

static void
delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
{
    op->ukey = ukey;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.u.flow_del.key = ukey->key;
    op->dop.u.flow_del.key_len = ukey->key_len;
    op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
    op->dop.u.flow_del.pmd_id = ukey->pmd_id;
    op->dop.u.flow_del.stats = &op->stats;
    op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
}
static void
push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
    struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
    size_t i;

    ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    for (i = 0; i < n_ops; i++) {
        struct ukey_op *op = &ops[i];
        struct dpif_flow_stats *push, *stats, push_buf;

        stats = op->dop.u.flow_del.stats;
        push = &push_buf;

        if (op->ukey) {
            ovs_mutex_lock(&op->ukey->mutex);
            push->used = MAX(stats->used, op->ukey->stats.used);
            push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
            push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
            push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
            ovs_mutex_unlock(&op->ukey->mutex);
        } else {
            push = stats;
        }

        if (push->n_packets || netflow_exists()) {
            const struct nlattr *key = op->dop.u.flow_del.key;
            size_t key_len = op->dop.u.flow_del.key_len;
            struct ofproto_dpif *ofproto;
            struct netflow *netflow;
            ofp_port_t ofp_in_port;
            struct flow flow;
            int error;

            if (op->ukey) {
                ovs_mutex_lock(&op->ukey->mutex);
                if (op->ukey->xcache) {
                    xlate_push_stats(op->ukey->xcache, push);
                    ovs_mutex_unlock(&op->ukey->mutex);
                    continue;
                }
                ovs_mutex_unlock(&op->ukey->mutex);
                key = op->ukey->key;
                key_len = op->ukey->key_len;
            }

            if (odp_flow_key_to_flow(key, key_len, &flow)
                == ODP_FIT_ERROR) {
                continue;
            }

            error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL,
                                 &netflow, &ofp_in_port);
            if (!error) {
                struct xlate_in xin;

                xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
                              push->tcp_flags, NULL);
                xin.resubmit_stats = push->n_packets ? push : NULL;
                xin.may_learn = push->n_packets > 0;
                xin.skip_wildcards = true;
                xlate_actions_for_side_effects(&xin);

                if (netflow) {
                    netflow_flow_clear(netflow, &flow);
                }
            }
        }
    }
}
static void
push_ukey_ops(struct udpif *udpif, struct umap *umap,
              struct ukey_op *ops, size_t n_ops)
{
    size_t i;

    push_ukey_ops__(udpif, ops, n_ops);
    ovs_mutex_lock(&umap->mutex);
    for (i = 0; i < n_ops; i++) {
        ukey_delete(umap, ops[i].ukey);
    }
    ovs_mutex_unlock(&umap->mutex);
}
static void
log_unexpected_flow(const struct dpif_flow *flow, int error)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
    struct ds ds = DS_EMPTY_INITIALIZER;

    ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
                  "unexpected flow (%s): ", ovs_strerror(error));
    odp_format_ufid(&flow->ufid, &ds);
    VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
revalidate(struct revalidator *revalidator)
{
    struct udpif *udpif = revalidator->udpif;
    struct dpif_flow_dump_thread *dump_thread;
    uint64_t dump_seq, reval_seq;
    unsigned int flow_limit;

    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    dump_thread = dpif_flow_dump_thread_create(udpif->dump);
    for (;;) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        int n_ops = 0;

        struct dpif_flow flows[REVALIDATE_MAX_BATCH];
        const struct dpif_flow *f;
        int n_dumped;

        long long int max_idle;
        long long int now;
        size_t n_dp_flows;
        bool kill_them_all;

        n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
        if (!n_dumped) {
            break;
        }

        now = time_msec();

        /* In normal operation we want to keep flows around until they have
         * been idle for 'ofproto_max_idle' milliseconds. However:
         *
         *     - If the number of datapath flows climbs above 'flow_limit',
         *       drop that down to 100 ms to try to bring the flows down to
         *       the limit.
         *
         *     - If the number of datapath flows climbs above twice
         *       'flow_limit', delete all the datapath flows as an emergency
         *       measure. (We reassess this condition for the next batch of
         *       datapath flows, so we will recover before all the flows are
         *       deleted.) */
        n_dp_flows = udpif_get_n_flows(udpif);
        kill_them_all = n_dp_flows > flow_limit * 2;
        max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
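
        /* For instance, with flow_limit at 10000: 15000 datapath flows shrink
         * max_idle to 100 ms, and anything above 20000 sets 'kill_them_all'
         * so that every flow in this batch is deleted outright. */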
        for (f = flows; f < &flows[n_dumped]; f++) {
            long long int used = f->stats.used;
            struct udpif_key *ukey;
            bool already_dumped, keep;
            int error;

            if (ukey_acquire(udpif, f, &ukey, &error)) {
                if (error == EBUSY) {
                    /* Another thread is processing this flow, so don't bother
                     * processing it. */
                    COVERAGE_INC(upcall_ukey_contention);
                } else {
                    log_unexpected_flow(f, error);
                    if (error != ENOENT) {
                        delete_op_init__(udpif, &ops[n_ops++], f);
                    }
                }
                continue;
            }

            already_dumped = ukey->dump_seq == dump_seq;
            if (already_dumped) {
                /* The flow has already been handled during this flow dump
                 * operation. Skip it. */
                if (ukey->xcache) {
                    COVERAGE_INC(dumped_duplicate_flow);
                } else {
                    COVERAGE_INC(dumped_new_flow);
                }
                ovs_mutex_unlock(&ukey->mutex);
                continue;
            }

            if (!used) {
                used = ukey->created;
            }
            if (kill_them_all || (used && used < now - max_idle)) {
                keep = false;
            } else {
                keep = revalidate_ukey(udpif, ukey, &f->stats, reval_seq);
            }
            ukey->dump_seq = dump_seq;
            ukey->flow_exists = keep;

            if (!keep) {
                delete_op_init(udpif, &ops[n_ops++], ukey);
            }
            ovs_mutex_unlock(&ukey->mutex);
        }

        if (n_ops) {
            push_ukey_ops__(udpif, ops, n_ops);
        }
        ovsrcu_quiesce();
    }
    dpif_flow_dump_thread_destroy(dump_thread);
}
static bool
handle_missed_revalidation(struct udpif *udpif, uint64_t reval_seq,
                           struct udpif_key *ukey)
{
    struct dpif_flow_stats stats;
    bool keep;

    COVERAGE_INC(revalidate_missed_dp_flow);

    memset(&stats, 0, sizeof stats);
    ovs_mutex_lock(&ukey->mutex);
    keep = revalidate_ukey(udpif, ukey, &stats, reval_seq);
    ovs_mutex_unlock(&ukey->mutex);

    return keep;
}
static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
{
    struct udpif *udpif;
    uint64_t dump_seq, reval_seq;
    int slice;

    udpif = revalidator->udpif;
    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    slice = revalidator - udpif->revalidators;
    ovs_assert(slice < udpif->n_revalidators);

    for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        struct udpif_key *ukey;
        struct umap *umap = &udpif->ukeys[i];
        size_t n_ops = 0;

        CMAP_FOR_EACH (ukey, cmap_node, &umap->cmap) {
            bool flow_exists, seq_mismatch;

            /* Handler threads could be holding a ukey lock while they install
             * a new flow, so don't hang around waiting for access to it. */
            if (ovs_mutex_trylock(&ukey->mutex)) {
                continue;
            }
            flow_exists = ukey->flow_exists;
            seq_mismatch = (ukey->dump_seq != dump_seq
                            && ukey->reval_seq != reval_seq);
            ovs_mutex_unlock(&ukey->mutex);

            if (purge
                || (seq_mismatch && flow_exists
                    && !handle_missed_revalidation(udpif, reval_seq, ukey))) {
                struct ukey_op *op = &ops[n_ops++];

                delete_op_init(udpif, op, ukey);
                if (n_ops == REVALIDATE_MAX_BATCH) {
                    push_ukey_ops(udpif, umap, ops, n_ops);
                    n_ops = 0;
                }
            } else if (!flow_exists) {
                ovs_mutex_lock(&umap->mutex);
                ukey_delete(umap, ukey);
                ovs_mutex_unlock(&umap->mutex);
            }
        }

        if (n_ops) {
            push_ukey_ops(udpif, umap, ops, n_ops);
        }
        ovsrcu_quiesce();
    }
}
static void
revalidator_sweep(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, false);
}

static void
revalidator_purge(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, true);
}
static void
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        unsigned int flow_limit;
        bool ufid_enabled;
        size_t i;

        atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
        ufid_enabled = udpif_use_ufid(udpif);

        ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
        ds_put_format(&ds, "\tflows : (current %lu)"
                      " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
                      udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
        ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
        ds_put_format(&ds, "\tufid enabled : ");
        if (ufid_enabled) {
            ds_put_format(&ds, "true\n");
        } else {
            ds_put_format(&ds, "false\n");
        }
        ds_put_char(&ds, '\n');

        for (i = 0; i < n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];
            int j, elements = 0;

            for (j = i; j < N_UMAPS; j += n_revalidators) {
                elements += cmap_count(&udpif->ukeys[j].cmap);
            }
            ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
/* Disable using the megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
                                 int argc OVS_UNUSED,
                                 const char *argv[] OVS_UNUSED,
                                 void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, false);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows disabled");
}

/* Re-enable using megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED,
                                const char *argv[] OVS_UNUSED,
                                void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, true);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows enabled");
}
/* Disable skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                            const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_ufid, false);
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
}

/* Re-enable skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not documented
 * in the man page. */
static void
upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                           const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_ufid, true);
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
                          "for supported datapaths");
}
/* Set the flow limit.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
                              int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;
    unsigned int flow_limit = atoi(argv[1]);

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store_relaxed(&udpif->flow_limit, flow_limit);
    }
    ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
static void
upcall_unixctl_dump_wait(struct unixctl_conn *conn,
                         int argc OVS_UNUSED,
                         const char *argv[] OVS_UNUSED,
                         void *aux OVS_UNUSED)
{
    if (list_is_singleton(&all_udpifs)) {
        struct udpif *udpif = NULL;
        size_t len;

        udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
        len = (udpif->n_conns + 1) * sizeof *udpif->conns;
        udpif->conn_seq = seq_read(udpif->dump_seq);
        udpif->conns = xrealloc(udpif->conns, len);
        udpif->conns[udpif->n_conns++] = conn;
    } else {
        unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
    }
}
static void
upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        int n;

        for (n = 0; n < udpif->n_revalidators; n++) {
            revalidator_purge(&udpif->revalidators[n]);
        }
    }
    unixctl_command_reply(conn, "");
}