1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
16 #include "ofproto-dpif-upcall.h"
26 #include "dynamic-string.h"
27 #include "fail-open.h"
28 #include "guarded-list.h"
33 #include "ofproto-dpif-ipfix.h"
34 #include "ofproto-dpif-sflow.h"
35 #include "ofproto-dpif-xlate.h"
38 #include "poll-loop.h"
41 #include "openvswitch/vlog.h"
43 #define MAX_QUEUE_LENGTH 512
44 #define UPCALL_MAX_BATCH 64
45 #define REVALIDATE_MAX_BATCH 50
47 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
49 COVERAGE_DEFINE(dumped_duplicate_flow);
50 COVERAGE_DEFINE(dumped_new_flow);
51 COVERAGE_DEFINE(handler_duplicate_upcall);
52 COVERAGE_DEFINE(upcall_ukey_contention);
53 COVERAGE_DEFINE(revalidate_missed_dp_flow);
55 /* A thread that reads upcalls from dpif, forwards each upcall's packet,
56 * and possibly sets up a kernel flow as a cache. */
58 struct udpif *udpif; /* Parent udpif. */
59 pthread_t thread; /* Thread ID. */
60 uint32_t handler_id; /* Handler id. */
63 /* In the absence of a multiple-writer multiple-reader data structure for
64  * storing ukeys, we use a large number of cmaps, each with its own lock for writing. */
66 #define N_UMAPS 512 /* per udpif. */
68 struct ovs_mutex mutex; /* Take for writing to the following. */
69 struct cmap cmap; /* Datapath flow keys. */
72 /* A thread that processes datapath flows, updates OpenFlow statistics, and
73 * updates or removes them if necessary. */
75 struct udpif *udpif; /* Parent udpif. */
76 pthread_t thread; /* Thread ID. */
77 unsigned int id; /* ovsthread_id_self(). */
80 /* An upcall handler for ofproto_dpif.
82  * udpif keeps records of two kinds of logically separate units:
87  *    - An array of 'struct handler's for upcall handling and flow installation.
93  *    - Revalidation threads which read the datapath flow table and maintain them. */
97 struct ovs_list list_node; /* In all_udpifs list. */
99 struct dpif *dpif; /* Datapath handle. */
100 struct dpif_backer *backer; /* Opaque dpif_backer pointer. */
102 struct handler *handlers; /* Upcall handlers. */
105 struct revalidator *revalidators; /* Flow revalidators. */
106 size_t n_revalidators;
108 struct latch exit_latch; /* Tells child threads to exit. */
111 struct seq *reval_seq; /* Incremented to force revalidation. */
112     bool reval_exit;                   /* Set by leader on 'exit_latch'. */
113 struct ovs_barrier reval_barrier; /* Barrier used by revalidators. */
114 struct dpif_flow_dump *dump; /* DPIF flow dump state. */
115 long long int dump_duration; /* Duration of the last flow dump. */
116 struct seq *dump_seq; /* Increments each dump iteration. */
117 atomic_bool enable_ufid; /* If true, skip dumping flow attrs. */
119 /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
121 * During the flow dump phase, revalidators insert into these with a random
122 * distribution. During the garbage collection phase, each revalidator
123 * takes care of garbage collecting a slice of these maps. */
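/* For example, with N_UMAPS at 512 and 8 revalidator threads, revalidator 3
 * would sweep umaps 3, 11, 19, ..., 507 during garbage collection; see
 * revalidator_sweep__() below for the slice iteration. */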
126 /* Datapath flow statistics. */
127 unsigned int max_n_flows;
128 unsigned int avg_n_flows;
130 /* Following fields are accessed and modified by different threads. */
131 atomic_uint flow_limit; /* Datapath flow hard limit. */
133 /* n_flows_mutex prevents multiple threads updating these concurrently. */
134 atomic_uint n_flows; /* Number of flows in the datapath. */
135 atomic_llong n_flows_timestamp; /* Last time n_flows was updated. */
136 struct ovs_mutex n_flows_mutex;
138 /* Following fields are accessed and modified only from the main thread. */
139 struct unixctl_conn **conns; /* Connections waiting on dump_seq. */
140 uint64_t conn_seq; /* Corresponds to 'dump_seq' when
141 conns[n_conns-1] was stored. */
142 size_t n_conns; /* Number of connections waiting. */
146 BAD_UPCALL, /* Some kind of bug somewhere. */
147 MISS_UPCALL, /* A flow miss. */
148 SFLOW_UPCALL, /* sFlow sample. */
149 FLOW_SAMPLE_UPCALL, /* Per-flow sampling. */
150 IPFIX_UPCALL /* Per-bridge sampling. */
154 struct ofproto_dpif *ofproto; /* Parent ofproto. */
156 /* The flow and packet are only required to be constant when using
157 * dpif-netdev. If a modification is absolutely necessary, a const cast
158 * may be used with other datapaths. */
159 const struct flow *flow; /* Parsed representation of the packet. */
160 const ovs_u128 *ufid; /* Unique identifier for 'flow'. */
161 int pmd_id; /* Datapath poll mode driver id. */
162 const struct ofpbuf *packet; /* Packet associated with this upcall. */
163 ofp_port_t in_port; /* OpenFlow in port, or OFPP_NONE. */
165 enum dpif_upcall_type type; /* Datapath type of the upcall. */
166 const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION Upcalls. */
168     bool xout_initialized;         /* True if 'xout' was initialized and must
                                          be uninitialized later. */
169 struct xlate_out xout; /* Result of xlate_actions(). */
170     struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */
172 struct dpif_ipfix *ipfix; /* IPFIX pointer or NULL. */
173 struct dpif_sflow *sflow; /* SFlow pointer or NULL. */
175 bool vsp_adjusted; /* 'packet' and 'flow' were adjusted for
176 VLAN splinters if true. */
178 struct udpif_key *ukey; /* Revalidator flow cache. */
179 bool ukey_persists; /* Set true to keep 'ukey' beyond the
180 lifetime of this upcall. */
182 uint64_t dump_seq; /* udpif->dump_seq at translation time. */
183 uint64_t reval_seq; /* udpif->reval_seq at translation time. */
185 /* Not used by the upcall callback interface. */
186 const struct nlattr *key; /* Datapath flow key. */
187 size_t key_len; /* Datapath flow key length. */
188 const struct nlattr *out_tun_key; /* Datapath output tunnel key. */
191 /* 'udpif_key's are responsible for tracking the little bit of state udpif
192 * needs to do flow expiration which can't be pulled directly from the
193 * datapath. They may be created by any handler or revalidator thread at any
194  * time, and read by any revalidator during the dump phase. They are, however,
195 * each owned by a single revalidator which takes care of destroying them
196 * during the garbage-collection phase.
198 * The mutex within the ukey protects some members of the ukey. The ukey
199 * itself is protected by RCU and is held within a umap in the parent udpif.
200 * Adding or removing a ukey from a umap is only safe when holding the
201 * corresponding umap lock. */
203 struct cmap_node cmap_node; /* In parent revalidator 'ukeys' map. */
205 /* These elements are read only once created, and therefore aren't
206 * protected by a mutex. */
207 const struct nlattr *key; /* Datapath flow key. */
208 size_t key_len; /* Length of 'key'. */
209 const struct nlattr *mask; /* Datapath flow mask. */
210 size_t mask_len; /* Length of 'mask'. */
211 struct ofpbuf *actions; /* Datapath flow actions as nlattrs. */
212 ovs_u128 ufid; /* Unique flow identifier. */
213 bool ufid_present; /* True if 'ufid' is in datapath. */
214 uint32_t hash; /* Pre-computed hash for 'key'. */
215 int pmd_id; /* Datapath poll mode driver id. */
217 struct ovs_mutex mutex; /* Guards the following. */
218 struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats.*/
219 long long int created OVS_GUARDED; /* Estimate of creation time. */
220 uint64_t dump_seq OVS_GUARDED; /* Tracks udpif->dump_seq. */
221 uint64_t reval_seq OVS_GUARDED; /* Tracks udpif->reval_seq. */
222     bool flow_exists OVS_GUARDED;        /* Ensures flows are only deleted
                                                once. */
225 struct xlate_cache *xcache OVS_GUARDED; /* Cache for xlate entries that
226 * are affected by this ukey.
227 * Used for stats and learning.*/
229 struct odputil_keybuf buf;
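/* The insert/remove protocol described above boils down to the following
 * sketch, assuming a ukey 'ukey' whose hash has already been computed:
 *
 *     struct umap *umap = &udpif->ukeys[ukey->hash % N_UMAPS];
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     cmap_insert(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovs_mutex_unlock(&umap->mutex);
 *
 * Removal mirrors this with cmap_remove(), after which the ukey itself is
 * freed via ovsrcu_postpone() so that concurrent RCU readers can drain; see
 * ukey_install_start() and ukey_delete() below. */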
234 /* Datapath operation with optional ukey attached. */
236 struct udpif_key *ukey;
237 struct dpif_flow_stats stats; /* Stats for 'op'. */
238 struct dpif_op dop; /* Flow operation. */
241 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
242 static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);
244 static size_t recv_upcalls(struct handler *);
245 static int process_upcall(struct udpif *, struct upcall *,
246 struct ofpbuf *odp_actions);
247 static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
248 static void udpif_stop_threads(struct udpif *);
249 static void udpif_start_threads(struct udpif *, size_t n_handlers,
250 size_t n_revalidators);
251 static void *udpif_upcall_handler(void *);
252 static void *udpif_revalidator(void *);
253 static unsigned long udpif_get_n_flows(struct udpif *);
254 static void revalidate(struct revalidator *);
255 static void revalidator_sweep(struct revalidator *);
256 static void revalidator_purge(struct revalidator *);
257 static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
258 const char *argv[], void *aux);
259 static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
260 const char *argv[], void *aux);
261 static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
262 const char *argv[], void *aux);
263 static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
264 const char *argv[], void *aux);
265 static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
266 const char *argv[], void *aux);
267 static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
268 const char *argv[], void *aux);
269 static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
270 const char *argv[], void *aux);
271 static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
272 const char *argv[], void *aux);
274 static struct udpif_key *ukey_create_from_upcall(const struct upcall *);
275 static int ukey_create_from_dpif_flow(const struct udpif *,
276 const struct dpif_flow *,
277 struct udpif_key **);
278 static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
279 static bool ukey_install_finish(struct udpif_key *ukey, int error);
280 static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
281 static struct udpif_key *ukey_lookup(struct udpif *udpif,
282 const ovs_u128 *ufid);
283 static int ukey_acquire(struct udpif *, const struct dpif_flow *,
284 struct udpif_key **result, int *error);
285 static void ukey_delete__(struct udpif_key *);
286 static void ukey_delete(struct umap *, struct udpif_key *);
287 static enum upcall_type classify_upcall(enum dpif_upcall_type type,
288 const struct nlattr *userdata);
290 static int upcall_receive(struct upcall *, const struct dpif_backer *,
291 const struct ofpbuf *packet, enum dpif_upcall_type,
292 const struct nlattr *userdata, const struct flow *,
293 const ovs_u128 *ufid, const int pmd_id);
294 static void upcall_uninit(struct upcall *);
296 static upcall_callback upcall_cb;
298 static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
299 static atomic_bool enable_ufid = ATOMIC_VAR_INIT(true);
302 udpif_create(struct dpif_backer *backer, struct dpif *dpif)
304 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
305 struct udpif *udpif = xzalloc(sizeof *udpif);
307 if (ovsthread_once_start(&once)) {
308 unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
310 unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
311 upcall_unixctl_disable_megaflows, NULL);
312 unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
313 upcall_unixctl_enable_megaflows, NULL);
314 unixctl_command_register("upcall/disable-ufid", "", 0, 0,
315 upcall_unixctl_disable_ufid, NULL);
316 unixctl_command_register("upcall/enable-ufid", "", 0, 0,
317 upcall_unixctl_enable_ufid, NULL);
318 unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
319 upcall_unixctl_set_flow_limit, NULL);
320 unixctl_command_register("revalidator/wait", "", 0, 0,
321 upcall_unixctl_dump_wait, NULL);
322 unixctl_command_register("revalidator/purge", "", 0, 0,
323 upcall_unixctl_purge, NULL);
324 ovsthread_once_done(&once);
328 udpif->backer = backer;
329 atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
330 udpif->reval_seq = seq_create();
331 udpif->dump_seq = seq_create();
332 latch_init(&udpif->exit_latch);
333 list_push_back(&all_udpifs, &udpif->list_node);
334 atomic_init(&udpif->enable_ufid, false);
335 atomic_init(&udpif->n_flows, 0);
336 atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
337 ovs_mutex_init(&udpif->n_flows_mutex);
338 udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
339 for (int i = 0; i < N_UMAPS; i++) {
340 cmap_init(&udpif->ukeys[i].cmap);
341 ovs_mutex_init(&udpif->ukeys[i].mutex);
344 dpif_register_upcall_cb(dpif, upcall_cb, udpif);
350 udpif_run(struct udpif *udpif)
352 if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
355 for (i = 0; i < udpif->n_conns; i++) {
356 unixctl_command_reply(udpif->conns[i], NULL);
365 udpif_destroy(struct udpif *udpif)
367 udpif_stop_threads(udpif);
369 for (int i = 0; i < N_UMAPS; i++) {
370 cmap_destroy(&udpif->ukeys[i].cmap);
371 ovs_mutex_destroy(&udpif->ukeys[i].mutex);
376 list_remove(&udpif->list_node);
377 latch_destroy(&udpif->exit_latch);
378 seq_destroy(udpif->reval_seq);
379 seq_destroy(udpif->dump_seq);
380 ovs_mutex_destroy(&udpif->n_flows_mutex);
384 /* Stops the handler and revalidator threads. Must be called within an
385  * ovsrcu quiescent state, except when destroying udpif. */
387 udpif_stop_threads(struct udpif *udpif)
389 if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
392 latch_set(&udpif->exit_latch);
394 for (i = 0; i < udpif->n_handlers; i++) {
395 struct handler *handler = &udpif->handlers[i];
397 xpthread_join(handler->thread, NULL);
400 for (i = 0; i < udpif->n_revalidators; i++) {
401 xpthread_join(udpif->revalidators[i].thread, NULL);
404 dpif_disable_upcall(udpif->dpif);
406 for (i = 0; i < udpif->n_revalidators; i++) {
407 struct revalidator *revalidator = &udpif->revalidators[i];
409 /* Delete ukeys, and delete all flows from the datapath to prevent
410 * double-counting stats. */
411 revalidator_purge(revalidator);
414 latch_poll(&udpif->exit_latch);
416 ovs_barrier_destroy(&udpif->reval_barrier);
418 free(udpif->revalidators);
419 udpif->revalidators = NULL;
420 udpif->n_revalidators = 0;
422 free(udpif->handlers);
423 udpif->handlers = NULL;
424 udpif->n_handlers = 0;
428 /* Starts the handler and revalidator threads. Must be called within an
429  * ovsrcu quiescent state. */
431 udpif_start_threads(struct udpif *udpif, size_t n_handlers,
432 size_t n_revalidators)
434 if (udpif && n_handlers && n_revalidators) {
438 udpif->n_handlers = n_handlers;
439 udpif->n_revalidators = n_revalidators;
441 udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
442 for (i = 0; i < udpif->n_handlers; i++) {
443 struct handler *handler = &udpif->handlers[i];
445 handler->udpif = udpif;
446 handler->handler_id = i;
447 handler->thread = ovs_thread_create(
448 "handler", udpif_upcall_handler, handler);
451 enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
452 atomic_init(&udpif->enable_ufid, enable_ufid);
453 dpif_enable_upcall(udpif->dpif);
455 ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
456 udpif->reval_exit = false;
457 udpif->revalidators = xzalloc(udpif->n_revalidators
458 * sizeof *udpif->revalidators);
459 for (i = 0; i < udpif->n_revalidators; i++) {
460 struct revalidator *revalidator = &udpif->revalidators[i];
462 revalidator->udpif = udpif;
463 revalidator->thread = ovs_thread_create(
464 "revalidator", udpif_revalidator, revalidator);
469 /* Tells 'udpif' how many threads it should use to handle upcalls.
470 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
471  * datapath handle must have packet reception enabled before starting threads. */
474 udpif_set_threads(struct udpif *udpif, size_t n_handlers,
475 size_t n_revalidators)
478 ovs_assert(n_handlers && n_revalidators);
480 ovsrcu_quiesce_start();
481 if (udpif->n_handlers != n_handlers
482 || udpif->n_revalidators != n_revalidators) {
483 udpif_stop_threads(udpif);
486 if (!udpif->handlers && !udpif->revalidators) {
489 error = dpif_handlers_set(udpif->dpif, n_handlers);
491 VLOG_ERR("failed to configure handlers in dpif %s: %s",
492 dpif_name(udpif->dpif), ovs_strerror(error));
496 udpif_start_threads(udpif, n_handlers, n_revalidators);
498 ovsrcu_quiesce_end();
501 /* Waits for all ongoing upcall translations to complete. This ensures that
502 * there are no transient references to any removed ofprotos (or other
503 * objects). In particular, this should be called after an ofproto is removed
504 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
506 udpif_synchronize(struct udpif *udpif)
508 /* This is stronger than necessary. It would be sufficient to ensure
509 * (somehow) that each handler and revalidator thread had passed through
510 * its main loop once. */
511 size_t n_handlers = udpif->n_handlers;
512 size_t n_revalidators = udpif->n_revalidators;
514 ovsrcu_quiesce_start();
515 udpif_stop_threads(udpif);
516 udpif_start_threads(udpif, n_handlers, n_revalidators);
517 ovsrcu_quiesce_end();
520 /* Notifies 'udpif' that something changed which may render previous
521 * xlate_actions() results invalid. */
523 udpif_revalidate(struct udpif *udpif)
525 seq_change(udpif->reval_seq);
528 /* Returns a seq which increments every time 'udpif' pulls stats from the
529  * datapath. Callers can use this to get a sense of when it might be a good
530  * time to do periodic work that relies on relatively up-to-date statistics. */
532 udpif_dump_seq(struct udpif *udpif)
534 return udpif->dump_seq;
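/* A caller might, for instance, cache the last value it saw and redo its
 * statistics-dependent work only when the seq has advanced (a sketch;
 * 'last_seen' and 'refresh_stats_work()' are hypothetical caller-side names):
 *
 *     uint64_t cur = seq_read(udpif_dump_seq(udpif));
 *     if (cur != last_seen) {
 *         last_seen = cur;
 *         refresh_stats_work();
 *     }
 */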
538 udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
542 simap_increase(usage, "handlers", udpif->n_handlers);
544 simap_increase(usage, "revalidators", udpif->n_revalidators);
545 for (i = 0; i < N_UMAPS; i++) {
546 simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
550 /* Removes flows from a single datapath. */
552 udpif_flush(struct udpif *udpif)
554 size_t n_handlers, n_revalidators;
556 n_handlers = udpif->n_handlers;
557 n_revalidators = udpif->n_revalidators;
559 ovsrcu_quiesce_start();
561 udpif_stop_threads(udpif);
562 dpif_flow_flush(udpif->dpif);
563 udpif_start_threads(udpif, n_handlers, n_revalidators);
565 ovsrcu_quiesce_end();
568 /* Removes all flows from all datapaths. */
570 udpif_flush_all_datapaths(void)
574 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
580 udpif_use_ufid(struct udpif *udpif)
584 atomic_read_relaxed(&enable_ufid, &enable);
585 return enable && ofproto_dpif_get_enable_ufid(udpif->backer);
590 udpif_get_n_flows(struct udpif *udpif)
592 long long int time, now;
593 unsigned long flow_count;
596 atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
597 if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
598 struct dpif_dp_stats stats;
600 atomic_store_relaxed(&udpif->n_flows_timestamp, now);
601 dpif_get_dp_stats(udpif->dpif, &stats);
602 flow_count = stats.n_flows;
603 atomic_store_relaxed(&udpif->n_flows, flow_count);
604 ovs_mutex_unlock(&udpif->n_flows_mutex);
606 atomic_read_relaxed(&udpif->n_flows, &flow_count);
611 /* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
612  * upcalls from dpif, processes the batch, and installs the corresponding
     * flows in dpif. */
615 udpif_upcall_handler(void *arg)
617 struct handler *handler = arg;
618 struct udpif *udpif = handler->udpif;
620 while (!latch_is_set(&handler->udpif->exit_latch)) {
621 if (recv_upcalls(handler)) {
622 poll_immediate_wake();
624 dpif_recv_wait(udpif->dpif, handler->handler_id);
625 latch_wait(&udpif->exit_latch);
634 recv_upcalls(struct handler *handler)
636 struct udpif *udpif = handler->udpif;
637 uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
638 struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
639 struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
640 struct upcall upcalls[UPCALL_MAX_BATCH];
641 struct flow flows[UPCALL_MAX_BATCH];
645 while (n_upcalls < UPCALL_MAX_BATCH) {
646 struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
647 struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
648 struct upcall *upcall = &upcalls[n_upcalls];
649 struct flow *flow = &flows[n_upcalls];
650 struct pkt_metadata md;
653 ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
654 sizeof recv_stubs[n_upcalls]);
655 if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
656 ofpbuf_uninit(recv_buf);
660 if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
665 error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
666 dupcall->type, dupcall->userdata, flow,
667 &dupcall->ufid, PMD_ID_NULL);
669 if (error == ENODEV) {
670 /* Received packet on datapath port for which we couldn't
671 * associate an ofproto. This can happen if a port is removed
672 * while traffic is being received. Print a rate-limited
673 * message in case it happens frequently. */
674 dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
675 dupcall->key_len, NULL, 0, NULL, 0,
676 &dupcall->ufid, PMD_ID_NULL, NULL);
677 VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
678 "port %"PRIu32, flow->in_port.odp_port);
683 upcall->key = dupcall->key;
684 upcall->key_len = dupcall->key_len;
685 upcall->ufid = &dupcall->ufid;
687 upcall->out_tun_key = dupcall->out_tun_key;
689 if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
690 upcall->vsp_adjusted = true;
693 md = pkt_metadata_from_flow(flow);
694 flow_extract(&dupcall->packet, &md, flow);
696 error = process_upcall(udpif, upcall, NULL);
705 upcall_uninit(upcall);
707 ofpbuf_uninit(&dupcall->packet);
708 ofpbuf_uninit(recv_buf);
712 handle_upcalls(handler->udpif, upcalls, n_upcalls);
713 for (i = 0; i < n_upcalls; i++) {
714 ofpbuf_uninit(&dupcalls[i].packet);
715 ofpbuf_uninit(&recv_bufs[i]);
716 upcall_uninit(&upcalls[i]);
724 udpif_revalidator(void *arg)
726 /* Used by all revalidators. */
727 struct revalidator *revalidator = arg;
728 struct udpif *udpif = revalidator->udpif;
729 bool leader = revalidator == &udpif->revalidators[0];
731 /* Used only by the leader. */
732 long long int start_time = 0;
733 uint64_t last_reval_seq = 0;
736 revalidator->id = ovsthread_id_self();
741 reval_seq = seq_read(udpif->reval_seq);
742 last_reval_seq = reval_seq;
744 n_flows = udpif_get_n_flows(udpif);
745 udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
746 udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
748 /* Only the leader checks the exit latch to prevent a race where
749 * some threads think it's true and exit and others think it's
750 * false and block indefinitely on the reval_barrier */
751 udpif->reval_exit = latch_is_set(&udpif->exit_latch);
753 start_time = time_msec();
754 if (!udpif->reval_exit) {
757 terse_dump = udpif_use_ufid(udpif);
758 udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
762 /* Wait for the leader to start the flow dump. */
763 ovs_barrier_block(&udpif->reval_barrier);
764 if (udpif->reval_exit) {
767 revalidate(revalidator);
769 /* Wait for all flows to have been dumped before we garbage collect. */
770 ovs_barrier_block(&udpif->reval_barrier);
771 revalidator_sweep(revalidator);
773 /* Wait for all revalidators to finish garbage collection. */
774 ovs_barrier_block(&udpif->reval_barrier);
777 unsigned int flow_limit;
778 long long int duration;
780 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
782 dpif_flow_dump_destroy(udpif->dump);
783 seq_change(udpif->dump_seq);
785 duration = MAX(time_msec() - start_time, 1);
786 udpif->dump_duration = duration;
787 if (duration > 2000) {
788 flow_limit /= duration / 1000;
789 } else if (duration > 1300) {
790 flow_limit = flow_limit * 3 / 4;
791 } else if (duration < 1000 && n_flows > 2000
792 && flow_limit < n_flows * 1000 / duration) {
795 flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
796 atomic_store_relaxed(&udpif->flow_limit, flow_limit);
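        /* Concretely, with ofproto_flow_limit at 10,000: a 3-second dump cuts
         * the limit to a third, a 1.5-second dump trims it by a quarter, and a
         * sub-second dump over a busy datapath lets the limit grow back, with
         * the result always clamped between 1,000 and ofproto_flow_limit. */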
798 if (duration > 2000) {
799 VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
803 poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
804 seq_wait(udpif->reval_seq, last_reval_seq);
805 latch_wait(&udpif->exit_latch);
813 static enum upcall_type
814 classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
816 union user_action_cookie cookie;
819 /* First look at the upcall type. */
827 case DPIF_N_UC_TYPES:
829 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
833 /* "action" upcalls need a closer look. */
835 VLOG_WARN_RL(&rl, "action upcall missing cookie");
838 userdata_len = nl_attr_get_size(userdata);
839 if (userdata_len < sizeof cookie.type
840 || userdata_len > sizeof cookie) {
841 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
845 memset(&cookie, 0, sizeof cookie);
846 memcpy(&cookie, nl_attr_get(userdata), userdata_len);
847 if (userdata_len == MAX(8, sizeof cookie.sflow)
848 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
850 } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
851 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
853 } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
854 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
855 return FLOW_SAMPLE_UPCALL;
856 } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
857 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
860 VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
861 " and size %"PRIuSIZE, cookie.type, userdata_len);
866 /* Calculates slow path actions for 'xout'. 'buf' must be statically
867  * initialized with at least 128 bytes of space. */
869 compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
870 const struct flow *flow, odp_port_t odp_in_port,
873 union user_action_cookie cookie;
877 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
878 cookie.slow_path.unused = 0;
879 cookie.slow_path.reason = xout->slow;
881 port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
884 pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
885 odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, ODPP_NONE,
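/* A typical caller meets the 128-byte requirement with a small stack buffer,
 * along the lines of the use in revalidate_ukey() below:
 *
 *     uint64_t slow_path_buf[128 / 8];
 *     struct ofpbuf buf;
 *
 *     ofpbuf_use_stack(&buf, slow_path_buf, sizeof slow_path_buf);
 *     compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port, &buf);
 */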
889 /* If there is no error, the upcall must be destroyed with upcall_uninit()
890  * before quiescing, as the referenced objects are guaranteed to exist only
891  * until the calling thread quiesces. Otherwise, do not call upcall_uninit(),
892  * since 'upcall->put_actions' remains uninitialized. */
894 upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
895 const struct ofpbuf *packet, enum dpif_upcall_type type,
896 const struct nlattr *userdata, const struct flow *flow,
897 const ovs_u128 *ufid, const int pmd_id)
901 error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
902 &upcall->sflow, NULL, &upcall->in_port);
908 upcall->packet = packet;
910 upcall->pmd_id = pmd_id;
912 upcall->userdata = userdata;
913 ofpbuf_init(&upcall->put_actions, 0);
915 upcall->xout_initialized = false;
916 upcall->vsp_adjusted = false;
917 upcall->ukey_persists = false;
923 upcall->out_tun_key = NULL;
929 upcall_xlate(struct udpif *udpif, struct upcall *upcall,
930 struct ofpbuf *odp_actions)
932 struct dpif_flow_stats stats;
936 stats.n_bytes = ofpbuf_size(upcall->packet);
937 stats.used = time_msec();
938 stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
940 xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
941 stats.tcp_flags, upcall->packet);
942 xin.odp_actions = odp_actions;
944 if (upcall->type == DPIF_UC_MISS) {
945 xin.resubmit_stats = &stats;
947 /* For non-miss upcalls, there's a flow in the datapath which this
948 * packet was accounted to. Presumably the revalidators will deal
949 * with pushing its stats eventually. */
952 upcall->dump_seq = seq_read(udpif->dump_seq);
953 upcall->reval_seq = seq_read(udpif->reval_seq);
954 xlate_actions(&xin, &upcall->xout);
955 upcall->xout_initialized = true;
957 /* Special case for fail-open mode.
959 * If we are in fail-open mode, but we are connected to a controller too,
960 * then we should send the packet up to the controller in the hope that it
961 * will try to set up a flow and thereby allow us to exit fail-open.
963 * See the top-level comment in fail-open.c for more information.
965 * Copy packets before they are modified by execution. */
966 if (upcall->xout.fail_open) {
967 const struct ofpbuf *packet = upcall->packet;
968 struct ofproto_packet_in *pin;
970 pin = xmalloc(sizeof *pin);
971 pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet));
972 pin->up.packet_len = ofpbuf_size(packet);
973 pin->up.reason = OFPR_NO_MATCH;
974 pin->up.table_id = 0;
975 pin->up.cookie = OVS_BE64_MAX;
976 flow_get_metadata(upcall->flow, &pin->up.fmd);
977 pin->send_len = 0; /* Not used for flow table misses. */
978 pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
979 ofproto_dpif_send_packet_in(upcall->ofproto, pin);
982 if (!upcall->xout.slow) {
983 ofpbuf_use_const(&upcall->put_actions,
984 ofpbuf_data(upcall->xout.odp_actions),
985 ofpbuf_size(upcall->xout.odp_actions));
987 ofpbuf_init(&upcall->put_actions, 0);
988 compose_slow_path(udpif, &upcall->xout, upcall->flow,
989 upcall->flow->in_port.odp_port,
990 &upcall->put_actions);
993 upcall->ukey = ukey_create_from_upcall(upcall);
997 upcall_uninit(struct upcall *upcall)
1000 if (upcall->xout_initialized) {
1001 xlate_out_uninit(&upcall->xout);
1003 ofpbuf_uninit(&upcall->put_actions);
1004 if (!upcall->ukey_persists) {
1005 ukey_delete__(upcall->ukey);
1011 upcall_cb(const struct ofpbuf *packet, const struct flow *flow, ovs_u128 *ufid,
1012 int pmd_id, enum dpif_upcall_type type,
1013 const struct nlattr *userdata, struct ofpbuf *actions,
1014 struct flow_wildcards *wc, struct ofpbuf *put_actions, void *aux)
1016 struct udpif *udpif = aux;
1017 unsigned int flow_limit;
1018 struct upcall upcall;
1022 atomic_read_relaxed(&enable_megaflows, &megaflow);
1023 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1025 error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
1026 flow, ufid, pmd_id);
1031 error = process_upcall(udpif, &upcall, actions);
1036 if (upcall.xout.slow && put_actions) {
1037 ofpbuf_put(put_actions, ofpbuf_data(&upcall.put_actions),
1038 ofpbuf_size(&upcall.put_actions));
1041 if (OVS_LIKELY(wc)) {
1043 /* XXX: This could be avoided with sufficient API changes. */
1044 *wc = upcall.xout.wc;
1046 flow_wildcards_init_for_packet(wc, flow);
1050 if (udpif_get_n_flows(udpif) >= flow_limit) {
1055 if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
1061 upcall.ukey_persists = true;
1063 upcall_uninit(&upcall);
1068 process_upcall(struct udpif *udpif, struct upcall *upcall,
1069 struct ofpbuf *odp_actions)
1071 const struct nlattr *userdata = upcall->userdata;
1072 const struct ofpbuf *packet = upcall->packet;
1073 const struct flow *flow = upcall->flow;
1075 switch (classify_upcall(upcall->type, userdata)) {
1077 upcall_xlate(udpif, upcall, odp_actions);
1081 if (upcall->sflow) {
1082 union user_action_cookie cookie;
1084 memset(&cookie, 0, sizeof cookie);
1085 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
1086 dpif_sflow_received(upcall->sflow, packet, flow,
1087 flow->in_port.odp_port, &cookie);
1092 if (upcall->ipfix) {
1093 union user_action_cookie cookie;
1094 struct flow_tnl output_tunnel_key;
1096 memset(&cookie, 0, sizeof cookie);
1097 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);
1099 if (upcall->out_tun_key) {
1100 memset(&output_tunnel_key, 0, sizeof output_tunnel_key);
1101 odp_tun_key_from_attr(upcall->out_tun_key,
1102 &output_tunnel_key);
1104 dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
1105 flow->in_port.odp_port,
1106 cookie.ipfix.output_odp_port,
1107 upcall->out_tun_key ?
1108 &output_tunnel_key : NULL);
1112 case FLOW_SAMPLE_UPCALL:
1113 if (upcall->ipfix) {
1114 union user_action_cookie cookie;
1116 memset(&cookie, 0, sizeof cookie);
1117 memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);
1119 /* The flow reflects exactly the contents of the packet.
1120 * Sample the packet using it. */
1121 dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
1122 cookie.flow_sample.collector_set_id,
1123 cookie.flow_sample.probability,
1124 cookie.flow_sample.obs_domain_id,
1125 cookie.flow_sample.obs_point_id);
1137 handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
1140 struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
1141 struct ukey_op ops[UPCALL_MAX_BATCH * 2];
1142 unsigned int flow_limit;
1143 size_t n_ops, n_opsp, i;
1147 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1148 atomic_read_relaxed(&enable_megaflows, &megaflow);
1150 may_put = udpif_get_n_flows(udpif) < flow_limit;
1152 /* Handle the packets individually in order of arrival.
1154 * - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
1155 * processes received packets for these protocols.
1157 * - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
1160  * The loop fills 'ops' with an array of operations to execute in the datapath. */
1163 for (i = 0; i < n_upcalls; i++) {
1164 struct upcall *upcall = &upcalls[i];
1165 const struct ofpbuf *packet = upcall->packet;
1168 if (upcall->vsp_adjusted) {
1169 /* This packet was received on a VLAN splinter port. We added a
1170 * VLAN to the packet to make the packet resemble the flow, but the
1171 * actions were composed assuming that the packet contained no
1172 * VLAN. So, we must remove the VLAN header from the packet before
1173 * trying to execute the actions. */
1174 if (ofpbuf_size(upcall->xout.odp_actions)) {
1175 eth_pop_vlan(CONST_CAST(struct ofpbuf *, upcall->packet));
1178 /* Remove the flow vlan tags inserted by vlan splinter logic
1179 * to ensure megaflow masks generated match the data path flow. */
1180 CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
1183 /* Do not install a flow into the datapath if:
1185 * - The datapath already has too many flows.
1187  *    - We received this packet via some flow installed in the kernel already. */
1189 if (may_put && upcall->type == DPIF_UC_MISS) {
1190 struct udpif_key *ukey = upcall->ukey;
1192 upcall->ukey_persists = true;
1196 op->dop.type = DPIF_OP_FLOW_PUT;
1197 op->dop.u.flow_put.flags = DPIF_FP_CREATE;
1198 op->dop.u.flow_put.key = ukey->key;
1199 op->dop.u.flow_put.key_len = ukey->key_len;
1200 op->dop.u.flow_put.mask = ukey->mask;
1201 op->dop.u.flow_put.mask_len = ukey->mask_len;
1202 op->dop.u.flow_put.ufid = upcall->ufid;
1203 op->dop.u.flow_put.stats = NULL;
1204 op->dop.u.flow_put.actions = ofpbuf_data(ukey->actions);
1205 op->dop.u.flow_put.actions_len = ofpbuf_size(ukey->actions);
1208 if (ofpbuf_size(upcall->xout.odp_actions)) {
1211 op->dop.type = DPIF_OP_EXECUTE;
1212 op->dop.u.execute.packet = CONST_CAST(struct ofpbuf *, packet);
1213 odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
1214 &op->dop.u.execute.md);
1215 op->dop.u.execute.actions = ofpbuf_data(upcall->xout.odp_actions);
1216 op->dop.u.execute.actions_len = ofpbuf_size(upcall->xout.odp_actions);
1217 op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
1218 op->dop.u.execute.probe = false;
1224 * We install ukeys before installing the flows, locking them for exclusive
1225 * access by this thread for the period of installation. This ensures that
1226 * other threads won't attempt to delete the flows as we are creating them.
1229 for (i = 0; i < n_ops; i++) {
1230 struct udpif_key *ukey = ops[i].ukey;
1233 /* If we can't install the ukey, don't install the flow. */
1234 if (!ukey_install_start(udpif, ukey)) {
1235 ukey_delete__(ukey);
1240 opsp[n_opsp++] = &ops[i].dop;
1242 dpif_operate(udpif->dpif, opsp, n_opsp);
1243 for (i = 0; i < n_ops; i++) {
1245 ukey_install_finish(ops[i].ukey, ops[i].dop.error);
1251 get_ufid_hash(const ovs_u128 *ufid)
1253 return ufid->u32[0];
1256 static struct udpif_key *
1257 ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
1259 struct udpif_key *ukey;
1260 int idx = get_ufid_hash(ufid) % N_UMAPS;
1261 struct cmap *cmap = &udpif->ukeys[idx].cmap;
1263 CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
1264 if (ovs_u128_equal(&ukey->ufid, ufid)) {
1271 static struct udpif_key *
1272 ukey_create__(const struct nlattr *key, size_t key_len,
1273 const struct nlattr *mask, size_t mask_len,
1274 bool ufid_present, const ovs_u128 *ufid,
1275 const int pmd_id, const struct ofpbuf *actions,
1276 uint64_t dump_seq, uint64_t reval_seq, long long int used)
1277 OVS_NO_THREAD_SAFETY_ANALYSIS
1279 struct udpif_key *ukey = xmalloc(sizeof *ukey);
1281 memcpy(&ukey->keybuf, key, key_len);
1282 ukey->key = &ukey->keybuf.nla;
1283 ukey->key_len = key_len;
1284 memcpy(&ukey->maskbuf, mask, mask_len);
1285 ukey->mask = &ukey->maskbuf.nla;
1286 ukey->mask_len = mask_len;
1287 ukey->ufid_present = ufid_present;
1289 ukey->pmd_id = pmd_id;
1290 ukey->hash = get_ufid_hash(&ukey->ufid);
1291 ukey->actions = ofpbuf_clone(actions);
1293 ovs_mutex_init(&ukey->mutex);
1294 ukey->dump_seq = dump_seq;
1295 ukey->reval_seq = reval_seq;
1296 ukey->flow_exists = false;
1297 ukey->created = time_msec();
1298 memset(&ukey->stats, 0, sizeof ukey->stats);
1299 ukey->stats.used = used;
1300 ukey->xcache = NULL;
1305 static struct udpif_key *
1306 ukey_create_from_upcall(const struct upcall *upcall)
1308 struct odputil_keybuf keystub, maskstub;
1309 struct ofpbuf keybuf, maskbuf;
1310 bool recirc, megaflow;
1312 if (upcall->key_len) {
1313 ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
1315 /* dpif-netdev doesn't provide a netlink-formatted flow key in the
1316 * upcall, so convert the upcall's flow here. */
1317 ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
1318 odp_flow_key_from_flow(&keybuf, upcall->flow, &upcall->xout.wc.masks,
1319 upcall->flow->in_port.odp_port, true);
1322 atomic_read_relaxed(&enable_megaflows, &megaflow);
1323 recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
1324 ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
1328 max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
1329 odp_flow_key_from_mask(&maskbuf, &upcall->xout.wc.masks, upcall->flow,
1330 UINT32_MAX, max_mpls, recirc);
1333 return ukey_create__(ofpbuf_data(&keybuf), ofpbuf_size(&keybuf),
1334 ofpbuf_data(&maskbuf), ofpbuf_size(&maskbuf),
1335 true, upcall->ufid, upcall->pmd_id,
1336 &upcall->put_actions, upcall->dump_seq,
1337 upcall->reval_seq, 0);
1341 ukey_create_from_dpif_flow(const struct udpif *udpif,
1342 const struct dpif_flow *flow,
1343 struct udpif_key **ukey)
1345 struct dpif_flow full_flow;
1346 struct ofpbuf actions;
1347 uint64_t dump_seq, reval_seq;
1348 uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
1350 if (!flow->key_len) {
1354 /* If the key was not provided by the datapath, fetch the full flow. */
1355 ofpbuf_use_stack(&buf, &stub, sizeof stub);
1356 err = dpif_flow_get(udpif->dpif, NULL, 0, &flow->ufid,
1357 flow->pmd_id, &buf, &full_flow);
1363 dump_seq = seq_read(udpif->dump_seq);
1364 reval_seq = seq_read(udpif->reval_seq);
1365 ofpbuf_use_const(&actions, &flow->actions, flow->actions_len);
1366 *ukey = ukey_create__(flow->key, flow->key_len,
1367 flow->mask, flow->mask_len, flow->ufid_present,
1368 &flow->ufid, flow->pmd_id, &actions, dump_seq,
1369 reval_seq, flow->stats.used);
1374 /* Attempts to insert a ukey into the shared ukey maps.
1376  * On success, returns true; the ukey has been installed and is returned in a
1377  * locked state. Otherwise, returns false.
1379 ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
1380 OVS_TRY_LOCK(true, new_ukey->mutex)
1383 struct udpif_key *old_ukey;
1385 bool locked = false;
1387 idx = new_ukey->hash % N_UMAPS;
1388 umap = &udpif->ukeys[idx];
1389 ovs_mutex_lock(&umap->mutex);
1390 old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
1392 /* Uncommon case: A ukey is already installed with the same UFID. */
1393 if (old_ukey->key_len == new_ukey->key_len
1394 && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
1395 COVERAGE_INC(handler_duplicate_upcall);
1397 struct ds ds = DS_EMPTY_INITIALIZER;
1399 odp_format_ufid(&old_ukey->ufid, &ds);
1400 ds_put_cstr(&ds, " ");
1401 odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
1402 ds_put_cstr(&ds, "\n");
1403 odp_format_ufid(&new_ukey->ufid, &ds);
1404 ds_put_cstr(&ds, " ");
1405 odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);
1407 VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
1411 ovs_mutex_lock(&new_ukey->mutex);
1412 cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
1415 ovs_mutex_unlock(&umap->mutex);
1421 ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
1423 ukey->flow_exists = true;
1427 ukey_install_finish(struct udpif_key *ukey, int error)
1428 OVS_RELEASES(ukey->mutex)
1431 ukey_install_finish__(ukey);
1433 ovs_mutex_unlock(&ukey->mutex);
1439 ukey_install(struct udpif *udpif, struct udpif_key *ukey)
1441 /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
1442 * to call ukey_install_start(), install the corresponding datapath flow,
1443 * then call ukey_install_finish(). The netdev interface using upcall_cb()
1444 * doesn't provide a function to separately finish the flow installation,
1445 * so we perform the operations together here.
1447 * This is fine currently, as revalidator threads will only delete this
1448 * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
1449 * It is unlikely for a revalidator thread to advance dump_seq and reach
1450 * the next GC phase between ukey creation and flow installation. */
1451 return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
1454 /* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
1455  * lock the ukey. If the ukey does not exist, creates it.
1457  * Returns 0 on success, setting '*result' to the matching ukey, which is
1458  * returned in a locked state. Otherwise, returns an errno and clears '*result'.
1459  * EBUSY indicates that another thread is handling this flow. Other errors
1460  * indicate an unexpected condition while creating a new ukey.
1462  * '*error' is an output parameter provided to appease the thread-safety
1463  * analyser; its value matches the return value.
1465 ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
1466 struct udpif_key **result, int *error)
1467 OVS_TRY_LOCK(0, (*result)->mutex)
1469 struct udpif_key *ukey;
1472 ukey = ukey_lookup(udpif, &flow->ufid);
1474 retval = ovs_mutex_trylock(&ukey->mutex);
1476 /* Usually we try to avoid installing flows from revalidator threads,
1477 * because locking on a umap may cause handler threads to block.
1478      * However, there are certain cases, like when ovs-vswitchd is
1479      * restarted, where it is desirable to handle flows that exist in the
1480      * datapath gracefully (i.e., don't just clear the datapath). */
1483 retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
1487 install = ukey_install_start(udpif, ukey);
1489 ukey_install_finish__(ukey);
1492 ukey_delete__(ukey);
1508 ukey_delete__(struct udpif_key *ukey)
1509 OVS_NO_THREAD_SAFETY_ANALYSIS
1512 xlate_cache_delete(ukey->xcache);
1513 ofpbuf_delete(ukey->actions);
1514 ovs_mutex_destroy(&ukey->mutex);
1520 ukey_delete(struct umap *umap, struct udpif_key *ukey)
1521 OVS_REQUIRES(umap->mutex)
1523 cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
1524 ovsrcu_postpone(ukey_delete__, ukey);
1528 should_revalidate(const struct udpif *udpif, uint64_t packets,
1531 long long int metric, now, duration;
1533 if (udpif->dump_duration < 200) {
1534 /* We are likely to handle full revalidation for the flows. */
1538 /* Calculate the mean time between seeing these packets. If this
1539 * exceeds the threshold, then delete the flow rather than performing
1540 * costly revalidation for flows that aren't being hit frequently.
1542 * This is targeted at situations where the dump_duration is high (~1s),
1543 * and revalidation is triggered by a call to udpif_revalidate(). In
1544 * these situations, revalidation of all flows causes fluctuations in the
1545 * flow_limit due to the interaction with the dump_duration and max_idle.
1546 * This tends to result in deletion of low-throughput flows anyway, so
1547 * skip the revalidation and just delete those flows. */
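    /* As a rough example with the ~5 pps cutoff used below: a flow that saw
     * 1,000 packets since its stats were last pushed 5 seconds ago averages
     * 5 ms between packets and is worth revalidating, whereas a flow that saw
     * only 2 packets in those 5 seconds averages 2.5 seconds per packet and
     * is simply deleted. */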
1548 packets = MAX(packets, 1);
1549 now = MAX(used, time_msec());
1550 duration = now - used;
1551 metric = duration / packets;
1554 /* The flow is receiving more than ~5pps, so keep it. */
1561 revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
1562 const struct dpif_flow_stats *stats, uint64_t reval_seq)
1563 OVS_REQUIRES(ukey->mutex)
1565 uint64_t slow_path_buf[128 / 8];
1566 struct xlate_out xout, *xoutp;
1567 struct netflow *netflow;
1568 struct ofproto_dpif *ofproto;
1569 struct dpif_flow_stats push;
1570 struct ofpbuf xout_actions;
1571 struct flow flow, dp_mask;
1572 uint64_t *dp64, *xout64;
1573 ofp_port_t ofp_in_port;
1574 struct xlate_in xin;
1575 long long int last_used;
1579 bool need_revalidate;
1585 need_revalidate = (ukey->reval_seq != reval_seq);
1586 last_used = ukey->stats.used;
1587 push.used = stats->used;
1588 push.tcp_flags = stats->tcp_flags;
1589 push.n_packets = (stats->n_packets > ukey->stats.n_packets
1590 ? stats->n_packets - ukey->stats.n_packets
1592 push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
1593 ? stats->n_bytes - ukey->stats.n_bytes
1596 if (need_revalidate && last_used
1597 && !should_revalidate(udpif, push.n_packets, last_used)) {
1602 /* We will push the stats, so update the ukey stats cache. */
1603 ukey->stats = *stats;
1604 if (!push.n_packets && !need_revalidate) {
1609 if (ukey->xcache && !need_revalidate) {
1610 xlate_push_stats(ukey->xcache, &push);
1615 if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
1620 error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
1626 if (need_revalidate) {
1627 xlate_cache_clear(ukey->xcache);
1629 if (!ukey->xcache) {
1630 ukey->xcache = xlate_cache_new();
1633 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
1635 if (push.n_packets) {
1636 xin.resubmit_stats = &push;
1637 xin.may_learn = true;
1639 xin.xcache = ukey->xcache;
1640 xin.skip_wildcards = !need_revalidate;
1641 xlate_actions(&xin, &xout);
1644 if (!need_revalidate) {
1650 ofpbuf_use_const(&xout_actions, ofpbuf_data(xout.odp_actions),
1651 ofpbuf_size(xout.odp_actions));
1653 ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
1654 compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
1658 if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
1662 if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &flow)
1667 /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
1668 * directly check that the masks are the same. Instead we check that the
1669      * mask in the kernel is more specific, i.e. less wildcarded, than what
1670 * we've calculated here. This guarantees we don't catch any packets we
1671 * shouldn't with the megaflow. */
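    /* For example, if a 64-bit word of the datapath mask is 0xff00 where we
     * computed 0xffff, then (0xff00 | 0xffff) != 0xff00: the datapath mask is
     * wider (more wildcarded) than ours, so the flow cannot safely be kept. */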
1672 dp64 = (uint64_t *) &dp_mask;
1673 xout64 = (uint64_t *) &xout.wc.masks;
1674 for (i = 0; i < FLOW_U64S; i++) {
1675 if ((dp64[i] | xout64[i]) != dp64[i]) {
1684 ukey->reval_seq = reval_seq;
1686 if (netflow && !ok) {
1687 netflow_flow_clear(netflow, &flow);
1689 xlate_out_uninit(xoutp);
1694 delete_op_init__(struct udpif *udpif, struct ukey_op *op,
1695 const struct dpif_flow *flow)
1698 op->dop.type = DPIF_OP_FLOW_DEL;
1699 op->dop.u.flow_del.key = flow->key;
1700 op->dop.u.flow_del.key_len = flow->key_len;
1701 op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
1702 op->dop.u.flow_del.pmd_id = flow->pmd_id;
1703 op->dop.u.flow_del.stats = &op->stats;
1704 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1708 delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
1711 op->dop.type = DPIF_OP_FLOW_DEL;
1712 op->dop.u.flow_del.key = ukey->key;
1713 op->dop.u.flow_del.key_len = ukey->key_len;
1714 op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
1715 op->dop.u.flow_del.pmd_id = ukey->pmd_id;
1716 op->dop.u.flow_del.stats = &op->stats;
1717 op->dop.u.flow_del.terse = udpif_use_ufid(udpif);
1721 push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
1723 struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
1726 ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
1727 for (i = 0; i < n_ops; i++) {
1728 opsp[i] = &ops[i].dop;
1730 dpif_operate(udpif->dpif, opsp, n_ops);
1732 for (i = 0; i < n_ops; i++) {
1733 struct ukey_op *op = &ops[i];
1734 struct dpif_flow_stats *push, *stats, push_buf;
1736 stats = op->dop.u.flow_del.stats;
1740 ovs_mutex_lock(&op->ukey->mutex);
1741 push->used = MAX(stats->used, op->ukey->stats.used);
1742 push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
1743 push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
1744 push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
1745 ovs_mutex_unlock(&op->ukey->mutex);
1750 if (push->n_packets || netflow_exists()) {
1751 const struct nlattr *key = op->dop.u.flow_del.key;
1752 size_t key_len = op->dop.u.flow_del.key_len;
1753 struct ofproto_dpif *ofproto;
1754 struct netflow *netflow;
1755 ofp_port_t ofp_in_port;
1760 ovs_mutex_lock(&op->ukey->mutex);
1761 if (op->ukey->xcache) {
1762 xlate_push_stats(op->ukey->xcache, push);
1763 ovs_mutex_unlock(&op->ukey->mutex);
1766 ovs_mutex_unlock(&op->ukey->mutex);
1767 key = op->ukey->key;
1768 key_len = op->ukey->key_len;
1771 if (odp_flow_key_to_flow(key, key_len, &flow)
1776 error = xlate_lookup(udpif->backer, &flow, &ofproto,
1777 NULL, NULL, &netflow, &ofp_in_port);
1779 struct xlate_in xin;
1781 xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
1782 push->tcp_flags, NULL);
1783 xin.resubmit_stats = push->n_packets ? push : NULL;
1784 xin.may_learn = push->n_packets > 0;
1785 xin.skip_wildcards = true;
1786 xlate_actions_for_side_effects(&xin);
1789 netflow_flow_clear(netflow, &flow);
1797 push_ukey_ops(struct udpif *udpif, struct umap *umap,
1798 struct ukey_op *ops, size_t n_ops)
1802 push_ukey_ops__(udpif, ops, n_ops);
1803 ovs_mutex_lock(&umap->mutex);
1804 for (i = 0; i < n_ops; i++) {
1805 ukey_delete(umap, ops[i].ukey);
1807 ovs_mutex_unlock(&umap->mutex);
1811 log_unexpected_flow(const struct dpif_flow *flow, int error)
1813 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
1814 struct ds ds = DS_EMPTY_INITIALIZER;
1816 ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
1817 "unexpected flow (%s): ", ovs_strerror(error));
1818 odp_format_ufid(&flow->ufid, &ds);
1819 VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
1823 revalidate(struct revalidator *revalidator)
1825 struct udpif *udpif = revalidator->udpif;
1826 struct dpif_flow_dump_thread *dump_thread;
1827 uint64_t dump_seq, reval_seq;
1828 unsigned int flow_limit;
1830 dump_seq = seq_read(udpif->dump_seq);
1831 reval_seq = seq_read(udpif->reval_seq);
1832 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
1833 dump_thread = dpif_flow_dump_thread_create(udpif->dump);
1835 struct ukey_op ops[REVALIDATE_MAX_BATCH];
1838 struct dpif_flow flows[REVALIDATE_MAX_BATCH];
1839 const struct dpif_flow *f;
1842 long long int max_idle;
1847 n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
1854 /* In normal operation we want to keep flows around until they have
1855 * been idle for 'ofproto_max_idle' milliseconds. However:
1857 * - If the number of datapath flows climbs above 'flow_limit',
1858          *       drop that down to 100 ms to try to bring the flows down to the limit.
1861 * - If the number of datapath flows climbs above twice
1862 * 'flow_limit', delete all the datapath flows as an emergency
1863 * measure. (We reassess this condition for the next batch of
1864          *       datapath flows, so we will recover before all the flows are deleted.) */
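        /* Concretely, with a flow_limit of 10,000: up to 10,000 datapath flows
         * age out after 'ofproto_max_idle'; above 10,000 the idle time
         * collapses to 100 ms; and above 20,000 every dumped flow is deleted
         * outright. */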
1866 n_dp_flows = udpif_get_n_flows(udpif);
1867 kill_them_all = n_dp_flows > flow_limit * 2;
1868 max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;
1870 for (f = flows; f < &flows[n_dumped]; f++) {
1871 long long int used = f->stats.used;
1872 struct udpif_key *ukey;
1873 bool already_dumped, keep;
1876 if (ukey_acquire(udpif, f, &ukey, &error)) {
1877 if (error == EBUSY) {
1878 /* Another thread is processing this flow, so don't bother
1880 COVERAGE_INC(upcall_ukey_contention);
1882 log_unexpected_flow(f, error);
1883 if (error != ENOENT) {
1884 delete_op_init__(udpif, &ops[n_ops++], f);
1890 already_dumped = ukey->dump_seq == dump_seq;
1891 if (already_dumped) {
1892 /* The flow has already been handled during this flow dump
1893 * operation. Skip it. */
1895 COVERAGE_INC(dumped_duplicate_flow);
1897 COVERAGE_INC(dumped_new_flow);
1899 ovs_mutex_unlock(&ukey->mutex);
1904 used = ukey->created;
1906 if (kill_them_all || (used && used < now - max_idle)) {
1909 keep = revalidate_ukey(udpif, ukey, &f->stats, reval_seq);
1911 ukey->dump_seq = dump_seq;
1912 ukey->flow_exists = keep;
1915 delete_op_init(udpif, &ops[n_ops++], ukey);
1917 ovs_mutex_unlock(&ukey->mutex);
1921 push_ukey_ops__(udpif, ops, n_ops);
1925 dpif_flow_dump_thread_destroy(dump_thread);
1929 handle_missed_revalidation(struct udpif *udpif, uint64_t reval_seq,
1930 struct udpif_key *ukey)
1932 struct dpif_flow_stats stats;
1935 COVERAGE_INC(revalidate_missed_dp_flow);
1937 memset(&stats, 0, sizeof stats);
1938 ovs_mutex_lock(&ukey->mutex);
1939 keep = revalidate_ukey(udpif, ukey, &stats, reval_seq);
1940 ovs_mutex_unlock(&ukey->mutex);
1946 revalidator_sweep__(struct revalidator *revalidator, bool purge)
1948 struct udpif *udpif;
1949 uint64_t dump_seq, reval_seq;
1952 udpif = revalidator->udpif;
1953 dump_seq = seq_read(udpif->dump_seq);
1954 reval_seq = seq_read(udpif->reval_seq);
1955 slice = revalidator - udpif->revalidators;
1956 ovs_assert(slice < udpif->n_revalidators);
1958 for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
1959 struct ukey_op ops[REVALIDATE_MAX_BATCH];
1960 struct udpif_key *ukey;
1961 struct umap *umap = &udpif->ukeys[i];
1964 CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
1965 bool flow_exists, seq_mismatch;
1967             /* Handler threads could be holding a ukey lock while installing a
1968              * new flow, so don't hang around waiting for access to it. */
1969 if (ovs_mutex_trylock(&ukey->mutex)) {
1972 flow_exists = ukey->flow_exists;
1973 seq_mismatch = (ukey->dump_seq != dump_seq
1974 && ukey->reval_seq != reval_seq);
1975 ovs_mutex_unlock(&ukey->mutex);
1980 && !handle_missed_revalidation(udpif, reval_seq,
1982 struct ukey_op *op = &ops[n_ops++];
1984 delete_op_init(udpif, op, ukey);
1985 if (n_ops == REVALIDATE_MAX_BATCH) {
1986 push_ukey_ops(udpif, umap, ops, n_ops);
1989 } else if (!flow_exists) {
1990 ovs_mutex_lock(&umap->mutex);
1991 ukey_delete(umap, ukey);
1992 ovs_mutex_unlock(&umap->mutex);
1997 push_ukey_ops(udpif, umap, ops, n_ops);
2004 revalidator_sweep(struct revalidator *revalidator)
2006 revalidator_sweep__(revalidator, false);
2010 revalidator_purge(struct revalidator *revalidator)
2012 revalidator_sweep__(revalidator, true);
2016 upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
2017 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2019 struct ds ds = DS_EMPTY_INITIALIZER;
2020 struct udpif *udpif;
2022 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2023 unsigned int flow_limit;
2027 atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
2028 ufid_enabled = udpif_use_ufid(udpif);
2030 ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
2031 ds_put_format(&ds, "\tflows : (current %lu)"
2032 " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
2033 udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
2034 ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
2035 ds_put_format(&ds, "\tufid enabled : ");
2037 ds_put_format(&ds, "true\n");
2039 ds_put_format(&ds, "false\n");
2041 ds_put_char(&ds, '\n');
2043 for (i = 0; i < n_revalidators; i++) {
2044 struct revalidator *revalidator = &udpif->revalidators[i];
2045 int j, elements = 0;
2047 for (j = i; j < N_UMAPS; j += n_revalidators) {
2048 elements += cmap_count(&udpif->ukeys[j].cmap);
2050 ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
2054 unixctl_command_reply(conn, ds_cstr(&ds));
2058 /* Disable using megaflows.
2060 * This command is only needed for advanced debugging, so it's not
2061 * documented in the man page. */
2063 upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
2064 int argc OVS_UNUSED,
2065 const char *argv[] OVS_UNUSED,
2066 void *aux OVS_UNUSED)
2068 atomic_store_relaxed(&enable_megaflows, false);
2069 udpif_flush_all_datapaths();
2070 unixctl_command_reply(conn, "megaflows disabled");
2073 /* Re-enable using megaflows.
2075 * This command is only needed for advanced debugging, so it's not
2076 * documented in the man page. */
2078 upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
2079 int argc OVS_UNUSED,
2080 const char *argv[] OVS_UNUSED,
2081 void *aux OVS_UNUSED)
2083 atomic_store_relaxed(&enable_megaflows, true);
2084 udpif_flush_all_datapaths();
2085 unixctl_command_reply(conn, "megaflows enabled");
2088 /* Disable skipping flow attributes during flow dump.
2090 * This command is only needed for advanced debugging, so it's not
2091 * documented in the man page. */
2093 upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2094 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2096 atomic_store_relaxed(&enable_ufid, false);
2097 unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
2100 /* Re-enable skipping flow attributes during flow dump.
2102 * This command is only needed for advanced debugging, so it's not documented
2103 * in the man page. */
2105 upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
2106 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2108 atomic_store_relaxed(&enable_ufid, true);
2109 unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled "
2110 "for supported datapaths");
2113 /* Set the flow limit.
2115 * This command is only needed for advanced debugging, so it's not
2116 * documented in the man page. */
2118 upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
2119 int argc OVS_UNUSED,
2120 const char *argv[] OVS_UNUSED,
2121 void *aux OVS_UNUSED)
2123 struct ds ds = DS_EMPTY_INITIALIZER;
2124 struct udpif *udpif;
2125 unsigned int flow_limit = atoi(argv[1]);
2127 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2128 atomic_store_relaxed(&udpif->flow_limit, flow_limit);
2130 ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
2131 unixctl_command_reply(conn, ds_cstr(&ds));
2136 upcall_unixctl_dump_wait(struct unixctl_conn *conn,
2137 int argc OVS_UNUSED,
2138 const char *argv[] OVS_UNUSED,
2139 void *aux OVS_UNUSED)
2141 if (list_is_singleton(&all_udpifs)) {
2142 struct udpif *udpif = NULL;
2145 udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
2146 len = (udpif->n_conns + 1) * sizeof *udpif->conns;
2147 udpif->conn_seq = seq_read(udpif->dump_seq);
2148 udpif->conns = xrealloc(udpif->conns, len);
2149 udpif->conns[udpif->n_conns++] = conn;
2151 unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
2156 upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
2157 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
2159 struct udpif *udpif;
2161 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
2164 for (n = 0; n < udpif->n_revalidators; n++) {
2165 revalidator_purge(&udpif->revalidators[n]);
2168 unixctl_command_reply(conn, "");