/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */

#include <config.h>

#include "ofproto-dpif-upcall.h"

#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>

#include "connmgr.h"
#include "coverage.h"
#include "cmap.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "latch.h"
#include "list.h"
#include "netlink.h"
#include "odp-util.h"
#include "ofpbuf.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "ovs-rcu.h"
#include "packets.h"
#include "poll-loop.h"
#include "seq.h"
#include "simap.h"
#include "unixctl.h"
#include "openvswitch/vlog.h"

#define MAX_QUEUE_LENGTH 512
#define UPCALL_MAX_BATCH 64
#define REVALIDATE_MAX_BATCH 50

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(dumped_duplicate_flow);
COVERAGE_DEFINE(dumped_new_flow);
COVERAGE_DEFINE(handler_duplicate_upcall);
COVERAGE_DEFINE(upcall_ukey_contention);
COVERAGE_DEFINE(revalidate_missed_dp_flow);

/* A thread that reads upcalls from dpif, forwards each upcall's packet,
 * and possibly sets up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    uint32_t handler_id;               /* Handler id. */
};

/* In the absence of a multiple-writer multiple-reader datastructure for
 * storing ukeys, we use a large number of cmaps, each with its own lock for
 * writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
    struct ovs_mutex mutex;            /* Take for writing to the following. */
    struct cmap cmap;                  /* Datapath flow keys. */
};

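/* Illustrative sketch (not part of the original file): a ukey whose
 * precomputed hash is 'hash' always lives in the umap at index
 * 'hash % N_UMAPS', so writers only serialize on that one slice (see
 * ukey_install_start() below):
 *
 *     struct umap *umap = &udpif->ukeys[ukey->hash % N_UMAPS];
 *
 *     ovs_mutex_lock(&umap->mutex);
 *     cmap_insert(&umap->cmap, &ukey->cmap_node, ukey->hash);
 *     ovs_mutex_unlock(&umap->mutex);
 */
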
/* A thread that processes datapath flows, updates OpenFlow statistics, and
 * updates or removes the flows if necessary. */
struct revalidator {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    unsigned int id;                   /* ovsthread_id_self(). */
};

/* An upcall handler for ofproto_dpif.
 *
 * udpif keeps records of two kinds of logically separate units:
 *
 *    - An array of 'struct handler's for upcall handling and flow
 *      installation.
 *
 *    - Revalidation threads which read the datapath flow table and maintain
 *      the flows in it. */
struct udpif {
    struct ovs_list list_node;         /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    struct revalidator *revalidators;  /* Flow revalidators. */
    size_t n_revalidators;

    struct latch exit_latch;           /* Tells child threads to exit. */

    struct seq *reval_seq;             /* Incremented to force revalidation. */
    bool reval_exit;                   /* Set by leader on 'exit_latch'. */
    struct ovs_barrier reval_barrier;  /* Barrier used by revalidators. */
    struct dpif_flow_dump *dump;       /* DPIF flow dump state. */
    long long int dump_duration;       /* Duration of the last flow dump. */
    struct seq *dump_seq;              /* Increments each dump iteration. */
    atomic_bool enable_ufid;           /* If true, skip dumping flow attrs. */

    /* There are 'N_UMAPS' maps containing 'struct udpif_key' elements.
     *
     * During the flow dump phase, revalidators insert into these with a random
     * distribution. During the garbage collection phase, each revalidator
     * takes care of garbage collecting a slice of these maps. */
    struct umap *ukeys;

    /* Datapath flow statistics. */
    unsigned int max_n_flows;
    unsigned int avg_n_flows;

    /* Following fields are accessed and modified by different threads. */
    atomic_uint flow_limit;            /* Datapath flow hard limit. */

    /* n_flows_mutex prevents multiple threads updating these concurrently. */
    atomic_uint n_flows;               /* Number of flows in the datapath. */
    atomic_llong n_flows_timestamp;    /* Last time n_flows was updated. */
    struct ovs_mutex n_flows_mutex;

    /* Following fields are accessed and modified only from the main thread. */
    struct unixctl_conn **conns;       /* Connections waiting on dump_seq. */
    uint64_t conn_seq;                 /* Corresponds to 'dump_seq' when
                                          conns[n_conns-1] was stored. */
    size_t n_conns;                    /* Number of connections waiting. */
};

enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};

struct upcall {
    struct ofproto_dpif *ofproto;  /* Parent ofproto. */

    /* The flow and packet are only required to be constant when using
     * dpif-netdev. If a modification is absolutely necessary, a const cast
     * may be used with other datapaths. */
    const struct flow *flow;       /* Parsed representation of the packet. */
    const ovs_u128 *ufid;          /* Unique identifier for 'flow'. */
    const struct ofpbuf *packet;   /* Packet associated with this upcall. */
    ofp_port_t in_port;            /* OpenFlow in port, or OFPP_NONE. */

    enum dpif_upcall_type type;    /* Datapath type of the upcall. */
    const struct nlattr *userdata; /* Userdata for DPIF_UC_ACTION upcalls. */

    bool xout_initialized;         /* True if 'xout' must be uninitialized. */
    struct xlate_out xout;         /* Result of xlate_actions(). */
    struct ofpbuf put_actions;     /* Actions 'put' in the fastpath. */

    struct dpif_ipfix *ipfix;      /* IPFIX pointer or NULL. */
    struct dpif_sflow *sflow;      /* sFlow pointer or NULL. */

    bool vsp_adjusted;             /* 'packet' and 'flow' were adjusted for
                                      VLAN splinters if true. */

    struct udpif_key *ukey;        /* Revalidator flow cache. */
    bool ukey_persists;            /* Set true to keep 'ukey' beyond the
                                      lifetime of this upcall. */

    uint64_t dump_seq;             /* udpif->dump_seq at translation time. */
    uint64_t reval_seq;            /* udpif->reval_seq at translation time. */

    /* Not used by the upcall callback interface. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Datapath flow key length. */
    const struct nlattr *out_tun_key;  /* Datapath output tunnel key. */
};

/* 'udpif_key's are responsible for tracking the little bit of state udpif
 * needs to do flow expiration which can't be pulled directly from the
 * datapath. They may be created by any handler or revalidator thread at any
 * time, and read by any revalidator during the dump phase. They are however
 * each owned by a single revalidator which takes care of destroying them
 * during the garbage-collection phase.
 *
 * The mutex within the ukey protects some members of the ukey. The ukey
 * itself is protected by RCU and is held within a umap in the parent udpif.
 * Adding or removing a ukey from a umap is only safe when holding the
 * corresponding umap lock. */
struct udpif_key {
    struct cmap_node cmap_node;     /* In parent revalidator 'ukeys' map. */

    /* These elements are read only once created, and therefore aren't
     * protected by a mutex. */
    const struct nlattr *key;      /* Datapath flow key. */
    size_t key_len;                /* Length of 'key'. */
    const struct nlattr *mask;     /* Datapath flow mask. */
    size_t mask_len;               /* Length of 'mask'. */
    struct ofpbuf *actions;        /* Datapath flow actions as nlattrs. */
    ovs_u128 ufid;                 /* Unique flow identifier. */
    bool ufid_present;             /* True if 'ufid' is in datapath. */
    uint32_t hash;                 /* Pre-computed hash for 'key'. */

    struct ovs_mutex mutex;                   /* Guards the following. */
    struct dpif_flow_stats stats OVS_GUARDED; /* Last known stats. */
    long long int created OVS_GUARDED;        /* Estimate of creation time. */
    uint64_t dump_seq OVS_GUARDED;            /* Tracks udpif->dump_seq. */
    uint64_t reval_seq OVS_GUARDED;           /* Tracks udpif->reval_seq. */
    bool flow_exists OVS_GUARDED;             /* Ensures flows are only deleted
                                                 once. */
    struct xlate_cache *xcache OVS_GUARDED;   /* Cache for xlate entries that
                                               * are affected by this ukey.
                                               * Used for stats and learning.*/
    union {                        /* Storage that 'key' and 'mask' point
        struct odputil_keybuf buf; /* into; see ukey_create__(). */
        struct nlattr nla;
    } keybuf, maskbuf;
};

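/* Illustrative sketch (not part of the original file): readers traverse a
 * umap's cmap under RCU without taking the umap mutex, but must take
 * 'ukey->mutex' before touching the OVS_GUARDED members, e.g.:
 *
 *     struct udpif_key *ukey = ukey_lookup(udpif, &ufid);
 *     if (ukey && !ovs_mutex_trylock(&ukey->mutex)) {
 *         struct dpif_flow_stats stats = ukey->stats;
 *         ovs_mutex_unlock(&ukey->mutex);
 *     }
 */
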
/* Datapath operation with optional ukey attached. */
struct ukey_op {
    struct udpif_key *ukey;
    struct dpif_flow_stats stats;  /* Stats for 'op'. */
    struct dpif_op dop;            /* Flow operation. */
};

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct ovs_list all_udpifs = OVS_LIST_INITIALIZER(&all_udpifs);

static size_t recv_upcalls(struct handler *);
static int process_upcall(struct udpif *, struct upcall *,
                          struct ofpbuf *odp_actions);
static void handle_upcalls(struct udpif *, struct upcall *, size_t n_upcalls);
static void udpif_stop_threads(struct udpif *);
static void udpif_start_threads(struct udpif *, size_t n_handlers,
                                size_t n_revalidators);
static void *udpif_upcall_handler(void *);
static void *udpif_revalidator(void *);
static unsigned long udpif_get_n_flows(struct udpif *);
static void revalidate(struct revalidator *);
static void revalidator_sweep(struct revalidator *);
static void revalidator_purge(struct revalidator *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
                                             const char *argv[], void *aux);
static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
                                            const char *argv[], void *aux);
static void upcall_unixctl_disable_ufid(struct unixctl_conn *, int argc,
                                        const char *argv[], void *aux);
static void upcall_unixctl_enable_ufid(struct unixctl_conn *, int argc,
                                       const char *argv[], void *aux);
static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
                                          const char *argv[], void *aux);
static void upcall_unixctl_dump_wait(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux);
static void upcall_unixctl_purge(struct unixctl_conn *conn, int argc,
                                 const char *argv[], void *aux);

static struct udpif_key *ukey_create_from_upcall(const struct upcall *);
static int ukey_create_from_dpif_flow(const struct udpif *,
                                      const struct dpif_flow *,
                                      struct udpif_key **);
static bool ukey_install_start(struct udpif *, struct udpif_key *ukey);
static bool ukey_install_finish(struct udpif_key *ukey, int error);
static bool ukey_install(struct udpif *udpif, struct udpif_key *ukey);
static struct udpif_key *ukey_lookup(struct udpif *udpif,
                                     const ovs_u128 *ufid);
static int ukey_acquire(struct udpif *, const struct dpif_flow *,
                        struct udpif_key **result, int *error);
static void ukey_delete__(struct udpif_key *);
static void ukey_delete(struct umap *, struct udpif_key *);
static enum upcall_type classify_upcall(enum dpif_upcall_type type,
                                        const struct nlattr *userdata);

static int upcall_receive(struct upcall *, const struct dpif_backer *,
                          const struct ofpbuf *packet, enum dpif_upcall_type,
                          const struct nlattr *userdata, const struct flow *,
                          const ovs_u128 *ufid);
static void upcall_uninit(struct upcall *);

static upcall_callback upcall_cb;

static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);

struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct udpif *udpif = xzalloc(sizeof *udpif);

    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
                                 upcall_unixctl_disable_megaflows, NULL);
        unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
                                 upcall_unixctl_enable_megaflows, NULL);
        unixctl_command_register("upcall/disable-ufid", "", 0, 0,
                                 upcall_unixctl_disable_ufid, NULL);
        unixctl_command_register("upcall/enable-ufid", "", 0, 0,
                                 upcall_unixctl_enable_ufid, NULL);
        unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
                                 upcall_unixctl_set_flow_limit, NULL);
        unixctl_command_register("revalidator/wait", "", 0, 0,
                                 upcall_unixctl_dump_wait, NULL);
        unixctl_command_register("revalidator/purge", "", 0, 0,
                                 upcall_unixctl_purge, NULL);
        ovsthread_once_done(&once);
    }

    udpif->dpif = dpif;
    udpif->backer = backer;
    atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
    udpif->reval_seq = seq_create();
    udpif->dump_seq = seq_create();
    latch_init(&udpif->exit_latch);
    list_push_back(&all_udpifs, &udpif->list_node);
    atomic_init(&udpif->enable_ufid, false);
    atomic_init(&udpif->n_flows, 0);
    atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
    ovs_mutex_init(&udpif->n_flows_mutex);
    udpif->ukeys = xmalloc(N_UMAPS * sizeof *udpif->ukeys);
    for (int i = 0; i < N_UMAPS; i++) {
        cmap_init(&udpif->ukeys[i].cmap);
        ovs_mutex_init(&udpif->ukeys[i].mutex);
    }

    dpif_register_upcall_cb(dpif, upcall_cb, udpif);

    return udpif;
}

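/* Usage sketch (illustrative, not part of the original file): the unixctl
 * commands registered above are reachable through ovs-appctl, e.g.:
 *
 *     ovs-appctl upcall/show
 *     ovs-appctl upcall/set-flow-limit 5000
 *     ovs-appctl revalidator/purge
 */
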
void
udpif_run(struct udpif *udpif)
{
    if (udpif->conns && udpif->conn_seq != seq_read(udpif->dump_seq)) {
        int i;

        for (i = 0; i < udpif->n_conns; i++) {
            unixctl_command_reply(udpif->conns[i], NULL);
        }
        free(udpif->conns);
        udpif->conns = NULL;
        udpif->n_conns = 0;
    }
}

void
udpif_destroy(struct udpif *udpif)
{
    udpif_stop_threads(udpif);

    for (int i = 0; i < N_UMAPS; i++) {
        cmap_destroy(&udpif->ukeys[i].cmap);
        ovs_mutex_destroy(&udpif->ukeys[i].mutex);
    }
    free(udpif->ukeys);
    udpif->ukeys = NULL;

    list_remove(&udpif->list_node);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->reval_seq);
    seq_destroy(udpif->dump_seq);
    ovs_mutex_destroy(&udpif->n_flows_mutex);
    free(udpif);
}

/* Stops the handler and revalidator threads; must be enclosed in an ovsrcu
 * quiescent state, except when called while destroying the udpif. */
static void
udpif_stop_threads(struct udpif *udpif)
{
    if (udpif && (udpif->n_handlers != 0 || udpif->n_revalidators != 0)) {
        size_t i;

        latch_set(&udpif->exit_latch);

        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            xpthread_join(handler->thread, NULL);
        }

        for (i = 0; i < udpif->n_revalidators; i++) {
            xpthread_join(udpif->revalidators[i].thread, NULL);
        }

        dpif_disable_upcall(udpif->dpif);

        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            /* Delete ukeys, and delete all flows from the datapath to prevent
             * double-counting stats. */
            revalidator_purge(revalidator);
        }

        latch_poll(&udpif->exit_latch);

        ovs_barrier_destroy(&udpif->reval_barrier);

        free(udpif->revalidators);
        udpif->revalidators = NULL;
        udpif->n_revalidators = 0;

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }
}

/* Starts the handler and revalidator threads; must be enclosed in an ovsrcu
 * quiescent state. */
static void
udpif_start_threads(struct udpif *udpif, size_t n_handlers,
                    size_t n_revalidators)
{
    if (udpif && n_handlers && n_revalidators) {
        size_t i;
        bool enable_ufid;

        udpif->n_handlers = n_handlers;
        udpif->n_revalidators = n_revalidators;

        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            handler->handler_id = i;
            handler->thread = ovs_thread_create(
                "handler", udpif_upcall_handler, handler);
        }

        enable_ufid = ofproto_dpif_get_enable_ufid(udpif->backer);
        atomic_init(&udpif->enable_ufid, enable_ufid);
        dpif_enable_upcall(udpif->dpif);

        ovs_barrier_init(&udpif->reval_barrier, udpif->n_revalidators);
        udpif->reval_exit = false;
        udpif->revalidators = xzalloc(udpif->n_revalidators
                                      * sizeof *udpif->revalidators);
        for (i = 0; i < udpif->n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];

            revalidator->udpif = udpif;
            revalidator->thread = ovs_thread_create(
                "revalidator", udpif_revalidator, revalidator);
        }
    }
}

/* Tells 'udpif' how many threads it should use to handle upcalls.
 * 'n_handlers' and 'n_revalidators' can never be zero. 'udpif''s
 * datapath handle must have packet reception enabled before starting
 * threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers,
                  size_t n_revalidators)
{
    ovs_assert(udpif);
    ovs_assert(n_handlers && n_revalidators);

    ovsrcu_quiesce_start();
    if (udpif->n_handlers != n_handlers
        || udpif->n_revalidators != n_revalidators) {
        udpif_stop_threads(udpif);
    }

    if (!udpif->handlers && !udpif->revalidators) {
        int error;

        error = dpif_handlers_set(udpif->dpif, n_handlers);
        if (error) {
            VLOG_ERR("failed to configure handlers in dpif %s: %s",
                     dpif_name(udpif->dpif), ovs_strerror(error));
            return;
        }

        udpif_start_threads(udpif, n_handlers, n_revalidators);
    }
    ovsrcu_quiesce_end();
}

/* Waits for all ongoing upcall translations to complete. This ensures that
 * there are no transient references to any removed ofprotos (or other
 * objects). In particular, this should be called after an ofproto is removed
 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
void
udpif_synchronize(struct udpif *udpif)
{
    /* This is stronger than necessary. It would be sufficient to ensure
     * (somehow) that each handler and revalidator thread had passed through
     * its main loop once. */
    size_t n_handlers = udpif->n_handlers;
    size_t n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();
    udpif_stop_threads(udpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);
    ovsrcu_quiesce_end();
}

/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    seq_change(udpif->reval_seq);
}

/* Returns a seq which increments every time 'udpif' pulls stats from the
 * datapath. Callers can use this to get a sense of when might be a good time
 * to do periodic work which relies on relatively up to date statistics. */
struct seq *
udpif_dump_seq(struct udpif *udpif)
{
    return udpif->dump_seq;
}

void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "handlers", udpif->n_handlers);

    simap_increase(usage, "revalidators", udpif->n_revalidators);
    for (i = 0; i < N_UMAPS; i++) {
        simap_increase(usage, "udpif keys", cmap_count(&udpif->ukeys[i].cmap));
    }
}

/* Removes all flows from a single datapath. */
void
udpif_flush(struct udpif *udpif)
{
    size_t n_handlers, n_revalidators;

    n_handlers = udpif->n_handlers;
    n_revalidators = udpif->n_revalidators;

    ovsrcu_quiesce_start();

    udpif_stop_threads(udpif);
    dpif_flow_flush(udpif->dpif);
    udpif_start_threads(udpif, n_handlers, n_revalidators);

    ovsrcu_quiesce_end();
}

/* Removes all flows from all datapaths. */
static void
udpif_flush_all_datapaths(void)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        udpif_flush(udpif);
    }
}

static unsigned long
udpif_get_n_flows(struct udpif *udpif)
{
    long long int time, now;
    unsigned long flow_count;

    now = time_msec();
    atomic_read_relaxed(&udpif->n_flows_timestamp, &time);
    if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
        /* The cached value is at least 100 ms old and no other thread is
         * refreshing it, so pull fresh stats from the datapath. */
        struct dpif_dp_stats stats;

        atomic_store_relaxed(&udpif->n_flows_timestamp, now);
        dpif_get_dp_stats(udpif->dpif, &stats);
        flow_count = stats.n_flows;
        atomic_store_relaxed(&udpif->n_flows, flow_count);
        ovs_mutex_unlock(&udpif->n_flows_mutex);
    } else {
        atomic_read_relaxed(&udpif->n_flows, &flow_count);
    }
    return flow_count;
}

/* The upcall handler thread tries to read a batch of UPCALL_MAX_BATCH
 * upcalls from dpif, processes the batch and installs corresponding flows
 * in dpif. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;
    struct udpif *udpif = handler->udpif;

    while (!latch_is_set(&handler->udpif->exit_latch)) {
        if (recv_upcalls(handler)) {
            poll_immediate_wake();
        } else {
            dpif_recv_wait(udpif->dpif, handler->handler_id);
            latch_wait(&udpif->exit_latch);
        }
        poll_block();
    }

    return NULL;
}

static size_t
recv_upcalls(struct handler *handler)
{
    struct udpif *udpif = handler->udpif;
    uint64_t recv_stubs[UPCALL_MAX_BATCH][512 / 8];
    struct ofpbuf recv_bufs[UPCALL_MAX_BATCH];
    struct dpif_upcall dupcalls[UPCALL_MAX_BATCH];
    struct upcall upcalls[UPCALL_MAX_BATCH];
    struct flow flows[UPCALL_MAX_BATCH];
    size_t n_upcalls, i;

    n_upcalls = 0;
    while (n_upcalls < UPCALL_MAX_BATCH) {
        struct ofpbuf *recv_buf = &recv_bufs[n_upcalls];
        struct dpif_upcall *dupcall = &dupcalls[n_upcalls];
        struct upcall *upcall = &upcalls[n_upcalls];
        struct flow *flow = &flows[n_upcalls];
        struct pkt_metadata md;
        int error;

        ofpbuf_use_stub(recv_buf, recv_stubs[n_upcalls],
                        sizeof recv_stubs[n_upcalls]);
        if (dpif_recv(udpif->dpif, handler->handler_id, dupcall, recv_buf)) {
            ofpbuf_uninit(recv_buf);
            break;
        }

        if (odp_flow_key_to_flow(dupcall->key, dupcall->key_len, flow)
            == ODP_FIT_ERROR) {
            goto free_dupcall;
        }

        error = upcall_receive(upcall, udpif->backer, &dupcall->packet,
                               dupcall->type, dupcall->userdata, flow,
                               &dupcall->ufid);
        if (error) {
            if (error == ENODEV) {
                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto. This can happen if a port is removed
                 * while traffic is being received. Print a rate-limited
                 * message in case it happens frequently. Install a drop flow
                 * so that future packets of the flow are inexpensively dropped
                 * in the kernel. */
                dpif_flow_put(udpif->dpif, DPIF_FP_CREATE, dupcall->key,
                              dupcall->key_len, NULL, 0, NULL, 0,
                              &dupcall->ufid, NULL);
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, flow->in_port.odp_port);
            }
            goto free_dupcall;
        }

        upcall->key = dupcall->key;
        upcall->key_len = dupcall->key_len;
        upcall->ufid = &dupcall->ufid;

        upcall->out_tun_key = dupcall->out_tun_key;

        if (vsp_adjust_flow(upcall->ofproto, flow, &dupcall->packet)) {
            upcall->vsp_adjusted = true;
        }

        md = pkt_metadata_from_flow(flow);
        flow_extract(&dupcall->packet, &md, flow);

        error = process_upcall(udpif, upcall, NULL);
        if (error) {
            goto cleanup;
        }

        n_upcalls++;
        continue;

cleanup:
        upcall_uninit(upcall);
free_dupcall:
        ofpbuf_uninit(&dupcall->packet);
        ofpbuf_uninit(recv_buf);
    }

    if (n_upcalls) {
        handle_upcalls(handler->udpif, upcalls, n_upcalls);
        for (i = 0; i < n_upcalls; i++) {
            ofpbuf_uninit(&dupcalls[i].packet);
            ofpbuf_uninit(&recv_bufs[i]);
            upcall_uninit(&upcalls[i]);
        }
    }

    return n_upcalls;
}

static void *
udpif_revalidator(void *arg)
{
    /* Used by all revalidators. */
    struct revalidator *revalidator = arg;
    struct udpif *udpif = revalidator->udpif;
    bool leader = revalidator == &udpif->revalidators[0];

    /* Used only by the leader. */
    long long int start_time = 0;
    uint64_t last_reval_seq = 0;
    size_t n_flows = 0;

    revalidator->id = ovsthread_id_self();
    for (;;) {
        if (leader) {
            uint64_t reval_seq;

            reval_seq = seq_read(udpif->reval_seq);
            last_reval_seq = reval_seq;

            n_flows = udpif_get_n_flows(udpif);
            udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
            udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;

            /* Only the leader checks the exit latch to prevent a race where
             * some threads think it's true and exit and others think it's
             * false and block indefinitely on the reval_barrier. */
            udpif->reval_exit = latch_is_set(&udpif->exit_latch);

            start_time = time_msec();
            if (!udpif->reval_exit) {
                bool terse_dump;

                atomic_read_relaxed(&udpif->enable_ufid, &terse_dump);
                udpif->dump = dpif_flow_dump_create(udpif->dpif, terse_dump);
            }
        }

        /* Wait for the leader to start the flow dump. */
        ovs_barrier_block(&udpif->reval_barrier);
        if (udpif->reval_exit) {
            break;
        }
        revalidate(revalidator);

        /* Wait for all flows to have been dumped before we garbage collect. */
        ovs_barrier_block(&udpif->reval_barrier);
        revalidator_sweep(revalidator);

        /* Wait for all revalidators to finish garbage collection. */
        ovs_barrier_block(&udpif->reval_barrier);

        if (leader) {
            unsigned int flow_limit;
            long long int duration;

            atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

            dpif_flow_dump_destroy(udpif->dump);
            seq_change(udpif->dump_seq);

            duration = MAX(time_msec() - start_time, 1);
            udpif->dump_duration = duration;
            if (duration > 2000) {
                flow_limit /= duration / 1000;
            } else if (duration > 1300) {
                flow_limit = flow_limit * 3 / 4;
            } else if (duration < 1000 && n_flows > 2000
                       && flow_limit < n_flows * 1000 / duration) {
                flow_limit += 1000;
            }
            flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
            atomic_store_relaxed(&udpif->flow_limit, flow_limit);

            if (duration > 2000) {
                VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
                          duration);
            }

            poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
            seq_wait(udpif->reval_seq, last_reval_seq);
            latch_wait(&udpif->exit_latch);
            poll_block();
        }
    }

    return NULL;
}

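/* Worked example (illustrative, not part of the original file): with a
 * flow limit of 10000, a 3500 ms dump divides the limit by 3500/1000 == 3,
 * leaving 3333; a 1500 ms dump instead scales it to 10000 * 3 / 4 == 7500;
 * and a 500 ms dump over 5000 flows raises a 5000-flow limit by 1000
 * (since 5000 < 5000 * 1000 / 500). The result is always clamped to the
 * range [1000, ofproto_flow_limit]. */
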
static enum upcall_type
classify_upcall(enum dpif_upcall_type type, const struct nlattr *userdata)
{
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(userdata), userdata_len);
    if (userdata_len == MAX(8, sizeof cookie.sflow)
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}

/* Calculates slow path actions for 'xout'. 'buf' must be statically
 * initialized with at least 128 bytes of space. */
static void
compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
                  const struct flow *flow, odp_port_t odp_in_port,
                  struct ofpbuf *buf)
{
    union user_action_cookie cookie;
    odp_port_t port;
    uint32_t pid;

    cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
    cookie.slow_path.unused = 0;
    cookie.slow_path.reason = xout->slow;

    port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
        ? ODPP_NONE
        : odp_in_port;
    pid = dpif_port_get_pid(udpif->dpif, port, flow_hash_5tuple(flow, 0));
    odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, ODPP_NONE,
                             buf);
}

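/* Round-trip note (illustrative, not part of the original file): the
 * USER_ACTION_COOKIE_SLOW_PATH cookie composed here is what
 * classify_upcall() above matches to return MISS_UPCALL, so packets hitting
 * a slow-pathed flow's userspace action are fed back through the full
 * translation path. */
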
/* If there is no error, the upcall must be destroyed with upcall_uninit()
 * before quiescing, as the referred objects are guaranteed to exist only
 * until the calling thread quiesces. Otherwise, do not call upcall_uninit()
 * since the 'upcall->put_actions' remains uninitialized. */
static int
upcall_receive(struct upcall *upcall, const struct dpif_backer *backer,
               const struct ofpbuf *packet, enum dpif_upcall_type type,
               const struct nlattr *userdata, const struct flow *flow,
               const ovs_u128 *ufid)
{
    int error;

    error = xlate_lookup(backer, flow, &upcall->ofproto, &upcall->ipfix,
                         &upcall->sflow, NULL, &upcall->in_port);
    if (error) {
        return error;
    }

    upcall->flow = flow;
    upcall->packet = packet;
    upcall->ufid = ufid;
    upcall->type = type;
    upcall->userdata = userdata;
    ofpbuf_init(&upcall->put_actions, 0);

    upcall->xout_initialized = false;
    upcall->vsp_adjusted = false;
    upcall->ukey_persists = false;

    upcall->ukey = NULL;
    upcall->key = NULL;
    upcall->key_len = 0;

    upcall->out_tun_key = NULL;

    return 0;
}

static void
upcall_xlate(struct udpif *udpif, struct upcall *upcall,
             struct ofpbuf *odp_actions)
{
    struct dpif_flow_stats stats;
    struct xlate_in xin;

    stats.n_packets = 1;
    stats.n_bytes = ofpbuf_size(upcall->packet);
    stats.used = time_msec();
    stats.tcp_flags = ntohs(upcall->flow->tcp_flags);

    xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
                  stats.tcp_flags, upcall->packet);
    xin.odp_actions = odp_actions;

    if (upcall->type == DPIF_UC_MISS) {
        xin.resubmit_stats = &stats;
    } else {
        /* For non-miss upcalls, there's a flow in the datapath which this
         * packet was accounted to. Presumably the revalidators will deal
         * with pushing its stats eventually. */
    }

    upcall->dump_seq = seq_read(udpif->dump_seq);
    upcall->reval_seq = seq_read(udpif->reval_seq);
    xlate_actions(&xin, &upcall->xout);
    upcall->xout_initialized = true;

    /* Special case for fail-open mode.
     *
     * If we are in fail-open mode, but we are connected to a controller too,
     * then we should send the packet up to the controller in the hope that it
     * will try to set up a flow and thereby allow us to exit fail-open.
     *
     * See the top-level comment in fail-open.c for more information.
     *
     * Copy packets before they are modified by execution. */
    if (upcall->xout.fail_open) {
        const struct ofpbuf *packet = upcall->packet;
        struct ofproto_packet_in *pin;

        pin = xmalloc(sizeof *pin);
        pin->up.packet = xmemdup(ofpbuf_data(packet), ofpbuf_size(packet));
        pin->up.packet_len = ofpbuf_size(packet);
        pin->up.reason = OFPR_NO_MATCH;
        pin->up.table_id = 0;
        pin->up.cookie = OVS_BE64_MAX;
        flow_get_metadata(upcall->flow, &pin->up.fmd);
        pin->send_len = 0; /* Not used for flow table misses. */
        pin->miss_type = OFPROTO_PACKET_IN_NO_MISS;
        ofproto_dpif_send_packet_in(upcall->ofproto, pin);
    }

    if (!upcall->xout.slow) {
        ofpbuf_use_const(&upcall->put_actions,
                         ofpbuf_data(upcall->xout.odp_actions),
                         ofpbuf_size(upcall->xout.odp_actions));
    } else {
        ofpbuf_init(&upcall->put_actions, 0);
        compose_slow_path(udpif, &upcall->xout, upcall->flow,
                          upcall->flow->in_port.odp_port,
                          &upcall->put_actions);
    }

    upcall->ukey = ukey_create_from_upcall(upcall);
}

static void
upcall_uninit(struct upcall *upcall)
{
    if (upcall) {
        if (upcall->xout_initialized) {
            xlate_out_uninit(&upcall->xout);
        }
        ofpbuf_uninit(&upcall->put_actions);
        if (!upcall->ukey_persists) {
            ukey_delete__(upcall->ukey);
        }
    }
}

static int
upcall_cb(const struct ofpbuf *packet, const struct flow *flow, ovs_u128 *ufid,
          enum dpif_upcall_type type, const struct nlattr *userdata,
          struct ofpbuf *actions, struct flow_wildcards *wc,
          struct ofpbuf *put_actions, void *aux)
{
    struct udpif *udpif = aux;
    unsigned int flow_limit;
    struct upcall upcall;
    bool megaflow;
    int error;

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);

    error = upcall_receive(&upcall, udpif->backer, packet, type, userdata,
                           flow, ufid);
    if (error) {
        return error;
    }

    error = process_upcall(udpif, &upcall, actions);
    if (error) {
        goto out;
    }

    if (upcall.xout.slow && put_actions) {
        ofpbuf_put(put_actions, ofpbuf_data(&upcall.put_actions),
                   ofpbuf_size(&upcall.put_actions));
    }

    if (OVS_LIKELY(wc)) {
        if (megaflow) {
            /* XXX: This could be avoided with sufficient API changes. */
            *wc = upcall.xout.wc;
        } else {
            flow_wildcards_init_for_packet(wc, flow);
        }
    }

    if (udpif_get_n_flows(udpif) >= flow_limit) {
        error = ENOSPC;
        goto out;
    }

    if (upcall.ukey && !ukey_install(udpif, upcall.ukey)) {
        error = ENOSPC;
    }

out:
    if (!error) {
        upcall.ukey_persists = true;
    }
    upcall_uninit(&upcall);
    return error;
}

static int
process_upcall(struct udpif *udpif, struct upcall *upcall,
               struct ofpbuf *odp_actions)
{
    const struct nlattr *userdata = upcall->userdata;
    const struct ofpbuf *packet = upcall->packet;
    const struct flow *flow = upcall->flow;

    switch (classify_upcall(upcall->type, userdata)) {
    case MISS_UPCALL:
        upcall_xlate(udpif, upcall, odp_actions);
        return 0;

    case SFLOW_UPCALL:
        if (upcall->sflow) {
            union user_action_cookie cookie;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.sflow);
            dpif_sflow_received(upcall->sflow, packet, flow,
                                flow->in_port.odp_port, &cookie);
        }
        break;

    case IPFIX_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;
            struct flow_tnl output_tunnel_key;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.ipfix);

            if (upcall->out_tun_key) {
                memset(&output_tunnel_key, 0, sizeof output_tunnel_key);
                odp_tun_key_from_attr(upcall->out_tun_key,
                                      &output_tunnel_key);
            }
            dpif_ipfix_bridge_sample(upcall->ipfix, packet, flow,
                                     flow->in_port.odp_port,
                                     cookie.ipfix.output_odp_port,
                                     upcall->out_tun_key ?
                                         &output_tunnel_key : NULL);
        }
        break;

    case FLOW_SAMPLE_UPCALL:
        if (upcall->ipfix) {
            union user_action_cookie cookie;

            memset(&cookie, 0, sizeof cookie);
            memcpy(&cookie, nl_attr_get(userdata), sizeof cookie.flow_sample);

            /* The flow reflects exactly the contents of the packet.
             * Sample the packet using it. */
            dpif_ipfix_flow_sample(upcall->ipfix, packet, flow,
                                   cookie.flow_sample.collector_set_id,
                                   cookie.flow_sample.probability,
                                   cookie.flow_sample.obs_domain_id,
                                   cookie.flow_sample.obs_point_id);
        }
        break;

    case BAD_UPCALL:
        break;
    }

    return EAGAIN;
}

static void
handle_upcalls(struct udpif *udpif, struct upcall *upcalls,
               size_t n_upcalls)
{
    struct dpif_op *opsp[UPCALL_MAX_BATCH * 2];
    struct ukey_op ops[UPCALL_MAX_BATCH * 2];
    unsigned int flow_limit;
    size_t n_ops, n_opsp, i;
    bool may_put;
    bool megaflow;

    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    atomic_read_relaxed(&enable_megaflows, &megaflow);

    may_put = udpif_get_n_flows(udpif) < flow_limit;

    /* Handle the packets individually in order of arrival.
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *     processes received packets for these protocols.
     *
     *   - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *     controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    for (i = 0; i < n_upcalls; i++) {
        struct upcall *upcall = &upcalls[i];
        const struct ofpbuf *packet = upcall->packet;
        struct ukey_op *op;

        if (upcall->vsp_adjusted) {
            /* This packet was received on a VLAN splinter port. We added a
             * VLAN to the packet to make the packet resemble the flow, but the
             * actions were composed assuming that the packet contained no
             * VLAN. So, we must remove the VLAN header from the packet before
             * trying to execute the actions. */
            if (ofpbuf_size(upcall->xout.odp_actions)) {
                eth_pop_vlan(CONST_CAST(struct ofpbuf *, upcall->packet));
            }

            /* Remove the flow vlan tags inserted by vlan splinter logic
             * to ensure megaflow masks generated match the data path flow. */
            CONST_CAST(struct flow *, upcall->flow)->vlan_tci = 0;
        }

        /* Do not install a flow into the datapath if:
         *
         *    - The datapath already has too many flows.
         *
         *    - We received this packet via some flow installed in the kernel
         *      already. */
        if (may_put && upcall->type == DPIF_UC_MISS) {
            struct udpif_key *ukey = upcall->ukey;

            upcall->ukey_persists = true;
            op = &ops[n_ops++];

            op->ukey = ukey;
            op->dop.type = DPIF_OP_FLOW_PUT;
            op->dop.u.flow_put.flags = DPIF_FP_CREATE;
            op->dop.u.flow_put.key = ukey->key;
            op->dop.u.flow_put.key_len = ukey->key_len;
            op->dop.u.flow_put.mask = ukey->mask;
            op->dop.u.flow_put.mask_len = ukey->mask_len;
            op->dop.u.flow_put.ufid = upcall->ufid;
            op->dop.u.flow_put.stats = NULL;
            op->dop.u.flow_put.actions = ofpbuf_data(ukey->actions);
            op->dop.u.flow_put.actions_len = ofpbuf_size(ukey->actions);
        }

        if (ofpbuf_size(upcall->xout.odp_actions)) {
            op = &ops[n_ops++];
            op->ukey = NULL;
            op->dop.type = DPIF_OP_EXECUTE;
            op->dop.u.execute.packet = CONST_CAST(struct ofpbuf *, packet);
            odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
                                    &op->dop.u.execute.md);
            op->dop.u.execute.actions = ofpbuf_data(upcall->xout.odp_actions);
            op->dop.u.execute.actions_len
                = ofpbuf_size(upcall->xout.odp_actions);
            op->dop.u.execute.needs_help
                = (upcall->xout.slow & SLOW_ACTION) != 0;
            op->dop.u.execute.probe = false;
        }
    }

    /* Execute batch.
     *
     * We install ukeys before installing the flows, locking them for exclusive
     * access by this thread for the period of installation. This ensures that
     * other threads won't attempt to delete the flows as we are creating them.
     */
    n_opsp = 0;
    for (i = 0; i < n_ops; i++) {
        struct udpif_key *ukey = ops[i].ukey;

        if (ukey) {
            /* If we can't install the ukey, don't install the flow. */
            if (!ukey_install_start(udpif, ukey)) {
                ukey_delete__(ukey);
                ops[i].ukey = NULL;
                continue;
            }
        }
        opsp[n_opsp++] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_opsp);
    for (i = 0; i < n_ops; i++) {
        if (ops[i].ukey) {
            ukey_install_finish(ops[i].ukey, ops[i].dop.error);
        }
    }
}

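/* Sizing note (illustrative, not part of the original file): 'ops' and
 * 'opsp' are dimensioned UPCALL_MAX_BATCH * 2 because each upcall can
 * contribute up to two datapath operations above: one DPIF_OP_FLOW_PUT to
 * install the flow and one DPIF_OP_EXECUTE to forward the packet that
 * triggered the upcall. */
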
static uint32_t
get_ufid_hash(const ovs_u128 *ufid)
{
    return ufid->u32[0];
}

static struct udpif_key *
ukey_lookup(struct udpif *udpif, const ovs_u128 *ufid)
{
    struct udpif_key *ukey;
    int idx = get_ufid_hash(ufid) % N_UMAPS;
    struct cmap *cmap = &udpif->ukeys[idx].cmap;

    CMAP_FOR_EACH_WITH_HASH (ukey, cmap_node, get_ufid_hash(ufid), cmap) {
        if (ovs_u128_equal(&ukey->ufid, ufid)) {
            return ukey;
        }
    }
    return NULL;
}

static struct udpif_key *
ukey_create__(const struct nlattr *key, size_t key_len,
              const struct nlattr *mask, size_t mask_len,
              bool ufid_present, const ovs_u128 *ufid,
              const struct ofpbuf *actions,
              uint64_t dump_seq, uint64_t reval_seq, long long int used)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    struct udpif_key *ukey = xmalloc(sizeof *ukey);

    memcpy(&ukey->keybuf, key, key_len);
    ukey->key = &ukey->keybuf.nla;
    ukey->key_len = key_len;
    memcpy(&ukey->maskbuf, mask, mask_len);
    ukey->mask = &ukey->maskbuf.nla;
    ukey->mask_len = mask_len;
    ukey->ufid_present = ufid_present;
    ukey->ufid = *ufid;
    ukey->hash = get_ufid_hash(&ukey->ufid);
    ukey->actions = ofpbuf_clone(actions);

    ovs_mutex_init(&ukey->mutex);
    ukey->dump_seq = dump_seq;
    ukey->reval_seq = reval_seq;
    ukey->flow_exists = false;
    ukey->created = time_msec();
    memset(&ukey->stats, 0, sizeof ukey->stats);
    ukey->stats.used = used;
    ukey->xcache = NULL;

    return ukey;
}

static struct udpif_key *
ukey_create_from_upcall(const struct upcall *upcall)
{
    struct odputil_keybuf keystub, maskstub;
    struct ofpbuf keybuf, maskbuf;
    bool recirc, megaflow;

    if (upcall->key_len) {
        ofpbuf_use_const(&keybuf, upcall->key, upcall->key_len);
    } else {
        /* dpif-netdev doesn't provide a netlink-formatted flow key in the
         * upcall, so convert the upcall's flow here. */
        ofpbuf_use_stack(&keybuf, &keystub, sizeof keystub);
        odp_flow_key_from_flow(&keybuf, upcall->flow, &upcall->xout.wc.masks,
                               upcall->flow->in_port.odp_port, true);
    }

    atomic_read_relaxed(&enable_megaflows, &megaflow);
    recirc = ofproto_dpif_get_enable_recirc(upcall->ofproto);
    ofpbuf_use_stack(&maskbuf, &maskstub, sizeof maskstub);
    if (megaflow) {
        size_t max_mpls;

        max_mpls = ofproto_dpif_get_max_mpls_depth(upcall->ofproto);
        odp_flow_key_from_mask(&maskbuf, &upcall->xout.wc.masks, upcall->flow,
                               UINT32_MAX, max_mpls, recirc);
    }

    return ukey_create__(ofpbuf_data(&keybuf), ofpbuf_size(&keybuf),
                         ofpbuf_data(&maskbuf), ofpbuf_size(&maskbuf),
                         true, upcall->ufid, &upcall->put_actions,
                         upcall->dump_seq, upcall->reval_seq, 0);
}

static int
ukey_create_from_dpif_flow(const struct udpif *udpif,
                           const struct dpif_flow *flow,
                           struct udpif_key **ukey)
{
    struct dpif_flow full_flow;
    struct ofpbuf actions;
    uint64_t dump_seq, reval_seq;
    uint64_t stub[DPIF_FLOW_BUFSIZE / 8];

    if (!flow->key_len) {
        struct ofpbuf buf;
        int err;

        /* If the key was not provided by the datapath, fetch the full flow. */
        ofpbuf_use_stack(&buf, &stub, sizeof stub);
        err = dpif_flow_get(udpif->dpif, NULL, 0, &flow->ufid, &buf,
                            &full_flow);
        if (err) {
            return err;
        }
        flow = &full_flow;
    }
    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    ofpbuf_use_const(&actions, flow->actions, flow->actions_len);
    *ukey = ukey_create__(flow->key, flow->key_len,
                          flow->mask, flow->mask_len, flow->ufid_present,
                          &flow->ufid, &actions, dump_seq, reval_seq,
                          flow->stats.used);
    return 0;
}

/* Attempts to insert a ukey into the shared ukey maps.
 *
 * On success, installs the ukey, returns true, and leaves the ukey in a
 * locked state. Otherwise, returns false. */
static bool
ukey_install_start(struct udpif *udpif, struct udpif_key *new_ukey)
    OVS_TRY_LOCK(true, new_ukey->mutex)
{
    struct umap *umap;
    struct udpif_key *old_ukey;
    uint32_t idx;
    bool locked = false;

    idx = new_ukey->hash % N_UMAPS;
    umap = &udpif->ukeys[idx];
    ovs_mutex_lock(&umap->mutex);
    old_ukey = ukey_lookup(udpif, &new_ukey->ufid);
    if (old_ukey) {
        /* Uncommon case: A ukey is already installed with the same UFID. */
        if (old_ukey->key_len == new_ukey->key_len
            && !memcmp(old_ukey->key, new_ukey->key, new_ukey->key_len)) {
            COVERAGE_INC(handler_duplicate_upcall);
        } else {
            struct ds ds = DS_EMPTY_INITIALIZER;

            odp_format_ufid(&old_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(old_ukey->key, old_ukey->key_len, &ds);
            ds_put_cstr(&ds, "\n");
            odp_format_ufid(&new_ukey->ufid, &ds);
            ds_put_cstr(&ds, " ");
            odp_flow_key_format(new_ukey->key, new_ukey->key_len, &ds);

            VLOG_WARN_RL(&rl, "Conflicting ukey for flows:\n%s", ds_cstr(&ds));
            ds_destroy(&ds);
        }
    } else {
        ovs_mutex_lock(&new_ukey->mutex);
        cmap_insert(&umap->cmap, &new_ukey->cmap_node, new_ukey->hash);
        locked = true;
    }
    ovs_mutex_unlock(&umap->mutex);

    return locked;
}

static void
ukey_install_finish__(struct udpif_key *ukey) OVS_REQUIRES(ukey->mutex)
{
    ukey->flow_exists = true;
}

static bool
ukey_install_finish(struct udpif_key *ukey, int error)
    OVS_RELEASES(ukey->mutex)
{
    if (!error) {
        ukey_install_finish__(ukey);
    }
    ovs_mutex_unlock(&ukey->mutex);
    return !error;
}

static bool
ukey_install(struct udpif *udpif, struct udpif_key *ukey)
{
    /* The usual way to keep 'ukey->flow_exists' in sync with the datapath is
     * to call ukey_install_start(), install the corresponding datapath flow,
     * then call ukey_install_finish(). The netdev interface using upcall_cb()
     * doesn't provide a function to separately finish the flow installation,
     * so we perform the operations together here.
     *
     * This is fine currently, as revalidator threads will only delete this
     * ukey during revalidator_sweep() and only if the dump_seq is mismatched.
     * It is unlikely for a revalidator thread to advance dump_seq and reach
     * the next GC phase between ukey creation and flow installation. */
    return ukey_install_start(udpif, ukey) && ukey_install_finish(ukey, 0);
}

/* Searches for a ukey in 'udpif->ukeys' that matches 'flow' and attempts to
 * lock the ukey. If the ukey does not exist, creates it.
 *
 * Returns 0 on success, setting *result to the matching ukey and returning it
 * in a locked state. Otherwise, returns an errno and clears *result. EBUSY
 * indicates that another thread is handling this flow. Other errors indicate
 * an unexpected condition creating a new ukey.
 *
 * *error is an output parameter provided to appease the threadsafety analyser,
 * and its value matches the return value. */
static int
ukey_acquire(struct udpif *udpif, const struct dpif_flow *flow,
             struct udpif_key **result, int *error)
    OVS_TRY_LOCK(0, (*result)->mutex)
{
    struct udpif_key *ukey;
    int retval;

    ukey = ukey_lookup(udpif, &flow->ufid);
    if (ukey) {
        retval = ovs_mutex_trylock(&ukey->mutex);
    } else {
        /* Usually we try to avoid installing flows from revalidator threads,
         * because locking on a umap may cause handler threads to block.
         * However there are certain cases, like when ovs-vswitchd is
         * restarted, where it is desirable to handle flows that exist in the
         * datapath gracefully (i.e., don't just clear the datapath). */
        bool install;

        retval = ukey_create_from_dpif_flow(udpif, flow, &ukey);
        if (retval) {
            goto done;
        }

        install = ukey_install_start(udpif, ukey);
        if (install) {
            ukey_install_finish__(ukey);
            retval = 0;
        } else {
            ukey_delete__(ukey);
            retval = EBUSY;
        }
    }

done:
    *error = retval;
    if (retval) {
        *result = NULL;
    } else {
        *result = ukey;
    }
    return retval;
}

static void
ukey_delete__(struct udpif_key *ukey)
    OVS_NO_THREAD_SAFETY_ANALYSIS
{
    if (ukey) {
        xlate_cache_delete(ukey->xcache);
        ofpbuf_delete(ukey->actions);
        ovs_mutex_destroy(&ukey->mutex);
        free(ukey);
    }
}

static void
ukey_delete(struct umap *umap, struct udpif_key *ukey)
    OVS_REQUIRES(umap->mutex)
{
    cmap_remove(&umap->cmap, &ukey->cmap_node, ukey->hash);
    ovsrcu_postpone(ukey_delete__, ukey);
}

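/* Note (illustrative, not part of the original file): ukey_delete() removes
 * the ukey from the cmap immediately but defers the actual free via
 * ovsrcu_postpone(), so concurrent RCU readers that still hold a pointer
 * obtained from ukey_lookup() remain safe until they quiesce. */
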
static bool
should_revalidate(const struct udpif *udpif, uint64_t packets,
                  long long int used)
{
    long long int metric, now, duration;

    if (udpif->dump_duration < 200) {
        /* We are likely to handle full revalidation for the flows. */
        return true;
    }

    /* Calculate the mean time between seeing these packets. If this
     * exceeds the threshold, then delete the flow rather than performing
     * costly revalidation for flows that aren't being hit frequently.
     *
     * This is targeted at situations where the dump_duration is high (~1s),
     * and revalidation is triggered by a call to udpif_revalidate(). In
     * these situations, revalidation of all flows causes fluctuations in the
     * flow_limit due to the interaction with the dump_duration and max_idle.
     * This tends to result in deletion of low-throughput flows anyway, so
     * skip the revalidation and just delete those flows. */
    packets = MAX(packets, 1);
    now = MAX(used, time_msec());
    duration = now - used;
    metric = duration / packets;

    if (metric < 200) {
        /* The flow is receiving more than ~5pps, so keep it. */
        return true;
    }
    return false;
}

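/* Worked example (illustrative, not part of the original file): a flow
 * whose stats advanced by 1000 packets with a last-used timestamp
 * 100000 ms in the past yields metric 100000 / 1000 == 100 ms per packet
 * (~10 pps); since 100 < 200 it is revalidated. With only 100 new packets
 * the metric is 1000 ms per packet (~1 pps), so the flow is deleted
 * instead. */
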
static bool
revalidate_ukey(struct udpif *udpif, struct udpif_key *ukey,
                const struct dpif_flow_stats *stats, uint64_t reval_seq)
    OVS_REQUIRES(ukey->mutex)
{
    uint64_t slow_path_buf[128 / 8];
    struct xlate_out xout, *xoutp;
    struct netflow *netflow;
    struct ofproto_dpif *ofproto;
    struct dpif_flow_stats push;
    struct ofpbuf xout_actions;
    struct flow flow, dp_mask;
    uint32_t *dp32, *xout32;
    ofp_port_t ofp_in_port;
    struct xlate_in xin;
    long long int last_used;
    int error;
    size_t i;
    bool ok;
    bool need_revalidate;

    ok = false;
    xoutp = NULL;
    netflow = NULL;

    need_revalidate = (ukey->reval_seq != reval_seq);
    last_used = ukey->stats.used;
    push.used = stats->used;
    push.tcp_flags = stats->tcp_flags;
    push.n_packets = (stats->n_packets > ukey->stats.n_packets
                      ? stats->n_packets - ukey->stats.n_packets
                      : 0);
    push.n_bytes = (stats->n_bytes > ukey->stats.n_bytes
                    ? stats->n_bytes - ukey->stats.n_bytes
                    : 0);

    if (need_revalidate && last_used
        && !should_revalidate(udpif, push.n_packets, last_used)) {
        ok = false;
        goto exit;
    }

    /* We will push the stats, so update the ukey stats cache. */
    ukey->stats = *stats;
    if (!push.n_packets && !need_revalidate) {
        ok = true;
        goto exit;
    }

    if (ukey->xcache && !need_revalidate) {
        xlate_push_stats(ukey->xcache, &push);
        ok = true;
        goto exit;
    }

    if (odp_flow_key_to_flow(ukey->key, ukey->key_len, &flow)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    error = xlate_lookup(udpif->backer, &flow, &ofproto, NULL, NULL, &netflow,
                         &ofp_in_port);
    if (error) {
        goto exit;
    }

    if (need_revalidate) {
        xlate_cache_clear(ukey->xcache);
    }
    if (!ukey->xcache) {
        ukey->xcache = xlate_cache_new();
    }

    xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
                  NULL);
    if (push.n_packets) {
        xin.resubmit_stats = &push;
        xin.may_learn = true;
    }
    xin.xcache = ukey->xcache;
    xin.skip_wildcards = !need_revalidate;
    xlate_actions(&xin, &xout);
    xoutp = &xout;

    if (!need_revalidate) {
        ok = true;
        goto exit;
    }

    if (!xout.slow) {
        ofpbuf_use_const(&xout_actions, ofpbuf_data(xout.odp_actions),
                         ofpbuf_size(xout.odp_actions));
    } else {
        ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
        compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
                          &xout_actions);
    }

    if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
        goto exit;
    }

    if (odp_flow_key_to_mask(ukey->mask, ukey->mask_len, &dp_mask, &flow)
        == ODP_FIT_ERROR) {
        goto exit;
    }

    /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
     * directly check that the masks are the same. Instead we check that the
     * mask in the kernel is more specific, i.e. less wildcarded, than what
     * we've calculated here. This guarantees we don't catch any packets we
     * shouldn't with the megaflow. */
    dp32 = (uint32_t *) &dp_mask;
    xout32 = (uint32_t *) &xout.wc.masks;
    for (i = 0; i < FLOW_U32S; i++) {
        if ((dp32[i] | xout32[i]) != dp32[i]) {
            goto exit;
        }
    }

    ok = true;

exit:
    ukey->reval_seq = reval_seq;
    if (netflow && !ok) {
        netflow_flow_clear(netflow, &flow);
    }
    xlate_out_uninit(xoutp);
    return ok;
}

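/* Bitwise example (illustrative, not part of the original file): if a
 * datapath mask word is 0xffff0000 but translation now requires
 * 0xffff00f0, then (0xffff0000 | 0xffff00f0) != 0xffff0000, so the
 * installed flow is more wildcarded than required, may match packets it no
 * longer should, and is therefore deleted. */
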
static void
delete_op_init__(struct udpif *udpif, struct ukey_op *op,
                 const struct dpif_flow *flow)
{
    op->ukey = NULL;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.u.flow_del.key = flow->key;
    op->dop.u.flow_del.key_len = flow->key_len;
    op->dop.u.flow_del.ufid = flow->ufid_present ? &flow->ufid : NULL;
    op->dop.u.flow_del.stats = &op->stats;
    atomic_read_relaxed(&udpif->enable_ufid, &op->dop.u.flow_del.terse);
}

static void
delete_op_init(struct udpif *udpif, struct ukey_op *op, struct udpif_key *ukey)
{
    op->ukey = ukey;
    op->dop.type = DPIF_OP_FLOW_DEL;
    op->dop.u.flow_del.key = ukey->key;
    op->dop.u.flow_del.key_len = ukey->key_len;
    op->dop.u.flow_del.ufid = ukey->ufid_present ? &ukey->ufid : NULL;
    op->dop.u.flow_del.stats = &op->stats;
    atomic_read_relaxed(&udpif->enable_ufid, &op->dop.u.flow_del.terse);
}

static void
push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
    struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
    size_t i;

    ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i].dop;
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    for (i = 0; i < n_ops; i++) {
        struct ukey_op *op = &ops[i];
        struct dpif_flow_stats *push, *stats, push_buf;

        stats = op->dop.u.flow_del.stats;
        push = &push_buf;

        if (op->ukey) {
            ovs_mutex_lock(&op->ukey->mutex);
            push->used = MAX(stats->used, op->ukey->stats.used);
            push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
            push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
            push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
            ovs_mutex_unlock(&op->ukey->mutex);
        } else {
            push = stats;
        }

        if (push->n_packets || netflow_exists()) {
            const struct nlattr *key = op->dop.u.flow_del.key;
            size_t key_len = op->dop.u.flow_del.key_len;
            struct ofproto_dpif *ofproto;
            struct netflow *netflow;
            ofp_port_t ofp_in_port;
            struct flow flow;
            int error;

            if (op->ukey) {
                ovs_mutex_lock(&op->ukey->mutex);
                if (op->ukey->xcache) {
                    xlate_push_stats(op->ukey->xcache, push);
                    ovs_mutex_unlock(&op->ukey->mutex);
                    continue;
                }
                ovs_mutex_unlock(&op->ukey->mutex);
                key = op->ukey->key;
                key_len = op->ukey->key_len;
            }

            if (odp_flow_key_to_flow(key, key_len, &flow)
                == ODP_FIT_ERROR) {
                continue;
            }

            error = xlate_lookup(udpif->backer, &flow, &ofproto,
                                 NULL, NULL, &netflow, &ofp_in_port);
            if (!error) {
                struct xlate_in xin;

                xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
                              push->tcp_flags, NULL);
                xin.resubmit_stats = push->n_packets ? push : NULL;
                xin.may_learn = push->n_packets > 0;
                xin.skip_wildcards = true;
                xlate_actions_for_side_effects(&xin);

                if (netflow) {
                    netflow_flow_clear(netflow, &flow);
                }
            }
        }
    }
}

static void
push_ukey_ops(struct udpif *udpif, struct umap *umap,
              struct ukey_op *ops, size_t n_ops)
{
    size_t i;

    push_ukey_ops__(udpif, ops, n_ops);
    ovs_mutex_lock(&umap->mutex);
    for (i = 0; i < n_ops; i++) {
        ukey_delete(umap, ops[i].ukey);
    }
    ovs_mutex_unlock(&umap->mutex);
}

static void
log_unexpected_flow(const struct dpif_flow *flow, int error)
{
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 60);
    struct ds ds = DS_EMPTY_INITIALIZER;

    ds_put_format(&ds, "Failed to acquire udpif_key corresponding to "
                  "unexpected flow (%s): ", ovs_strerror(error));
    odp_format_ufid(&flow->ufid, &ds);
    VLOG_WARN_RL(&rl, "%s", ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
revalidate(struct revalidator *revalidator)
{
    struct udpif *udpif = revalidator->udpif;
    struct dpif_flow_dump_thread *dump_thread;
    uint64_t dump_seq, reval_seq;
    unsigned int flow_limit;

    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
    dump_thread = dpif_flow_dump_thread_create(udpif->dump);
    for (;;) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        int n_ops = 0;

        struct dpif_flow flows[REVALIDATE_MAX_BATCH];
        const struct dpif_flow *f;
        int n_dumped;

        long long int max_idle;
        long long int now;
        size_t n_dp_flows;
        bool kill_them_all;

        n_dumped = dpif_flow_dump_next(dump_thread, flows, ARRAY_SIZE(flows));
        if (!n_dumped) {
            break;
        }

        now = time_msec();

        /* In normal operation we want to keep flows around until they have
         * been idle for 'ofproto_max_idle' milliseconds. However:
         *
         *     - If the number of datapath flows climbs above 'flow_limit',
         *       drop that down to 100 ms to try to bring the flows down to
         *       the limit.
         *
         *     - If the number of datapath flows climbs above twice
         *       'flow_limit', delete all the datapath flows as an emergency
         *       measure. (We reassess this condition for the next batch of
         *       datapath flows, so we will recover before all the flows are
         *       gone.) */
        n_dp_flows = udpif_get_n_flows(udpif);
        kill_them_all = n_dp_flows > flow_limit * 2;
        max_idle = n_dp_flows > flow_limit ? 100 : ofproto_max_idle;

        for (f = flows; f < &flows[n_dumped]; f++) {
            long long int used = f->stats.used;
            struct udpif_key *ukey;
            bool already_dumped, keep;
            int error;

            if (ukey_acquire(udpif, f, &ukey, &error)) {
                if (error == EBUSY) {
                    /* Another thread is processing this flow, so don't bother
                     * processing it here. */
                    COVERAGE_INC(upcall_ukey_contention);
                } else {
                    log_unexpected_flow(f, error);
                    if (error != ENOENT) {
                        delete_op_init__(udpif, &ops[n_ops++], f);
                    }
                }
                continue;
            }

            already_dumped = ukey->dump_seq == dump_seq;
            if (already_dumped) {
                /* The flow has already been handled during this flow dump
                 * operation. Skip it. */
                if (ukey->xcache) {
                    COVERAGE_INC(dumped_duplicate_flow);
                } else {
                    COVERAGE_INC(dumped_new_flow);
                }
                ovs_mutex_unlock(&ukey->mutex);
                continue;
            }

            if (!used) {
                used = ukey->created;
            }
            if (kill_them_all || (used && used < now - max_idle)) {
                keep = false;
            } else {
                keep = revalidate_ukey(udpif, ukey, &f->stats, reval_seq);
            }
            ukey->dump_seq = dump_seq;
            ukey->flow_exists = keep;

            if (!keep) {
                delete_op_init(udpif, &ops[n_ops++], ukey);
            }
            ovs_mutex_unlock(&ukey->mutex);
        }

        if (n_ops) {
            push_ukey_ops__(udpif, ops, n_ops);
        }
        ovsrcu_quiesce();
    }
    dpif_flow_dump_thread_destroy(dump_thread);
}

static bool
handle_missed_revalidation(struct udpif *udpif, uint64_t reval_seq,
                           struct udpif_key *ukey)
{
    struct dpif_flow_stats stats;
    bool keep;

    COVERAGE_INC(revalidate_missed_dp_flow);

    memset(&stats, 0, sizeof stats);
    ovs_mutex_lock(&ukey->mutex);
    keep = revalidate_ukey(udpif, ukey, &stats, reval_seq);
    ovs_mutex_unlock(&ukey->mutex);

    return keep;
}

static void
revalidator_sweep__(struct revalidator *revalidator, bool purge)
{
    struct udpif *udpif;
    uint64_t dump_seq, reval_seq;
    int slice;

    udpif = revalidator->udpif;
    dump_seq = seq_read(udpif->dump_seq);
    reval_seq = seq_read(udpif->reval_seq);
    slice = revalidator - udpif->revalidators;
    ovs_assert(slice < udpif->n_revalidators);

    for (int i = slice; i < N_UMAPS; i += udpif->n_revalidators) {
        struct ukey_op ops[REVALIDATE_MAX_BATCH];
        struct udpif_key *ukey;
        struct umap *umap = &udpif->ukeys[i];
        size_t n_ops = 0;

        CMAP_FOR_EACH (ukey, cmap_node, &umap->cmap) {
            bool flow_exists, seq_mismatch;

            /* Handler threads could be holding a ukey lock while they install
             * a new flow, so don't hang around waiting for access to it. */
            if (ovs_mutex_trylock(&ukey->mutex)) {
                continue;
            }
            flow_exists = ukey->flow_exists;
            seq_mismatch = (ukey->dump_seq != dump_seq
                            && ukey->reval_seq != reval_seq);
            ovs_mutex_unlock(&ukey->mutex);

            if (flow_exists
                && (purge
                    || (seq_mismatch
                        && !handle_missed_revalidation(udpif, reval_seq,
                                                       ukey)))) {
                struct ukey_op *op = &ops[n_ops++];

                delete_op_init(udpif, op, ukey);
                if (n_ops == REVALIDATE_MAX_BATCH) {
                    push_ukey_ops(udpif, umap, ops, n_ops);
                    n_ops = 0;
                }
            } else if (!flow_exists) {
                ovs_mutex_lock(&umap->mutex);
                ukey_delete(umap, ukey);
                ovs_mutex_unlock(&umap->mutex);
            }
        }

        if (n_ops) {
            push_ukey_ops(udpif, umap, ops, n_ops);
        }
        ovsrcu_quiesce();
    }
}

static void
revalidator_sweep(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, false);
}

static void
revalidator_purge(struct revalidator *revalidator)
{
    revalidator_sweep__(revalidator, true);
}

static void
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        size_t n_revalidators = udpif->n_revalidators;
        unsigned int flow_limit;
        bool ufid_enabled;
        size_t i;

        atomic_read_relaxed(&udpif->flow_limit, &flow_limit);
        atomic_read_relaxed(&udpif->enable_ufid, &ufid_enabled);

        ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
        ds_put_format(&ds, "\tflows         : (current %lu)"
                      " (avg %u) (max %u) (limit %u)\n",
                      udpif_get_n_flows(udpif), udpif->avg_n_flows,
                      udpif->max_n_flows, flow_limit);
        ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
        ds_put_format(&ds, "\tufid enabled  : ");
        if (ufid_enabled) {
            ds_put_format(&ds, "true\n");
        } else {
            ds_put_format(&ds, "false\n");
        }
        ds_put_char(&ds, '\n');

        for (i = 0; i < n_revalidators; i++) {
            struct revalidator *revalidator = &udpif->revalidators[i];
            int j, elements = 0;

            for (j = i; j < N_UMAPS; j += n_revalidators) {
                elements += cmap_count(&udpif->ukeys[j].cmap);
            }
            ds_put_format(&ds, "\t%u: (keys %d)\n", revalidator->id, elements);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

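/* Example output (illustrative; the numbers are invented):
 *
 *     system@ovs-system:
 *             flows         : (current 7) (avg 6) (max 117) (limit 10000)
 *             dump duration : 1ms
 *             ufid enabled  : true
 *
 *             50: (keys 3)
 */
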
/* Disables use of megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
                                 int argc OVS_UNUSED,
                                 const char *argv[] OVS_UNUSED,
                                 void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, false);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows disabled");
}

/* Re-enables use of megaflows.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
                                int argc OVS_UNUSED,
                                const char *argv[] OVS_UNUSED,
                                void *aux OVS_UNUSED)
{
    atomic_store_relaxed(&enable_megaflows, true);
    udpif_flush_all_datapaths();
    unixctl_command_reply(conn, "megaflows enabled");
}

/* Disables skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_disable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                            const char *argv[] OVS_UNUSED,
                            void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store(&udpif->enable_ufid, false);
    }
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID disabled");
}

/* Re-enables skipping flow attributes during flow dump.
 *
 * This command is only needed for advanced debugging, so it's not documented
 * in the man page. */
static void
upcall_unixctl_enable_ufid(struct unixctl_conn *conn, int argc OVS_UNUSED,
                           const char *argv[] OVS_UNUSED,
                           void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store(&udpif->enable_ufid, true);
    }
    unixctl_command_reply(conn, "Datapath dumping tersely using UFID enabled");
}

/* Sets the flow limit.
 *
 * This command is only needed for advanced debugging, so it's not
 * documented in the man page. */
static void
upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
                              int argc OVS_UNUSED,
                              const char *argv[],
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;
    unsigned int flow_limit = atoi(argv[1]);

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        atomic_store_relaxed(&udpif->flow_limit, flow_limit);
    }
    ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
upcall_unixctl_dump_wait(struct unixctl_conn *conn,
                         int argc OVS_UNUSED,
                         const char *argv[] OVS_UNUSED,
                         void *aux OVS_UNUSED)
{
    if (list_is_singleton(&all_udpifs)) {
        struct udpif *udpif = NULL;
        size_t len;

        udpif = OBJECT_CONTAINING(list_front(&all_udpifs), udpif, list_node);
        len = (udpif->n_conns + 1) * sizeof *udpif->conns;
        udpif->conn_seq = seq_read(udpif->dump_seq);
        udpif->conns = xrealloc(udpif->conns, len);
        udpif->conns[udpif->n_conns++] = conn;
    } else {
        unixctl_command_reply_error(conn, "can't wait on multiple udpifs.");
    }
}

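/* Usage sketch (illustrative, not part of the original file):
 * "ovs-appctl revalidator/wait" blocks until the next flow dump completes;
 * the stored conn is replied to from udpif_run() once 'dump_seq' moves past
 * the recorded 'conn_seq'. */
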
static void
upcall_unixctl_purge(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        size_t n;

        for (n = 0; n < udpif->n_revalidators; n++) {
            revalidator_purge(&udpif->revalidators[n]);
        }
    }
    unixctl_command_reply(conn, "");
}