1 /* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License. */
16 #include "ofproto-dpif-upcall.h"
25 #include "dynamic-string.h"
26 #include "fail-open.h"
27 #include "guarded-list.h"
32 #include "ofproto-dpif-ipfix.h"
33 #include "ofproto-dpif-sflow.h"
34 #include "ofproto-dpif-xlate.h"
36 #include "poll-loop.h"
41 #define MAX_QUEUE_LENGTH 512
42 #define FLOW_MISS_MAX_BATCH 50
43 #define REVALIDATE_MAX_BATCH 50
45 VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
47 COVERAGE_DEFINE(upcall_queue_overflow);
48 COVERAGE_DEFINE(upcall_duplicate_flow);
50 /* A thread that processes each upcall handed to it by the dispatcher thread,
51 * forwards the upcall's packet, and possibly sets up a kernel flow as a
52 * cache. */
53 struct handler {
54 struct udpif *udpif; /* Parent udpif. */
55 pthread_t thread; /* Thread ID. */
56 char *name; /* Thread name. */
58 struct ovs_mutex mutex; /* Mutex guarding the following. */
60 /* Atomic queue of unprocessed upcalls. */
61 struct list upcalls OVS_GUARDED;
62 size_t n_upcalls OVS_GUARDED;
64 bool need_signal; /* Only changed by the dispatcher. */
66 pthread_cond_t wake_cond; /* Wakes 'thread' while holding
67 'mutex'. */
70 /* A thread that processes each kernel flow handed to it by the flow_dumper
71 * thread, updates OpenFlow statistics, and updates or removes the kernel flow
72 * if necessary. */
73 struct revalidator {
74 struct udpif *udpif; /* Parent udpif. */
75 char *name; /* Thread name. */
77 pthread_t thread; /* Thread ID. */
78 struct hmap ukeys; /* Datapath flow keys. */
82 struct ovs_mutex mutex; /* Mutex guarding the following. */
83 pthread_cond_t wake_cond;
84 struct list udumps OVS_GUARDED; /* Unprocessed udumps. */
85 size_t n_udumps OVS_GUARDED; /* Number of unprocessed udumps. */
88 /* An upcall handler for ofproto_dpif.
90 * udpif has two logically separate pieces:
92 * - A "dispatcher" thread that reads upcalls from the kernel and dispatches
93 * them to one of several "handler" threads (see struct handler).
95 * - A "flow_dumper" thread that reads the kernel flow table and dispatches
96 * flows to one of several "revalidator" threads (see struct
97 * revalidator). */
98 struct udpif {
99 struct list list_node; /* In all_udpifs list. */
101 struct dpif *dpif; /* Datapath handle. */
102 struct dpif_backer *backer; /* Opaque dpif_backer pointer. */
104 uint32_t secret; /* Random seed for upcall hash. */
106 pthread_t dispatcher; /* Dispatcher thread ID. */
107 pthread_t flow_dumper; /* Flow dumper thread ID. */
109 struct handler *handlers; /* Upcall handlers. */
112 struct revalidator *revalidators; /* Flow revalidators. */
113 size_t n_revalidators;
115 uint64_t last_reval_seq; /* 'reval_seq' at last revalidation. */
116 struct seq *reval_seq; /* Incremented to force revalidation. */
118 struct seq *dump_seq; /* Increments each dump iteration. */
120 struct latch exit_latch; /* Tells child threads to exit. */
122 long long int dump_duration; /* Duration of the last flow dump. */
124 /* Datapath flow statistics. */
125 unsigned int max_n_flows;
126 unsigned int avg_n_flows;
128 atomic_uint flow_limit; /* Datapath flow hard limit. */
130 /* n_flows_mutex prevents multiple threads updating these concurrently. */
131 atomic_uint64_t n_flows; /* Number of flows in the datapath. */
132 atomic_llong n_flows_timestamp; /* Last time n_flows was updated. */
133 struct ovs_mutex n_flows_mutex;
137 BAD_UPCALL, /* Some kind of bug somewhere. */
138 MISS_UPCALL, /* A flow miss. */
139 SFLOW_UPCALL, /* sFlow sample. */
140 FLOW_SAMPLE_UPCALL, /* Per-flow sampling. */
141 IPFIX_UPCALL /* Per-bridge sampling. */
145 struct list list_node; /* For queuing upcalls. */
146 struct flow_miss *flow_miss; /* This upcall's flow_miss. */
148 /* Raw upcall plus data for keeping track of the memory backing it. */
149 struct dpif_upcall dpif_upcall; /* As returned by dpif_recv() */
150 struct ofpbuf upcall_buf; /* Owns some data in 'dpif_upcall'. */
151 uint64_t upcall_stub[512 / 8]; /* Buffer to reduce need for malloc(). */
154 /* 'udpif_key's are responsible for tracking the little bit of state udpif
155 * needs to do flow expiration which can't be pulled directly from the
156 * datapath. They are owned, created, maintained, and destroyed by a single
157 * revalidator, which makes them easy to handle efficiently with multiple threads. */
158 struct udpif_key {
159 struct hmap_node hmap_node; /* In parent revalidator 'ukeys' map. */
161 struct nlattr *key; /* Datapath flow key. */
162 size_t key_len; /* Length of 'key'. */
164 struct dpif_flow_stats stats; /* Stats at most recent flow dump. */
165 long long int created; /* Estimation of creation time. */
167 bool mark; /* Used by mark and sweep GC algorithm. */
168 bool flow_exists; /* Ensures flows are only deleted once. */
170 struct odputil_keybuf key_buf; /* Memory for 'key'. */
173 /* 'udpif_flow_dump's hold the state associated with one iteration in a flow
174 * dump operation. This is created by the flow_dumper thread and handed to the
175 * appropriate revalidator thread to be processed. */
176 struct udpif_flow_dump {
177 struct list list_node;
179 struct nlattr *key; /* Datapath flow key. */
180 size_t key_len; /* Length of 'key'. */
181 uint32_t key_hash; /* Hash of 'key'. */
183 struct odputil_keybuf mask_buf;
184 struct nlattr *mask; /* Datapath mask for 'key'. */
185 size_t mask_len; /* Length of 'mask'. */
187 struct dpif_flow_stats stats; /* Stats pulled from the datapath. */
189 bool need_revalidate; /* Key needs revalidation? */
191 struct odputil_keybuf key_buf;
194 /* Flow miss batching.
196 * Some dpifs implement operations faster when you hand them off in a batch.
197 * To allow batching, "struct flow_miss" queues the dpif-related work needed
198 * for a given flow. Each "struct flow_miss" corresponds to sending one or
199 * more packets, plus possibly installing the flow in the dpif. */
200 struct flow_miss {
201 struct hmap_node hmap_node;
202 struct ofproto_dpif *ofproto;
205 enum odp_key_fitness key_fitness;
206 const struct nlattr *key;
208 enum dpif_upcall_type upcall_type;
209 struct dpif_flow_stats stats;
210 odp_port_t odp_in_port;
212 uint64_t slow_path_buf[128 / 8];
213 struct odputil_keybuf mask_buf;
215 struct xlate_out xout;
220 static void upcall_destroy(struct upcall *);
222 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
223 static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs);
225 static void recv_upcalls(struct udpif *);
226 static void handle_upcalls(struct handler *handler, struct list *upcalls);
227 static void *udpif_flow_dumper(void *);
228 static void *udpif_dispatcher(void *);
229 static void *udpif_upcall_handler(void *);
230 static void *udpif_revalidator(void *);
231 static uint64_t udpif_get_n_flows(struct udpif *);
232 static void revalidate_udumps(struct revalidator *, struct list *udumps);
233 static void revalidator_sweep(struct revalidator *);
234 static void revalidator_purge(struct revalidator *);
235 static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
236 const char *argv[], void *aux);
237 static void upcall_unixctl_disable_megaflows(struct unixctl_conn *, int argc,
238 const char *argv[], void *aux);
239 static void upcall_unixctl_enable_megaflows(struct unixctl_conn *, int argc,
240 const char *argv[], void *aux);
241 static void upcall_unixctl_set_flow_limit(struct unixctl_conn *conn, int argc,
242 const char *argv[], void *aux);
243 static void ukey_delete(struct revalidator *, struct udpif_key *);
245 static atomic_bool enable_megaflows = ATOMIC_VAR_INIT(true);
248 udpif_create(struct dpif_backer *backer, struct dpif *dpif)
250 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
251 struct udpif *udpif = xzalloc(sizeof *udpif);
253 if (ovsthread_once_start(&once)) {
254 unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
256 unixctl_command_register("upcall/disable-megaflows", "", 0, 0,
257 upcall_unixctl_disable_megaflows, NULL);
258 unixctl_command_register("upcall/enable-megaflows", "", 0, 0,
259 upcall_unixctl_enable_megaflows, NULL);
260 unixctl_command_register("upcall/set-flow-limit", "", 1, 1,
261 upcall_unixctl_set_flow_limit, NULL);
262 ovsthread_once_done(&once);
266 udpif->backer = backer;
267 atomic_init(&udpif->flow_limit, MIN(ofproto_flow_limit, 10000));
268 udpif->secret = random_uint32();
269 udpif->reval_seq = seq_create();
270 udpif->dump_seq = seq_create();
271 latch_init(&udpif->exit_latch);
272 list_push_back(&all_udpifs, &udpif->list_node);
273 atomic_init(&udpif->n_flows, 0);
274 atomic_init(&udpif->n_flows_timestamp, LLONG_MIN);
275 ovs_mutex_init(&udpif->n_flows_mutex);
281 udpif_destroy(struct udpif *udpif)
283 udpif_set_threads(udpif, 0, 0);
286 list_remove(&udpif->list_node);
287 latch_destroy(&udpif->exit_latch);
288 seq_destroy(udpif->reval_seq);
289 seq_destroy(udpif->dump_seq);
290 ovs_mutex_destroy(&udpif->n_flows_mutex);
294 /* Tells 'udpif' how many threads it should use to handle upcalls. Disables
295 * all threads if 'n_handlers' and 'n_revalidators' are both zero. 'udpif''s
296 * datapath handle must have packet reception enabled before starting threads.
297 */
298 void
299 udpif_set_threads(struct udpif *udpif, size_t n_handlers,
300 size_t n_revalidators)
302 /* Stop the old threads (if any). */
303 if (udpif->handlers &&
304 (udpif->n_handlers != n_handlers
305 || udpif->n_revalidators != n_revalidators)) {
308 latch_set(&udpif->exit_latch);
310 for (i = 0; i < udpif->n_handlers; i++) {
311 struct handler *handler = &udpif->handlers[i];
313 ovs_mutex_lock(&handler->mutex);
314 xpthread_cond_signal(&handler->wake_cond);
315 ovs_mutex_unlock(&handler->mutex);
316 xpthread_join(handler->thread, NULL);
319 for (i = 0; i < udpif->n_revalidators; i++) {
320 struct revalidator *revalidator = &udpif->revalidators[i];
322 ovs_mutex_lock(&revalidator->mutex);
323 xpthread_cond_signal(&revalidator->wake_cond);
324 ovs_mutex_unlock(&revalidator->mutex);
325 xpthread_join(revalidator->thread, NULL);
328 xpthread_join(udpif->flow_dumper, NULL);
329 xpthread_join(udpif->dispatcher, NULL);
331 for (i = 0; i < udpif->n_revalidators; i++) {
332 struct revalidator *revalidator = &udpif->revalidators[i];
333 struct udpif_flow_dump *udump, *next_udump;
335 LIST_FOR_EACH_SAFE (udump, next_udump, list_node,
336 &revalidator->udumps) {
337 list_remove(&udump->list_node);
341 /* Delete ukeys, and delete all flows from the datapath to prevent
342 * double-counting stats. */
343 revalidator_purge(revalidator);
344 hmap_destroy(&revalidator->ukeys);
345 ovs_mutex_destroy(&revalidator->mutex);
347 free(revalidator->name);
350 for (i = 0; i < udpif->n_handlers; i++) {
351 struct handler *handler = &udpif->handlers[i];
352 struct upcall *miss, *next;
354 LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) {
355 list_remove(&miss->list_node);
356 upcall_destroy(miss);
358 ovs_mutex_destroy(&handler->mutex);
360 xpthread_cond_destroy(&handler->wake_cond);
363 latch_poll(&udpif->exit_latch);
365 free(udpif->revalidators);
366 udpif->revalidators = NULL;
367 udpif->n_revalidators = 0;
369 free(udpif->handlers);
370 udpif->handlers = NULL;
371 udpif->n_handlers = 0;
374 /* Start new threads (if necessary). */
375 if (!udpif->handlers && n_handlers) {
378 udpif->n_handlers = n_handlers;
379 udpif->n_revalidators = n_revalidators;
381 udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
382 for (i = 0; i < udpif->n_handlers; i++) {
383 struct handler *handler = &udpif->handlers[i];
385 handler->udpif = udpif;
386 list_init(&handler->upcalls);
387 handler->need_signal = false;
388 xpthread_cond_init(&handler->wake_cond, NULL);
389 ovs_mutex_init(&handler->mutex);
390 xpthread_create(&handler->thread, NULL, udpif_upcall_handler,
394 udpif->revalidators = xzalloc(udpif->n_revalidators
395 * sizeof *udpif->revalidators);
396 for (i = 0; i < udpif->n_revalidators; i++) {
397 struct revalidator *revalidator = &udpif->revalidators[i];
399 revalidator->udpif = udpif;
400 list_init(&revalidator->udumps);
401 hmap_init(&revalidator->ukeys);
402 ovs_mutex_init(&revalidator->mutex);
403 xpthread_cond_init(&revalidator->wake_cond, NULL);
404 xpthread_create(&revalidator->thread, NULL, udpif_revalidator,
407 xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif);
408 xpthread_create(&udpif->flow_dumper, NULL, udpif_flow_dumper, udpif);
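/* Illustrative usage sketch (not from the original source; the thread-count
 * variables below are assumptions for illustration only):
 *
 *     struct udpif *udpif = udpif_create(backer, backer->dpif);
 *     udpif_set_threads(udpif, n_handler_threads, n_revalidator_threads);
 *     ...
 *     udpif_set_threads(udpif, 0, 0);    stops all worker threads
 *     udpif_destroy(udpif);
 */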
412 /* Waits for all ongoing upcall translations to complete. This ensures that
413 * there are no transient references to any removed ofprotos (or other
414 * objects). In particular, this should be called after an ofproto is removed
415 * (e.g. via xlate_remove_ofproto()) but before it is destroyed. */
417 udpif_synchronize(struct udpif *udpif)
419 /* This is stronger than necessary. It would be sufficient to ensure
420 * (somehow) that each handler and revalidator thread had passed through
421 * its main loop once. */
422 size_t n_handlers = udpif->n_handlers;
423 size_t n_revalidators = udpif->n_revalidators;
424 udpif_set_threads(udpif, 0, 0);
425 udpif_set_threads(udpif, n_handlers, n_revalidators);
428 /* Notifies 'udpif' that something changed which may render previous
429 * xlate_actions() results invalid. */
431 udpif_revalidate(struct udpif *udpif)
433 seq_change(udpif->reval_seq);
436 /* Returns a seq which increments every time 'udpif' pulls stats from the
437 * datapath. Callers can use this to get a sense of when might be a good time
438 * to do periodic work which relies on relatively up-to-date statistics. */
440 udpif_dump_seq(struct udpif *udpif)
442 return udpif->dump_seq;
446 udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
450 simap_increase(usage, "dispatchers", 1);
451 simap_increase(usage, "flow_dumpers", 1);
453 simap_increase(usage, "handlers", udpif->n_handlers);
454 for (i = 0; i < udpif->n_handlers; i++) {
455 struct handler *handler = &udpif->handlers[i];
456 ovs_mutex_lock(&handler->mutex);
457 simap_increase(usage, "handler upcalls", handler->n_upcalls);
458 ovs_mutex_unlock(&handler->mutex);
461 simap_increase(usage, "revalidators", udpif->n_revalidators);
462 for (i = 0; i < udpif->n_revalidators; i++) {
463 struct revalidator *revalidator = &udpif->revalidators[i];
464 ovs_mutex_lock(&revalidator->mutex);
465 simap_increase(usage, "revalidator dumps", revalidator->n_udumps);
467 /* XXX: This isn't technically thread-safe because the revalidator's
468 * 'ukeys' map isn't protected by a mutex, since it's per-thread. */
469 simap_increase(usage, "revalidator keys",
470 hmap_count(&revalidator->ukeys));
471 ovs_mutex_unlock(&revalidator->mutex);
475 /* Removes all flows from all datapaths. */
481 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
482 dpif_flow_flush(udpif->dpif);
486 /* Destroys and deallocates 'upcall'. */
488 upcall_destroy(struct upcall *upcall)
491 ofpbuf_uninit(&upcall->dpif_upcall.packet);
492 ofpbuf_uninit(&upcall->upcall_buf);
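/* Returns the number of flows in the datapath, refreshing the cached count at
 * most every 100 ms; a trylock on 'n_flows_mutex' ensures callers never block
 * on the statistics update. */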
498 udpif_get_n_flows(struct udpif *udpif)
500 long long int time, now;
504 atomic_read(&udpif->n_flows_timestamp, &time);
505 if (time < now - 100 && !ovs_mutex_trylock(&udpif->n_flows_mutex)) {
506 struct dpif_dp_stats stats;
508 atomic_store(&udpif->n_flows_timestamp, now);
509 dpif_get_dp_stats(udpif->dpif, &stats);
510 flow_count = stats.n_flows;
511 atomic_store(&udpif->n_flows, flow_count);
512 ovs_mutex_unlock(&udpif->n_flows_mutex);
514 atomic_read(&udpif->n_flows, &flow_count);
519 /* The dispatcher thread is responsible for receiving upcalls from the kernel
520 * and assigning them to an upcall_handler thread. */
522 udpif_dispatcher(void *arg)
524 struct udpif *udpif = arg;
526 set_subprogram_name("dispatcher");
527 while (!latch_is_set(&udpif->exit_latch)) {
529 dpif_recv_wait(udpif->dpif);
530 latch_wait(&udpif->exit_latch);
538 udpif_flow_dumper(void *arg)
540 struct udpif *udpif = arg;
542 set_subprogram_name("flow_dumper");
543 while (!latch_is_set(&udpif->exit_latch)) {
544 const struct dpif_flow_stats *stats;
545 long long int start_time, duration;
546 const struct nlattr *key, *mask;
547 struct dpif_flow_dump dump;
548 size_t key_len, mask_len;
549 unsigned int flow_limit;
550 bool need_revalidate;
554 reval_seq = seq_read(udpif->reval_seq);
555 need_revalidate = udpif->last_reval_seq != reval_seq;
556 udpif->last_reval_seq = reval_seq;
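        /* Track the peak datapath flow count and keep a cheap running average
         * that weights recent samples more heavily. */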
558 n_flows = udpif_get_n_flows(udpif);
559 udpif->max_n_flows = MAX(n_flows, udpif->max_n_flows);
560 udpif->avg_n_flows = (udpif->avg_n_flows + n_flows) / 2;
562 start_time = time_msec();
563 dpif_flow_dump_start(&dump, udpif->dpif);
564 while (dpif_flow_dump_next(&dump, &key, &key_len, &mask, &mask_len,
566 && !latch_is_set(&udpif->exit_latch)) {
567 struct udpif_flow_dump *udump = xmalloc(sizeof *udump);
568 struct revalidator *revalidator;
570 udump->key_hash = hash_bytes(key, key_len, udpif->secret);
571 memcpy(&udump->key_buf, key, key_len);
572 udump->key = (struct nlattr *) &udump->key_buf;
573 udump->key_len = key_len;
575 memcpy(&udump->mask_buf, mask, mask_len);
576 udump->mask = (struct nlattr *) &udump->mask_buf;
577 udump->mask_len = mask_len;
579 udump->stats = *stats;
580 udump->need_revalidate = need_revalidate;
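            /* The key hash always maps a given flow to the same revalidator,
             * which owns the corresponding ukey.  If that revalidator is
             * already far behind, wait for it to drain part of its queue
             * before handing it more dumps. */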
582 revalidator = &udpif->revalidators[udump->key_hash
583 % udpif->n_revalidators];
585 ovs_mutex_lock(&revalidator->mutex);
586 while (revalidator->n_udumps >= REVALIDATE_MAX_BATCH * 3
587 && !latch_is_set(&udpif->exit_latch)) {
588 ovs_mutex_cond_wait(&revalidator->wake_cond,
589 &revalidator->mutex);
591 list_push_back(&revalidator->udumps, &udump->list_node);
592 revalidator->n_udumps++;
593 xpthread_cond_signal(&revalidator->wake_cond);
594 ovs_mutex_unlock(&revalidator->mutex);
596 dpif_flow_dump_done(&dump);
598 /* Let all the revalidators finish and garbage collect. */
599 seq_change(udpif->dump_seq);
600 for (i = 0; i < udpif->n_revalidators; i++) {
601 struct revalidator *revalidator = &udpif->revalidators[i];
602 ovs_mutex_lock(&revalidator->mutex);
603 xpthread_cond_signal(&revalidator->wake_cond);
604 ovs_mutex_unlock(&revalidator->mutex);
607 for (i = 0; i < udpif->n_revalidators; i++) {
608 struct revalidator *revalidator = &udpif->revalidators[i];
610 ovs_mutex_lock(&revalidator->mutex);
611 while (revalidator->dump_seq != seq_read(udpif->dump_seq)
612 && !latch_is_set(&udpif->exit_latch)) {
613 ovs_mutex_cond_wait(&revalidator->wake_cond,
614 &revalidator->mutex);
616 ovs_mutex_unlock(&revalidator->mutex);
619 duration = MAX(time_msec() - start_time, 1);
620 udpif->dump_duration = duration;
621 atomic_read(&udpif->flow_limit, &flow_limit);
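        /* Adapt the datapath flow limit to how long the dump took: shrink it
         * sharply when a full dump takes more than 2 s, trim it when it takes
         * more than 1.3 s, and let it grow while dumps are fast and the
         * datapath is busy; the result is clamped below ofproto_flow_limit. */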
622 if (duration > 2000) {
623 flow_limit /= duration / 1000;
624 } else if (duration > 1300) {
625 flow_limit = flow_limit * 3 / 4;
626 } else if (duration < 1000 && n_flows > 2000
627 && flow_limit < n_flows * 1000 / duration) {
630 flow_limit = MIN(ofproto_flow_limit, MAX(flow_limit, 1000));
631 atomic_store(&udpif->flow_limit, flow_limit);
633 if (duration > 2000) {
634 VLOG_INFO("Spent an unreasonably long %lldms dumping flows",
638 poll_timer_wait_until(start_time + MIN(ofproto_max_idle, 500));
639 seq_wait(udpif->reval_seq, udpif->last_reval_seq);
640 latch_wait(&udpif->exit_latch);
647 /* The miss handler thread is responsible for processing miss upcalls retrieved
648 * by the dispatcher thread. Once finished, it passes the processed miss
649 * upcalls to ofproto-dpif where they're installed in the datapath. */
651 udpif_upcall_handler(void *arg)
653 struct handler *handler = arg;
655 handler->name = xasprintf("handler_%u", ovsthread_id_self());
656 set_subprogram_name("%s", handler->name);
659 struct list misses = LIST_INITIALIZER(&misses);
662 ovs_mutex_lock(&handler->mutex);
664 if (latch_is_set(&handler->udpif->exit_latch)) {
665 ovs_mutex_unlock(&handler->mutex);
669 if (!handler->n_upcalls) {
670 ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex);
673 for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
674 if (handler->n_upcalls) {
675 handler->n_upcalls--;
676 list_push_back(&misses, list_pop_front(&handler->upcalls));
681 ovs_mutex_unlock(&handler->mutex);
683 handle_upcalls(handler, &misses);
690 udpif_revalidator(void *arg)
692 struct revalidator *revalidator = arg;
694 revalidator->name = xasprintf("revalidator_%u", ovsthread_id_self());
695 set_subprogram_name("%s", revalidator->name);
697 struct list udumps = LIST_INITIALIZER(&udumps);
698 struct udpif *udpif = revalidator->udpif;
701 ovs_mutex_lock(&revalidator->mutex);
702 if (latch_is_set(&udpif->exit_latch)) {
703 ovs_mutex_unlock(&revalidator->mutex);
707 if (!revalidator->n_udumps) {
708 if (revalidator->dump_seq != seq_read(udpif->dump_seq)) {
709 revalidator->dump_seq = seq_read(udpif->dump_seq);
710 revalidator_sweep(revalidator);
712 ovs_mutex_cond_wait(&revalidator->wake_cond,
713 &revalidator->mutex);
717 for (i = 0; i < REVALIDATE_MAX_BATCH && revalidator->n_udumps; i++) {
718 list_push_back(&udumps, list_pop_front(&revalidator->udumps));
719 revalidator->n_udumps--;
722 /* Wake up the flow dumper. */
723 xpthread_cond_signal(&revalidator->wake_cond);
724 ovs_mutex_unlock(&revalidator->mutex);
726 if (!list_is_empty(&udumps)) {
727 revalidate_udumps(revalidator, &udumps);
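/* Determines the type of 'upcall': a flow miss, an sFlow sample, a per-flow
 * (IPFIX) sample, a per-bridge IPFIX sample, or a malformed upcall, based on
 * the dpif upcall type and the userspace action cookie. */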
734 static enum upcall_type
735 classify_upcall(const struct upcall *upcall)
737 const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall;
738 union user_action_cookie cookie;
741 /* First look at the upcall type. */
742 switch (dpif_upcall->type) {
749 case DPIF_N_UC_TYPES:
751 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
756 /* "action" upcalls need a closer look. */
757 if (!dpif_upcall->userdata) {
758 VLOG_WARN_RL(&rl, "action upcall missing cookie");
761 userdata_len = nl_attr_get_size(dpif_upcall->userdata);
762 if (userdata_len < sizeof cookie.type
763 || userdata_len > sizeof cookie) {
764 VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
768 memset(&cookie, 0, sizeof cookie);
769 memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len);
770 if (userdata_len == MAX(8, sizeof cookie.sflow)
771 && cookie.type == USER_ACTION_COOKIE_SFLOW) {
773 } else if (userdata_len == MAX(8, sizeof cookie.slow_path)
774 && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
776 } else if (userdata_len == MAX(8, sizeof cookie.flow_sample)
777 && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
778 return FLOW_SAMPLE_UPCALL;
779 } else if (userdata_len == MAX(8, sizeof cookie.ipfix)
780 && cookie.type == USER_ACTION_COOKIE_IPFIX) {
783 VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
784 " and size %"PRIuSIZE, cookie.type, userdata_len);
790 recv_upcalls(struct udpif *udpif)
795 uint32_t hash = udpif->secret;
796 struct handler *handler;
797 struct upcall *upcall;
798 size_t n_bytes, left;
802 upcall = xmalloc(sizeof *upcall);
803 ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub,
804 sizeof upcall->upcall_stub);
805 error = dpif_recv(udpif->dpif, &upcall->dpif_upcall,
806 &upcall->upcall_buf);
808 /* upcall_destroy() can only be called on successfully received
809 * upcalls. */
810 ofpbuf_uninit(&upcall->upcall_buf);
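        /* Choose a handler by hashing the 32-bit in_port and TCP/UDP port
         * attributes of the flow key, so that all upcalls for a given flow
         * land on the same handler thread. */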
816 NL_ATTR_FOR_EACH (nla, left, upcall->dpif_upcall.key,
817 upcall->dpif_upcall.key_len) {
818 enum ovs_key_attr type = nl_attr_type(nla);
819 if (type == OVS_KEY_ATTR_IN_PORT
820 || type == OVS_KEY_ATTR_TCP
821 || type == OVS_KEY_ATTR_UDP) {
822 if (nl_attr_get_size(nla) == 4) {
823 hash = mhash_add(hash, nl_attr_get_u32(nla));
827 "Netlink attribute with incorrect size.");
831 hash = mhash_finish(hash, n_bytes);
833 handler = &udpif->handlers[hash % udpif->n_handlers];
835 ovs_mutex_lock(&handler->mutex);
836 if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
837 list_push_back(&handler->upcalls, &upcall->list_node);
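            /* Defer waking the handler until a full batch of upcalls has
             * accumulated; handlers still waiting on a partial batch are
             * signalled after the receive loop (below). */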
838 if (handler->n_upcalls == 0) {
839 handler->need_signal = true;
841 handler->n_upcalls++;
842 if (handler->need_signal &&
843 handler->n_upcalls >= FLOW_MISS_MAX_BATCH) {
844 handler->need_signal = false;
845 xpthread_cond_signal(&handler->wake_cond);
847 ovs_mutex_unlock(&handler->mutex);
848 if (!VLOG_DROP_DBG(&rl)) {
849 struct ds ds = DS_EMPTY_INITIALIZER;
851 odp_flow_key_format(upcall->dpif_upcall.key,
852 upcall->dpif_upcall.key_len,
854 VLOG_DBG("dispatcher: enqueue (%s)", ds_cstr(&ds));
858 ovs_mutex_unlock(&handler->mutex);
859 COVERAGE_INC(upcall_queue_overflow);
860 upcall_destroy(upcall);
864 for (n = 0; n < udpif->n_handlers; ++n) {
865 struct handler *handler = &udpif->handlers[n];
867 if (handler->need_signal) {
868 handler->need_signal = false;
869 ovs_mutex_lock(&handler->mutex);
870 xpthread_cond_signal(&handler->wake_cond);
871 ovs_mutex_unlock(&handler->mutex);
876 /* Calculates slow-path actions for 'xout'. 'buf' must be statically
877 * initialized with at least 128 bytes of space. */
879 compose_slow_path(struct udpif *udpif, struct xlate_out *xout,
880 odp_port_t odp_in_port, struct ofpbuf *buf)
882 union user_action_cookie cookie;
886 cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
887 cookie.slow_path.unused = 0;
888 cookie.slow_path.reason = xout->slow;
890 port = xout->slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)
893 pid = dpif_port_get_pid(udpif->dpif, port);
894 odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, buf);
897 static struct flow_miss *
898 flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
899 const struct flow *flow, uint32_t hash)
901 struct flow_miss *miss;
903 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
904 if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
913 handle_upcalls(struct handler *handler, struct list *upcalls)
915 struct hmap misses = HMAP_INITIALIZER(&misses);
916 struct udpif *udpif = handler->udpif;
918 struct flow_miss miss_buf[FLOW_MISS_MAX_BATCH];
919 struct dpif_op *opsp[FLOW_MISS_MAX_BATCH * 2];
920 struct dpif_op ops[FLOW_MISS_MAX_BATCH * 2];
921 struct flow_miss *miss, *next_miss;
922 struct upcall *upcall, *next;
923 size_t n_misses, n_ops, i;
924 unsigned int flow_limit;
925 bool fail_open, may_put;
926 enum upcall_type type;
928 atomic_read(&udpif->flow_limit, &flow_limit);
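    /* Only install new datapath flows while we are below the current flow
     * limit; above it, packets are still forwarded but no flow is put. */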
929 may_put = udpif_get_n_flows(udpif) < flow_limit;
931 /* Extract the flow from each upcall. Construct in 'misses' a hash table
932 * that maps each unique flow to a 'struct flow_miss'.
934 * Most commonly there is a single packet per flow_miss, but there are
935 * several reasons why there might be more than one, e.g.:
937 * - The dpif packet interface does not support TSO (or UFO, etc.), so a
938 * large packet sent to userspace is split into a sequence of smaller
939 * ones.
941 * - A stream of quickly arriving packets in an established "slow-pathed"
942 * flow.
944 * - Rarely, a stream of quickly arriving packets in a flow not yet
945 * established. (This is rare because most protocols do not send
946 * multiple back-to-back packets before receiving a reply from the
947 * other end of the connection, which gives OVS a chance to set up a
948 * datapath flow.) */
951 LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
952 struct dpif_upcall *dupcall = &upcall->dpif_upcall;
953 struct flow_miss *miss = &miss_buf[n_misses];
954 struct ofpbuf *packet = &dupcall->packet;
955 struct flow_miss *existing_miss;
956 struct ofproto_dpif *ofproto;
957 struct dpif_sflow *sflow;
958 struct dpif_ipfix *ipfix;
959 odp_port_t odp_in_port;
963 error = xlate_receive(udpif->backer, packet, dupcall->key,
964 dupcall->key_len, &flow, &miss->key_fitness,
965 &ofproto, &ipfix, &sflow, NULL, &odp_in_port);
967 if (error == ENODEV) {
968 /* Received packet on datapath port for which we couldn't
969 * associate an ofproto. This can happen if a port is removed
970 * while traffic is being received. Print a rate-limited
971 * message in case it happens frequently. Install a drop flow
972 * so that future packets of the flow are inexpensively dropped
973 * in the kernel. */
974 VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
975 "port %"PRIu32, odp_in_port);
976 dpif_flow_put(udpif->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
977 dupcall->key, dupcall->key_len, NULL, 0, NULL, 0,
980 list_remove(&upcall->list_node);
981 upcall_destroy(upcall);
985 type = classify_upcall(upcall);
986 if (type == MISS_UPCALL) {
989 flow_extract(packet, flow.skb_priority, flow.pkt_mark,
990 &flow.tunnel, &flow.in_port, &miss->flow);
992 hash = flow_hash(&miss->flow, 0);
993 existing_miss = flow_miss_find(&misses, ofproto, &miss->flow,
995 if (!existing_miss) {
996 hmap_insert(&misses, &miss->hmap_node, hash);
997 miss->ofproto = ofproto;
998 miss->key = dupcall->key;
999 miss->key_len = dupcall->key_len;
1000 miss->upcall_type = dupcall->type;
1001 miss->stats.n_packets = 0;
1002 miss->stats.n_bytes = 0;
1003 miss->stats.used = time_msec();
1004 miss->stats.tcp_flags = 0;
1005 miss->odp_in_port = odp_in_port;
1010 miss = existing_miss;
1012 miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
1013 miss->stats.n_bytes += packet->size;
1014 miss->stats.n_packets++;
1016 upcall->flow_miss = miss;
1023 union user_action_cookie cookie;
1025 memset(&cookie, 0, sizeof cookie);
1026 memcpy(&cookie, nl_attr_get(dupcall->userdata),
1027 sizeof cookie.sflow);
1028 dpif_sflow_received(sflow, packet, &flow, odp_in_port,
1034 dpif_ipfix_bridge_sample(ipfix, packet, &flow);
1037 case FLOW_SAMPLE_UPCALL:
1039 union user_action_cookie cookie;
1041 memset(&cookie, 0, sizeof cookie);
1042 memcpy(&cookie, nl_attr_get(dupcall->userdata),
1043 sizeof cookie.flow_sample);
1045 /* The flow reflects exactly the contents of the packet.
1046 * Sample the packet using it. */
1047 dpif_ipfix_flow_sample(ipfix, packet, &flow,
1048 cookie.flow_sample.collector_set_id,
1049 cookie.flow_sample.probability,
1050 cookie.flow_sample.obs_domain_id,
1051 cookie.flow_sample.obs_point_id);
1060 dpif_ipfix_unref(ipfix);
1061 dpif_sflow_unref(sflow);
1063 list_remove(&upcall->list_node);
1064 upcall_destroy(upcall);
1067 /* Initialize each 'struct flow_miss's ->xout.
1069 * We do this per-flow_miss rather than per-packet because, most commonly,
1070 * all the packets in a flow can use the same translation.
1072 * We can't do this in the previous loop because we need the TCP flags for
1073 * all the packets in each miss. */
1075 HMAP_FOR_EACH (miss, hmap_node, &misses) {
1076 struct xlate_in xin;
1078 xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL,
1079 miss->stats.tcp_flags, NULL);
1080 xin.may_learn = true;
1082 if (miss->upcall_type == DPIF_UC_MISS) {
1083 xin.resubmit_stats = &miss->stats;
1085 /* For non-miss upcalls, there's a flow in the datapath which this
1086 * packet was accounted to. Presumably the revalidators will deal
1087 * with pushing its stats eventually. */
1090 xlate_actions(&xin, &miss->xout);
1091 fail_open = fail_open || miss->xout.fail_open;
1094 /* Now handle the packets individually in order of arrival. In the common
1095 * case each packet of a miss can share the same actions, but slow-pathed
1096 * packets need to be translated individually:
1098 * - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
1099 * processes received packets for these protocols.
1101 * - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
1102 * controller.
1104 * The loop fills 'ops' with an array of operations to execute in the
1105 * datapath. */
1107 LIST_FOR_EACH (upcall, list_node, upcalls) {
1108 struct flow_miss *miss = upcall->flow_miss;
1109 struct ofpbuf *packet = &upcall->dpif_upcall.packet;
1111 ovs_be16 flow_vlan_tci;
1113 /* Save a copy of flow.vlan_tci in case it is changed to
1114 * generate proper megaflow masks for VLAN splinter flows. */
1115 flow_vlan_tci = miss->flow.vlan_tci;
1117 if (miss->xout.slow) {
1118 struct xlate_in xin;
1120 xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, 0, packet);
1121 xlate_actions_for_side_effects(&xin);
1124 if (miss->flow.in_port.ofp_port
1125 != vsp_realdev_to_vlandev(miss->ofproto,
1126 miss->flow.in_port.ofp_port,
1127 miss->flow.vlan_tci)) {
1128 /* This packet was received on a VLAN splinter port. We
1129 * added a VLAN to the packet to make the packet resemble
1130 * the flow, but the actions were composed assuming that
1131 * the packet contained no VLAN. So, we must remove the
1132 * VLAN header from the packet before trying to execute the
1133 * actions. */
1134 if (miss->xout.odp_actions.size) {
1135 eth_pop_vlan(packet);
1138 /* Remove the flow VLAN tags inserted by VLAN splinter logic
1139 * to ensure the megaflow masks generated match the datapath flow. */
1140 miss->flow.vlan_tci = 0;
1143 /* Do not install a flow into the datapath if:
1145 * - The datapath already has too many flows.
1147 * - An earlier iteration of this loop already put the same flow.
1149 * - We received this packet via some flow installed in the kernel
1150 * already. */
1153 && upcall->dpif_upcall.type == DPIF_UC_MISS) {
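            /* Build the megaflow mask from the wildcards computed during
             * translation (unless megaflows have been disabled via
             * upcall/disable-megaflows) and queue a flow_put operation for
             * the datapath. */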
1159 atomic_read(&enable_megaflows, &megaflow);
1160 ofpbuf_use_stack(&mask, &miss->mask_buf, sizeof miss->mask_buf);
1162 odp_flow_key_from_mask(&mask, &miss->xout.wc.masks,
1163 &miss->flow, UINT32_MAX);
1167 op->type = DPIF_OP_FLOW_PUT;
1168 op->u.flow_put.flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
1169 op->u.flow_put.key = miss->key;
1170 op->u.flow_put.key_len = miss->key_len;
1171 op->u.flow_put.mask = mask.data;
1172 op->u.flow_put.mask_len = mask.size;
1173 op->u.flow_put.stats = NULL;
1175 if (!miss->xout.slow) {
1176 op->u.flow_put.actions = miss->xout.odp_actions.data;
1177 op->u.flow_put.actions_len = miss->xout.odp_actions.size;
1181 ofpbuf_use_stack(&buf, miss->slow_path_buf,
1182 sizeof miss->slow_path_buf);
1183 compose_slow_path(udpif, &miss->xout, miss->odp_in_port, &buf);
1184 op->u.flow_put.actions = buf.data;
1185 op->u.flow_put.actions_len = buf.size;
1189 /*
1190 * The 'miss' may be shared by multiple upcalls. Restore
1191 * the saved flow vlan_tci field before processing the next
1192 * upcall. */
1193 miss->flow.vlan_tci = flow_vlan_tci;
1195 if (miss->xout.odp_actions.size) {
1198 op->type = DPIF_OP_EXECUTE;
1199 op->u.execute.key = miss->key;
1200 op->u.execute.key_len = miss->key_len;
1201 op->u.execute.packet = packet;
1202 op->u.execute.actions = miss->xout.odp_actions.data;
1203 op->u.execute.actions_len = miss->xout.odp_actions.size;
1204 op->u.execute.needs_help = (miss->xout.slow & SLOW_ACTION) != 0;
1208 /* Special case for fail-open mode.
1210 * If we are in fail-open mode, but we are connected to a controller too,
1211 * then we should send the packet up to the controller in the hope that it
1212 * will try to set up a flow and thereby allow us to exit fail-open.
1214 * See the top-level comment in fail-open.c for more information.
1216 * Copy packets before they are modified by execution. */
1217 if (fail_open) {
1218 LIST_FOR_EACH (upcall, list_node, upcalls) {
1219 struct flow_miss *miss = upcall->flow_miss;
1220 struct ofpbuf *packet = &upcall->dpif_upcall.packet;
1221 struct ofproto_packet_in *pin;
1223 pin = xmalloc(sizeof *pin);
1224 pin->up.packet = xmemdup(packet->data, packet->size);
1225 pin->up.packet_len = packet->size;
1226 pin->up.reason = OFPR_NO_MATCH;
1227 pin->up.table_id = 0;
1228 pin->up.cookie = OVS_BE64_MAX;
1229 flow_get_metadata(&miss->flow, &pin->up.fmd);
1230 pin->send_len = 0; /* Not used for flow table misses. */
1231 pin->generated_by_table_miss = false;
1232 ofproto_dpif_send_packet_in(miss->ofproto, pin);
1236 /* Execute batch. */
1237 for (i = 0; i < n_ops; i++) {
1238 opsp[i] = &ops[i];
1239 }
1240 dpif_operate(udpif->dpif, opsp, n_ops);
1242 HMAP_FOR_EACH_SAFE (miss, next_miss, hmap_node, &misses) {
1243 hmap_remove(&misses, &miss->hmap_node);
1244 xlate_out_uninit(&miss->xout);
1246 hmap_destroy(&misses);
1248 LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
1249 list_remove(&upcall->list_node);
1250 upcall_destroy(upcall);
1254 static struct udpif_key *
1255 ukey_lookup(struct revalidator *revalidator, struct udpif_flow_dump *udump)
1257 struct udpif_key *ukey;
1259 HMAP_FOR_EACH_WITH_HASH (ukey, hmap_node, udump->key_hash,
1260 &revalidator->ukeys) {
1261 if (ukey->key_len == udump->key_len
1262 && !memcmp(ukey->key, udump->key, udump->key_len)) {
1269 static struct udpif_key *
1270 ukey_create(const struct nlattr *key, size_t key_len, long long int used)
1272 struct udpif_key *ukey = xmalloc(sizeof *ukey);
1274 ukey->key = (struct nlattr *) &ukey->key_buf;
1275 memcpy(&ukey->key_buf, key, key_len);
1276 ukey->key_len = key_len;
1279 ukey->flow_exists = true;
1280 ukey->created = used ? used : time_msec();
1281 memset(&ukey->stats, 0, sizeof ukey->stats);
1287 ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey)
1289 hmap_remove(&revalidator->ukeys, &ukey->hmap_node);
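/* Checks that the datapath actions and mask for the flow identified by 'ukey'
 * still match what translation produces, pushing any new statistics to the
 * ofproto layer.  Returns true if the flow may stay in the datapath, false if
 * it should be removed. */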
1294 revalidate_ukey(struct udpif *udpif, struct udpif_flow_dump *udump,
1295 struct udpif_key *ukey)
1297 struct ofpbuf xout_actions, *actions;
1298 uint64_t slow_path_buf[128 / 8];
1299 struct xlate_out xout, *xoutp;
1300 struct netflow *netflow;
1301 struct flow flow, udump_mask;
1302 struct ofproto_dpif *ofproto;
1303 struct dpif_flow_stats push;
1304 uint32_t *udump32, *xout32;
1305 odp_port_t odp_in_port;
1306 struct xlate_in xin;
1316 /* If we don't need to revalidate, we can simply push the stats contained
1317 * in the udump, otherwise we'll have to get the actions so we can check
1318 * them. */
1319 if (udump->need_revalidate) {
1320 if (dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &actions,
1326 push.used = udump->stats.used;
1327 push.tcp_flags = udump->stats.tcp_flags;
1328 push.n_packets = udump->stats.n_packets > ukey->stats.n_packets
1329 ? udump->stats.n_packets - ukey->stats.n_packets
1331 push.n_bytes = udump->stats.n_bytes > ukey->stats.n_bytes
1332 ? udump->stats.n_bytes - ukey->stats.n_bytes
1334 ukey->stats = udump->stats;
1336 if (!push.n_packets && !udump->need_revalidate) {
1341 error = xlate_receive(udpif->backer, NULL, ukey->key, ukey->key_len, &flow,
1342 NULL, &ofproto, NULL, NULL, &netflow, &odp_in_port);
1347 xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL);
1348 xin.resubmit_stats = push.n_packets ? &push : NULL;
1349 xin.may_learn = push.n_packets > 0;
1350 xin.skip_wildcards = !udump->need_revalidate;
1351 xlate_actions(&xin, &xout);
1354 if (!udump->need_revalidate) {
1360 ofpbuf_use_const(&xout_actions, xout.odp_actions.data,
1361 xout.odp_actions.size);
1363 ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
1364 compose_slow_path(udpif, &xout, odp_in_port, &xout_actions);
1367 if (!ofpbuf_equal(&xout_actions, actions)) {
1371 if (odp_flow_key_to_mask(udump->mask, udump->mask_len, &udump_mask, &flow)
1376 /* Since the kernel is free to ignore wildcarded bits in the mask, we can't
1377 * directly check that the masks are the same. Instead we check that the
1378 * mask in the kernel is more specific, i.e. less wildcarded, than what
1379 * we've calculated here. This guarantees we don't catch any packets we
1380 * shouldn't with the megaflow. */
1381 udump32 = (uint32_t *) &udump_mask;
1382 xout32 = (uint32_t *) &xout.wc.masks;
1383 for (i = 0; i < FLOW_U32S; i++) {
1384 if ((udump32[i] | xout32[i]) != udump32[i]) {
1393 netflow_flow_clear(netflow, &flow);
1395 netflow_unref(netflow);
1397 ofpbuf_delete(actions);
1398 xlate_out_uninit(xoutp);
1403 struct udpif_key *ukey;
1404 struct udpif_flow_dump *udump;
1405 struct dpif_flow_stats stats; /* Stats for 'op'. */
1406 struct dpif_op op; /* Flow del operation. */
1410 dump_op_init(struct dump_op *op, const struct nlattr *key, size_t key_len,
1411 struct udpif_key *ukey, struct udpif_flow_dump *udump)
1415 op->op.type = DPIF_OP_FLOW_DEL;
1416 op->op.u.flow_del.key = key;
1417 op->op.u.flow_del.key_len = key_len;
1418 op->op.u.flow_del.stats = &op->stats;
1422 push_dump_ops(struct revalidator *revalidator,
1423 struct dump_op *ops, size_t n_ops)
1425 struct udpif *udpif = revalidator->udpif;
1426 struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
1429 ovs_assert(n_ops <= REVALIDATE_MAX_BATCH);
1430 for (i = 0; i < n_ops; i++) {
1431 opsp[i] = &ops[i].op;
1433 dpif_operate(udpif->dpif, opsp, n_ops);
1435 for (i = 0; i < n_ops; i++) {
1436 struct dump_op *op = &ops[i];
1437 struct dpif_flow_stats *push, *stats, push_buf;
1439 stats = op->op.u.flow_del.stats;
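        /* Attribute only the packets and bytes seen since the last dump to
         * this deletion, so that statistics are not double-counted. */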
1442 push->used = MAX(stats->used, op->ukey->stats.used);
1443 push->tcp_flags = stats->tcp_flags | op->ukey->stats.tcp_flags;
1444 push->n_packets = stats->n_packets - op->ukey->stats.n_packets;
1445 push->n_bytes = stats->n_bytes - op->ukey->stats.n_bytes;
1450 if (push->n_packets || netflow_exists()) {
1451 struct ofproto_dpif *ofproto;
1452 struct netflow *netflow;
1455 if (!xlate_receive(udpif->backer, NULL, op->op.u.flow_del.key,
1456 op->op.u.flow_del.key_len, &flow, NULL,
1457 &ofproto, NULL, NULL, &netflow, NULL)) {
1458 struct xlate_in xin;
1460 xlate_in_init(&xin, ofproto, &flow, NULL, push->tcp_flags,
1462 xin.resubmit_stats = push->n_packets ? push : NULL;
1463 xin.may_learn = push->n_packets > 0;
1464 xin.skip_wildcards = true;
1465 xlate_actions_for_side_effects(&xin);
1468 netflow_flow_clear(netflow, &flow);
1469 netflow_unref(netflow);
1475 for (i = 0; i < n_ops; i++) {
1476 struct udpif_key *ukey;
1478 /* If there's a udump, this ukey came directly from a datapath flow
1479 * dump. Sometimes a datapath can send duplicates in flow dumps, in
1480 * which case we wouldn't want to double-free a ukey, so avoid that by
1481 * looking up the ukey again.
1483 * If there's no udump then we know what we're doing. */
1484 ukey = (ops[i].udump
1485 ? ukey_lookup(revalidator, ops[i].udump)
1488 ukey_delete(revalidator, ukey);
1494 revalidate_udumps(struct revalidator *revalidator, struct list *udumps)
1496 struct udpif *udpif = revalidator->udpif;
1498 struct dump_op ops[REVALIDATE_MAX_BATCH];
1499 struct udpif_flow_dump *udump, *next_udump;
1500 size_t n_ops, n_flows;
1501 unsigned int flow_limit;
1502 long long int max_idle;
1505 atomic_read(&udpif->flow_limit, &flow_limit);
1507 n_flows = udpif_get_n_flows(udpif);
1510 max_idle = ofproto_max_idle;
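    /* Once the datapath holds more than twice the allowed number of flows,
     * delete every flow seen in this dump outright instead of revalidating
     * it. */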
1511 if (n_flows > flow_limit) {
1512 must_del = n_flows > 2 * flow_limit;
1517 LIST_FOR_EACH_SAFE (udump, next_udump, list_node, udumps) {
1518 long long int used, now;
1519 struct udpif_key *ukey;
1522 ukey = ukey_lookup(revalidator, udump);
1524 used = udump->stats.used;
1525 if (!used && ukey) {
1526 used = ukey->created;
1529 if (ukey && (ukey->mark || !ukey->flow_exists)) {
1530 /* The flow has already been dumped. This can occasionally occur
1531 * if the datapath is changed in the middle of a flow dump. Rather
1532 * than perform the same work twice, skip the flow this time. */
1533 COVERAGE_INC(upcall_duplicate_flow);
1537 if (must_del || (used && used < now - max_idle)) {
1538 struct dump_op *dop = &ops[n_ops++];
1541 ukey->flow_exists = false;
1543 dump_op_init(dop, udump->key, udump->key_len, ukey, udump);
1548 ukey = ukey_create(udump->key, udump->key_len, used);
1549 hmap_insert(&revalidator->ukeys, &ukey->hmap_node,
1554 if (!revalidate_ukey(udpif, udump, ukey)) {
1555 ukey->flow_exists = false;
1556 dpif_flow_del(udpif->dpif, udump->key, udump->key_len, NULL);
1557 /* The ukey will be cleaned up by revalidator_sweep().
1558 * This helps to avoid deleting the same flow twice. */
1561 list_remove(&udump->list_node);
1565 push_dump_ops(revalidator, ops, n_ops);
1567 LIST_FOR_EACH_SAFE (udump, next_udump, list_node, udumps) {
1568 list_remove(&udump->list_node);
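/* Sweep phase of the mark-and-sweep pass over this revalidator's ukeys: flows
 * that were not marked during the most recent dump (or, with 'purge', every
 * flow) are deleted from the datapath, with the deletions batched through
 * push_dump_ops(). */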
1574 revalidator_sweep__(struct revalidator *revalidator, bool purge)
1576 struct dump_op ops[REVALIDATE_MAX_BATCH];
1577 struct udpif_key *ukey, *next;
1582 HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, &revalidator->ukeys) {
1583 if (!purge && ukey->mark) {
1585 } else if (!ukey->flow_exists) {
1586 ukey_delete(revalidator, ukey);
1588 struct dump_op *op = &ops[n_ops++];
1590 /* If we have previously seen a flow in the datapath, but didn't
1591 * see it during the most recent dump, delete it. This allows us
1592 * to clean up the ukey and keep the statistics consistent. */
1593 dump_op_init(op, ukey->key, ukey->key_len, ukey, NULL);
1594 if (n_ops == REVALIDATE_MAX_BATCH) {
1595 push_dump_ops(revalidator, ops, n_ops);
1602 push_dump_ops(revalidator, ops, n_ops);
1607 revalidator_sweep(struct revalidator *revalidator)
1609 revalidator_sweep__(revalidator, false);
1613 revalidator_purge(struct revalidator *revalidator)
1615 revalidator_sweep__(revalidator, true);
1619 upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
1620 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
1622 struct ds ds = DS_EMPTY_INITIALIZER;
1623 struct udpif *udpif;
1625 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
1626 unsigned int flow_limit;
1629 atomic_read(&udpif->flow_limit, &flow_limit);
1631 ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
1632 ds_put_format(&ds, "\tflows : (current %"PRIu64")"
1633 " (avg %u) (max %u) (limit %u)\n", udpif_get_n_flows(udpif),
1634 udpif->avg_n_flows, udpif->max_n_flows, flow_limit);
1635 ds_put_format(&ds, "\tdump duration : %lldms\n", udpif->dump_duration);
1637 ds_put_char(&ds, '\n');
1638 for (i = 0; i < udpif->n_handlers; i++) {
1639 struct handler *handler = &udpif->handlers[i];
1641 ovs_mutex_lock(&handler->mutex);
1642 ds_put_format(&ds, "\t%s: (upcall queue %"PRIuSIZE")\n",
1643 handler->name, handler->n_upcalls);
1644 ovs_mutex_unlock(&handler->mutex);
1647 ds_put_char(&ds, '\n');
1648 for (i = 0; i < udpif->n_revalidators; i++) {
1649 struct revalidator *revalidator = &udpif->revalidators[i];
1651 /* XXX: The result of hmap_count(&revalidator->ukeys) may not be
1652 * accurate because it's not protected by the revalidator mutex. */
1653 ovs_mutex_lock(&revalidator->mutex);
1654 ds_put_format(&ds, "\t%s: (dump queue %"PRIuSIZE") (keys %"PRIuSIZE
1655 ")\n", revalidator->name, revalidator->n_udumps,
1656 hmap_count(&revalidator->ukeys));
1657 ovs_mutex_unlock(&revalidator->mutex);
1661 unixctl_command_reply(conn, ds_cstr(&ds));
1665 /* Disable using megaflows.
1667 * This command is only needed for advanced debugging, so it's not
1668 * documented in the man page. */
1670 upcall_unixctl_disable_megaflows(struct unixctl_conn *conn,
1671 int argc OVS_UNUSED,
1672 const char *argv[] OVS_UNUSED,
1673 void *aux OVS_UNUSED)
1675 atomic_store(&enable_megaflows, false);
1677 unixctl_command_reply(conn, "megaflows disabled");
1680 /* Re-enable using megaflows.
1682 * This command is only needed for advanced debugging, so it's not
1683 * documented in the man page. */
1685 upcall_unixctl_enable_megaflows(struct unixctl_conn *conn,
1686 int argc OVS_UNUSED,
1687 const char *argv[] OVS_UNUSED,
1688 void *aux OVS_UNUSED)
1690 atomic_store(&enable_megaflows, true);
1692 unixctl_command_reply(conn, "megaflows enabled");
1695 /* Set the flow limit.
1697 * This command is only needed for advanced debugging, so it's not
1698 * documented in the man page. */
1700 upcall_unixctl_set_flow_limit(struct unixctl_conn *conn,
1701 int argc OVS_UNUSED,
1702 const char *argv[],
1703 void *aux OVS_UNUSED)
1705 struct ds ds = DS_EMPTY_INITIALIZER;
1706 struct udpif *udpif;
1707 unsigned int flow_limit = atoi(argv[1]);
1709 LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
1710 atomic_store(&udpif->flow_limit, flow_limit);
1712 ds_put_format(&ds, "set flow_limit to %u\n", flow_limit);
1713 unixctl_command_reply(conn, ds_cstr(&ds));