/* Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License. */
#include <config.h>
#include "ofproto-dpif-upcall.h"

#include <errno.h>
#include <stdbool.h>
#include <string.h>

#include "coverage.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "flow.h"
#include "guarded-list.h"
#include "hash.h"
#include "latch.h"
#include "list.h"
#include "netlink.h"
#include "odp-util.h"
#include "ofpbuf.h"
#include "ofproto-dpif.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-xlate.h"
#include "ovs-thread.h"
#include "packets.h"
#include "poll-loop.h"
#include "random.h"
#include "seq.h"
#include "simap.h"
#include "timeval.h"
#include "unixctl.h"
#include "util.h"
#include "vlog.h"
#define MAX_QUEUE_LENGTH 512
#define FLOW_MISS_MAX_BATCH 50

VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);

COVERAGE_DEFINE(drop_queue_overflow);
COVERAGE_DEFINE(upcall_queue_overflow);
COVERAGE_DEFINE(fmb_queue_overflow);
COVERAGE_DEFINE(fmb_queue_revalidated);
/* A thread that processes each upcall handed to it by the dispatcher thread,
 * forwards the upcall's packet, and then queues it to the main ofproto_dpif
 * to possibly set up a kernel flow as a cache. */
struct handler {
    struct udpif *udpif;               /* Parent udpif. */
    pthread_t thread;                  /* Thread ID. */
    char *name;                        /* Thread name. */

    struct ovs_mutex mutex;            /* Mutex guarding the following. */

    /* Atomic queue of unprocessed upcalls. */
    struct list upcalls OVS_GUARDED;
    size_t n_upcalls OVS_GUARDED;

    bool need_signal;                  /* Only changed by the dispatcher. */

    pthread_cond_t wake_cond;          /* Wakes 'thread' while holding
                                          'mutex'. */
};
/* An upcall handler for ofproto_dpif.
 *
 * udpif is implemented as a "dispatcher" thread that reads upcalls from the
 * kernel.  It processes each upcall just enough to figure out its next
 * destination.  For a "miss" upcall (MISS_UPCALL), this is one of several
 * "handler" threads (see struct handler).  Other upcalls are queued to the
 * main ofproto_dpif. */
struct udpif {
    struct list list_node;             /* In all_udpifs list. */

    struct dpif *dpif;                 /* Datapath handle. */
    struct dpif_backer *backer;        /* Opaque dpif_backer pointer. */

    uint32_t secret;                   /* Random seed for upcall hash. */

    pthread_t dispatcher;              /* Dispatcher thread ID. */

    struct handler *handlers;          /* Upcall handlers. */
    size_t n_handlers;

    /* Queues to pass up to ofproto-dpif. */
    struct guarded_list drop_keys;     /* "struct drop_key"s. */
    struct guarded_list fmbs;          /* "struct flow_miss_batch"es. */

    struct seq *reval_seq;             /* Changed by udpif_revalidate();
                                          invalidates queued batches. */

    struct seq *wait_seq;              /* Changed when a queue becomes
                                          nonempty; see udpif_wait(). */

    struct latch exit_latch;           /* Tells child threads to exit. */
};
enum upcall_type {
    BAD_UPCALL,                 /* Some kind of bug somewhere. */
    MISS_UPCALL,                /* A flow miss. */
    SFLOW_UPCALL,               /* sFlow sample. */
    FLOW_SAMPLE_UPCALL,         /* Per-flow sampling. */
    IPFIX_UPCALL                /* Per-bridge sampling. */
};
struct upcall {
    struct list list_node;          /* For queuing upcalls. */
    struct flow_miss *flow_miss;    /* This upcall's flow_miss. */

    /* Raw upcall plus data for keeping track of the memory backing it. */
    struct dpif_upcall dpif_upcall; /* As returned by dpif_recv(). */
    struct ofpbuf upcall_buf;       /* Owns some data in 'dpif_upcall'. */
    uint64_t upcall_stub[512 / 8];  /* Buffer to reduce need for malloc(). */
};
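
/* Editorial note on the stub above: ofpbuf_use_stub() (see recv_upcalls())
 * starts 'upcall_buf' out in the caller-provided 'upcall_stub' storage, so
 * most upcalls are received without any heap allocation; ofpbuf falls back
 * to malloc() only if an upcall outgrows the 512-byte stub. */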
static void upcall_destroy(struct upcall *);

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static struct list all_udpifs = LIST_INITIALIZER(&all_udpifs);

static void recv_upcalls(struct udpif *);
static void handle_upcalls(struct udpif *, struct list *upcalls);
static void miss_destroy(struct flow_miss *);
static void *udpif_dispatcher(void *);
static void *udpif_upcall_handler(void *);
static void upcall_unixctl_show(struct unixctl_conn *conn, int argc,
                                const char *argv[], void *aux);
struct udpif *
udpif_create(struct dpif_backer *backer, struct dpif *dpif)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct udpif *udpif = xzalloc(sizeof *udpif);

    if (ovsthread_once_start(&once)) {
        unixctl_command_register("upcall/show", "", 0, 0, upcall_unixctl_show,
                                 NULL);
        ovsthread_once_done(&once);
    }

    udpif->dpif = dpif;
    udpif->backer = backer;
    udpif->secret = random_uint32();
    udpif->wait_seq = seq_create();
    udpif->reval_seq = seq_create();
    latch_init(&udpif->exit_latch);
    guarded_list_init(&udpif->drop_keys);
    guarded_list_init(&udpif->fmbs);
    list_push_back(&all_udpifs, &udpif->list_node);

    return udpif;
}
void
udpif_destroy(struct udpif *udpif)
{
    struct flow_miss_batch *fmb;
    struct drop_key *drop_key;

    udpif_set_threads(udpif, 0);
    list_remove(&udpif->list_node);

    while ((drop_key = drop_key_next(udpif))) {
        drop_key_destroy(drop_key);
    }

    while ((fmb = flow_miss_batch_next(udpif))) {
        flow_miss_batch_destroy(fmb);
    }

    guarded_list_destroy(&udpif->drop_keys);
    guarded_list_destroy(&udpif->fmbs);
    latch_destroy(&udpif->exit_latch);
    seq_destroy(udpif->wait_seq);
    seq_destroy(udpif->reval_seq);
    free(udpif);
}
/* Tells 'udpif' how many threads it should use to handle upcalls.  Disables
 * all threads if 'n_handlers' is zero.  'udpif''s datapath handle must have
 * packet reception enabled before starting threads. */
void
udpif_set_threads(struct udpif *udpif, size_t n_handlers)
{
    /* Stop the old threads (if any). */
    if (udpif->handlers && udpif->n_handlers != n_handlers) {
        size_t i;

        latch_set(&udpif->exit_latch);

        /* Wake the handlers so they can exit. */
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            ovs_mutex_lock(&handler->mutex);
            xpthread_cond_signal(&handler->wake_cond);
            ovs_mutex_unlock(&handler->mutex);
        }

        xpthread_join(udpif->dispatcher, NULL);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];
            struct upcall *miss, *next;

            xpthread_join(handler->thread, NULL);

            ovs_mutex_lock(&handler->mutex);
            LIST_FOR_EACH_SAFE (miss, next, list_node, &handler->upcalls) {
                list_remove(&miss->list_node);
                upcall_destroy(miss);
            }
            ovs_mutex_unlock(&handler->mutex);
            ovs_mutex_destroy(&handler->mutex);

            xpthread_cond_destroy(&handler->wake_cond);
            free(handler->name);
        }
        latch_poll(&udpif->exit_latch);

        free(udpif->handlers);
        udpif->handlers = NULL;
        udpif->n_handlers = 0;
    }

    /* Start new threads (if necessary). */
    if (!udpif->handlers && n_handlers) {
        size_t i;

        udpif->n_handlers = n_handlers;
        udpif->handlers = xzalloc(udpif->n_handlers * sizeof *udpif->handlers);
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            handler->udpif = udpif;
            list_init(&handler->upcalls);
            handler->need_signal = false;
            xpthread_cond_init(&handler->wake_cond, NULL);
            ovs_mutex_init(&handler->mutex);
            xpthread_create(&handler->thread, NULL, udpif_upcall_handler,
                            handler);
        }
        xpthread_create(&udpif->dispatcher, NULL, udpif_dispatcher, udpif);
    }
}
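
/* Example (illustrative sketch, not code from this file): the expected
 * lifecycle of a udpif as driven by its caller in ofproto-dpif.  The
 * handler count of 4 is an arbitrary placeholder:
 *
 *     struct udpif *udpif = udpif_create(backer, dpif);
 *     udpif_set_threads(udpif, 4);   // dispatcher + 4 handler threads
 *     ...
 *     udpif_set_threads(udpif, 0);   // join and free all threads
 *     udpif_destroy(udpif);          // also implies udpif_set_threads(.., 0)
 */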
void
udpif_wait(struct udpif *udpif)
{
    uint64_t seq = seq_read(udpif->wait_seq);

    if (!guarded_list_is_empty(&udpif->drop_keys) ||
        !guarded_list_is_empty(&udpif->fmbs)) {
        poll_immediate_wake();
    } else {
        seq_wait(udpif->wait_seq, seq);
    }
}
/* Notifies 'udpif' that something changed which may render previous
 * xlate_actions() results invalid. */
void
udpif_revalidate(struct udpif *udpif)
{
    struct flow_miss_batch *fmb, *next_fmb;
    struct list fmbs;

    /* Since we remove each miss on revalidation, their statistics won't be
     * accounted to the appropriate 'facet's in the upper layer.  In most
     * cases, this is alright because we've already pushed the stats to the
     * relevant rules. */
    seq_change(udpif->reval_seq);

    guarded_list_pop_all(&udpif->fmbs, &fmbs);
    LIST_FOR_EACH_SAFE (fmb, next_fmb, list_node, &fmbs) {
        list_remove(&fmb->list_node);
        flow_miss_batch_destroy(fmb);
    }

    udpif_drop_key_clear(udpif);
}
void
udpif_get_memory_usage(struct udpif *udpif, struct simap *usage)
{
    size_t i;

    simap_increase(usage, "dispatchers", 1);
    simap_increase(usage, "flow_dumpers", 1);

    simap_increase(usage, "handlers", udpif->n_handlers);
    for (i = 0; i < udpif->n_handlers; i++) {
        struct handler *handler = &udpif->handlers[i];

        ovs_mutex_lock(&handler->mutex);
        simap_increase(usage, "handler upcalls", handler->n_upcalls);
        ovs_mutex_unlock(&handler->mutex);
    }
}
/* Destroys and deallocates 'upcall'. */
static void
upcall_destroy(struct upcall *upcall)
{
    if (upcall) {
        ofpbuf_uninit(&upcall->dpif_upcall.packet);
        ofpbuf_uninit(&upcall->upcall_buf);
        free(upcall);
    }
}
/* Retrieves the next batch of processed flow misses for 'udpif' to install.
 * The caller is responsible for destroying it with flow_miss_batch_destroy().
 */
struct flow_miss_batch *
flow_miss_batch_next(struct udpif *udpif)
{
    int i;

    for (i = 0; i < 50; i++) {
        struct flow_miss_batch *next;
        struct list *next_node;

        next_node = guarded_list_pop_front(&udpif->fmbs);
        if (!next_node) {
            break;
        }

        next = CONTAINER_OF(next_node, struct flow_miss_batch, list_node);
        if (next->reval_seq == seq_read(udpif->reval_seq)) {
            return next;
        }

        /* Stale batch: a revalidation intervened since it was queued, so its
         * translated actions can no longer be trusted.  Discard it. */
        flow_miss_batch_destroy(next);
    }

    return NULL;
}
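
/* Example (illustrative sketch): how a consumer such as ofproto-dpif might
 * drain the queue of processed batches.  The flow-installation step is
 * elided because it happens outside this file:
 *
 *     struct flow_miss_batch *fmb;
 *     while ((fmb = flow_miss_batch_next(udpif))) {
 *         ...install a datapath flow for each miss in fmb->misses...
 *         flow_miss_batch_destroy(fmb);
 *     }
 */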
/* Destroys and deallocates 'fmb'. */
void
flow_miss_batch_destroy(struct flow_miss_batch *fmb)
{
    struct flow_miss *miss, *next;
    struct upcall *upcall, *next_upcall;

    if (!fmb) {
        return;
    }

    HMAP_FOR_EACH_SAFE (miss, next, hmap_node, &fmb->misses) {
        hmap_remove(&fmb->misses, &miss->hmap_node);
        miss_destroy(miss);
    }

    LIST_FOR_EACH_SAFE (upcall, next_upcall, list_node, &fmb->upcalls) {
        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }

    hmap_destroy(&fmb->misses);
    free(fmb);
}
/* Retrieves the next drop key which ofproto-dpif needs to process.  The caller
 * is responsible for destroying it with drop_key_destroy(). */
struct drop_key *
drop_key_next(struct udpif *udpif)
{
    struct list *next = guarded_list_pop_front(&udpif->drop_keys);

    return next ? CONTAINER_OF(next, struct drop_key, list_node) : NULL;
}
/* Destroys and deallocates 'drop_key'. */
void
drop_key_destroy(struct drop_key *drop_key)
{
    if (drop_key) {
        free(drop_key->key);
        free(drop_key);
    }
}
/* Clears all drop keys waiting to be processed by drop_key_next(). */
void
udpif_drop_key_clear(struct udpif *udpif)
{
    struct drop_key *drop_key, *next;
    struct list list;

    guarded_list_pop_all(&udpif->drop_keys, &list);
    LIST_FOR_EACH_SAFE (drop_key, next, list_node, &list) {
        list_remove(&drop_key->list_node);
        drop_key_destroy(drop_key);
    }
}
/* The dispatcher thread is responsible for receiving upcalls from the kernel
 * and assigning them to upcall_handler threads. */
static void *
udpif_dispatcher(void *arg)
{
    struct udpif *udpif = arg;

    set_subprogram_name("dispatcher");
    while (!latch_is_set(&udpif->exit_latch)) {
        recv_upcalls(udpif);
        dpif_recv_wait(udpif->dpif);
        latch_wait(&udpif->exit_latch);
        poll_block();
    }

    return NULL;
}
/* The miss handler thread is responsible for processing miss upcalls retrieved
 * by the dispatcher thread.  Once finished it passes the processed miss
 * upcalls to ofproto-dpif where they're installed in the datapath. */
static void *
udpif_upcall_handler(void *arg)
{
    struct handler *handler = arg;

    handler->name = xasprintf("handler_%u", ovsthread_id_self());
    set_subprogram_name("%s", handler->name);

    for (;;) {
        struct list misses = LIST_INITIALIZER(&misses);
        size_t i;

        ovs_mutex_lock(&handler->mutex);

        if (latch_is_set(&handler->udpif->exit_latch)) {
            ovs_mutex_unlock(&handler->mutex);
            return NULL;
        }

        if (!handler->n_upcalls) {
            ovs_mutex_cond_wait(&handler->wake_cond, &handler->mutex);
        }

        for (i = 0; i < FLOW_MISS_MAX_BATCH; i++) {
            if (handler->n_upcalls) {
                handler->n_upcalls--;
                list_push_back(&misses, list_pop_front(&handler->upcalls));
            } else {
                break;
            }
        }
        ovs_mutex_unlock(&handler->mutex);

        handle_upcalls(handler->udpif, &misses);
    }
}
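
/* Editorial sketch of the wakeup protocol between dispatcher and handler:
 * the dispatcher sets 'need_signal' when it makes an empty queue nonempty,
 * and signals 'wake_cond' either once the queue reaches FLOW_MISS_MAX_BATCH
 * entries or once it has no more upcalls to read (see recv_upcalls()).  A
 * handler therefore usually wakes with a full batch to hand to
 * handle_upcalls() rather than being woken once per packet. */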
static void
miss_destroy(struct flow_miss *miss)
{
    xlate_out_uninit(&miss->xout);
}
static enum upcall_type
classify_upcall(const struct upcall *upcall)
{
    const struct dpif_upcall *dpif_upcall = &upcall->dpif_upcall;
    union user_action_cookie cookie;
    size_t userdata_len;

    /* First look at the upcall type. */
    switch (dpif_upcall->type) {
    case DPIF_UC_ACTION:
        break;

    case DPIF_UC_MISS:
        return MISS_UPCALL;

    case DPIF_N_UC_TYPES:
    default:
        VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
                     dpif_upcall->type);
        return BAD_UPCALL;
    }

    /* "action" upcalls need a closer look. */
    if (!dpif_upcall->userdata) {
        VLOG_WARN_RL(&rl, "action upcall missing cookie");
        return BAD_UPCALL;
    }
    userdata_len = nl_attr_get_size(dpif_upcall->userdata);
    if (userdata_len < sizeof cookie.type
        || userdata_len > sizeof cookie) {
        VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %"PRIuSIZE,
                     userdata_len);
        return BAD_UPCALL;
    }
    memset(&cookie, 0, sizeof cookie);
    memcpy(&cookie, nl_attr_get(dpif_upcall->userdata), userdata_len);
    if (userdata_len == sizeof cookie.sflow
        && cookie.type == USER_ACTION_COOKIE_SFLOW) {
        return SFLOW_UPCALL;
    } else if (userdata_len == sizeof cookie.slow_path
               && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
        return MISS_UPCALL;
    } else if (userdata_len == sizeof cookie.flow_sample
               && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
        return FLOW_SAMPLE_UPCALL;
    } else if (userdata_len == sizeof cookie.ipfix
               && cookie.type == USER_ACTION_COOKIE_IPFIX) {
        return IPFIX_UPCALL;
    } else {
        VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
                     " and size %"PRIuSIZE, cookie.type, userdata_len);
        return BAD_UPCALL;
    }
}
static void
recv_upcalls(struct udpif *udpif)
{
    int n;

    for (;;) {
        uint32_t hash = udpif->secret;
        struct handler *handler;
        struct upcall *upcall;
        size_t n_bytes, left;
        struct nlattr *nla;
        int error;

        upcall = xmalloc(sizeof *upcall);
        ofpbuf_use_stub(&upcall->upcall_buf, upcall->upcall_stub,
                        sizeof upcall->upcall_stub);
        error = dpif_recv(udpif->dpif, &upcall->dpif_upcall,
                          &upcall->upcall_buf);
        if (error) {
            /* upcall_destroy() can only be called on successfully received
             * upcalls. */
            ofpbuf_uninit(&upcall->upcall_buf);
            free(upcall);
            break;
        }

        n_bytes = 0;
        NL_ATTR_FOR_EACH (nla, left, upcall->dpif_upcall.key,
                          upcall->dpif_upcall.key_len) {
            enum ovs_key_attr type = nl_attr_type(nla);

            if (type == OVS_KEY_ATTR_IN_PORT
                || type == OVS_KEY_ATTR_TCP
                || type == OVS_KEY_ATTR_UDP) {
                if (nl_attr_get_size(nla) == 4) {
                    hash = mhash_add(hash, nl_attr_get_u32(nla));
                    n_bytes += 4;
                } else {
                    VLOG_WARN_RL(&rl,
                                 "Netlink attribute with incorrect size.");
                }
            }
        }
        hash = mhash_finish(hash, n_bytes);

        handler = &udpif->handlers[hash % udpif->n_handlers];

        ovs_mutex_lock(&handler->mutex);
        if (handler->n_upcalls < MAX_QUEUE_LENGTH) {
            list_push_back(&handler->upcalls, &upcall->list_node);
            if (handler->n_upcalls == 0) {
                handler->need_signal = true;
            }
            handler->n_upcalls++;
            if (handler->need_signal &&
                handler->n_upcalls >= FLOW_MISS_MAX_BATCH) {
                handler->need_signal = false;
                xpthread_cond_signal(&handler->wake_cond);
            }
            ovs_mutex_unlock(&handler->mutex);
            if (!VLOG_DROP_DBG(&rl)) {
                struct ds ds = DS_EMPTY_INITIALIZER;

                odp_flow_key_format(upcall->dpif_upcall.key,
                                    upcall->dpif_upcall.key_len,
                                    &ds);
                VLOG_DBG("dispatcher: enqueue (%s)", ds_cstr(&ds));
                ds_destroy(&ds);
            }
        } else {
            ovs_mutex_unlock(&handler->mutex);
            COVERAGE_INC(upcall_queue_overflow);
            upcall_destroy(upcall);
        }
    }

    for (n = 0; n < udpif->n_handlers; ++n) {
        struct handler *handler = &udpif->handlers[n];

        if (handler->need_signal) {
            handler->need_signal = false;
            ovs_mutex_lock(&handler->mutex);
            xpthread_cond_signal(&handler->wake_cond);
            ovs_mutex_unlock(&handler->mutex);
        }
    }
}
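
/* A note on the handler hash above: only OVS_KEY_ATTR_IN_PORT,
 * OVS_KEY_ATTR_TCP, and OVS_KEY_ATTR_UDP feed the hash, so every packet of
 * a given transport flow lands on the same handler thread.  That preserves
 * per-flow packet ordering while still spreading unrelated flows across all
 * udpif->n_handlers threads; seeding with the random 'secret' keeps remote
 * senders from deliberately concentrating traffic on one handler. */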
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
               const struct flow *flow, uint32_t hash)
{
    struct flow_miss *miss;

    HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
        if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
            return miss;
        }
    }

    return NULL;
}
static void
handle_upcalls(struct udpif *udpif, struct list *upcalls)
{
    struct dpif_op *opsp[FLOW_MISS_MAX_BATCH];
    struct dpif_op ops[FLOW_MISS_MAX_BATCH];
    struct upcall *upcall, *next;
    struct flow_miss_batch *fmb;
    size_t n_misses, n_ops, i;
    struct flow_miss *miss;
    enum upcall_type type;
    bool fail_open;
    /* Extract the flow from each upcall.  Construct in fmb->misses a hash
     * table that maps each unique flow to a 'struct flow_miss'.
     *
     * Most commonly there is a single packet per flow_miss, but there are
     * several reasons why there might be more than one, e.g.:
     *
     *   - The dpif packet interface does not support TSO (or UFO, etc.), so a
     *     large packet sent to userspace is split into a sequence of smaller
     *     ones.
     *
     *   - A stream of quickly arriving packets in an established "slow-pathed"
     *     flow.
     *
     *   - Rarely, a stream of quickly arriving packets in a flow not yet
     *     established.  (This is rare because most protocols do not send
     *     multiple back-to-back packets before receiving a reply from the
     *     other end of the connection, which gives OVS a chance to set up a
     *     datapath flow.) */
    fmb = xmalloc(sizeof *fmb);
    fmb->reval_seq = seq_read(udpif->reval_seq);
    hmap_init(&fmb->misses);
    list_init(&fmb->upcalls);
    n_misses = 0;
    LIST_FOR_EACH_SAFE (upcall, next, list_node, upcalls) {
        struct dpif_upcall *dupcall = &upcall->dpif_upcall;
        struct ofpbuf *packet = &dupcall->packet;
        struct flow_miss *miss = &fmb->miss_buf[n_misses];
        struct flow_miss *existing_miss;
        struct ofproto_dpif *ofproto;
        struct dpif_sflow *sflow;
        struct dpif_ipfix *ipfix;
        odp_port_t odp_in_port;
        struct flow flow;
        int error;

        error = xlate_receive(udpif->backer, packet, dupcall->key,
                              dupcall->key_len, &flow, &miss->key_fitness,
                              &ofproto, &ipfix, &sflow, NULL, &odp_in_port);
        if (error) {
            if (error == ENODEV) {
                struct drop_key *drop_key;

                /* Received packet on datapath port for which we couldn't
                 * associate an ofproto.  This can happen if a port is removed
                 * while traffic is being received.  Print a rate-limited
                 * message in case it happens frequently.  Install a drop flow
                 * so that future packets of the flow are inexpensively dropped
                 * in the kernel. */
                VLOG_INFO_RL(&rl, "received packet on unassociated datapath "
                             "port %"PRIu32, odp_in_port);

                drop_key = xmalloc(sizeof *drop_key);
                drop_key->key = xmemdup(dupcall->key, dupcall->key_len);
                drop_key->key_len = dupcall->key_len;

                if (guarded_list_push_back(&udpif->drop_keys,
                                           &drop_key->list_node,
                                           MAX_QUEUE_LENGTH)) {
                    seq_change(udpif->wait_seq);
                } else {
                    COVERAGE_INC(drop_queue_overflow);
                    drop_key_destroy(drop_key);
                }
            }
            list_remove(&upcall->list_node);
            upcall_destroy(upcall);
            continue;
        }
        type = classify_upcall(upcall);
        if (type == MISS_UPCALL) {
            uint32_t hash;

            flow_extract(packet, flow.skb_priority, flow.pkt_mark,
                         &flow.tunnel, &flow.in_port, &miss->flow);

            hash = flow_hash(&miss->flow, 0);
            existing_miss = flow_miss_find(&fmb->misses, ofproto, &miss->flow,
                                           hash);
            if (!existing_miss) {
                hmap_insert(&fmb->misses, &miss->hmap_node, hash);
                miss->ofproto = ofproto;
                miss->key = dupcall->key;
                miss->key_len = dupcall->key_len;
                miss->upcall_type = dupcall->type;
                miss->stats.n_packets = 0;
                miss->stats.n_bytes = 0;
                miss->stats.used = time_msec();
                miss->stats.tcp_flags = 0;

                n_misses++;
            } else {
                miss = existing_miss;
            }
            miss->stats.tcp_flags |= packet_get_tcp_flags(packet, &miss->flow);
            miss->stats.n_bytes += packet->size;
            miss->stats.n_packets++;

            upcall->flow_miss = miss;
            continue;
        }
        switch (type) {
        case SFLOW_UPCALL:
            if (sflow) {
                union user_action_cookie cookie;

                memset(&cookie, 0, sizeof cookie);
                memcpy(&cookie, nl_attr_get(dupcall->userdata),
                       sizeof cookie.sflow);
                dpif_sflow_received(sflow, packet, &flow, odp_in_port,
                                    &cookie);
            }
            break;
        case IPFIX_UPCALL:
            if (ipfix) {
                dpif_ipfix_bridge_sample(ipfix, packet, &flow);
            }
            break;
        case FLOW_SAMPLE_UPCALL:
            if (ipfix) {
                union user_action_cookie cookie;

                memset(&cookie, 0, sizeof cookie);
                memcpy(&cookie, nl_attr_get(dupcall->userdata),
                       sizeof cookie.flow_sample);

                /* The flow reflects exactly the contents of the packet.
                 * Sample the packet using it. */
                dpif_ipfix_flow_sample(ipfix, packet, &flow,
                                       cookie.flow_sample.collector_set_id,
                                       cookie.flow_sample.probability,
                                       cookie.flow_sample.obs_domain_id,
                                       cookie.flow_sample.obs_point_id);
            }
            break;
        case BAD_UPCALL:
            break;
        case MISS_UPCALL:
            NOT_REACHED();
        }

        dpif_ipfix_unref(ipfix);
        dpif_sflow_unref(sflow);

        list_remove(&upcall->list_node);
        upcall_destroy(upcall);
    }
    /* Initialize each 'struct flow_miss's ->xout.
     *
     * We do this per-flow_miss rather than per-packet because, most commonly,
     * all the packets in a flow can use the same translation.
     *
     * We can't do this in the previous loop because we need the TCP flags for
     * all the packets in each miss. */
    fail_open = false;
    HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
        struct xlate_in xin;

        xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL,
                      miss->stats.tcp_flags, NULL);
        xin.may_learn = true;
        xin.resubmit_stats = &miss->stats;
        xlate_actions(&xin, &miss->xout);

        fail_open = fail_open || miss->xout.fail_open;
    }
    /* Now handle the packets individually in order of arrival.  In the common
     * case each packet of a miss can share the same actions, but slow-pathed
     * packets need to be translated individually:
     *
     *   - For SLOW_CFM, SLOW_LACP, SLOW_STP, and SLOW_BFD, translation is what
     *     processes received packets for these protocols.
     *
     *   - For SLOW_CONTROLLER, translation sends the packet to the OpenFlow
     *     controller.
     *
     * The loop fills 'ops' with an array of operations to execute in the
     * datapath. */
    n_ops = 0;
    LIST_FOR_EACH (upcall, list_node, upcalls) {
        struct flow_miss *miss = upcall->flow_miss;
        struct ofpbuf *packet = &upcall->dpif_upcall.packet;

        if (miss->xout.slow) {
            struct xlate_in xin;

            xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, 0, packet);
            xlate_actions_for_side_effects(&xin);
        }

        if (miss->xout.odp_actions.size) {
            struct dpif_op *op;

            if (miss->flow.in_port.ofp_port
                != vsp_realdev_to_vlandev(miss->ofproto,
                                          miss->flow.in_port.ofp_port,
                                          miss->flow.vlan_tci)) {
                /* This packet was received on a VLAN splinter port.  We
                 * added a VLAN to the packet to make the packet resemble
                 * the flow, but the actions were composed assuming that
                 * the packet contained no VLAN.  So, we must remove the
                 * VLAN header from the packet before trying to execute the
                 * actions. */
                eth_pop_vlan(packet);
            }

            op = &ops[n_ops++];
            op->type = DPIF_OP_EXECUTE;
            op->u.execute.key = miss->key;
            op->u.execute.key_len = miss->key_len;
            op->u.execute.packet = packet;
            op->u.execute.actions = miss->xout.odp_actions.data;
            op->u.execute.actions_len = miss->xout.odp_actions.size;
            op->u.execute.needs_help = (miss->xout.slow & SLOW_ACTION) != 0;
        }
    }
    /* Special case for fail-open mode.
     *
     * If we are in fail-open mode, but we are connected to a controller too,
     * then we should send the packet up to the controller in the hope that it
     * will try to set up a flow and thereby allow us to exit fail-open.
     *
     * See the top-level comment in fail-open.c for more information.
     *
     * Copy packets before they are modified by execution. */
    if (fail_open) {
        LIST_FOR_EACH (upcall, list_node, upcalls) {
            struct flow_miss *miss = upcall->flow_miss;
            struct ofpbuf *packet = &upcall->dpif_upcall.packet;
            struct ofproto_packet_in *pin;

            pin = xmalloc(sizeof *pin);
            pin->up.packet = xmemdup(packet->data, packet->size);
            pin->up.packet_len = packet->size;
            pin->up.reason = OFPR_NO_MATCH;
            pin->up.table_id = 0;
            pin->up.cookie = OVS_BE64_MAX;
            flow_get_metadata(&miss->flow, &pin->up.fmd);
            pin->send_len = 0; /* Not used for flow table misses. */
            pin->generated_by_table_miss = false;
            ofproto_dpif_send_packet_in(miss->ofproto, pin);
        }
    }
    /* Execute the operations as a single batch. */
    for (i = 0; i < n_ops; i++) {
        opsp[i] = &ops[i];
    }
    dpif_operate(udpif->dpif, opsp, n_ops);

    list_move(&fmb->upcalls, upcalls);

    if (fmb->reval_seq != seq_read(udpif->reval_seq)) {
        COVERAGE_INC(fmb_queue_revalidated);
        flow_miss_batch_destroy(fmb);
    } else if (!guarded_list_push_back(&udpif->fmbs, &fmb->list_node,
                                       MAX_QUEUE_LENGTH)) {
        COVERAGE_INC(fmb_queue_overflow);
        flow_miss_batch_destroy(fmb);
    } else {
        seq_change(udpif->wait_seq);
    }
}
static void
upcall_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct udpif *udpif;

    LIST_FOR_EACH (udpif, list_node, &all_udpifs) {
        size_t i;

        ds_put_format(&ds, "%s:\n", dpif_name(udpif->dpif));
        for (i = 0; i < udpif->n_handlers; i++) {
            struct handler *handler = &udpif->handlers[i];

            ovs_mutex_lock(&handler->mutex);
            ds_put_format(&ds, "\t%s: (upcall queue %"PRIuSIZE")\n",
                          handler->name, handler->n_upcalls);
            ovs_mutex_unlock(&handler->mutex);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}
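
/* Example: the command registered in udpif_create() can be invoked through
 * ovs-appctl; the datapath name and queue depths below are illustrative:
 *
 *     $ ovs-appctl upcall/show
 *     system@ovs-system:
 *             handler_23: (upcall queue 0)
 *             handler_24: (upcall queue 3)
 */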