/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include "dpif-provider.h"
28 #include "dp-packet.h"
29 #include "dpif-netdev.h"
30 #include "dynamic-string.h"
34 #include "odp-execute.h"
36 #include "ofp-errors.h"
37 #include "ofp-print.h"
41 #include "poll-loop.h"
42 #include "route-table.h"
47 #include "tnl-arp-cache.h"
48 #include "tnl-ports.h"
52 #include "openvswitch/vlog.h"
/* Logging module for this translation unit. */
VLOG_DEFINE_THIS_MODULE(dpif);

/* Coverage counters bumped by the corresponding dpif operations below. */
COVERAGE_DEFINE(dpif_destroy);
COVERAGE_DEFINE(dpif_port_add);
COVERAGE_DEFINE(dpif_port_del);
COVERAGE_DEFINE(dpif_flow_flush);
COVERAGE_DEFINE(dpif_flow_get);
COVERAGE_DEFINE(dpif_flow_put);
COVERAGE_DEFINE(dpif_flow_del);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
COVERAGE_DEFINE(dpif_execute_with_help);
/* Providers linked into this build; registered during one-time init below.
 * NOTE(review): this fragment is missing lines (array elements, '#endif',
 * closing '};') -- consult the full file before compiling. */
67 static const struct dpif_class *base_dpif_classes[] = {
68 #if defined(__linux__) || defined(_WIN32)
/* A registered provider together with a count of the dpifs that use it. */
74 struct registered_dpif_class {
75 const struct dpif_class *dpif_class;
/* All registered providers keyed by type, and the set of provider types
 * blocked from registering. */
78 static struct shash dpif_classes = SHASH_INITIALIZER(&dpif_classes);
79 static struct sset dpif_blacklist = SSET_INITIALIZER(&dpif_blacklist);
81 /* Protects 'dpif_classes', including the refcount, and 'dpif_blacklist'. */
82 static struct ovs_mutex dpif_mutex = OVS_MUTEX_INITIALIZER;
84 /* Rate limit for individual messages going to or from the datapath, output at
85 * DBG level. This is very high because, if these are enabled, it is because
86 * we really need to see them. */
87 static struct vlog_rate_limit dpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
89 /* Not really much point in logging many dpif errors. */
90 static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(60, 5);
/* Forward declarations for the logging helpers defined later in the file.
 * NOTE(review): some parameter lines are missing from this fragment. */
92 static void log_flow_message(const struct dpif *dpif, int error,
93 const char *operation,
94 const struct nlattr *key, size_t key_len,
95 const struct nlattr *mask, size_t mask_len,
97 const struct dpif_flow_stats *stats,
98 const struct nlattr *actions, size_t actions_len);
99 static void log_operation(const struct dpif *, const char *operation,
101 static bool should_log_flow_message(int error);
102 static void log_flow_put_message(struct dpif *, const struct dpif_flow_put *,
104 static void log_flow_del_message(struct dpif *, const struct dpif_flow_del *,
106 static void log_execute_message(struct dpif *, const struct dpif_execute *,
107 bool subexecute, int error);
108 static void log_flow_get_message(const struct dpif *,
109 const struct dpif_flow_get *, int error);
111 /* Incremented whenever tnl route, arp, etc changes. */
112 struct seq *tnl_conf_seq;
/* Interior of the one-shot global initializer: creates 'tnl_conf_seq',
 * registers dpctl unixctl commands and tunnel subsystems, then registers
 * every statically linked provider from base_dpif_classes[].
 * NOTE(review): the enclosing function's signature is among the lines
 * missing from this fragment -- presumably dp_initialize(); verify against
 * the full file. */
117 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
119 if (ovsthread_once_start(&once)) {
122 tnl_conf_seq = seq_create();
123 dpctl_unixctl_register();
125 tnl_arp_cache_init();
/* Register each built-in provider exactly once. */
128 for (i = 0; i < ARRAY_SIZE(base_dpif_classes); i++) {
129 dp_register_provider(base_dpif_classes[i]);
132 ovsthread_once_done(&once);
/* Registers 'new_class' unless it is blacklisted, a duplicate, or its init()
 * hook fails.  Caller holds 'dpif_mutex' (taken by dp_register_provider()).
 * NOTE(review): error returns and closing braces are missing from this
 * fragment. */
137 dp_register_provider__(const struct dpif_class *new_class)
139 struct registered_dpif_class *registered_class;
/* Refuse types blocked via dp_blacklist_provider(). */
142 if (sset_contains(&dpif_blacklist, new_class->type)) {
143 VLOG_DBG("attempted to register blacklisted provider: %s",
148 if (shash_find(&dpif_classes, new_class->type)) {
149 VLOG_WARN("attempted to register duplicate datapath provider: %s",
/* A provider without an init() hook initializes trivially. */
154 error = new_class->init ? new_class->init() : 0;
156 VLOG_WARN("failed to initialize %s datapath class: %s",
157 new_class->type, ovs_strerror(error));
/* A newly registered class starts with no dpifs referencing it. */
161 registered_class = xmalloc(sizeof *registered_class);
162 registered_class->dpif_class = new_class;
163 registered_class->refcount = 0;
165 shash_add(&dpif_classes, new_class->type, registered_class);
170 /* Registers a new datapath provider. After successful registration, new
171 * datapaths of that type can be opened using dpif_open(). */
173 dp_register_provider(const struct dpif_class *new_class)
177 ovs_mutex_lock(&dpif_mutex);
178 error = dp_register_provider__(new_class);
179 ovs_mutex_unlock(&dpif_mutex);
184 /* Unregisters a datapath provider.  'type' must have been previously
185 * registered and not currently be in use by any dpifs.  After unregistration
186 * new datapaths of that type cannot be opened using dpif_open(). */
/* Locked helper; caller holds 'dpif_mutex'.  NOTE(review): the error
 * returns and closing braces are missing from this fragment. */
188 dp_unregister_provider__(const char *type)
190 struct shash_node *node;
191 struct registered_dpif_class *registered_class;
193 node = shash_find(&dpif_classes, type);
195 VLOG_WARN("attempted to unregister a datapath provider that is not "
196 "registered: %s", type);
/* Refuse while any open dpif still holds a reference (dp_class_lookup()). */
200 registered_class = node->data;
201 if (registered_class->refcount) {
202 VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
206 shash_delete(&dpif_classes, node);
207 free(registered_class);
212 /* Unregisters a datapath provider. 'type' must have been previously
213 * registered and not currently be in use by any dpifs. After unregistration
214 * new datapaths of that type cannot be opened using dpif_open(). */
216 dp_unregister_provider(const char *type)
222 ovs_mutex_lock(&dpif_mutex);
223 error = dp_unregister_provider__(type);
224 ovs_mutex_unlock(&dpif_mutex);
229 /* Blacklists a provider. Causes future calls of dp_register_provider() with
230 * a dpif_class which implements 'type' to fail. */
232 dp_blacklist_provider(const char *type)
234 ovs_mutex_lock(&dpif_mutex);
235 sset_add(&dpif_blacklist, type);
236 ovs_mutex_unlock(&dpif_mutex);
239 /* Adds the types of all currently registered datapath providers to 'types'.
240 * The caller must first initialize the sset. */
242 dp_enumerate_types(struct sset *types)
244 struct shash_node *node;
248 ovs_mutex_lock(&dpif_mutex);
249 SHASH_FOR_EACH(node, &dpif_classes) {
250 const struct registered_dpif_class *registered_class = node->data;
251 sset_add(types, registered_class->dpif_class->type);
253 ovs_mutex_unlock(&dpif_mutex);
257 dp_class_unref(struct registered_dpif_class *rc)
259 ovs_mutex_lock(&dpif_mutex);
260 ovs_assert(rc->refcount);
262 ovs_mutex_unlock(&dpif_mutex);
265 static struct registered_dpif_class *
266 dp_class_lookup(const char *type)
268 struct registered_dpif_class *rc;
270 ovs_mutex_lock(&dpif_mutex);
271 rc = shash_find_data(&dpif_classes, type);
275 ovs_mutex_unlock(&dpif_mutex);
280 /* Clears 'names' and enumerates the names of all known created datapaths with
281 * the given 'type'.  The caller must first initialize the sset.  Returns 0 if
282 * successful, otherwise a positive errno value.
284 * Some kinds of datapaths might not be practically enumerable.  This is not
285 * considered an error. */
/* NOTE(review): several lines are missing from this fragment (declarations,
 * early returns, the ': 0' arm of the conditional, final return). */
287 dp_enumerate_names(const char *type, struct sset *names)
289 struct registered_dpif_class *registered_class;
290 const struct dpif_class *dpif_class;
/* Take a reference on the provider for the duration of the enumeration. */
296 registered_class = dp_class_lookup(type);
297 if (!registered_class) {
298 VLOG_WARN("could not enumerate unknown type: %s", type);
/* Providers without an enumerate() hook enumerate trivially. */
302 dpif_class = registered_class->dpif_class;
303 error = (dpif_class->enumerate
304 ? dpif_class->enumerate(names, dpif_class)
307 VLOG_WARN("failed to enumerate %s datapaths: %s", dpif_class->type,
308 ovs_strerror(error));
310 dp_class_unref(registered_class);
/* Parses 'datapath_name_', which is of the form [type@]name into its
 * component pieces.  'name' and 'type' must be freed by the caller.
 *
 * The returned 'type' is normalized, as if by dpif_normalize_type(). */
void
dp_parse_name(const char *datapath_name_, char **name, char **type)
{
    char *datapath_name = xstrdup(datapath_name_);
    char *separator;

    separator = strchr(datapath_name, '@');
    if (separator) {
        *separator = '\0';      /* Split the copy in two at the '@'. */
        *type = datapath_name;  /* Part before '@' is the type... */
        *name = xstrdup(dpif_normalize_type(separator + 1));  /* ...after it,
                                                               * the name. */
    } else {
        *name = datapath_name;
        *type = xstrdup(dpif_normalize_type(NULL));
    }
}
/* Opens (or, if 'create' is true, creates) datapath 'name' of 'type' through
 * the registered provider, storing the result in '*dpifp' on success and
 * NULL on failure.  NOTE(review): declarations, braces and the final return
 * are missing from this fragment. */
337 do_open(const char *name, const char *type, bool create, struct dpif **dpifp)
339 struct dpif *dpif = NULL;
341 struct registered_dpif_class *registered_class;
345 type = dpif_normalize_type(type);
346 registered_class = dp_class_lookup(type);
347 if (!registered_class) {
348 VLOG_WARN("could not create datapath %s of unknown type %s", name,
350 error = EAFNOSUPPORT;
/* Delegate creation/opening to the provider's open() hook. */
354 error = registered_class->dpif_class->open(registered_class->dpif_class,
355 name, create, &dpif);
357 ovs_assert(dpif->dpif_class == registered_class->dpif_class);
/* NOTE(review): in the full file this unref sits on the failure path; a
 * successful open keeps the class reference until dpif_close() -- verify. */
359 dp_class_unref(registered_class);
363 *dpifp = error ? NULL : dpif;
367 /* Tries to open an existing datapath named 'name' and type 'type'. Will fail
368 * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
369 * the empty string to specify the default system type. Returns 0 if
370 * successful, otherwise a positive errno value. On success stores a pointer
371 * to the datapath in '*dpifp', otherwise a null pointer. */
373 dpif_open(const char *name, const char *type, struct dpif **dpifp)
375 return do_open(name, type, false, dpifp);
378 /* Tries to create and open a new datapath with the given 'name' and 'type'.
379 * 'type' may be either NULL or the empty string to specify the default system
380 * type. Will fail if a datapath with 'name' and 'type' already exists.
381 * Returns 0 if successful, otherwise a positive errno value. On success
382 * stores a pointer to the datapath in '*dpifp', otherwise a null pointer. */
384 dpif_create(const char *name, const char *type, struct dpif **dpifp)
386 return do_open(name, type, true, dpifp);
/* Tries to open a datapath with the given 'name' and 'type', creating it if it
 * does not exist.  'type' may be either NULL or the empty string to specify
 * the default system type.  Returns 0 if successful, otherwise a positive
 * errno value.  On success stores a pointer to the datapath in '*dpifp',
 * otherwise a null pointer. */
int
dpif_create_and_open(const char *name, const char *type, struct dpif **dpifp)
{
    int error;

    error = dpif_create(name, type, dpifp);
    if (error == EEXIST || error == EBUSY) {
        /* Someone else created it first (or it already existed): fall back
         * to opening the existing datapath. */
        error = dpif_open(name, type, dpifp);
        if (error) {
            VLOG_WARN("datapath %s already exists but cannot be opened: %s",
                      name, ovs_strerror(error));
        }
    } else if (error) {
        VLOG_WARN("failed to create datapath %s: %s",
                  name, ovs_strerror(error));
    }
    return error;
}
413 /* Closes and frees the connection to 'dpif'. Does not destroy the datapath
414 * itself; call dpif_delete() first, instead, if that is desirable. */
416 dpif_close(struct dpif *dpif)
419 struct registered_dpif_class *rc;
421 rc = shash_find_data(&dpif_classes, dpif->dpif_class->type);
422 dpif_uninit(dpif, true);
427 /* Performs periodic work needed by 'dpif'. */
429 dpif_run(struct dpif *dpif)
431 if (dpif->dpif_class->run) {
432 return dpif->dpif_class->run(dpif);
437 /* Arranges for poll_block() to wake up when dp_run() needs to be called for
440 dpif_wait(struct dpif *dpif)
442 if (dpif->dpif_class->wait) {
443 dpif->dpif_class->wait(dpif);
447 /* Returns the name of datapath 'dpif' prefixed with the type
448 * (for use in log messages). */
450 dpif_name(const struct dpif *dpif)
452 return dpif->full_name;
455 /* Returns the name of datapath 'dpif' without the type
456 * (for use in device names). */
458 dpif_base_name(const struct dpif *dpif)
460 return dpif->base_name;
463 /* Returns the type of datapath 'dpif'. */
465 dpif_type(const struct dpif *dpif)
467 return dpif->dpif_class->type;
/* Returns the fully spelled out name for the given datapath 'type'.
 *
 * Normalized type string can be compared with strcmp().  Unnormalized type
 * string might be the same even if they have different spellings.
 * NULL and "" both normalize to the default type, "system". */
const char *
dpif_normalize_type(const char *type)
{
    return type && type[0] ? type : "system";
}
480 /* Destroys the datapath that 'dpif' is connected to, first removing all of its
481 * ports. After calling this function, it does not make sense to pass 'dpif'
482 * to any functions other than dpif_name() or dpif_close(). */
484 dpif_delete(struct dpif *dpif)
488 COVERAGE_INC(dpif_destroy);
490 error = dpif->dpif_class->destroy(dpif);
491 log_operation(dpif, "delete", error);
495 /* Retrieves statistics for 'dpif' into 'stats'. Returns 0 if successful,
496 * otherwise a positive errno value. */
498 dpif_get_dp_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
500 int error = dpif->dpif_class->get_stats(dpif, stats);
502 memset(stats, 0, sizeof *stats);
504 log_operation(dpif, "get_stats", error);
509 dpif_port_open_type(const char *datapath_type, const char *port_type)
511 struct registered_dpif_class *rc;
513 datapath_type = dpif_normalize_type(datapath_type);
515 ovs_mutex_lock(&dpif_mutex);
516 rc = shash_find_data(&dpif_classes, datapath_type);
517 if (rc && rc->dpif_class->port_open_type) {
518 port_type = rc->dpif_class->port_open_type(rc->dpif_class, port_type);
520 ovs_mutex_unlock(&dpif_mutex);
525 /* Attempts to add 'netdev' as a port on 'dpif'.  If 'port_nop' is
526 * non-null and its value is not ODPP_NONE, then attempts to use the
527 * value as the port number.
529 * If successful, returns 0 and sets '*port_nop' to the new port's port
530 * number (if 'port_nop' is non-null).  On failure, returns a positive
531 * errno value and sets '*port_nop' to ODPP_NONE (if 'port_nop' is
/* NOTE(review): lines that read a caller-requested port number from
 * '*port_nop' and write the result back are missing from this fragment. */
534 dpif_port_add(struct dpif *dpif, struct netdev *netdev, odp_port_t *port_nop)
536 const char *netdev_name = netdev_get_name(netdev);
537 odp_port_t port_no = ODPP_NONE;
540 COVERAGE_INC(dpif_port_add);
/* Delegate to the provider; 'port_no' carries the request in and the
 * assigned number out. */
546 error = dpif->dpif_class->port_add(dpif, netdev, &port_no);
548 VLOG_DBG_RL(&dpmsg_rl, "%s: added %s as port %"PRIu32,
549 dpif_name(dpif), netdev_name, port_no);
551 VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
552 dpif_name(dpif), netdev_name, ovs_strerror(error));
561 /* Attempts to remove 'dpif''s port number 'port_no'. Returns 0 if successful,
562 * otherwise a positive errno value. */
564 dpif_port_del(struct dpif *dpif, odp_port_t port_no)
568 COVERAGE_INC(dpif_port_del);
570 error = dpif->dpif_class->port_del(dpif, port_no);
572 VLOG_DBG_RL(&dpmsg_rl, "%s: port_del(%"PRIu32")",
573 dpif_name(dpif), port_no);
575 log_operation(dpif, "port_del", error);
580 /* Makes a deep copy of 'src' into 'dst'. */
582 dpif_port_clone(struct dpif_port *dst, const struct dpif_port *src)
584 dst->name = xstrdup(src->name);
585 dst->type = xstrdup(src->type);
586 dst->port_no = src->port_no;
589 /* Frees memory allocated to members of 'dpif_port'.
591 * Do not call this function on a dpif_port obtained from
592 * dpif_port_dump_next(): that function retains ownership of the data in the
595 dpif_port_destroy(struct dpif_port *dpif_port)
597 free(dpif_port->name);
598 free(dpif_port->type);
601 /* Checks if port named 'devname' exists in 'dpif'. If so, returns
602 * true; otherwise, returns false. */
604 dpif_port_exists(const struct dpif *dpif, const char *devname)
606 int error = dpif->dpif_class->port_query_by_name(dpif, devname, NULL);
607 if (error != 0 && error != ENOENT && error != ENODEV) {
608 VLOG_WARN_RL(&error_rl, "%s: failed to query port %s: %s",
609 dpif_name(dpif), devname, ovs_strerror(error));
615 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and
616 * initializes '*port' appropriately; on failure, returns a positive errno
619 * The caller owns the data in 'port' and must free it with
620 * dpif_port_destroy() when it is no longer needed. */
622 dpif_port_query_by_number(const struct dpif *dpif, odp_port_t port_no,
623 struct dpif_port *port)
625 int error = dpif->dpif_class->port_query_by_number(dpif, port_no, port);
627 VLOG_DBG_RL(&dpmsg_rl, "%s: port %"PRIu32" is device %s",
628 dpif_name(dpif), port_no, port->name);
630 memset(port, 0, sizeof *port);
631 VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu32": %s",
632 dpif_name(dpif), port_no, ovs_strerror(error));
637 /* Looks up port named 'devname' in 'dpif'.  On success, returns 0 and
638 * initializes '*port' appropriately; on failure, returns a positive errno
/* On failure '*port' is zeroed so the caller never reads garbage. */
641 * The caller owns the data in 'port' and must free it with
642 * dpif_port_destroy() when it is no longer needed. */
/* NOTE(review): the success/failure branch structure, the VLOG_RL macro
 * invocation head, and the final return are missing from this fragment. */
644 dpif_port_query_by_name(const struct dpif *dpif, const char *devname,
645 struct dpif_port *port)
647 int error = dpif->dpif_class->port_query_by_name(dpif, devname, port);
649 VLOG_DBG_RL(&dpmsg_rl, "%s: device %s is on port %"PRIu32,
650 dpif_name(dpif), devname, port->port_no);
652 memset(port, 0, sizeof *port);
654 /* For ENOENT or ENODEV we use DBG level because the caller is probably
655 * interested in whether 'dpif' actually has a port 'devname', so that
656 * it's not an issue worth logging if it doesn't.  Other errors are
657 * uncommon and more likely to indicate a real problem. */
659 error == ENOENT || error == ENODEV ? VLL_DBG : VLL_WARN,
660 "%s: failed to query port %s: %s",
661 dpif_name(dpif), devname, ovs_strerror(error));
666 /* Returns the Netlink PID value to supply in OVS_ACTION_ATTR_USERSPACE
667 * actions as the OVS_USERSPACE_ATTR_PID attribute's value, for use in
668 * flows whose packets arrived on port 'port_no'. In the case where the
669 * provider allocates multiple Netlink PIDs to a single port, it may use
670 * 'hash' to spread load among them. The caller need not use a particular
671 * hash function; a 5-tuple hash is suitable.
673 * (The datapath implementation might use some different hash function for
674 * distributing packets received via flow misses among PIDs. This means
675 * that packets received via flow misses might be reordered relative to
676 * packets received via userspace actions. This is not ordinarily a
679 * A 'port_no' of ODPP_NONE is a special case: it returns a reserved PID, not
680 * allocated to any port, that the client may use for special purposes.
682 * The return value is only meaningful when DPIF_UC_ACTION has been enabled in
683 * the 'dpif''s listen mask. It is allowed to change when DPIF_UC_ACTION is
684 * disabled and then re-enabled, so a client that does that must be prepared to
685 * update all of the flows that it installed that contain
686 * OVS_ACTION_ATTR_USERSPACE actions. */
688 dpif_port_get_pid(const struct dpif *dpif, odp_port_t port_no, uint32_t hash)
690 return (dpif->dpif_class->port_get_pid
691 ? (dpif->dpif_class->port_get_pid)(dpif, port_no, hash)
695 /* Looks up port number 'port_no' in 'dpif'. On success, returns 0 and copies
696 * the port's name into the 'name_size' bytes in 'name', ensuring that the
697 * result is null-terminated. On failure, returns a positive errno value and
698 * makes 'name' the empty string. */
700 dpif_port_get_name(struct dpif *dpif, odp_port_t port_no,
701 char *name, size_t name_size)
703 struct dpif_port port;
706 ovs_assert(name_size > 0);
708 error = dpif_port_query_by_number(dpif, port_no, &port);
710 ovs_strlcpy(name, port.name, name_size);
711 dpif_port_destroy(&port);
718 /* Initializes 'dump' to begin dumping the ports in a dpif.
720 * This function provides no status indication. An error status for the entire
721 * dump operation is provided when it is completed by calling
722 * dpif_port_dump_done().
725 dpif_port_dump_start(struct dpif_port_dump *dump, const struct dpif *dpif)
728 dump->error = dpif->dpif_class->port_dump_start(dpif, &dump->state);
729 log_operation(dpif, "port_dump_start", dump->error);
732 /* Attempts to retrieve another port from 'dump', which must have been
733 * initialized with dpif_port_dump_start(). On success, stores a new dpif_port
734 * into 'port' and returns true. On failure, returns false.
736 * Failure might indicate an actual error or merely that the last port has been
737 * dumped. An error status for the entire dump operation is provided when it
738 * is completed by calling dpif_port_dump_done().
740 * The dpif owns the data stored in 'port'. It will remain valid until at
741 * least the next time 'dump' is passed to dpif_port_dump_next() or
742 * dpif_port_dump_done(). */
744 dpif_port_dump_next(struct dpif_port_dump *dump, struct dpif_port *port)
746 const struct dpif *dpif = dump->dpif;
752 dump->error = dpif->dpif_class->port_dump_next(dpif, dump->state, port);
753 if (dump->error == EOF) {
754 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all ports", dpif_name(dpif));
756 log_operation(dpif, "port_dump_next", dump->error);
760 dpif->dpif_class->port_dump_done(dpif, dump->state);
766 /* Completes port table dump operation 'dump', which must have been initialized
767 * with dpif_port_dump_start(). Returns 0 if the dump operation was
768 * error-free, otherwise a positive errno value describing the problem. */
770 dpif_port_dump_done(struct dpif_port_dump *dump)
772 const struct dpif *dpif = dump->dpif;
774 dump->error = dpif->dpif_class->port_dump_done(dpif, dump->state);
775 log_operation(dpif, "port_dump_done", dump->error);
777 return dump->error == EOF ? 0 : dump->error;
780 /* Polls for changes in the set of ports in 'dpif'. If the set of ports in
781 * 'dpif' has changed, this function does one of the following:
783 * - Stores the name of the device that was added to or deleted from 'dpif' in
784 * '*devnamep' and returns 0. The caller is responsible for freeing
785 * '*devnamep' (with free()) when it no longer needs it.
787 * - Returns ENOBUFS and sets '*devnamep' to NULL.
789 * This function may also return 'false positives', where it returns 0 and
790 * '*devnamep' names a device that was not actually added or deleted or it
791 * returns ENOBUFS without any change.
793 * Returns EAGAIN if the set of ports in 'dpif' has not changed. May also
794 * return other positive errno values to indicate that something has gone
797 dpif_port_poll(const struct dpif *dpif, char **devnamep)
799 int error = dpif->dpif_class->port_poll(dpif, devnamep);
806 /* Arranges for the poll loop to wake up when port_poll(dpif) will return a
807 * value other than EAGAIN. */
809 dpif_port_poll_wait(const struct dpif *dpif)
811 dpif->dpif_class->port_poll_wait(dpif);
814 /* Extracts the flow stats for a packet. The 'flow' and 'packet'
815 * arguments must have been initialized through a call to flow_extract().
816 * 'used' is stored into stats->used. */
818 dpif_flow_stats_extract(const struct flow *flow, const struct dp_packet *packet,
819 long long int used, struct dpif_flow_stats *stats)
821 stats->tcp_flags = ntohs(flow->tcp_flags);
822 stats->n_bytes = dp_packet_size(packet);
823 stats->n_packets = 1;
827 /* Appends a human-readable representation of 'stats' to 's'. */
829 dpif_flow_stats_format(const struct dpif_flow_stats *stats, struct ds *s)
831 ds_put_format(s, "packets:%"PRIu64", bytes:%"PRIu64", used:",
832 stats->n_packets, stats->n_bytes);
834 ds_put_format(s, "%.3fs", (time_msec() - stats->used) / 1000.0);
836 ds_put_format(s, "never");
838 if (stats->tcp_flags) {
839 ds_put_cstr(s, ", flags:");
840 packet_format_tcp_flags(s, stats->tcp_flags);
844 /* Places the hash of the 'key_len' bytes starting at 'key' into '*hash'. */
846 dpif_flow_hash(const struct dpif *dpif OVS_UNUSED,
847 const void *key, size_t key_len, ovs_u128 *hash)
849 static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
850 static uint32_t secret;
852 if (ovsthread_once_start(&once)) {
853 secret = random_uint32();
854 ovsthread_once_done(&once);
856 hash_bytes128(key, key_len, secret, hash);
857 uuid_set_bits_v4((struct uuid *)hash);
860 /* Deletes all flows from 'dpif'. Returns 0 if successful, otherwise a
861 * positive errno value. */
863 dpif_flow_flush(struct dpif *dpif)
867 COVERAGE_INC(dpif_flow_flush);
869 error = dpif->dpif_class->flow_flush(dpif);
870 log_operation(dpif, "flow_flush", error);
874 /* Attempts to install 'key' into the datapath, fetches it, then deletes it.
875 * Returns true if the datapath supported installing 'flow', false otherwise.
/* NOTE(review): declarations ('struct ofpbuf reply;', 'int error;'), the
 * error-handling branches, and several closing braces are missing from this
 * fragment. */
878 dpif_probe_feature(struct dpif *dpif, const char *name,
879 const struct ofpbuf *key, const ovs_u128 *ufid)
881 struct dpif_flow flow;
883 uint64_t stub[DPIF_FLOW_BUFSIZE / 8];
884 bool enable_feature = false;
887 /* Use DPIF_FP_MODIFY to cover the case where ovs-vswitchd is killed (and
888 * restarted) at just the right time such that feature probes from the
889 * previous run are still present in the datapath. */
890 error = dpif_flow_put(dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY | DPIF_FP_PROBE,
891 key->data, key->size, NULL, 0, NULL, 0,
892 ufid, PMD_ID_NULL, NULL);
/* EINVAL is the expected "not supported" answer; anything else is worth a
 * warning. */
894 if (error != EINVAL) {
895 VLOG_WARN("%s: %s flow probe failed (%s)",
896 dpif_name(dpif), name, ovs_strerror(error));
/* Read the flow back; the feature counts as supported only if the fetch
 * succeeds and (when a UFID was supplied) the UFID round-trips. */
901 ofpbuf_use_stack(&reply, &stub, sizeof stub);
902 error = dpif_flow_get(dpif, key->data, key->size, ufid,
903 PMD_ID_NULL, &reply, &flow);
905 && (!ufid || (flow.ufid_present
906 && ovs_u128_equals(ufid, &flow.ufid)))) {
907 enable_feature = true;
/* Always clean the probe flow back out of the datapath. */
910 error = dpif_flow_del(dpif, key->data, key->size, ufid,
913 VLOG_WARN("%s: failed to delete %s feature probe flow",
914 dpif_name(dpif), name);
917 return enable_feature;
920 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_GET. */
922 dpif_flow_get(struct dpif *dpif,
923 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
924 const unsigned pmd_id, struct ofpbuf *buf, struct dpif_flow *flow)
929 op.type = DPIF_OP_FLOW_GET;
930 op.u.flow_get.key = key;
931 op.u.flow_get.key_len = key_len;
932 op.u.flow_get.ufid = ufid;
933 op.u.flow_get.pmd_id = pmd_id;
934 op.u.flow_get.buffer = buf;
936 memset(flow, 0, sizeof *flow);
937 op.u.flow_get.flow = flow;
938 op.u.flow_get.flow->key = key;
939 op.u.flow_get.flow->key_len = key_len;
942 dpif_operate(dpif, &opp, 1);
947 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_PUT. */
949 dpif_flow_put(struct dpif *dpif, enum dpif_flow_put_flags flags,
950 const struct nlattr *key, size_t key_len,
951 const struct nlattr *mask, size_t mask_len,
952 const struct nlattr *actions, size_t actions_len,
953 const ovs_u128 *ufid, const unsigned pmd_id,
954 struct dpif_flow_stats *stats)
959 op.type = DPIF_OP_FLOW_PUT;
960 op.u.flow_put.flags = flags;
961 op.u.flow_put.key = key;
962 op.u.flow_put.key_len = key_len;
963 op.u.flow_put.mask = mask;
964 op.u.flow_put.mask_len = mask_len;
965 op.u.flow_put.actions = actions;
966 op.u.flow_put.actions_len = actions_len;
967 op.u.flow_put.ufid = ufid;
968 op.u.flow_put.pmd_id = pmd_id;
969 op.u.flow_put.stats = stats;
972 dpif_operate(dpif, &opp, 1);
977 /* A dpif_operate() wrapper for performing a single DPIF_OP_FLOW_DEL. */
979 dpif_flow_del(struct dpif *dpif,
980 const struct nlattr *key, size_t key_len, const ovs_u128 *ufid,
981 const unsigned pmd_id, struct dpif_flow_stats *stats)
986 op.type = DPIF_OP_FLOW_DEL;
987 op.u.flow_del.key = key;
988 op.u.flow_del.key_len = key_len;
989 op.u.flow_del.ufid = ufid;
990 op.u.flow_del.pmd_id = pmd_id;
991 op.u.flow_del.stats = stats;
992 op.u.flow_del.terse = false;
995 dpif_operate(dpif, &opp, 1);
1000 /* Creates and returns a new 'struct dpif_flow_dump' for iterating through the
1001 * flows in 'dpif'. If 'terse' is true, then only UFID and statistics will
1002 * be returned in the dump. Otherwise, all fields will be returned.
1004 * This function always successfully returns a dpif_flow_dump. Error
1005 * reporting is deferred to dpif_flow_dump_destroy(). */
1006 struct dpif_flow_dump *
1007 dpif_flow_dump_create(const struct dpif *dpif, bool terse)
1009 return dpif->dpif_class->flow_dump_create(dpif, terse);
1012 /* Destroys 'dump', which must have been created with dpif_flow_dump_create().
1013 * All dpif_flow_dump_thread structures previously created for 'dump' must
1014 * previously have been destroyed.
1016 * Returns 0 if the dump operation was error-free, otherwise a positive errno
1017 * value describing the problem. */
1019 dpif_flow_dump_destroy(struct dpif_flow_dump *dump)
1021 const struct dpif *dpif = dump->dpif;
1022 int error = dpif->dpif_class->flow_dump_destroy(dump);
1023 log_operation(dpif, "flow_dump_destroy", error);
1024 return error == EOF ? 0 : error;
1027 /* Returns new thread-local state for use with dpif_flow_dump_next(). */
1028 struct dpif_flow_dump_thread *
1029 dpif_flow_dump_thread_create(struct dpif_flow_dump *dump)
1031 return dump->dpif->dpif_class->flow_dump_thread_create(dump);
1034 /* Releases 'thread'. */
1036 dpif_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread)
1038 thread->dpif->dpif_class->flow_dump_thread_destroy(thread);
1041 /* Attempts to retrieve up to 'max_flows' more flows from 'thread'. Returns 0
1042 * if and only if no flows remained to be retrieved, otherwise a positive
1043 * number reflecting the number of elements in 'flows[]' that were updated.
1044 * The number of flows returned might be less than 'max_flows' because
1045 * fewer than 'max_flows' remained, because this particular datapath does not
1046 * benefit from batching, or because an error occurred partway through
1047 * retrieval. Thus, the caller should continue calling until a 0 return value,
1048 * even if intermediate return values are less than 'max_flows'.
1050 * No error status is immediately provided. An error status for the entire
1051 * dump operation is provided when it is completed by calling
1052 * dpif_flow_dump_destroy().
1054 * All of the data stored into 'flows' is owned by the datapath, not by the
1055 * caller, and the caller must not modify or free it. The datapath guarantees
1056 * that it remains accessible and unchanged until the first of:
1057 * - The next call to dpif_flow_dump_next() for 'thread', or
1058 * - The next rcu quiescent period. */
1060 dpif_flow_dump_next(struct dpif_flow_dump_thread *thread,
1061 struct dpif_flow *flows, int max_flows)
1063 struct dpif *dpif = thread->dpif;
1066 ovs_assert(max_flows > 0);
1067 n = dpif->dpif_class->flow_dump_next(thread, flows, max_flows);
1069 struct dpif_flow *f;
1071 for (f = flows; f < &flows[n] && should_log_flow_message(0); f++) {
1072 log_flow_message(dpif, 0, "flow_dump",
1073 f->key, f->key_len, f->mask, f->mask_len,
1074 &f->ufid, &f->stats, f->actions, f->actions_len);
1077 VLOG_DBG_RL(&dpmsg_rl, "%s: dumped all flows", dpif_name(dpif));
/* Context threaded through dpif_execute_helper_cb(): the target dpif and the
 * first error encountered while executing the helper actions. */
struct dpif_execute_helper_aux {
    struct dpif *dpif;
    int error;
};
1087 /* This is called for actions that need the context of the datapath to be
/* Callback for odp_execute_actions(): actions the datapath itself must
 * perform (output, tunnel push/pop, userspace, recirc, ct) are re-submitted
 * to the dpif as a one-action execute; everything else was already handled
 * in userspace by odp-execute.  NOTE(review): several lines (braces, breaks,
 * 'execute.probe'/'needs_help' surroundings) are missing from this
 * fragment. */
1090 dpif_execute_helper_cb(void *aux_, struct dp_packet **packets, int cnt,
1091 const struct nlattr *action, bool may_steal OVS_UNUSED)
1093 struct dpif_execute_helper_aux *aux = aux_;
1094 int type = nl_attr_type(action);
1095 struct dp_packet *packet = *packets;
/* This helper is only ever handed a single packet at a time. */
1097 ovs_assert(cnt == 1);
1099 switch ((enum ovs_action_attr)type) {
1100 case OVS_ACTION_ATTR_CT:
1101 case OVS_ACTION_ATTR_OUTPUT:
1102 case OVS_ACTION_ATTR_TUNNEL_PUSH:
1103 case OVS_ACTION_ATTR_TUNNEL_POP:
1104 case OVS_ACTION_ATTR_USERSPACE:
1105 case OVS_ACTION_ATTR_RECIRC: {
1106 struct dpif_execute execute;
1107 struct ofpbuf execute_actions;
1108 uint64_t stub[256 / 8];
1109 struct pkt_metadata *md = &packet->md;
/* Nonzero tunnel destination means tunnel metadata is attached. */
1111 if (md->tunnel.ip_dst) {
1112 /* The Linux kernel datapath throws away the tunnel information
1113 * that we supply as metadata. We have to use a "set" action to
/* ...re-attach it, prepended to the single action being executed. */
1115 ofpbuf_use_stub(&execute_actions, stub, sizeof stub);
1116 odp_put_tunnel_action(&md->tunnel, &execute_actions);
1117 ofpbuf_put(&execute_actions, action, NLA_ALIGN(action->nla_len));
1119 execute.actions = execute_actions.data;
1120 execute.actions_len = execute_actions.size;
1122 execute.actions = action;
1123 execute.actions_len = NLA_ALIGN(action->nla_len);
1126 execute.packet = packet;
1127 execute.needs_help = false;
1128 execute.probe = false;
/* Record only the first error; later ones would overwrite it.
 * NOTE(review): verify ordering against the full file. */
1130 aux->error = dpif_execute(aux->dpif, &execute);
1131 log_execute_message(aux->dpif, &execute, true, aux->error);
1133 if (md->tunnel.ip_dst) {
1134 ofpbuf_uninit(&execute_actions);
/* The remaining action types are fully handled by odp-execute in
 * userspace, so nothing to do here. */
1139 case OVS_ACTION_ATTR_HASH:
1140 case OVS_ACTION_ATTR_PUSH_VLAN:
1141 case OVS_ACTION_ATTR_POP_VLAN:
1142 case OVS_ACTION_ATTR_PUSH_MPLS:
1143 case OVS_ACTION_ATTR_POP_MPLS:
1144 case OVS_ACTION_ATTR_SET:
1145 case OVS_ACTION_ATTR_SET_MASKED:
1146 case OVS_ACTION_ATTR_SAMPLE:
1147 case OVS_ACTION_ATTR_UNSPEC:
1148 case __OVS_ACTION_ATTR_MAX:
1153 /* Executes 'execute' by performing most of the actions in userspace and
1154 * passing the fully constructed packets to 'dpif' for output and userspace
1157 * This helps with actions that a given 'dpif' doesn't implement directly. */
1159 dpif_execute_with_help(struct dpif *dpif, struct dpif_execute *execute)
/* aux = { .dpif = dpif, .error = 0 }; the helper cb records any error. */
1161 struct dpif_execute_helper_aux aux = {dpif, 0};
1162 struct dp_packet *pp;
1164 COVERAGE_INC(dpif_execute_with_help);
1166 pp = execute->packet;
/* Walk the action list in userspace; dpif_execute_helper_cb forwards
 * datapath-only actions back to dpif_execute().  The accumulated error is
 * presumably returned via aux.error — the return statement is not visible
 * in this extract. */
1167 odp_execute_actions(&aux, &pp, 1, false, execute->actions,
1168 execute->actions_len, dpif_execute_helper_cb);
1172 /* Returns true if the datapath needs help executing 'execute'. */
1174 dpif_execute_needs_help(const struct dpif_execute *execute)
/* Help is needed when the caller explicitly requested it or when the
 * Netlink-encoded action list exceeds the maximum attribute size. */
1176 return execute->needs_help || nl_attr_oversized(execute->actions_len);
1179 /* A dpif_operate() wrapper for performing a single DPIF_OP_EXECUTE. */
1181 dpif_execute(struct dpif *dpif, struct dpif_execute *execute)
/* Zero-length action lists are a no-op; nothing is dispatched. */
1183 if (execute->actions_len) {
1184 struct dpif_op *opp;
/* Build a single-op batch and dispatch it through dpif_operate().
 * NOTE(review): the declaration of 'op', the 'opp = &op' assignment, and
 * the return of op.error are not visible in this extract. */
1187 op.type = DPIF_OP_EXECUTE;
1188 op.u.execute = *execute;
1191 dpif_operate(dpif, &opp, 1);
1199 /* Executes each of the 'n_ops' operations in 'ops' on 'dpif', in the order in
1200 * which they are specified. Places each operation's results in the "output"
1201 * members documented in comments, and 0 in the 'error' member on success or a
1202 * positive errno on failure. */
1204 dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
/* NOTE(review): the enclosing 'while (n_ops > 0)' loop, braces, 'break',
 * and ops/n_ops advancement lines are not visible in this extract; code
 * lines below are kept byte-for-byte. */
1209 /* Count 'chunk', the number of ops that can be executed without
1210 * needing any help. Ops that need help should be rare, so we
1211 * expect this to ordinarily be 'n_ops', that is, all the ops. */
1212 for (chunk = 0; chunk < n_ops; chunk++) {
1213 struct dpif_op *op = ops[chunk];
1215 if (op->type == DPIF_OP_EXECUTE
1216 && dpif_execute_needs_help(&op->u.execute)) {
1222 /* Execute a chunk full of ops that the dpif provider can
1223 * handle itself, without help. */
1226 dpif->dpif_class->operate(dpif, ops, chunk);
/* Post-process each completed op: bump coverage counters, log, and zero
 * out stats/flow output members on error so callers see clean values. */
1228 for (i = 0; i < chunk; i++) {
1229 struct dpif_op *op = ops[i];
1230 int error = op->error;
1233 case DPIF_OP_FLOW_PUT: {
1234 struct dpif_flow_put *put = &op->u.flow_put;
1236 COVERAGE_INC(dpif_flow_put);
1237 log_flow_put_message(dpif, put, error);
1238 if (error && put->stats) {
1239 memset(put->stats, 0, sizeof *put->stats);
1244 case DPIF_OP_FLOW_GET: {
1245 struct dpif_flow_get *get = &op->u.flow_get;
1247 COVERAGE_INC(dpif_flow_get);
/* On error the output flow is zeroed (the 'if (error)' guard line is
 * not visible in this extract — TODO confirm). */
1249 memset(get->flow, 0, sizeof *get->flow);
1251 log_flow_get_message(dpif, get, error);
1256 case DPIF_OP_FLOW_DEL: {
1257 struct dpif_flow_del *del = &op->u.flow_del;
1259 COVERAGE_INC(dpif_flow_del);
1260 log_flow_del_message(dpif, del, error);
1261 if (error && del->stats) {
1262 memset(del->stats, 0, sizeof *del->stats);
1267 case DPIF_OP_EXECUTE:
1268 COVERAGE_INC(dpif_execute);
1269 log_execute_message(dpif, &op->u.execute, false, error);
/* Reached when the first op needs help (chunk == 0): execute exactly one
 * op via the userspace helper, then continue with the rest. */
1277 /* Help the dpif provider to execute one op. */
1278 struct dpif_op *op = ops[0];
1280 COVERAGE_INC(dpif_execute);
1281 op->error = dpif_execute_with_help(dpif, &op->u.execute);
1288 /* Returns a string that represents 'type', for use in log messages. */
1290 dpif_upcall_type_to_string(enum dpif_upcall_type type)
/* (switch on 'type'; the 'switch (' line itself is not visible here.) */
1293 case DPIF_UC_MISS: return "miss";
1294 case DPIF_UC_ACTION: return "action";
/* DPIF_N_UC_TYPES is a count, not a real type; fold it into default. */
1295 case DPIF_N_UC_TYPES: default: return "<unknown>";
1299 /* Enables or disables receiving packets with dpif_recv() on 'dpif'. Returns 0
1300 * if successful, otherwise a positive errno value.
1302 * Turning packet receive off and then back on may change the Netlink PID
1303 * assignments returned by dpif_port_get_pid(). If the client does this, it
1304 * must update all of the flows that have OVS_ACTION_ATTR_USERSPACE actions
1305 * using the new PID assignment. */
1307 dpif_recv_set(struct dpif *dpif, bool enable)
/* recv_set is optional in the provider class; skip when absent.  (The
 * 'int error = 0;' line and the final return are not visible here.) */
1311 if (dpif->dpif_class->recv_set) {
1312 error = dpif->dpif_class->recv_set(dpif, enable);
1313 log_operation(dpif, "recv_set", error);
1318 /* Refreshes the poll loops and Netlink sockets associated to each port,
1319 * when the number of upcall handlers (upcall receiving thread) is changed
1320 * to 'n_handlers' and receiving packets for 'dpif' is enabled by
1323 * Since multiple upcall handlers can read upcalls simultaneously from
1324 * 'dpif', each port can have multiple Netlink sockets, one per upcall
1325 * handler. So, handlers_set() is responsible for the following tasks:
1327 * When receiving upcall is enabled, extends or creates the
1328 * configuration to support:
1330 * - 'n_handlers' Netlink sockets for each port.
1332 * - 'n_handlers' poll loops, one for each upcall handler.
1334 * - registering the Netlink sockets for the same upcall handler to
1335 * the corresponding poll loop.
1337 * Returns 0 if successful, otherwise a positive errno value. */
1339 dpif_handlers_set(struct dpif *dpif, uint32_t n_handlers)
/* handlers_set is optional in the provider class; skip when absent. */
1343 if (dpif->dpif_class->handlers_set) {
1344 error = dpif->dpif_class->handlers_set(dpif, n_handlers);
1345 log_operation(dpif, "handlers_set", error);
/* Registers 'cb' with user data 'aux' as the datapath-purge callback, if
 * the provider supports one; otherwise a no-op. */
1351 dpif_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb, void *aux)
1353 if (dpif->dpif_class->register_dp_purge_cb) {
1354 dpif->dpif_class->register_dp_purge_cb(dpif, cb, aux)
/* Registers 'cb' with user data 'aux' as the upcall callback, if the
 * provider supports one; otherwise a no-op. */
1359 dpif_register_upcall_cb(struct dpif *dpif, upcall_callback *cb, void *aux)
1361 if (dpif->dpif_class->register_upcall_cb) {
1362 dpif->dpif_class->register_upcall_cb(dpif, cb, aux);
/* Enables delivery of upcalls to the registered callback, if the provider
 * supports it; otherwise a no-op. */
1367 dpif_enable_upcall(struct dpif *dpif)
1369 if (dpif->dpif_class->enable_upcall) {
1370 dpif->dpif_class->enable_upcall(dpif);
/* Disables delivery of upcalls to the registered callback, if the provider
 * supports it; otherwise a no-op. */
1375 dpif_disable_upcall(struct dpif *dpif)
1377 if (dpif->dpif_class->disable_upcall) {
1378 dpif->dpif_class->disable_upcall(dpif);
/* Rate-limited DBG dump of 'upcall': formats its packet and flow key and
 * logs both with the upcall type. */
1383 dpif_print_packet(struct dpif *dpif, struct dpif_upcall *upcall)
/* VLOG_DROP_DBG() both checks the level and consumes a rate-limit token. */
1385 if (!VLOG_DROP_DBG(&dpmsg_rl)) {
/* NOTE(review): declarations of 'packet' and 'flow' and their cleanup
 * (ds_destroy/free) are not visible in this extract. */
1389 packet = ofp_packet_to_string(dp_packet_data(&upcall->packet),
1390 dp_packet_size(&upcall->packet));
1393 odp_flow_key_format(upcall->key, upcall->key_len, &flow);
1395 VLOG_DBG("%s: %s upcall:\n%s\n%s",
1396 dpif_name(dpif), dpif_upcall_type_to_string(upcall->type),
1397 ds_cstr(&flow), packet);
1404 /* If 'dpif' creates its own I/O polling threads, refreshes poll threads
 * ... (comment truncated in this extract) */
1407 dpif_poll_threads_set(struct dpif *dpif, unsigned int n_rxqs,
/* poll_threads_set is optional in the provider class; skip when absent.
 * (The 'cmask' parameter line and 'int error' declaration are not visible
 * in this extract.) */
1412 if (dpif->dpif_class->poll_threads_set) {
1413 error = dpif->dpif_class->poll_threads_set(dpif, n_rxqs, cmask);
1415 log_operation(dpif, "poll_threads_set", error);
1422 /* Polls for an upcall from 'dpif' for an upcall handler. Since there
1423 * there can be multiple poll loops, 'handler_id' is needed as index to
1424 * identify the corresponding poll loop. If successful, stores the upcall
1425 * into '*upcall', using 'buf' for storage. Should only be called if
1426 * 'recv_set' has been used to enable receiving packets from 'dpif'.
1428 * 'upcall->key' and 'upcall->userdata' point into data in the caller-provided
1429 * 'buf', so their memory cannot be freed separately from 'buf'.
1431 * The caller owns the data of 'upcall->packet' and may modify it. If
1432 * packet's headroom is exhausted as it is manipulated, 'upcall->packet'
1433 * will be reallocated. This requires the data of 'upcall->packet' to be
1434 * released with ofpbuf_uninit() before 'upcall' is destroyed. However,
1435 * when an error is returned, the 'upcall->packet' may be uninitialized
1436 * and should not be released.
1438 * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
1439 * if no upcall is immediately available. */
1441 dpif_recv(struct dpif *dpif, uint32_t handler_id, struct dpif_upcall *upcall,
/* (The 'struct ofpbuf *buf' parameter line and the 'int error' default —
 * presumably EAGAIN when the provider lacks recv — are not visible in this
 * extract.) */
1446 if (dpif->dpif_class->recv) {
1447 error = dpif->dpif_class->recv(dpif, handler_id, upcall, buf);
/* On success, dump the upcall at DBG level; EAGAIN is routine and is
 * deliberately not logged as a failure. */
1449 dpif_print_packet(dpif, upcall);
1450 } else if (error != EAGAIN) {
1451 log_operation(dpif, "recv", error);
1457 /* Discards all messages that would otherwise be received by dpif_recv() on
 * 'dpif'. (comment truncated in this extract) */
1460 dpif_recv_purge(struct dpif *dpif)
1462 COVERAGE_INC(dpif_purge);
/* recv_purge is optional in the provider class; skip when absent. */
1463 if (dpif->dpif_class->recv_purge) {
1464 dpif->dpif_class->recv_purge(dpif);
1468 /* Arranges for the poll loop for an upcall handler to wake up when 'dpif'
1469 * 'dpif' has a message queued to be received with the recv member
1470 * function. Since there can be multiple poll loops, 'handler_id' is
1471 * needed as index to identify the corresponding poll loop. */
1473 dpif_recv_wait(struct dpif *dpif, uint32_t handler_id)
/* recv_wait is optional in the provider class; skip when absent. */
1475 if (dpif->dpif_class->recv_wait) {
1476 dpif->dpif_class->recv_wait(dpif, handler_id);
1481 * Return the datapath version. Caller is responsible for freeing
/* (The comment's opening and closing delimiters, the 'char *' return-type
 * line, and the 'return version;' statement are not visible in this
 * extract.)  Returns NULL when the provider cannot report a version. */
1485 dpif_get_dp_version(const struct dpif *dpif)
1487 char *version = NULL;
1489 if (dpif->dpif_class->get_datapath_version) {
1490 version = dpif->dpif_class->get_datapath_version();
1496 /* Obtains the NetFlow engine type and engine ID for 'dpif' into '*engine_type'
1497 * and '*engine_id', respectively. */
1499 dpif_get_netflow_ids(const struct dpif *dpif,
1500 uint8_t *engine_type, uint8_t *engine_id)
/* Simple copies of the values cached on the dpif by dpif_init(). */
1502 *engine_type = dpif->netflow_engine_type;
1503 *engine_id = dpif->netflow_engine_id;
1506 /* Translates OpenFlow queue ID 'queue_id' (in host byte order) into a priority
1507 * value used for setting packet priority.
1508 * On success, returns 0 and stores the priority into '*priority'.
1509 * On failure, returns a positive errno value and stores 0 into '*priority'. */
1511 dpif_queue_to_priority(const struct dpif *dpif, uint32_t queue_id,
/* queue_to_priority is optional: when absent the ternary presumably yields
 * an error (the EOPNOTSUPP arm, the '*priority' error-path zeroing, and the
 * return are not visible in this extract — TODO confirm). */
1514 int error = (dpif->dpif_class->queue_to_priority
1515 ? dpif->dpif_class->queue_to_priority(dpif, queue_id,
1521 log_operation(dpif, "queue_to_priority", error);
/* Initializes the base 'dpif' for provider 'dpif_class' with datapath name
 * 'name' and the given NetFlow engine identifiers.  Allocates both the base
 * name and the "type@name" full name (freed again by dpif_uninit()). */
1526 dpif_init(struct dpif *dpif, const struct dpif_class *dpif_class,
1528 uint8_t netflow_engine_type, uint8_t netflow_engine_id)
1530 dpif->dpif_class = dpif_class;
1531 dpif->base_name = xstrdup(name);
/* e.g. "system@ovs-system": provider type prefixed to the datapath name. */
1532 dpif->full_name = xasprintf("%s@%s", dpif_class->type, name);
1533 dpif->netflow_engine_type = netflow_engine_type;
1534 dpif->netflow_engine_id = netflow_engine_id;
1537 /* Undoes the results of initialization.
1539 * Normally this function only needs to be called from dpif_close().
1540 * However, it may be called by providers due to an error on opening
1541 * that occurs after initialization. It this case dpif_close() would
1542 * never be called. */
1544 dpif_uninit(struct dpif *dpif, bool close)
/* Save the name pointers before ->close() may free the dpif object itself.
 * (The 'if (close)' guard and the free(base_name)/free(full_name) calls are
 * not visible in this extract.) */
1546 char *base_name = dpif->base_name;
1547 char *full_name = dpif->full_name;
1550 dpif->dpif_class->close(dpif);
/* Logs the outcome of 'operation' on 'dpif': success at rate-limited DBG,
 * failure at rate-limited WARN, rendering 'error' either as an OFPERR name
 * or as an errno string. */
1558 log_operation(const struct dpif *dpif, const char *operation, int error)
1561 VLOG_DBG_RL(&dpmsg_rl, "%s: %s success", dpif_name(dpif), operation);
/* OpenFlow error codes get their symbolic name instead of strerror(). */
1562 } else if (ofperr_is_valid(error)) {
1563 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1564 dpif_name(dpif), operation, ofperr_get_name(error));
1566 VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
1567 dpif_name(dpif), operation, ovs_strerror(error));
/* Picks the vlog level for a flow operation that finished with 'error':
 * WARN for real failures, DBG for success and for expected EEXIST. */
1571 static enum vlog_level
1572 flow_message_log_level(int error)
1574 /* If flows arrive in a batch, userspace may push down multiple
1575 * unique flow definitions that overlap when wildcards are applied.
1576 * Kernels that support flow wildcarding will reject these flows as
1577 * duplicates (EEXIST), so lower the log level to debug for these
1578 * types of messages. */
1579 return (error && error != EEXIST) ? VLL_WARN : VLL_DBG;
/* Returns true if a flow message at the level chosen for 'error' would pass
 * the applicable rate limiter (error_rl for failures, dpmsg_rl otherwise). */
1583 should_log_flow_message(int error)
1585 return !vlog_should_drop(THIS_MODULE, flow_message_log_level(error),
1586 error ? &error_rl : &dpmsg_rl);
/* Formats and logs one flow operation: "<dpif>: [failed to] <operation>
 * [(errno)] [ufid] key/mask [, stats] [, actions:...]".  'mask', 'ufid',
 * 'stats' and 'actions' may each be null/empty and are then omitted.
 * (The 'if (error)' / 'if (ufid)' / 'if (stats)' guard lines and ds_destroy
 * are not visible in this extract.) */
1590 log_flow_message(const struct dpif *dpif, int error, const char *operation,
1591 const struct nlattr *key, size_t key_len,
1592 const struct nlattr *mask, size_t mask_len,
1593 const ovs_u128 *ufid, const struct dpif_flow_stats *stats,
1594 const struct nlattr *actions, size_t actions_len)
1596 struct ds ds = DS_EMPTY_INITIALIZER;
1597 ds_put_format(&ds, "%s: ", dpif_name(dpif));
1599 ds_put_cstr(&ds, "failed to ");
1601 ds_put_format(&ds, "%s ", operation);
1603 ds_put_format(&ds, "(%s) ", ovs_strerror(error));
1606 odp_format_ufid(ufid, &ds);
1607 ds_put_cstr(&ds, " ");
1609 odp_flow_format(key, key_len, mask, mask_len, NULL, &ds, true);
1611 ds_put_cstr(&ds, ", ");
1612 dpif_flow_stats_format(stats, &ds);
1614 if (actions || actions_len) {
1615 ds_put_cstr(&ds, ", actions:");
1616 format_odp_actions(&ds, actions, actions_len);
/* Level follows the EEXIST-aware policy in flow_message_log_level(). */
1618 vlog(THIS_MODULE, flow_message_log_level(error), "%s", ds_cstr(&ds));
/* Logs a flow-put with its flag decorations ("put[create][modify][zero]").
 * Probe flows (DPIF_FP_PROBE) are intentionally not logged. */
1623 log_flow_put_message(struct dpif *dpif, const struct dpif_flow_put *put,
1626 if (should_log_flow_message(error) && !(put->flags & DPIF_FP_PROBE)) {
/* (Declaration of 's' and its ds_destroy are not visible in this
 * extract.) */
1630 ds_put_cstr(&s, "put");
1631 if (put->flags & DPIF_FP_CREATE) {
1632 ds_put_cstr(&s, "[create]");
1634 if (put->flags & DPIF_FP_MODIFY) {
1635 ds_put_cstr(&s, "[modify]");
1637 if (put->flags & DPIF_FP_ZERO_STATS) {
1638 ds_put_cstr(&s, "[zero]");
1640 log_flow_message(dpif, error, ds_cstr(&s),
1641 put->key, put->key_len, put->mask, put->mask_len,
1642 put->ufid, put->stats, put->actions,
/* Logs a flow-del; stats are only meaningful (and only shown) on success. */
1649 log_flow_del_message(struct dpif *dpif, const struct dpif_flow_del *del,
1652 if (should_log_flow_message(error)) {
1653 log_flow_message(dpif, error, "flow_del", del->key, del->key_len,
1654 NULL, 0, del->ufid, !error ? del->stats : NULL,
1659 /* Logs that 'execute' was executed on 'dpif' and completed with errno 'error'
1660 * (0 for success). 'subexecute' should be true if the execution is a result
1661 * of breaking down a larger execution that needed help, false otherwise.
1664 * XXX In theory, the log message could be deceptive because this function is
1665 * called after the dpif_provider's '->execute' function, which is allowed to
1666 * modify execute->packet and execute->md. In practice, though:
1668 * - dpif-netlink doesn't modify execute->packet or execute->md.
1670 * - dpif-netdev does modify them but it is less likely to have problems
1671 * because it is built into ovs-vswitchd and cannot have version skew,
1674 * It would still be better to avoid the potential problem. I don't know of a
1675 * good way to do that, though, that isn't expensive. */
1677 log_execute_message(struct dpif *dpif, const struct dpif_execute *execute,
1678 bool subexecute, int error)
/* Failures log at WARN (error_rl), successes at DBG (dpmsg_rl); probe
 * executions are never logged. */
1680 if (!(error ? VLOG_DROP_WARN(&error_rl) : VLOG_DROP_DBG(&dpmsg_rl))
1681 && !execute->probe) {
1682 struct ds ds = DS_EMPTY_INITIALIZER;
/* (Declaration of 'packet', its free(), ds_destroy, and the 'if (error)'
 * guard lines are not visible in this extract.) */
1685 packet = ofp_packet_to_string(dp_packet_data(execute->packet),
1686 dp_packet_size(execute->packet));
/* Prefix distinguishes sub-executions and oversized "super-" executes. */
1687 ds_put_format(&ds, "%s: %sexecute ",
1689 (subexecute ? "sub-"
1690 : dpif_execute_needs_help(execute) ? "super-"
1692 format_odp_actions(&ds, execute->actions, execute->actions_len);
1694 ds_put_format(&ds, " failed (%s)", ovs_strerror(error));
1696 ds_put_format(&ds, " on packet %s", packet);
1697 ds_put_format(&ds, " mtu %d", execute->mtu);
1698 vlog(THIS_MODULE, error ? VLL_WARN : VLL_DBG, "%s", ds_cstr(&ds));
/* Logs a flow-get: key from the request, mask/stats/actions from the
 * returned flow. */
1705 log_flow_get_message(const struct dpif *dpif, const struct dpif_flow_get *get,
1708 if (should_log_flow_message(error)) {
1709 log_flow_message(dpif, error, "flow_get",
1710 get->key, get->key_len,
1711 get->flow->mask, get->flow->mask_len,
1712 get->ufid, &get->flow->stats,
1713 get->flow->actions, get->flow->actions_len);
/* Reports whether 'dpif' can execute tunnel push/pop actions; per the code,
 * this is exactly the userspace (netdev) datapath. */
1718 dpif_supports_tnl_push_pop(const struct dpif *dpif)
1720 return dpif_is_netdev(dpif);