/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "coverage.h"
#include "dpif.h"
+#include "dp-packet.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "hash.h"
#include "list.h"
+#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
-#include "ofpbuf.h"
+#include "odp-netlink.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "poll-loop.h"
+#include "seq.h"
#include "shash.h"
#include "smap.h"
#include "sset.h"
#include "svec.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
+#include "flow.h"
VLOG_DEFINE_THIS_MODULE(netdev);
struct netdev_saved_flags {
struct netdev *netdev;
- struct list node; /* In struct netdev's saved_flags_list. */
+ struct ovs_list node; /* In struct netdev's saved_flags_list. */
enum netdev_flags saved_flags;
enum netdev_flags saved_values;
};
-static struct shash netdev_classes = SHASH_INITIALIZER(&netdev_classes);
+/* Protects 'netdev_shash' and the mutable members of struct netdev. */
+static struct ovs_mutex netdev_mutex = OVS_MUTEX_INITIALIZER;
/* All created network devices. */
-static struct shash netdev_shash = SHASH_INITIALIZER(&netdev_shash);
+static struct shash netdev_shash OVS_GUARDED_BY(netdev_mutex)
+ = SHASH_INITIALIZER(&netdev_shash);
+
+/* Protects 'netdev_classes' against insertions or deletions.
+ *
+ * This is a recursive mutex to allow recursive acquisition when calling into
+ * providers. For example, netdev_run() calls into provider 'run' functions,
+ * which might reasonably want to call one of the netdev functions that takes
+ * netdev_class_mutex. */
+static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex);
+
+/* Contains 'struct netdev_registered_class'es. */
+static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_mutex)
+ = HMAP_INITIALIZER(&netdev_classes);
+
+struct netdev_registered_class {
+ /* In 'netdev_classes', by class->type. */
+ struct hmap_node hmap_node OVS_GUARDED_BY(netdev_class_mutex);
+ const struct netdev_class *class OVS_GUARDED_BY(netdev_class_mutex);
+ /* Number of 'struct netdev's of this class. */
+ int ref_cnt OVS_GUARDED_BY(netdev_class_mutex);
+};
/* This is set pretty low because we probably won't learn anything from the
* additional log messages. */
static void restore_all_flags(void *aux OVS_UNUSED);
void update_device_args(struct netdev *, const struct shash *args);
+int
+netdev_n_txq(const struct netdev *netdev)
+{
+ return netdev->n_txq;
+}
+
+int
+netdev_n_rxq(const struct netdev *netdev)
+{
+ return netdev->n_rxq;
+}
+
+int
+netdev_requested_n_rxq(const struct netdev *netdev)
+{
+ return netdev->requested_n_rxq;
+}
+
+bool
+netdev_is_pmd(const struct netdev *netdev)
+{
+ return (!strcmp(netdev->netdev_class->type, "dpdk") ||
+ !strcmp(netdev->netdev_class->type, "dpdkr") ||
+ !strcmp(netdev->netdev_class->type, "dpdkvhostcuse") ||
+ !strcmp(netdev->netdev_class->type, "dpdkvhostuser"));
+}
+
+static void
+netdev_class_mutex_initialize(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+ if (ovsthread_once_start(&once)) {
+ ovs_mutex_init_recursive(&netdev_class_mutex);
+ ovsthread_once_done(&once);
+ }
+}
+
static void
netdev_initialize(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
- static bool inited;
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
- if (!inited) {
- inited = true;
+ if (ovsthread_once_start(&once)) {
+ netdev_class_mutex_initialize();
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
-#ifdef LINUX_DATAPATH
+#ifdef __linux__
netdev_register_provider(&netdev_linux_class);
netdev_register_provider(&netdev_internal_class);
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_bsd_class);
#endif
+#ifdef _WIN32
+ netdev_register_provider(&netdev_windows_class);
+ netdev_register_provider(&netdev_internal_class);
+ netdev_vport_tunnel_register();
+#endif
+ netdev_dpdk_register();
+
+ ovsthread_once_done(&once);
}
}
* main poll loop. */
void
netdev_run(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->run) {
- netdev_class->run();
+ struct netdev_registered_class *rc;
+
+ netdev_initialize();
+ ovs_mutex_lock(&netdev_class_mutex);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ if (rc->class->run) {
+ rc->class->run();
}
}
+ ovs_mutex_unlock(&netdev_class_mutex);
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
* main poll loop. */
void
netdev_wait(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
- struct shash_node *node;
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- if (netdev_class->wait) {
- netdev_class->wait();
+ struct netdev_registered_class *rc;
+
+ ovs_mutex_lock(&netdev_class_mutex);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ if (rc->class->wait) {
+ rc->class->wait();
}
}
+ ovs_mutex_unlock(&netdev_class_mutex);
+}
+
+static struct netdev_registered_class *
+netdev_lookup_class(const char *type)
+ OVS_REQ_RDLOCK(netdev_class_mutex)
+{
+ struct netdev_registered_class *rc;
+
+ HMAP_FOR_EACH_WITH_HASH (rc, hmap_node, hash_string(type, 0),
+ &netdev_classes) {
+ if (!strcmp(type, rc->class->type)) {
+ return rc;
+ }
+ }
+ return NULL;
}
/* Initializes and registers a new netdev provider. After successful
* registration, new netdevs of that type can be opened using netdev_open(). */
int
netdev_register_provider(const struct netdev_class *new_class)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
- if (shash_find(&netdev_classes, new_class->type)) {
+ int error;
+
+ netdev_class_mutex_initialize();
+ ovs_mutex_lock(&netdev_class_mutex);
+ if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
new_class->type);
- return EEXIST;
- }
-
- if (new_class->init) {
- int error = new_class->init();
- if (error) {
+ error = EEXIST;
+ } else {
+ error = new_class->init ? new_class->init() : 0;
+ if (!error) {
+ struct netdev_registered_class *rc;
+
+ rc = xmalloc(sizeof *rc);
+ hmap_insert(&netdev_classes, &rc->hmap_node,
+ hash_string(new_class->type, 0));
+ rc->class = new_class;
+ rc->ref_cnt = 0;
+ } else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
- return error;
}
}
+ ovs_mutex_unlock(&netdev_class_mutex);
- shash_add(&netdev_classes, new_class->type, new_class);
-
- return 0;
+ return error;
}
/* Unregisters a netdev provider. 'type' must have been previously
* new netdevs of that type cannot be opened using netdev_open(). */
int
netdev_unregister_provider(const char *type)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
- struct shash_node *del_node, *netdev_node;
+ struct netdev_registered_class *rc;
+ int error;
+
+ netdev_initialize();
- del_node = shash_find(&netdev_classes, type);
- if (!del_node) {
+ ovs_mutex_lock(&netdev_class_mutex);
+ rc = netdev_lookup_class(type);
+ if (!rc) {
VLOG_WARN("attempted to unregister a netdev provider that is not "
"registered: %s", type);
- return EAFNOSUPPORT;
- }
-
- SHASH_FOR_EACH (netdev_node, &netdev_shash) {
- struct netdev *netdev = netdev_node->data;
- if (!strcmp(netdev->netdev_class->type, type)) {
+ error = EAFNOSUPPORT;
+ } else {
+ if (!rc->ref_cnt) {
+ hmap_remove(&netdev_classes, &rc->hmap_node);
+ free(rc);
+ error = 0;
+ } else {
VLOG_WARN("attempted to unregister in use netdev provider: %s",
type);
- return EBUSY;
+ error = EBUSY;
}
}
+ ovs_mutex_unlock(&netdev_class_mutex);
- shash_delete(&netdev_classes, del_node);
-
- return 0;
-}
-
-const struct netdev_class *
-netdev_lookup_provider(const char *type)
-{
- netdev_initialize();
- return shash_find_data(&netdev_classes, type && type[0] ? type : "system");
+ return error;
}
/* Clears 'types' and enumerates the types of all currently registered netdev
* providers into it. The caller must first initialize the sset. */
void
netdev_enumerate_types(struct sset *types)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
sset_clear(types);
- SHASH_FOR_EACH(node, &netdev_classes) {
- const struct netdev_class *netdev_class = node->data;
- sset_add(types, netdev_class->type);
+ ovs_mutex_lock(&netdev_class_mutex);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ sset_add(types, rc->class->type);
}
+ ovs_mutex_unlock(&netdev_class_mutex);
}
/* Check that the network device name is not the same as any of the registered
* Returns true if there is a name conflict, false otherwise. */
bool
netdev_is_reserved_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
- struct shash_node *node;
+ struct netdev_registered_class *rc;
netdev_initialize();
- SHASH_FOR_EACH (node, &netdev_classes) {
- const char *dpif_port;
- dpif_port = netdev_vport_class_get_dpif_port(node->data);
- if (dpif_port && !strcmp(dpif_port, name)) {
+
+ ovs_mutex_lock(&netdev_class_mutex);
+ HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
+ const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
+ if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
+ ovs_mutex_unlock(&netdev_class_mutex);
return true;
}
}
+ ovs_mutex_unlock(&netdev_class_mutex);
if (!strncmp(name, "ovs-", 4)) {
struct sset types;
* before they can be used. */
int
netdev_open(const char *name, const char *type, struct netdev **netdevp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev;
int error;
- *netdevp = NULL;
netdev_initialize();
+ ovs_mutex_lock(&netdev_class_mutex);
+ ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (!netdev) {
- const struct netdev_class *class;
-
- class = netdev_lookup_provider(type);
- if (!class) {
+ struct netdev_registered_class *rc;
+
+ rc = netdev_lookup_class(type && type[0] ? type : "system");
+ if (rc) {
+ netdev = rc->class->alloc();
+ if (netdev) {
+ memset(netdev, 0, sizeof *netdev);
+ netdev->netdev_class = rc->class;
+ netdev->name = xstrdup(name);
+ netdev->change_seq = 1;
+ netdev->node = shash_add(&netdev_shash, name, netdev);
+
+ /* By default enable one tx and rx queue per netdev. */
+ netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
+ netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;
+ netdev->requested_n_rxq = netdev->n_rxq;
+
+ list_init(&netdev->saved_flags_list);
+
+ error = rc->class->construct(netdev);
+ if (!error) {
+ rc->ref_cnt++;
+ netdev_change_seq_changed(netdev);
+ } else {
+ free(netdev->name);
+ ovs_assert(list_is_empty(&netdev->saved_flags_list));
+ shash_delete(&netdev_shash, netdev->node);
+ rc->class->dealloc(netdev);
+ }
+ } else {
+ error = ENOMEM;
+ }
+ } else {
VLOG_WARN("could not create netdev %s of unknown type %s",
name, type);
- return EAFNOSUPPORT;
+ error = EAFNOSUPPORT;
}
- error = class->create(class, name, &netdev);
- if (error) {
- return error;
- }
- ovs_assert(netdev->netdev_class == class);
+ } else {
+ error = 0;
+ }
+ if (!error) {
+ netdev->ref_cnt++;
+ *netdevp = netdev;
+ } else {
+ *netdevp = NULL;
}
- netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
+ ovs_mutex_unlock(&netdev_class_mutex);
- *netdevp = netdev;
- return 0;
+ return error;
}
/* Returns a reference to 'netdev_' for the caller to own. Returns null if
* 'netdev_' is null. */
struct netdev *
netdev_ref(const struct netdev *netdev_)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev *netdev = CONST_CAST(struct netdev *, netdev_);
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
ovs_assert(netdev->ref_cnt > 0);
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
return netdev;
}
/* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
* or NULL if none are needed. */
int
-netdev_set_config(struct netdev *netdev, const struct smap *args)
+netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->set_config) {
- struct smap no_args = SMAP_INITIALIZER(&no_args);
- return netdev->netdev_class->set_config(netdev,
- args ? args : &no_args);
+ const struct smap no_args = SMAP_INITIALIZER(&no_args);
+ int error;
+
+ error = netdev->netdev_class->set_config(netdev,
+ args ? args : &no_args);
+ if (error) {
+ VLOG_WARN_BUF(errp, "%s: could not set configuration (%s)",
+ netdev_get_name(netdev), ovs_strerror(error));
+ }
+ return error;
} else if (args && !smap_is_empty(args)) {
- VLOG_WARN("%s: arguments provided to device that is not configurable",
- netdev_get_name(netdev));
+ VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not configurable",
+ netdev_get_name(netdev));
}
-
return 0;
}
* smap_destroy(). */
int
netdev_get_config(const struct netdev *netdev, struct smap *args)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
const struct netdev_tunnel_config *
netdev_get_tunnel_config(const struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->get_tunnel_config) {
return netdev->netdev_class->get_tunnel_config(netdev);
}
}
+/* Returns the id of the numa node the 'netdev' is on. If the function
+ * is not implemented, returns NETDEV_NUMA_UNSPEC. */
+int
+netdev_get_numa_id(const struct netdev *netdev)
+{
+ if (netdev->netdev_class->get_numa_id) {
+ return netdev->netdev_class->get_numa_id(netdev);
+ } else {
+ return NETDEV_NUMA_UNSPEC;
+ }
+}
+
static void
netdev_unref(struct netdev *dev)
+ OVS_RELEASES(netdev_mutex)
{
ovs_assert(dev->ref_cnt);
if (!--dev->ref_cnt) {
- netdev_uninit(dev, true);
+ const struct netdev_class *class = dev->netdev_class;
+ struct netdev_registered_class *rc;
+
+ dev->netdev_class->destruct(dev);
+
+ if (dev->node) {
+ shash_delete(&netdev_shash, dev->node);
+ }
+ free(dev->name);
+ dev->netdev_class->dealloc(dev);
+ ovs_mutex_unlock(&netdev_mutex);
+
+ ovs_mutex_lock(&netdev_class_mutex);
+ rc = netdev_lookup_class(class->type);
+ ovs_assert(rc->ref_cnt > 0);
+ rc->ref_cnt--;
+ ovs_mutex_unlock(&netdev_class_mutex);
+ } else {
+ ovs_mutex_unlock(&netdev_mutex);
}
}
/* Closes and destroys 'netdev'. */
void
netdev_close(struct netdev *netdev)
+ OVS_EXCLUDED(netdev_mutex)
{
if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
+ netdev_unref(netdev);
+ }
+}
+
+/* Removes 'netdev' from the global shash and unrefs 'netdev'.
+ *
+ * This allows handler and revalidator threads to still retain references
+ * to this netdev while the main thread changes interface configuration.
+ *
+ * This function should only be called by the main thread when closing
+ * netdevs during user configuration changes. Otherwise, netdev_close should be
+ * used to close netdevs. */
+void
+netdev_remove(struct netdev *netdev)
+{
+ if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
+ if (netdev->node) {
+ shash_delete(&netdev_shash, netdev->node);
+ netdev->node = NULL;
+ netdev_change_seq_changed(netdev);
+ }
netdev_unref(netdev);
}
}
}
}
+/* Attempts to open a netdev_rxq handle for obtaining packets received on
+ * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rxq *' into
+ * '*rxp'. On failure, returns a positive errno value and stores NULL into
+ * '*rxp'.
+ *
+ * Some kinds of network devices might not support receiving packets. This
+ * function returns EOPNOTSUPP in that case. */
int
-netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
+netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
+ OVS_EXCLUDED(netdev_mutex)
{
int error;
- error = (netdev->netdev_class->rx_open
- ? netdev->netdev_class->rx_open(netdev, rxp)
- : EOPNOTSUPP);
- if (!error) {
- ovs_assert((*rxp)->netdev == netdev);
- netdev->ref_cnt++;
+ if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
+ struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
+ if (rx) {
+ rx->netdev = netdev;
+ rx->queue_id = id;
+ error = netdev->netdev_class->rxq_construct(rx);
+ if (!error) {
+ netdev_ref(netdev);
+ *rxp = rx;
+ return 0;
+ }
+ netdev->netdev_class->rxq_dealloc(rx);
+ } else {
+ error = ENOMEM;
+ }
} else {
- *rxp = NULL;
+ error = EOPNOTSUPP;
}
+
+ *rxp = NULL;
return error;
}
+/* Closes 'rx'. */
void
-netdev_rx_close(struct netdev_rx *rx)
+netdev_rxq_close(struct netdev_rxq *rx)
+ OVS_EXCLUDED(netdev_mutex)
{
if (rx) {
- struct netdev *dev = rx->netdev;
-
- rx->rx_class->destroy(rx);
- netdev_unref(dev);
+ struct netdev *netdev = rx->netdev;
+ netdev->netdev_class->rxq_destruct(rx);
+ netdev->netdev_class->rxq_dealloc(rx);
+ netdev_close(netdev);
}
}
+/* Attempts to receive batch of packets from 'rx'.
+ *
+ * Returns EAGAIN immediately if no packet is ready to be received.
+ *
+ * Returns EMSGSIZE, and discards the packet, if the received packet is longer
+ * than 'dp_packet_tailroom(buffer)'.
+ *
+ * It is advised that the tailroom of 'buffer' should be
+ * VLAN_HEADER_LEN bytes longer than the MTU to allow space for an
+ * out-of-band VLAN header to be added to the packet. At the very least,
+ * 'buffer' must have at least ETH_TOTAL_MIN bytes of tailroom.
+ *
+ * This function may be set to null if it would always return EOPNOTSUPP
+ * anyhow. */
int
-netdev_rx_recv(struct netdev_rx *rx, struct ofpbuf *buffer)
+netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet **buffers, int *cnt)
{
int retval;
- ovs_assert(buffer->size == 0);
- ovs_assert(ofpbuf_tailroom(buffer) >= ETH_TOTAL_MIN);
-
- retval = rx->rx_class->recv(rx, buffer->data, ofpbuf_tailroom(buffer));
- if (retval >= 0) {
+ retval = rx->netdev->netdev_class->rxq_recv(rx, buffers, cnt);
+ if (!retval) {
COVERAGE_INC(netdev_received);
- buffer->size += retval;
- if (buffer->size < ETH_TOTAL_MIN) {
- ofpbuf_put_zeros(buffer, ETH_TOTAL_MIN - buffer->size);
- }
- return 0;
- } else {
- return -retval;
}
+ return retval;
}
+/* Arranges for poll_block() to wake up when a packet is ready to be received
+ * on 'rx'. */
void
-netdev_rx_wait(struct netdev_rx *rx)
+netdev_rxq_wait(struct netdev_rxq *rx)
{
- rx->rx_class->wait(rx);
+ rx->netdev->netdev_class->rxq_wait(rx);
}
+/* Discards any packets ready to be received on 'rx'. */
int
-netdev_rx_drain(struct netdev_rx *rx)
+netdev_rxq_drain(struct netdev_rxq *rx)
{
- return rx->rx_class->drain ? rx->rx_class->drain(rx) : 0;
+ return (rx->netdev->netdev_class->rxq_drain
+ ? rx->netdev->netdev_class->rxq_drain(rx)
+ : 0);
}
-/* Sends 'buffer' on 'netdev'. Returns 0 if successful, otherwise a positive
- * errno value. Returns EAGAIN without blocking if the packet cannot be queued
- * immediately. Returns EMSGSIZE if a partial packet was transmitted or if
- * the packet is too big or too small to transmit on the device.
+/* Configures the number of tx queues and rx queues of 'netdev'.
+ * Return 0 if successful, otherwise a positive errno value.
*
- * The caller retains ownership of 'buffer' in all cases.
+ * 'n_rxq' specifies the maximum number of receive queues to create.
+ * The netdev provider might choose to create fewer (e.g. if the hardware
+ * supports only a smaller number). The caller can check how many have been
+ * actually created by calling 'netdev_n_rxq()'
*
- * The kernel maintains a packet transmission queue, so the caller is not
- * expected to do additional queuing of packets.
+ * 'n_txq' specifies the exact number of transmission queues to create.
+ * If this function returns successfully, the caller can make 'n_txq'
+ * concurrent calls to netdev_send() (each one with a different 'qid' in the
+ * range [0..'n_txq'-1]).
*
- * Some network devices may not implement support for this function. In such
- * cases this function will always return EOPNOTSUPP. */
+ * On error, the tx queue and rx queue configuration is indeterminate.
+ * Caller should make decision on whether to restore the previous or
+ * the default configuration. Also, caller must make sure there is no
+ * other thread accessing the queues at the same time. */
int
-netdev_send(struct netdev *netdev, const struct ofpbuf *buffer)
+netdev_set_multiq(struct netdev *netdev, unsigned int n_txq,
+ unsigned int n_rxq)
{
int error;
- error = (netdev->netdev_class->send
- ? netdev->netdev_class->send(netdev, buffer->data, buffer->size)
+ error = (netdev->netdev_class->set_multiq
+ ? netdev->netdev_class->set_multiq(netdev,
+ MAX(n_txq, 1),
+ MAX(n_rxq, 1))
: EOPNOTSUPP);
+
+ if (error && error != EOPNOTSUPP) {
+ VLOG_DBG_RL(&rl, "failed to set tx/rx queue for network device %s:"
+ "%s", netdev_get_name(netdev), ovs_strerror(error));
+ }
+
+ return error;
+}
+
+/* Sends 'buffers' on 'netdev'. Returns 0 if successful (for every packet),
+ * otherwise a positive errno value. Returns EAGAIN without blocking if
+ * at least one of the packets cannot be queued immediately. Returns EMSGSIZE
+ * if a partial packet was transmitted or if a packet is too big or too small
+ * to transmit on the device.
+ *
+ * If the function returns a non-zero value, some of the packets might have
+ * been sent anyway.
+ *
+ * If 'may_steal' is false, the caller retains ownership of all the packets.
+ * If 'may_steal' is true, the caller transfers ownership of all the packets
+ * to the network device, regardless of success.
+ *
+ * The network device is expected to maintain one or more packet
+ * transmission queues, so that the caller does not ordinarily have to
+ * do additional queuing of packets. 'qid' specifies the queue to use
+ * and can be ignored if the implementation does not support multiple
+ * queues.
+ *
+ * Some network devices may not implement support for this function. In such
+ * cases this function will always return EOPNOTSUPP. */
+int
+netdev_send(struct netdev *netdev, int qid, struct dp_packet **buffers,
+ int cnt, bool may_steal)
+{
+ if (!netdev->netdev_class->send) {
+ if (may_steal) {
+ for (int i = 0; i < cnt; i++) {
+ dp_packet_delete(buffers[i]);
+ }
+ }
+ return EOPNOTSUPP;
+ }
+
+ int error = netdev->netdev_class->send(netdev, qid, buffers, cnt,
+ may_steal);
if (!error) {
COVERAGE_INC(netdev_sent);
}
return error;
}
+int
+netdev_pop_header(struct netdev *netdev, struct dp_packet **buffers, int cnt)
+{
+ int i;
+
+ if (!netdev->netdev_class->pop_header) {
+ return EOPNOTSUPP;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ int err;
+
+ err = netdev->netdev_class->pop_header(buffers[i]);
+ if (err) {
+ dp_packet_clear(buffers[i]);
+ }
+ }
+
+ return 0;
+}
+
+int
+netdev_build_header(const struct netdev *netdev, struct ovs_action_push_tnl *data,
+ const struct flow *tnl_flow)
+{
+ if (netdev->netdev_class->build_header) {
+ return netdev->netdev_class->build_header(netdev, data, tnl_flow);
+ }
+ return EOPNOTSUPP;
+}
+
+int
+netdev_push_header(const struct netdev *netdev,
+ struct dp_packet **buffers, int cnt,
+ const struct ovs_action_push_tnl *data)
+{
+ int i;
+
+ if (!netdev->netdev_class->push_header) {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ netdev->netdev_class->push_header(buffers[i], data);
+ pkt_metadata_init(&buffers[i]->md, u32_to_odp(data->out_port));
+ }
+
+ return 0;
+}
+
/* Registers with the poll loop to wake up from the next call to poll_block()
* when the packet transmission queue has sufficient room to transmit a packet
* with netdev_send().
*
- * The kernel maintains a packet transmission queue, so the client is not
- * expected to do additional queuing of packets. Thus, this function is
- * unlikely to ever be used. It is included for completeness. */
+ * The network device is expected to maintain one or more packet
+ * transmission queues, so that the caller does not ordinarily have to
+ * do additional queuing of packets. 'qid' specifies the queue to use
+ * and can be ignored if the implementation does not support multiple
+ * queues. */
void
-netdev_send_wait(struct netdev *netdev)
+netdev_send_wait(struct netdev *netdev, int qid)
{
if (netdev->netdev_class->send_wait) {
- netdev->netdev_class->send_wait(netdev);
+ netdev->netdev_class->send_wait(netdev, qid);
}
}
/* Attempts to set 'netdev''s MAC address to 'mac'. Returns 0 if successful,
* otherwise a positive errno value. */
int
-netdev_set_etheraddr(struct netdev *netdev, const uint8_t mac[ETH_ADDR_LEN])
+netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
return netdev->netdev_class->set_etheraddr(netdev, mac);
}
* the MAC address into 'mac'. On failure, returns a positive errno value and
* clears 'mac' to all-zeros. */
int
-netdev_get_etheraddr(const struct netdev *netdev, uint8_t mac[ETH_ADDR_LEN])
+netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
return netdev->netdev_class->get_etheraddr(netdev, mac);
}
do_update_flags(struct netdev *netdev, enum netdev_flags off,
enum netdev_flags on, enum netdev_flags *old_flagsp,
struct netdev_saved_flags **sfp)
+ OVS_EXCLUDED(netdev_mutex)
{
struct netdev_saved_flags *sf = NULL;
enum netdev_flags old_flags;
enum netdev_flags new_flags = (old_flags & ~off) | on;
enum netdev_flags changed_flags = old_flags ^ new_flags;
if (changed_flags) {
+ ovs_mutex_lock(&netdev_mutex);
*sfp = sf = xmalloc(sizeof *sf);
sf->netdev = netdev;
list_push_front(&netdev->saved_flags_list, &sf->node);
sf->saved_values = changed_flags & new_flags;
netdev->ref_cnt++;
+ ovs_mutex_unlock(&netdev_mutex);
}
}
* Does nothing if 'sf' is NULL. */
void
netdev_restore_flags(struct netdev_saved_flags *sf)
+ OVS_EXCLUDED(netdev_mutex)
{
if (sf) {
struct netdev *netdev = sf->netdev;
sf->saved_flags & sf->saved_values,
sf->saved_flags & ~sf->saved_values,
&old_flags);
+
+ ovs_mutex_lock(&netdev_mutex);
list_remove(&sf->node);
free(sf);
-
netdev_unref(netdev);
}
}
* ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
int
netdev_arp_lookup(const struct netdev *netdev,
- ovs_be32 ip, uint8_t mac[ETH_ADDR_LEN])
+ ovs_be32 ip, struct eth_addr *mac)
{
int error = (netdev->netdev_class->arp_lookup
? netdev->netdev_class->arp_lookup(netdev, ip, mac)
: EOPNOTSUPP);
if (error) {
- memset(mac, 0, ETH_ADDR_LEN);
+ *mac = eth_addr_zero;
}
return error;
}
return error;
}
-/* Attempts to change the stats for 'netdev' to those provided in 'stats'.
- * Returns 0 if successful, otherwise a positive errno value.
- *
- * This will probably fail for most network devices. Some devices might only
- * allow setting their stats to 0. */
-int
-netdev_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
-{
- return (netdev->netdev_class->set_stats
- ? netdev->netdev_class->set_stats(netdev, stats)
- : EOPNOTSUPP);
-}
-
/* Attempts to set input rate limiting (policing) policy, such that up to
* 'kbits_rate' kbps of traffic is accepted, with a maximum accumulative burst
* size of 'kbits' kb. */
return retval;
}
-/* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
- * its configuration, and the 'aux' specified by the caller. The order of
- * iteration is unspecified, but (when successful) each queue is visited
- * exactly once.
+/* Initializes 'dump' to begin dumping the queues in a netdev.
*
- * Calling this function may be more efficient than calling netdev_get_queue()
- * for every queue.
+ * This function provides no status indication. An error status for the entire
+ * dump operation is provided when it is completed by calling
+ * netdev_queue_dump_done().
+ */
+void
+netdev_queue_dump_start(struct netdev_queue_dump *dump,
+ const struct netdev *netdev)
+{
+ dump->netdev = netdev_ref(netdev);
+ if (netdev->netdev_class->queue_dump_start) {
+ dump->error = netdev->netdev_class->queue_dump_start(netdev,
+ &dump->state);
+ } else {
+ dump->error = EOPNOTSUPP;
+ }
+}
+
+/* Attempts to retrieve another queue from 'dump', which must have been
+ * initialized with netdev_queue_dump_start(). On success, stores a new queue
+ * ID into '*queue_id', fills 'details' with configuration details for the
+ * queue, and returns true. On failure, returns false.
*
- * 'cb' must not modify or free the 'details' argument passed in. It may
- * delete or modify the queue passed in as its 'queue_id' argument. It may
- * modify but must not delete any other queue within 'netdev'. 'cb' should not
- * add new queues because this may cause some queues to be visited twice or not
- * at all.
+ * Queues are not necessarily dumped in increasing order of queue ID (or any
+ * other predictable order).
*
- * Returns 0 if successful, otherwise a positive errno value. On error, some
- * configured queues may not have been included in the iteration. */
+ * Failure might indicate an actual error or merely that the last queue has
+ * been dumped. An error status for the entire dump operation is provided when
+ * it is completed by calling netdev_queue_dump_done().
+ *
+ * The returned contents of 'details' should be documented as valid for the
+ * given 'type' in the "other_config" column in the "Queue" table in
+ * vswitchd/vswitch.xml (which is built as ovs-vswitchd.conf.db(8)).
+ *
+ * The caller must initialize 'details' (e.g. with smap_init()) before calling
+ * this function. This function will clear and replace its contents. The
+ * caller must free 'details' when it is no longer needed (e.g. with
+ * smap_destroy()). */
+bool
+netdev_queue_dump_next(struct netdev_queue_dump *dump,
+ unsigned int *queue_id, struct smap *details)
+{
+ const struct netdev *netdev = dump->netdev;
+
+ if (dump->error) {
+ return false;
+ }
+
+ dump->error = netdev->netdev_class->queue_dump_next(netdev, dump->state,
+ queue_id, details);
+
+ if (dump->error) {
+ netdev->netdev_class->queue_dump_done(netdev, dump->state);
+ return false;
+ }
+ return true;
+}
+
+/* Completes queue table dump operation 'dump', which must have been
+ * initialized with netdev_queue_dump_start(). Returns 0 if the dump operation
+ * was error-free, otherwise a positive errno value describing the problem. */
int
-netdev_dump_queues(const struct netdev *netdev,
- netdev_dump_queues_cb *cb, void *aux)
+netdev_queue_dump_done(struct netdev_queue_dump *dump)
{
- const struct netdev_class *class = netdev->netdev_class;
- return (class->dump_queues
- ? class->dump_queues(netdev, cb, aux)
- : EOPNOTSUPP);
+ const struct netdev *netdev = dump->netdev;
+ if (!dump->error && netdev->netdev_class->queue_dump_done) {
+ dump->error = netdev->netdev_class->queue_dump_done(netdev,
+ dump->state);
+ }
+ netdev_close(dump->netdev);
+ return dump->error == EOF ? 0 : dump->error;
}
/* Iterates over all of 'netdev''s queues, calling 'cb' with the queue's ID,
: EOPNOTSUPP);
}
-/* Returns a sequence number which indicates changes in one of 'netdev''s
- * properties. The returned sequence will be nonzero so that callers have a
- * value which they may use as a reset when tracking 'netdev'.
- *
- * The returned sequence number will change whenever 'netdev''s flags,
- * features, ethernet address, or carrier changes. It may change for other
- * reasons as well, or no reason at all. */
-unsigned int
-netdev_change_seq(const struct netdev *netdev)
-{
- return netdev->netdev_class->change_seq(netdev);
-}
\f
-/* Initializes 'netdev' as a netdev device named 'name' of the specified
- * 'netdev_class'. This function is ordinarily called from a netdev provider's
- * 'create' function.
- *
- * This function adds 'netdev' to a netdev-owned shash, so it is very important
- * that 'netdev' only be freed after calling netdev_uninit(). */
-void
-netdev_init(struct netdev *netdev, const char *name,
- const struct netdev_class *netdev_class)
-{
- ovs_assert(!shash_find(&netdev_shash, name));
-
- memset(netdev, 0, sizeof *netdev);
- netdev->netdev_class = netdev_class;
- netdev->name = xstrdup(name);
- netdev->node = shash_add(&netdev_shash, name, netdev);
- list_init(&netdev->saved_flags_list);
-}
-
-/* Undoes the results of initialization.
- *
- * Normally this function does not need to be called as netdev_close has
- * the same effect when the refcount drops to zero.
- * However, it may be called by providers due to an error on creation
- * that occurs after initialization. It this case netdev_close() would
- * never be called. */
-void
-netdev_uninit(struct netdev *netdev, bool destroy)
-{
- char *name = netdev->name;
-
- ovs_assert(!netdev->ref_cnt);
- ovs_assert(list_is_empty(&netdev->saved_flags_list));
-
- shash_delete(&netdev_shash, netdev->node);
-
- if (destroy) {
- netdev->netdev_class->destroy(netdev);
- }
- free(name);
-}
-
/* Returns the class type of 'netdev'.
*
* The caller must not free the returned value. */
/* Returns the netdev with 'name' or NULL if there is none.
*
- * The caller must not free the returned value. */
+ * The caller must free the returned netdev with netdev_close(). */
struct netdev *
netdev_from_name(const char *name)
+ OVS_EXCLUDED(netdev_mutex)
{
- return shash_find_data(&netdev_shash, name);
+ struct netdev *netdev;
+
+ ovs_mutex_lock(&netdev_mutex);
+ netdev = shash_find_data(&netdev_shash, name);
+ if (netdev) {
+ netdev->ref_cnt++;
+ }
+ ovs_mutex_unlock(&netdev_mutex);
+
+ return netdev;
}
/* Fills 'device_list' with devices that match 'netdev_class'.
*
- * The caller is responsible for initializing and destroying 'device_list'
- * but the contained netdevs must not be freed. */
+ * The caller is responsible for initializing and destroying 'device_list' and
+ * must close each device on the list. */
void
netdev_get_devices(const struct netdev_class *netdev_class,
struct shash *device_list)
+ OVS_EXCLUDED(netdev_mutex)
{
struct shash_node *node;
+
+ ovs_mutex_lock(&netdev_mutex);
SHASH_FOR_EACH (node, &netdev_shash) {
struct netdev *dev = node->data;
if (dev->netdev_class == netdev_class) {
+ dev->ref_cnt++;
shash_add(device_list, node->name, node->data);
}
}
+ ovs_mutex_unlock(&netdev_mutex);
}
-const char *
-netdev_get_type_from_name(const char *name)
-{
- const struct netdev *dev = netdev_from_name(name);
- return dev ? netdev_get_type(dev) : NULL;
-}
-\f
-void
-netdev_rx_init(struct netdev_rx *rx, struct netdev *netdev,
- const struct netdev_rx_class *class)
+/* Extracts pointers to all 'netdev-vports' into an array 'vports'
+ * and returns it. Stores the size of the array into '*size'.
+ *
+ * The caller is responsible for freeing 'vports' and must close
+ * each 'netdev-vport' in the list. */
+struct netdev **
+netdev_get_vports(size_t *size)
+ OVS_EXCLUDED(netdev_mutex)
{
- ovs_assert(netdev->ref_cnt > 0);
- rx->rx_class = class;
- rx->netdev = netdev;
+ struct netdev **vports;
+ struct shash_node *node;
+ size_t n = 0;
+
+ if (!size) {
+ return NULL;
+ }
+
+ /* Explicitly allocates big enough chunk of memory. */
+ vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
+ ovs_mutex_lock(&netdev_mutex);
+ SHASH_FOR_EACH (node, &netdev_shash) {
+ struct netdev *dev = node->data;
+
+ if (netdev_vport_is_vport_class(dev->netdev_class)) {
+ dev->ref_cnt++;
+ vports[n] = dev;
+ n++;
+ }
+ }
+ ovs_mutex_unlock(&netdev_mutex);
+ *size = n;
+
+ return vports;
}
-void
-netdev_rx_uninit(struct netdev_rx *rx OVS_UNUSED)
+const char *
+netdev_get_type_from_name(const char *name)
{
- /* Nothing to do. */
+ struct netdev *dev = netdev_from_name(name);
+ const char *type = dev ? netdev_get_type(dev) : NULL;
+ netdev_close(dev);
+ return type;
}
-
+\f
struct netdev *
-netdev_rx_get_netdev(const struct netdev_rx *rx)
+netdev_rxq_get_netdev(const struct netdev_rxq *rx)
{
ovs_assert(rx->netdev->ref_cnt > 0);
return rx->netdev;
}
const char *
-netdev_rx_get_name(const struct netdev_rx *rx)
+netdev_rxq_get_name(const struct netdev_rxq *rx)
{
- return netdev_get_name(netdev_rx_get_netdev(rx));
+ return netdev_get_name(netdev_rxq_get_netdev(rx));
+}
+
+int
+netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
+{
+ return rx->queue_id;
}
static void
}
}
}
+
+uint64_t
+netdev_get_change_seq(const struct netdev *netdev)
+{
+ return netdev->change_seq;
+}