/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <string.h>
#include <unistd.h>
-#include "connectivity.h"
#include "coverage.h"
#include "dpif.h"
+#include "dp-packet.h"
#include "dynamic-string.h"
#include "fatal-signal.h"
#include "hash.h"
#include "list.h"
+#include "netdev-dpdk.h"
#include "netdev-provider.h"
#include "netdev-vport.h"
-#include "ofpbuf.h"
+#include "odp-netlink.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "poll-loop.h"
#include "smap.h"
#include "sset.h"
#include "svec.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
+#include "flow.h"
VLOG_DEFINE_THIS_MODULE(netdev);
struct netdev_saved_flags {
struct netdev *netdev;
- struct list node; /* In struct netdev's saved_flags_list. */
+ struct ovs_list node; /* In struct netdev's saved_flags_list. */
enum netdev_flags saved_flags;
enum netdev_flags saved_values;
};
/* Protects 'netdev_classes' against insertions or deletions.
*
- * This is not an rwlock for performance reasons but to allow recursive
- * acquisition when calling into providers. For example, netdev_run() calls
- * into provider 'run' functions, which might reasonably want to call one of
- * the netdev functions that takes netdev_class_rwlock read-only. */
-static struct ovs_rwlock netdev_class_rwlock OVS_ACQ_BEFORE(netdev_mutex)
- = OVS_RWLOCK_INITIALIZER;
+ * This is a recursive mutex to allow recursive acquisition when calling into
+ * providers. For example, netdev_run() calls into provider 'run' functions,
+ * which might reasonably want to call one of the netdev functions that takes
+ * netdev_class_mutex. */
+static struct ovs_mutex netdev_class_mutex OVS_ACQ_BEFORE(netdev_mutex);
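+
+/* A minimal sketch of the recursion described above: a hypothetical provider
+ * 'run' callback ('example_provider_run' is illustrative, not a real
+ * provider) runs with netdev_class_mutex held by netdev_run() and re-enters
+ * the netdev layer through a function that takes the same mutex. */
+static void
+example_provider_run(void)
+{
+    struct sset types;
+
+    sset_init(&types);
+    netdev_enumerate_types(&types);   /* Re-acquires netdev_class_mutex. */
+    sset_destroy(&types);
+}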
/* Contains 'struct netdev_registered_class'es. */
-static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_rwlock)
+static struct hmap netdev_classes OVS_GUARDED_BY(netdev_class_mutex)
= HMAP_INITIALIZER(&netdev_classes);
struct netdev_registered_class {
- struct hmap_node hmap_node; /* In 'netdev_classes', by class->type. */
- const struct netdev_class *class;
- atomic_int ref_cnt; /* Number of 'struct netdev's of this class. */
+ /* In 'netdev_classes', by class->type. */
+ struct hmap_node hmap_node OVS_GUARDED_BY(netdev_class_mutex);
+ const struct netdev_class *class OVS_GUARDED_BY(netdev_class_mutex);
+ /* Number of 'struct netdev's of this class. */
+ int ref_cnt OVS_GUARDED_BY(netdev_class_mutex);
};
/* This is set pretty low because we probably won't learn anything from the
 * additional log messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
static void restore_all_flags(void *aux OVS_UNUSED);
void update_device_args(struct netdev *, const struct shash *args);
+int
+netdev_n_txq(const struct netdev *netdev)
+{
+ return netdev->n_txq;
+}
+
+int
+netdev_n_rxq(const struct netdev *netdev)
+{
+ return netdev->n_rxq;
+}
+
+int
+netdev_requested_n_rxq(const struct netdev *netdev)
+{
+ return netdev->requested_n_rxq;
+}
+
+bool
+netdev_is_pmd(const struct netdev *netdev)
+{
+ return (!strcmp(netdev->netdev_class->type, "dpdk") ||
+ !strcmp(netdev->netdev_class->type, "dpdkr") ||
+ !strcmp(netdev->netdev_class->type, "dpdkvhostcuse") ||
+ !strcmp(netdev->netdev_class->type, "dpdkvhostuser"));
+}
+
+static void
+netdev_class_mutex_initialize(void)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+
+ if (ovsthread_once_start(&once)) {
+ ovs_mutex_init_recursive(&netdev_class_mutex);
+ ovsthread_once_done(&once);
+ }
+}
+
static void
netdev_initialize(void)
- OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
if (ovsthread_once_start(&once)) {
+ netdev_class_mutex_initialize();
+
fatal_signal_add_hook(restore_all_flags, NULL, NULL, true);
netdev_vport_patch_register();
-#ifdef LINUX_DATAPATH
+#ifdef __linux__
netdev_register_provider(&netdev_linux_class);
netdev_register_provider(&netdev_internal_class);
netdev_register_provider(&netdev_tap_class);
netdev_vport_tunnel_register();
#endif
#if defined(__FreeBSD__) || defined(__NetBSD__)
netdev_register_provider(&netdev_tap_class);
netdev_register_provider(&netdev_bsd_class);
#endif
+#ifdef _WIN32
+ netdev_register_provider(&netdev_windows_class);
+ netdev_register_provider(&netdev_internal_class);
+ netdev_vport_tunnel_register();
+#endif
+ netdev_dpdk_register();
ovsthread_once_done(&once);
}
/* Performs periodic work needed by all the various kinds of netdevs.
 *
 * If your program opens any netdevs, it must call this function within its
 * main poll loop. */
void
netdev_run(void)
- OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
struct netdev_registered_class *rc;
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ netdev_initialize();
+ ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
if (rc->class->run) {
rc->class->run();
}
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
}
/* Arranges for poll_block() to wake up when netdev_run() needs to be called.
 *
 * If your program opens any netdevs, it must call this function within its
 * main poll loop. */
void
netdev_wait(void)
- OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
struct netdev_registered_class *rc;
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
if (rc->class->wait) {
rc->class->wait();
}
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
}
static struct netdev_registered_class *
netdev_lookup_class(const char *type)
- OVS_REQ_RDLOCK(netdev_class_rwlock)
+ OVS_REQ_RDLOCK(netdev_class_mutex)
{
struct netdev_registered_class *rc;
/* Initializes and registers a new netdev provider.  After successful
 * registration, new netdevs of that type can be opened using netdev_open(). */
int
netdev_register_provider(const struct netdev_class *new_class)
- OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
int error;
- ovs_rwlock_wrlock(&netdev_class_rwlock);
+ netdev_class_mutex_initialize();
+ ovs_mutex_lock(&netdev_class_mutex);
if (netdev_lookup_class(new_class->type)) {
VLOG_WARN("attempted to register duplicate netdev provider: %s",
new_class->type);
hmap_insert(&netdev_classes, &rc->hmap_node,
hash_string(new_class->type, 0));
rc->class = new_class;
- atomic_init(&rc->ref_cnt, 0);
+ rc->ref_cnt = 0;
} else {
VLOG_ERR("failed to initialize %s network device class: %s",
new_class->type, ovs_strerror(error));
}
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
return error;
}
/* Unregisters a netdev provider.  'type' must have been previously
 * registered and not currently be in use by any netdevs.  After unregistration
 * new netdevs of that type cannot be opened using netdev_open(). */
int
netdev_unregister_provider(const char *type)
- OVS_EXCLUDED(netdev_class_rwlock, netdev_mutex)
+ OVS_EXCLUDED(netdev_class_mutex, netdev_mutex)
{
struct netdev_registered_class *rc;
int error;
- ovs_rwlock_wrlock(&netdev_class_rwlock);
+ netdev_initialize();
+
+ ovs_mutex_lock(&netdev_class_mutex);
rc = netdev_lookup_class(type);
if (!rc) {
VLOG_WARN("attempted to unregister a netdev provider that is not "
"registered: %s", type);
error = EAFNOSUPPORT;
} else {
- int ref_cnt;
-
- atomic_read(&rc->ref_cnt, &ref_cnt);
- if (!ref_cnt) {
+ if (!rc->ref_cnt) {
hmap_remove(&netdev_classes, &rc->hmap_node);
- atomic_destroy(&rc->ref_cnt);
free(rc);
error = 0;
} else {
error = EBUSY;
}
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
return error;
}
netdev_initialize();
sset_clear(types);
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
sset_add(types, rc->class->type);
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
}
/* Check that the network device name is not the same as any of the registered
netdev_initialize();
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_class_mutex);
HMAP_FOR_EACH (rc, hmap_node, &netdev_classes) {
const char *dpif_port = netdev_vport_class_get_dpif_port(rc->class);
- if (dpif_port && !strcmp(dpif_port, name)) {
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ if (dpif_port && !strncmp(name, dpif_port, strlen(dpif_port))) {
+ ovs_mutex_unlock(&netdev_class_mutex);
return true;
}
}
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_mutex_unlock(&netdev_class_mutex);
if (!strncmp(name, "ovs-", 4)) {
struct sset types;
netdev_initialize();
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_class_mutex);
ovs_mutex_lock(&netdev_mutex);
netdev = shash_find_data(&netdev_shash, name);
if (!netdev) {
memset(netdev, 0, sizeof *netdev);
netdev->netdev_class = rc->class;
netdev->name = xstrdup(name);
+ netdev->change_seq = 1;
netdev->node = shash_add(&netdev_shash, name, netdev);
+
+ /* By default enable one tx and rx queue per netdev. */
+ netdev->n_txq = netdev->netdev_class->send ? 1 : 0;
+ netdev->n_rxq = netdev->netdev_class->rxq_alloc ? 1 : 0;
+ netdev->requested_n_rxq = netdev->n_rxq;
+
list_init(&netdev->saved_flags_list);
error = rc->class->construct(netdev);
if (!error) {
- int old_ref_cnt;
-
- atomic_add(&rc->ref_cnt, 1, &old_ref_cnt);
- seq_change(connectivity_seq_get());
+ rc->ref_cnt++;
+ netdev_change_seq_changed(netdev);
} else {
free(netdev->name);
ovs_assert(list_is_empty(&netdev->saved_flags_list));
error = 0;
}
- ovs_mutex_unlock(&netdev_mutex);
- ovs_rwlock_unlock(&netdev_class_rwlock);
-
if (!error) {
netdev->ref_cnt++;
*netdevp = netdev;
} else {
*netdevp = NULL;
}
+ ovs_mutex_unlock(&netdev_mutex);
+ ovs_mutex_unlock(&netdev_class_mutex);
+
return error;
}
/* Reconfigures the device 'netdev' with 'args'. 'args' may be empty
* or NULL if none are needed. */
int
-netdev_set_config(struct netdev *netdev, const struct smap *args)
+netdev_set_config(struct netdev *netdev, const struct smap *args, char **errp)
OVS_EXCLUDED(netdev_mutex)
{
if (netdev->netdev_class->set_config) {
error = netdev->netdev_class->set_config(netdev,
args ? args : &no_args);
if (error) {
- VLOG_WARN("%s: could not set configuration (%s)",
- netdev_get_name(netdev), ovs_strerror(error));
+ VLOG_WARN_BUF(errp, "%s: could not set configuration (%s)",
+ netdev_get_name(netdev), ovs_strerror(error));
}
return error;
} else if (args && !smap_is_empty(args)) {
- VLOG_WARN("%s: arguments provided to device that is not configurable",
- netdev_get_name(netdev));
+        VLOG_WARN_BUF(errp, "%s: arguments provided to device that is not "
+                      "configurable", netdev_get_name(netdev));
}
return 0;
}
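+
+/* A minimal sketch of calling netdev_set_config() with the new 'errp'
+ * out-parameter; the option name below is hypothetical.  On failure, '*errp'
+ * holds a malloc()'d copy of the logged warning, which the caller frees. */
+static int
+example_set_config(struct netdev *netdev)
+{
+    struct smap args;
+    char *errp = NULL;
+    int error;
+
+    smap_init(&args);
+    smap_add(&args, "remote_ip", "192.168.0.2");  /* Hypothetical option. */
+    error = netdev_set_config(netdev, &args, &errp);
+    if (error) {
+        free(errp);                   /* Error text, allocated for us. */
+    }
+    smap_destroy(&args);
+    return error;
+}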
}
}
+/* Returns the ID of the NUMA node that 'netdev' is on.  If the provider
+ * does not implement this function, returns NETDEV_NUMA_UNSPEC. */
+int
+netdev_get_numa_id(const struct netdev *netdev)
+{
+ if (netdev->netdev_class->get_numa_id) {
+ return netdev->netdev_class->get_numa_id(netdev);
+ } else {
+ return NETDEV_NUMA_UNSPEC;
+ }
+}
+
static void
netdev_unref(struct netdev *dev)
OVS_RELEASES(netdev_mutex)
if (!--dev->ref_cnt) {
const struct netdev_class *class = dev->netdev_class;
struct netdev_registered_class *rc;
- int old_ref_cnt;
dev->netdev_class->destruct(dev);
- shash_delete(&netdev_shash, dev->node);
+ if (dev->node) {
+ shash_delete(&netdev_shash, dev->node);
+ }
free(dev->name);
dev->netdev_class->dealloc(dev);
ovs_mutex_unlock(&netdev_mutex);
- ovs_rwlock_rdlock(&netdev_class_rwlock);
+ ovs_mutex_lock(&netdev_class_mutex);
rc = netdev_lookup_class(class->type);
- atomic_sub(&rc->ref_cnt, 1, &old_ref_cnt);
- ovs_assert(old_ref_cnt > 0);
- ovs_rwlock_unlock(&netdev_class_rwlock);
+ ovs_assert(rc->ref_cnt > 0);
+ rc->ref_cnt--;
+ ovs_mutex_unlock(&netdev_class_mutex);
} else {
ovs_mutex_unlock(&netdev_mutex);
}
}
}
+/* Removes 'netdev' from the global shash and unrefs 'netdev'.
+ *
+ * This allows handler and revalidator threads to still retain references
+ * to this netdev while the main thread changes interface configuration.
+ *
+ * This function should only be called by the main thread when closing
+ * netdevs during user configuration changes. Otherwise, netdev_close should be
+ * used to close netdevs. */
+void
+netdev_remove(struct netdev *netdev)
+{
+ if (netdev) {
+ ovs_mutex_lock(&netdev_mutex);
+ if (netdev->node) {
+ shash_delete(&netdev_shash, netdev->node);
+ netdev->node = NULL;
+ netdev_change_seq_changed(netdev);
+ }
+ netdev_unref(netdev);
+ }
+}
+
/* Parses 'netdev_name_', which is of the form [type@]name into its component
* pieces. 'name' and 'type' must be freed by the caller. */
void
}
}
+/* Attempts to open a netdev_rxq handle for obtaining packets received on
+ * 'netdev'. On success, returns 0 and stores a nonnull 'netdev_rxq *' into
+ * '*rxp'. On failure, returns a positive errno value and stores NULL into
+ * '*rxp'.
+ *
+ * Some kinds of network devices might not support receiving packets. This
+ * function returns EOPNOTSUPP in that case. */
int
-netdev_rx_open(struct netdev *netdev, struct netdev_rx **rxp)
+netdev_rxq_open(struct netdev *netdev, struct netdev_rxq **rxp, int id)
OVS_EXCLUDED(netdev_mutex)
{
int error;
- if (netdev->netdev_class->rx_alloc) {
- struct netdev_rx *rx = netdev->netdev_class->rx_alloc();
+ if (netdev->netdev_class->rxq_alloc && id < netdev->n_rxq) {
+ struct netdev_rxq *rx = netdev->netdev_class->rxq_alloc();
if (rx) {
rx->netdev = netdev;
- error = netdev->netdev_class->rx_construct(rx);
+ rx->queue_id = id;
+ error = netdev->netdev_class->rxq_construct(rx);
if (!error) {
- ovs_mutex_lock(&netdev_mutex);
- netdev->ref_cnt++;
- ovs_mutex_unlock(&netdev_mutex);
-
+ netdev_ref(netdev);
*rxp = rx;
return 0;
}
- netdev->netdev_class->rx_dealloc(rx);
+ netdev->netdev_class->rxq_dealloc(rx);
} else {
error = ENOMEM;
}
return error;
}
+/* Closes 'rx'. */
void
-netdev_rx_close(struct netdev_rx *rx)
+netdev_rxq_close(struct netdev_rxq *rx)
OVS_EXCLUDED(netdev_mutex)
{
if (rx) {
struct netdev *netdev = rx->netdev;
- netdev->netdev_class->rx_destruct(rx);
- netdev->netdev_class->rx_dealloc(rx);
+ netdev->netdev_class->rxq_destruct(rx);
+ netdev->netdev_class->rxq_dealloc(rx);
netdev_close(netdev);
}
}
+/* Attempts to receive a batch of packets from 'rx'. 'pkts' should point to
+ * the beginning of an array of MAX_RX_BATCH pointers to dp_packet. If
+ * successful, this function stores pointers to up to MAX_RX_BATCH dp_packets
+ * into the array, transferring ownership of the packets to the caller, stores
+ * the number of received packets into '*cnt', and returns 0.
+ *
+ * The implementation does not necessarily initialize any non-data members of
+ * 'pkts'. That is, the caller must initialize layer pointers and metadata
+ * itself, if desired, e.g. with pkt_metadata_init() and miniflow_extract().
+ *
+ * Returns EAGAIN immediately if no packet is ready to be received, or
+ * another positive errno value if an error was encountered. */
int
-netdev_rx_recv(struct netdev_rx *rx, struct ofpbuf *buffer)
+netdev_rxq_recv(struct netdev_rxq *rx, struct dp_packet **pkts, int *cnt)
{
int retval;
- ovs_assert(buffer->size == 0);
- ovs_assert(ofpbuf_tailroom(buffer) >= ETH_TOTAL_MIN);
-
- retval = rx->netdev->netdev_class->rx_recv(rx, buffer->data,
- ofpbuf_tailroom(buffer));
- if (retval >= 0) {
+ retval = rx->netdev->netdev_class->rxq_recv(rx, pkts, cnt);
+ if (!retval) {
COVERAGE_INC(netdev_received);
- buffer->size += retval;
- if (buffer->size < ETH_TOTAL_MIN) {
- ofpbuf_put_zeros(buffer, ETH_TOTAL_MIN - buffer->size);
- }
- return 0;
} else {
- return -retval;
+ *cnt = 0;
}
+ return retval;
}
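+
+/* A minimal polling sketch tying together netdev_rxq_open(),
+ * netdev_rxq_recv(), and netdev_rxq_close().  It assumes queue 0 exists and
+ * uses the MAX_RX_BATCH bound named above (the macro's actual name may
+ * differ by version).  Received packets belong to the caller and must be
+ * freed. */
+static int
+example_poll_queue_once(struct netdev *netdev)
+{
+    struct dp_packet *pkts[MAX_RX_BATCH];
+    struct netdev_rxq *rxq;
+    int cnt, error;
+
+    error = netdev_rxq_open(netdev, &rxq, 0);
+    if (!error) {
+        if (!netdev_rxq_recv(rxq, pkts, &cnt)) {
+            for (int i = 0; i < cnt; i++) {
+                dp_packet_delete(pkts[i]);   /* Ownership was transferred. */
+            }
+        }
+        netdev_rxq_close(rxq);
+    }
+    return error;
+}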
+/* Arranges for poll_block() to wake up when a packet is ready to be received
+ * on 'rx'. */
void
-netdev_rx_wait(struct netdev_rx *rx)
+netdev_rxq_wait(struct netdev_rxq *rx)
{
- rx->netdev->netdev_class->rx_wait(rx);
+ rx->netdev->netdev_class->rxq_wait(rx);
}
+/* Discards any packets ready to be received on 'rx'. */
int
-netdev_rx_drain(struct netdev_rx *rx)
+netdev_rxq_drain(struct netdev_rxq *rx)
{
- return (rx->netdev->netdev_class->rx_drain
- ? rx->netdev->netdev_class->rx_drain(rx)
+ return (rx->netdev->netdev_class->rxq_drain
+ ? rx->netdev->netdev_class->rxq_drain(rx)
: 0);
}
-/* Sends 'buffer' on 'netdev'. Returns 0 if successful, otherwise a positive
- * errno value. Returns EAGAIN without blocking if the packet cannot be queued
- * immediately. Returns EMSGSIZE if a partial packet was transmitted or if
- * the packet is too big or too small to transmit on the device.
+/* Configures the number of tx queues and rx queues of 'netdev'.
+ * Returns 0 if successful, otherwise a positive errno value.
*
- * The caller retains ownership of 'buffer' in all cases.
+ * 'n_rxq' specifies the maximum number of receive queues to create.
+ * The netdev provider might choose to create less (e.g. if the hardware
+ * supports only a smaller number). The caller can check how many have been
+ * actually created by calling 'netdev_n_rxq()'
*
- * The kernel maintains a packet transmission queue, so the caller is not
- * expected to do additional queuing of packets.
+ * 'n_txq' specifies the exact number of transmission queues to create.
+ * If this function returns successfully, the caller can make 'n_txq'
+ * concurrent calls to netdev_send() (each one with a different 'qid' in the
+ * range [0..'n_txq'-1]).
*
- * Some network devices may not implement support for this function. In such
- * cases this function will always return EOPNOTSUPP. */
+ * On error, the tx queue and rx queue configuration is indeterminate.
+ * The caller should decide whether to restore the previous or the default
+ * configuration.  Also, the caller must make sure that no other thread is
+ * accessing the queues at the same time. */
int
-netdev_send(struct netdev *netdev, const struct ofpbuf *buffer)
+netdev_set_multiq(struct netdev *netdev, unsigned int n_txq,
+ unsigned int n_rxq)
{
int error;
- error = (netdev->netdev_class->send
- ? netdev->netdev_class->send(netdev, buffer->data, buffer->size)
+ error = (netdev->netdev_class->set_multiq
+ ? netdev->netdev_class->set_multiq(netdev,
+ MAX(n_txq, 1),
+ MAX(n_rxq, 1))
: EOPNOTSUPP);
+
+ if (error && error != EOPNOTSUPP) {
+        VLOG_DBG_RL(&rl, "failed to set tx/rx queue for network device %s: "
+                    "%s", netdev_get_name(netdev), ovs_strerror(error));
+ }
+
+ return error;
+}
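+
+/* A minimal sketch: request four tx and four rx queues, then read back how
+ * many the provider actually created (it may create fewer rx queues). */
+static void
+example_configure_queues(struct netdev *netdev)
+{
+    if (!netdev_set_multiq(netdev, 4, 4)) {
+        VLOG_DBG("%s: %d tx and %d rx queues", netdev_get_name(netdev),
+                 netdev_n_txq(netdev), netdev_n_rxq(netdev));
+    }
+}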
+
+/* Sends 'buffers' on 'netdev'. Returns 0 if successful (for every packet),
+ * otherwise a positive errno value. Returns EAGAIN without blocking if
+ * at least one of the packets cannot be queued immediately.  Returns EMSGSIZE
+ * if a partial packet was transmitted or if a packet is too big or too small
+ * to transmit on the device.
+ *
+ * If the function returns a non-zero value, some of the packets might have
+ * been sent anyway.
+ *
+ * If 'may_steal' is false, the caller retains ownership of all the packets.
+ * If 'may_steal' is true, the caller transfers ownership of all the packets
+ * to the network device, regardless of success.
+ *
+ * The network device is expected to maintain one or more packet
+ * transmission queues, so that the caller does not ordinarily have to
+ * do additional queuing of packets. 'qid' specifies the queue to use
+ * and can be ignored if the implementation does not support multiple
+ * queues.
+ *
+ * Some network devices may not implement support for this function. In such
+ * cases this function will always return EOPNOTSUPP. */
+int
+netdev_send(struct netdev *netdev, int qid, struct dp_packet **buffers,
+ int cnt, bool may_steal)
+{
+ if (!netdev->netdev_class->send) {
+ if (may_steal) {
+ for (int i = 0; i < cnt; i++) {
+ dp_packet_delete(buffers[i]);
+ }
+ }
+ return EOPNOTSUPP;
+ }
+
+ int error = netdev->netdev_class->send(netdev, qid, buffers, cnt,
+ may_steal);
if (!error) {
COVERAGE_INC(netdev_sent);
}
return error;
}
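+
+/* A minimal transmit sketch on queue 0 with 'may_steal' false, so the caller
+ * keeps ownership of 'pkts' and may retry after EAGAIN. */
+static int
+example_send_burst(struct netdev *netdev, struct dp_packet **pkts, int cnt)
+{
+    int error = netdev_send(netdev, 0, pkts, cnt, false);
+    if (error == EAGAIN) {
+        netdev_send_wait(netdev, 0);  /* Wake up when there is room. */
+    }
+    return error;
+}
+
+/* Removes the tunnel header from each of the 'cnt' packets in 'buffers'.
+ * Packets whose header cannot be parsed are cleared to zero length.
+ * Returns EOPNOTSUPP if 'netdev' does not support popping headers. */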
+int
+netdev_pop_header(struct netdev *netdev, struct dp_packet **buffers, int cnt)
+{
+ int i;
+
+ if (!netdev->netdev_class->pop_header) {
+ return EOPNOTSUPP;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ int err;
+
+ err = netdev->netdev_class->pop_header(buffers[i]);
+ if (err) {
+ dp_packet_clear(buffers[i]);
+ }
+ }
+
+ return 0;
+}
+
+int
+netdev_build_header(const struct netdev *netdev, struct ovs_action_push_tnl *data,
+ const struct flow *tnl_flow)
+{
+ if (netdev->netdev_class->build_header) {
+ return netdev->netdev_class->build_header(netdev, data, tnl_flow);
+ }
+ return EOPNOTSUPP;
+}
+
+int
+netdev_push_header(const struct netdev *netdev,
+ struct dp_packet **buffers, int cnt,
+ const struct ovs_action_push_tnl *data)
+{
+ int i;
+
+ if (!netdev->netdev_class->push_header) {
+        return EOPNOTSUPP;
+ }
+
+ for (i = 0; i < cnt; i++) {
+ netdev->netdev_class->push_header(buffers[i], data);
+ pkt_metadata_init(&buffers[i]->md, u32_to_odp(data->out_port));
+ }
+
+ return 0;
+}
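+
+/* A minimal tunnel-encap sketch: build the header template once from the
+ * tunnel flow, then push it onto a whole batch.  It assumes 'netdev' is a
+ * tunnel vport that implements build_header/push_header and that
+ * 'data.out_port' is filled in appropriately by the build step or caller. */
+static int
+example_encap_batch(const struct netdev *netdev, const struct flow *tnl_flow,
+                    struct dp_packet **pkts, int cnt)
+{
+    struct ovs_action_push_tnl data;
+    int error = netdev_build_header(netdev, &data, tnl_flow);
+
+    if (!error) {
+        error = netdev_push_header(netdev, pkts, cnt, &data);
+    }
+    return error;
+}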
+
/* Registers with the poll loop to wake up from the next call to poll_block()
* when the packet transmission queue has sufficient room to transmit a packet
* with netdev_send().
*
- * The kernel maintains a packet transmission queue, so the client is not
- * expected to do additional queuing of packets. Thus, this function is
- * unlikely to ever be used. It is included for completeness. */
+ * The network device is expected to maintain one or more packet
+ * transmission queues, so that the caller does not ordinarily have to
+ * do additional queuing of packets. 'qid' specifies the queue to use
+ * and can be ignored if the implementation does not support multiple
+ * queues. */
void
-netdev_send_wait(struct netdev *netdev)
+netdev_send_wait(struct netdev *netdev, int qid)
{
if (netdev->netdev_class->send_wait) {
- netdev->netdev_class->send_wait(netdev);
+ netdev->netdev_class->send_wait(netdev, qid);
}
}
/* Attempts to set 'netdev''s MAC address to 'mac'. Returns 0 if successful,
* otherwise a positive errno value. */
int
-netdev_set_etheraddr(struct netdev *netdev, const uint8_t mac[ETH_ADDR_LEN])
+netdev_set_etheraddr(struct netdev *netdev, const struct eth_addr mac)
{
return netdev->netdev_class->set_etheraddr(netdev, mac);
}
/* Retrieves 'netdev''s MAC address.  If successful, returns 0 and copies
 * the MAC address into 'mac'.  On failure, returns a positive errno value and
 * clears 'mac' to all-zeros. */
int
-netdev_get_etheraddr(const struct netdev *netdev, uint8_t mac[ETH_ADDR_LEN])
+netdev_get_etheraddr(const struct netdev *netdev, struct eth_addr *mac)
{
return netdev->netdev_class->get_etheraddr(netdev, mac);
}
/* Attempts to look up the ARP table entry for 'ip' on 'netdev'.  Returns 0
 * if successful, otherwise a positive errno value.  A return value of
 * ENXIO indicates that there is no ARP table entry for 'ip' on 'netdev'. */
int
netdev_arp_lookup(const struct netdev *netdev,
- ovs_be32 ip, uint8_t mac[ETH_ADDR_LEN])
+ ovs_be32 ip, struct eth_addr *mac)
{
int error = (netdev->netdev_class->arp_lookup
? netdev->netdev_class->arp_lookup(netdev, ip, mac)
: EOPNOTSUPP);
if (error) {
- memset(mac, 0, ETH_ADDR_LEN);
+ *mac = eth_addr_zero;
}
return error;
}
return error;
}
-/* Attempts to change the stats for 'netdev' to those provided in 'stats'.
- * Returns 0 if successful, otherwise a positive errno value.
- *
- * This will probably fail for most network devices. Some devices might only
- * allow setting their stats to 0. */
-int
-netdev_set_stats(struct netdev *netdev, const struct netdev_stats *stats)
-{
- return (netdev->netdev_class->set_stats
- ? netdev->netdev_class->set_stats(netdev, stats)
- : EOPNOTSUPP);
-}
-
/* Attempts to set input rate limiting (policing) policy, such that up to
 * 'kbits_rate' kbps of traffic is accepted, with a maximum cumulative burst
 * size of 'kbits' kb. */
ovs_mutex_unlock(&netdev_mutex);
}
+/* Extracts pointers to all 'netdev-vports' into an array 'vports'
+ * and returns it. Stores the size of the array into '*size'.
+ *
+ * The caller is responsible for freeing 'vports' and must close
+ * each 'netdev-vport' in the list. */
+struct netdev **
+netdev_get_vports(size_t *size)
+ OVS_EXCLUDED(netdev_mutex)
+{
+ struct netdev **vports;
+ struct shash_node *node;
+ size_t n = 0;
+
+ if (!size) {
+ return NULL;
+ }
+
+    /* Explicitly allocate a big enough chunk of memory. */
+ vports = xmalloc(shash_count(&netdev_shash) * sizeof *vports);
+ ovs_mutex_lock(&netdev_mutex);
+ SHASH_FOR_EACH (node, &netdev_shash) {
+ struct netdev *dev = node->data;
+
+ if (netdev_vport_is_vport_class(dev->netdev_class)) {
+ dev->ref_cnt++;
+ vports[n] = dev;
+ n++;
+ }
+ }
+ ovs_mutex_unlock(&netdev_mutex);
+ *size = n;
+
+ return vports;
+}
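+
+/* A minimal sketch of consuming netdev_get_vports(): close every vport
+ * reference and free the array, as required above. */
+static void
+example_visit_vports(void)
+{
+    size_t n_vports;
+    struct netdev **vports = netdev_get_vports(&n_vports);
+
+    for (size_t i = 0; i < n_vports; i++) {
+        VLOG_DBG("vport: %s", netdev_get_name(vports[i]));
+        netdev_close(vports[i]);
+    }
+    free(vports);
+}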
+
const char *
netdev_get_type_from_name(const char *name)
{
}
\f
struct netdev *
-netdev_rx_get_netdev(const struct netdev_rx *rx)
+netdev_rxq_get_netdev(const struct netdev_rxq *rx)
{
ovs_assert(rx->netdev->ref_cnt > 0);
return rx->netdev;
}
const char *
-netdev_rx_get_name(const struct netdev_rx *rx)
+netdev_rxq_get_name(const struct netdev_rxq *rx)
+{
+ return netdev_get_name(netdev_rxq_get_netdev(rx));
+}
+
+int
+netdev_rxq_get_queue_id(const struct netdev_rxq *rx)
{
- return netdev_get_name(netdev_rx_get_netdev(rx));
+ return rx->queue_id;
}
static void
}
}
}
+
+uint64_t
+netdev_get_change_seq(const struct netdev *netdev)
+{
+ return netdev->change_seq;
+}