#include "ovs-numa.h"
#include "packets.h"
#include "seq.h"
-#include "shash.h"
+#include "openvswitch/shash.h"
#include "smap.h"
#ifdef __cplusplus
extern "C" {
#endif
+struct netdev_tnl_build_header_params;
#define NETDEV_NUMA_UNSPEC OVS_NUMA_UNSPEC
/* A network device (e.g. an Ethernet device).
* 'netdev''s flags, features, ethernet address, or carrier changes. */
uint64_t change_seq;
+ /* A netdev provider might be unable to change some of the device's
+ * parameters (n_rxq, mtu) when the device is in use. In this case
+ * the provider can notify the upper layer by calling
+ * netdev_request_reconfigure(). The upper layer will react by stopping
+ * the operations on the device and calling netdev_reconfigure() to allow
+ * the configuration changes. 'last_reconfigure_seq' remembers the value
+ * of 'reconfigure_seq' when the last reconfiguration happened. */
+ struct seq *reconfigure_seq;
+ uint64_t last_reconfigure_seq;
+
/* The core netdev code initializes these at netdev construction and only
* provide read-only access to its client. Netdev implementations may
* modify them. */
int n_txq;
int n_rxq;
- /* Number of rx queues requested by user. */
- int requested_n_rxq;
int ref_cnt; /* Times this devices was opened. */
struct shash_node *node; /* Pointer to element in global map. */
struct ovs_list saved_flags_list; /* Contains "struct netdev_saved_flags". */
}
}
+/* Called by a netdev provider to notify the upper layer that the device's
+ * configuration must be revisited (e.g. a requested change to n_rxq or mtu
+ * could not be applied while the device was in use).  Bumps
+ * 'netdev->reconfigure_seq'; the upper layer detects the change and
+ * eventually stops using the device and calls the provider's reconfigure()
+ * callback. */
+static inline void
+netdev_request_reconfigure(struct netdev *netdev)
+{
+    seq_change(netdev->reconfigure_seq);
+}
+
const char *netdev_get_type(const struct netdev *);
const struct netdev_class *netdev_get_class(const struct netdev *);
const char *netdev_get_name(const struct netdev *);
const struct netdev_tunnel_config *
(*get_tunnel_config)(const struct netdev *netdev);
- /* Build Partial Tunnel header. Ethernet and ip header is already built,
- * build_header() is suppose build protocol specific part of header. */
+ /* Build Tunnel header. Ethernet and IP header parameters are passed to
+ * tunnel implementation to build entire outer header for given flow. */
int (*build_header)(const struct netdev *, struct ovs_action_push_tnl *data,
- const struct flow *tnl_flow);
+ const struct netdev_tnl_build_header_params *params);
/* build_header() can not build entire header for all packets for given
* flow. Push header is called for packet to build header specific to
* such info, returns NETDEV_NUMA_UNSPEC. */
int (*get_numa_id)(const struct netdev *netdev);
- /* Configures the number of tx queues and rx queues of 'netdev'.
- * Return 0 if successful, otherwise a positive errno value.
- *
- * 'n_rxq' specifies the maximum number of receive queues to create.
- * The netdev provider might choose to create less (e.g. if the hardware
- * supports only a smaller number). The actual number of queues created
- * is stored in the 'netdev->n_rxq' field.
+ /* Configures the number of tx queues of 'netdev'. Returns 0 if successful,
+ * otherwise a positive errno value.
*
* 'n_txq' specifies the exact number of transmission queues to create.
* The caller will call netdev_send() concurrently from 'n_txq' different
* making sure that these concurrent calls do not create a race condition
* by using multiple hw queues or locking.
*
- * On error, the tx queue and rx queue configuration is indeterminant.
- * Caller should make decision on whether to restore the previous or
- * the default configuration. Also, caller must make sure there is no
- * other thread accessing the queues at the same time. */
- int (*set_multiq)(struct netdev *netdev, unsigned int n_txq,
- unsigned int n_rxq);
+ * The caller will call netdev_reconfigure() (if necessary) before using
+ * netdev_send() on any of the newly configured queues, giving the provider
+ * a chance to adjust its settings.
+ *
+ * On error, the tx queue configuration is unchanged. */
+ int (*set_tx_multiq)(struct netdev *netdev, unsigned int n_txq);
/* Sends buffers on 'netdev'.
* Returns 0 if successful (for every buffer), otherwise a positive errno
* network device from being usefully used by the netdev-based "userspace
* datapath". It will also prevent the OVS implementation of bonding from
* working properly over 'netdev'.) */
- int (*send)(struct netdev *netdev, int qid, struct dp_packet **buffers,
- int cnt, bool may_steal);
+ int (*send)(struct netdev *netdev, int qid, struct dp_packet_batch *batch,
+ bool may_steal);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when the packet transmission queue for 'netdev' has
int (*update_flags)(struct netdev *netdev, enum netdev_flags off,
enum netdev_flags on, enum netdev_flags *old_flags);
+ /* If the provider called netdev_request_reconfigure(), the upper layer
+ * will eventually call this. The provider can update the device
+ * configuration knowing that the upper layer will not call rxq_recv() or
+ * send() until this function returns.
+ *
+ * On error, the configuration is indeterminate and the device cannot be
+ * used to send and receive packets until a successful configuration is
+ * applied. */
+ int (*reconfigure)(struct netdev *netdev);
/* ## -------------------- ## */
/* ## netdev_rxq Functions ## */
/* ## -------------------- ## */
void (*rxq_destruct)(struct netdev_rxq *);
void (*rxq_dealloc)(struct netdev_rxq *);
- /* Attempts to receive a batch of packets from 'rx'. The caller supplies
- * 'pkts' as the pointer to the beginning of an array of MAX_RX_BATCH
- * pointers to dp_packet. If successful, the implementation stores
- * pointers to up to MAX_RX_BATCH dp_packets into the array, transferring
- * ownership of the packets to the caller, stores the number of received
- * packets into '*cnt', and returns 0.
+ /* Attempts to receive a batch of packets from 'rx'. In 'batch', the
+ * caller supplies 'packets' as the pointer to the beginning of an array
+ * of MAX_RX_BATCH pointers to dp_packet. If successful, the
+ * implementation stores pointers to up to MAX_RX_BATCH dp_packets into
+ * the array, transferring ownership of the packets to the caller, stores
+ * the number of received packets into 'count', and returns 0.
*
* The implementation does not necessarily initialize any non-data members
- * of 'pkts'. That is, the caller must initialize layer pointers and
- * metadata itself, if desired, e.g. with pkt_metadata_init() and
- * miniflow_extract().
+ * of 'packets' in 'batch'. That is, the caller must initialize layer
+ * pointers and metadata itself, if desired, e.g. with pkt_metadata_init()
+ * and miniflow_extract().
*
* Implementations should allocate buffers with DP_NETDEV_HEADROOM bytes of
* headroom.
*
* Returns EAGAIN immediately if no packet is ready to be received or
* another positive errno value if an error was encountered. */
- int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet **pkts,
- int *cnt);
+ int (*rxq_recv)(struct netdev_rxq *rx, struct dp_packet_batch *batch);
/* Registers with the poll loop to wake up from the next call to
* poll_block() when a packet is ready to be received with