/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <sys/stat.h>
#include <unistd.h>
+#include "bitmap.h"
#include "cmap.h"
#include "csum.h"
#include "dp-packet.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "cmap.h"
+#include "coverage.h"
+#include "hmapx.h"
#include "latch.h"
#include "list.h"
#include "match.h"
-#include "meta-flow.h"
#include "netdev.h"
#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "shash.h"
#include "sset.h"
#include "timeval.h"
-#include "tnl-arp-cache.h"
+#include "tnl-neigh-cache.h"
+#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"
#include "openvswitch/vlog.h"
static struct vlog_rate_limit upcall_rl = VLOG_RATE_LIMIT_INIT(600, 600);
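+/* Datapath supported features, passed to the flow-key serializer (see the
+ * 'odp_flow_key_parms' uses below): the userspace datapath places no limit
+ * on MPLS label depth and supports recirculation. */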
+static struct odp_support dp_netdev_support = {
+ .max_mpls_depth = SIZE_MAX,
+ .recirc = true,
+};
+
/* Stores a miniflow with inline values */
struct netdev_flow_key {
uint32_t hash; /* Hash function differs for different users. */
uint32_t len; /* Length of the following miniflow (incl. map). */
struct miniflow mf;
- uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE];
+ uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* Exact match cache for frequently used flows
* If dp_netdev_input is not called from a pmd thread, a mutex is used.
*/
-#define EM_FLOW_HASH_SHIFT 10
+#define EM_FLOW_HASH_SHIFT 13
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
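+/* With EM_FLOW_HASH_SHIFT of 13 the exact match cache holds 1 << 13 == 8192
+ * entries; a lookup probes EM_FLOW_HASH_SEGS (2) of them, each selected by a
+ * different EM_FLOW_HASH_SHIFT-bit slice of the packet hash. */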
upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
void *upcall_aux;
+ /* Callback function for notifying the purging of dp flows (during
+ * pmd thread deletion). */
+ dp_purge_callback *dp_purge_cb;
+ void *dp_purge_aux;
+
/* Stores all 'struct dp_netdev_pmd_thread's. */
struct cmap poll_threads;
* 'struct dp_netdev_pmd_thread' in 'per_pmd_key'. */
ovsthread_key_t per_pmd_key;
- /* Number of rx queues for each dpdk interface and the cpu mask
- * for pin of pmd threads. */
- size_t n_dpdk_rxqs;
+ /* Cpu mask for pin of pmd threads. */
char *pmd_cmask;
uint64_t last_tnl_conf_seq;
};
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
- struct cmap_node node; /* Node in dp_netdev's 'ports'. */
odp_port_t port_no;
struct netdev *netdev;
+ struct cmap_node node; /* Node in dp_netdev's 'ports'. */
struct netdev_saved_flags *sf;
struct netdev_rxq **rxq;
struct ovs_refcount ref_cnt;
char *type; /* Port type as requested by user. */
+ int latest_requested_n_rxq; /* Latest number of rx queues requested
+ from the netdev. */
};
/* Contained by struct dp_netdev_flow's 'stats' member. */
* requires synchronization, as noted in more detail below.
*/
struct dp_netdev_flow {
- bool dead;
-
+ const struct flow flow; /* Unmasked flow that created this entry. */
/* Hash table index by unmasked flow. */
const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
/* 'flow_table'. */
const ovs_u128 ufid; /* Unique flow identifier. */
- const struct flow flow; /* Unmasked flow that created this entry. */
- const int pmd_id; /* The 'core_id' of pmd thread owning this */
+ const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */
/* flow. */
/* Number of references.
* reference. */
struct ovs_refcount ref_cnt;
+ bool dead;
+
/* Statistics. */
struct dp_netdev_flow_stats stats;
/* Actions. */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
+ /* While processing a group of input packets, the datapath uses the next
+ * member to store a pointer to the output batch for the flow. It is
+ * reset after the batch has been sent out (See dp_netdev_queue_batches(),
+ * packet_batch_init() and packet_batch_execute()). */
+ struct packet_batch *batch;
+
/* Packet classification. */
struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */
/* 'cr' must be the last member. */
atomic_ullong n[PMD_N_CYCLES];
};
+/* Contained by struct dp_netdev_pmd_thread's 'poll_list' member. */
+struct rxq_poll {
+ struct dp_netdev_port *port;
+ struct netdev_rxq *rx;
+ struct ovs_list node;
+};
+
/* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
* the performance overhead of interrupt processing. Therefore netdev can
* not implement rx-wait for these devices. dpif-netdev needs to poll
pthread_t thread;
int index; /* Idx of this pmd thread among pmd */
/* threads on same numa node. */
- int core_id; /* CPU core id of this pmd thread. */
+ unsigned core_id; /* CPU core id of this pmd thread. */
int numa_id; /* numa node id of this pmd thread. */
+ atomic_int tx_qid; /* Queue id used by this pmd thread to
+ * send packets on all netdevs */
+
+ struct ovs_mutex poll_mutex; /* Mutex for poll_list. */
+ /* List of rx queues to poll. */
+ struct ovs_list poll_list OVS_GUARDED;
+ int poll_cnt; /* Number of elements in poll_list. */
/* Only a pmd thread can write on its own 'cycles' and 'stats'.
* The main thread keeps 'stats_zero' and 'cycles_zero' as base
const struct nlattr *actions,
size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
- struct dp_packet **, int cnt);
+ struct dp_packet **, int cnt, odp_port_t port_no);
+static void dp_netdev_recirculate(struct dp_netdev_pmd_thread *,
+ struct dp_packet **, int cnt);
static void dp_netdev_disable_upcall(struct dp_netdev *);
-void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
+static void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev *dp, int index,
- int core_id, int numa_id);
+ unsigned core_id, int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
- int core_id);
+ unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
static void dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_set_pmds_on_numa(struct dp_netdev *dp, int numa_id);
+static void dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd);
+static void dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
+ struct dp_netdev_pmd_thread *pmd);
+static void dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
+ struct dp_netdev_port *port);
+static void
+dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port);
+static void
+dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
+ struct dp_netdev_port *port, struct netdev_rxq *rx);
+static struct dp_netdev_pmd_thread *
+dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id);
static void dp_netdev_reset_pmd_threads(struct dp_netdev *dp);
static bool dp_netdev_pmd_try_ref(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_pmd_unref(struct dp_netdev_pmd_thread *pmd);
{
int i;
- BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));
-
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
- flow_cache->entries[i].key.len
- = offsetof(struct miniflow, inline_values);
- miniflow_initialize(&flow_cache->entries[i].key.mf,
- flow_cache->entries[i].key.buf);
+ flow_cache->entries[i].key.len = sizeof(struct miniflow);
+ flowmap_init(&flow_cache->entries[i].key.mf.map);
}
}
flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
+/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
+bool
+dpif_is_netdev(const struct dpif *dpif)
+{
+ return dpif->dpif_class->open == dpif_netdev_open;
+}
+
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
- ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
+ ovs_assert(dpif_is_netdev(dpif));
return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
}
\f
enum pmd_info_type {
- PMD_INFO_SHOW_STATS, /* show how cpu cycles are spent */
- PMD_INFO_CLEAR_STATS /* set the cycles count to 0 */
+ PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */
+ PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */
+ PMD_INFO_SHOW_RXQ /* Show poll-lists of pmd threads. */
};
static void
if (pmd->numa_id != OVS_NUMA_UNSPEC) {
ds_put_format(reply, " numa_id %d", pmd->numa_id);
}
- if (pmd->core_id != OVS_CORE_UNSPEC) {
- ds_put_format(reply, " core_id %d", pmd->core_id);
+ if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
+ ds_put_format(reply, " core_id %u", pmd->core_id);
}
ds_put_cstr(reply, ":\n");
}
}
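+/* Appends to 'reply' one line per port polled by 'pmd' with the rx queue ids
+ * it handles.  The non-pmd thread has no poll list and is skipped. */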
+static void
+pmd_info_show_rxq(struct ds *reply, struct dp_netdev_pmd_thread *pmd)
+{
+ if (pmd->core_id != NON_PMD_CORE_ID) {
+ struct rxq_poll *poll;
+ const char *prev_name = NULL;
+
+ ds_put_format(reply, "pmd thread numa_id %d core_id %u:\n",
+ pmd->numa_id, pmd->core_id);
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ LIST_FOR_EACH (poll, node, &pmd->poll_list) {
+ const char *name = netdev_get_name(poll->port->netdev);
+
+ if (!prev_name || strcmp(name, prev_name)) {
+ if (prev_name) {
+ ds_put_cstr(reply, "\n");
+ }
+ ds_put_format(reply, "\tport: %s\tqueue-id:", name);
+ }
+ ds_put_format(reply, " %d", netdev_rxq_get_queue_id(poll->rx));
+ prev_name = name;
+ }
+ ovs_mutex_unlock(&pmd->poll_mutex);
+ ds_put_cstr(reply, "\n");
+ }
+}
+
static void
dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
void *aux)
}
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
- unsigned long long stats[DP_N_STATS];
- uint64_t cycles[PMD_N_CYCLES];
- int i;
+ if (type == PMD_INFO_SHOW_RXQ) {
+ pmd_info_show_rxq(&reply, pmd);
+ } else {
+ unsigned long long stats[DP_N_STATS];
+ uint64_t cycles[PMD_N_CYCLES];
+ int i;
- /* Read current stats and cycle counters */
- for (i = 0; i < ARRAY_SIZE(stats); i++) {
- atomic_read_relaxed(&pmd->stats.n[i], &stats[i]);
- }
- for (i = 0; i < ARRAY_SIZE(cycles); i++) {
- atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]);
- }
+ /* Read current stats and cycle counters */
+ for (i = 0; i < ARRAY_SIZE(stats); i++) {
+ atomic_read_relaxed(&pmd->stats.n[i], &stats[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]);
+ }
- if (type == PMD_INFO_CLEAR_STATS) {
- pmd_info_clear_stats(&reply, pmd, stats, cycles);
- } else if (type == PMD_INFO_SHOW_STATS) {
- pmd_info_show_stats(&reply, pmd, stats, cycles);
+ if (type == PMD_INFO_CLEAR_STATS) {
+ pmd_info_clear_stats(&reply, pmd, stats, cycles);
+ } else if (type == PMD_INFO_SHOW_STATS) {
+ pmd_info_show_stats(&reply, pmd, stats, cycles);
+ }
}
}
dpif_netdev_init(void)
{
static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
- clear_aux = PMD_INFO_CLEAR_STATS;
+ clear_aux = PMD_INFO_CLEAR_STATS,
+ poll_aux = PMD_INFO_SHOW_RXQ;
unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
0, 1, dpif_netdev_pmd_info,
unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
0, 1, dpif_netdev_pmd_info,
(void *)&clear_aux);
+ unixctl_command_register("dpif-netdev/pmd-rxq-show", "[dp]",
+ 0, 1, dpif_netdev_pmd_info,
+ (void *)&poll_aux);
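+ /* E.g. "ovs-appctl dpif-netdev/pmd-rxq-show [dp]" dumps every pmd
+ * thread's port/rx-queue assignments via pmd_info_show_rxq(). */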
return 0;
}
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
- /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */
- ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID);
dp_netdev_set_nonpmd(dp);
- dp->n_dpdk_rxqs = NR_QUEUE;
ovs_mutex_lock(&dp->port_mutex);
error = do_add_port(dp, name, "internal", ODPP_LOCAL);
shash_find_and_delete(&dp_netdevs, dp->name);
dp_netdev_destroy_all_pmds(dp);
- cmap_destroy(&dp->poll_threads);
ovs_mutex_destroy(&dp->non_pmd_mutex);
ovsthread_key_delete(dp->per_pmd_key);
ovs_mutex_lock(&dp->port_mutex);
CMAP_FOR_EACH (port, node, &dp->ports) {
+ /* PMD threads are destroyed here. do_del_port() cannot quiesce. */
do_del_port(dp, port);
}
ovs_mutex_unlock(&dp->port_mutex);
+ cmap_destroy(&dp->poll_threads);
seq_destroy(dp->port_seq);
cmap_destroy(&dp->ports);
ovs_mutex_unlock(&pmd->cond_mutex);
}
-/* Causes all pmd threads to reload its tx/rx devices.
- * Must be called after adding/removing ports. */
-static void
-dp_netdev_reload_pmds(struct dp_netdev *dp)
-{
- struct dp_netdev_pmd_thread *pmd;
-
- CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
- dp_netdev_reload_pmd__(pmd);
- }
-}
-
static uint32_t
hash_port_no(odp_port_t port_no)
{
return ENOENT;
}
/* There can only be ovs_numa_get_n_cores() pmd threads,
- * so creates a txq for each. */
- error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs);
+ * so creates a txq for each, and one extra for the non-pmd
+ * threads. */
+ error = netdev_set_multiq(netdev, n_cores + 1,
+ netdev_requested_n_rxq(netdev));
if (error && (error != EOPNOTSUPP)) {
VLOG_ERR("%s, cannot set multiq", devname);
return errno;
port->netdev = netdev;
port->rxq = xmalloc(sizeof *port->rxq * netdev_n_rxq(netdev));
port->type = xstrdup(type);
+ port->latest_requested_n_rxq = netdev_requested_n_rxq(netdev);
for (i = 0; i < netdev_n_rxq(netdev); i++) {
error = netdev_rxq_open(netdev, &port->rxq[i], i);
if (error
cmap_insert(&dp->ports, &port->node, hash_port_no(port_no));
if (netdev_is_pmd(netdev)) {
- dp_netdev_set_pmds_on_numa(dp, netdev_get_numa_id(netdev));
- dp_netdev_reload_pmds(dp);
+ dp_netdev_add_port_to_pmds(dp, port);
}
seq_change(dp->port_seq);
}
}
-static bool
-port_try_ref(struct dp_netdev_port *port)
-{
- if (port) {
- return ovs_refcount_try_ref_rcu(&port->ref_cnt);
- }
-
- return false;
-}
-
static void
port_unref(struct dp_netdev_port *port)
{
return ENOENT;
}
+static int
+get_n_pmd_threads(struct dp_netdev *dp)
+{
+ /* There is one non-pmd thread in dp->poll_threads. */
+ return cmap_count(&dp->poll_threads) - 1;
+}
+
static int
get_n_pmd_threads_on_numa(struct dp_netdev *dp, int numa_id)
{
if (netdev_is_pmd(port->netdev)) {
int numa_id = netdev_get_numa_id(port->netdev);
+ /* PMD threads cannot be on an invalid numa node. */
+ ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
/* If there is no netdev on the numa node, deletes the pmd threads
- * for that numa. Else, just reloads the queues. */
+ * for that numa node. Else, deletes the port's queues from the
+ * pmds' polling lists. */
if (!has_pmd_port_for_numa(dp, numa_id)) {
dp_netdev_del_pmds_on_numa(dp, numa_id);
+ } else {
+ dp_netdev_del_port_from_all_pmds(dp, port);
}
- dp_netdev_reload_pmds(dp);
}
port_unref(port);
struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
dpcls_remove(&pmd->cls, &flow->cr);
+ flow->cr.mask = NULL; /* Accessing rule's mask after this is not safe. */
+
cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
flow->dead = true;
* miniflow_extract(), if the map is different the miniflow is different.
* Therefore we can be faster by comparing the map and the miniflow in a
* single memcmp().
- * _ netdev_flow_key's miniflow has always inline values.
- * - These functions can be inlined by the compiler.
- *
- * The following assertions make sure that what we're doing with miniflow is
- * safe
- */
-BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
- == sizeof(uint64_t));
+ * - These functions can be inlined by the compiler. */
-/* Given the number of bits set in the miniflow map, returns the size of the
+/* Given the number of bits set in miniflow's maps, returns the size of the
* 'netdev_flow_key.mf'. */
-static inline uint32_t
-netdev_flow_key_size(uint32_t flow_u32s)
+static inline size_t
+netdev_flow_key_size(size_t flow_u64s)
{
- return offsetof(struct miniflow, inline_values) +
- MINIFLOW_VALUES_SIZE(flow_u32s);
+ return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
}
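+/* E.g. a key whose miniflow carries ten inline u64 values occupies
+ * sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(10) bytes: the flowmap
+ * header plus 80 bytes of values. */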
static inline bool
struct dp_packet packet;
uint64_t buf_stub[512 / 8];
- miniflow_initialize(&dst->mf, dst->buf);
-
dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
pkt_metadata_from_flow(&packet.md, src);
flow_compose(&packet, src);
miniflow_extract(&packet, &dst->mf);
dp_packet_uninit(&packet);
- dst->len = netdev_flow_key_size(count_1bits(dst->mf.map));
+ dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf));
dst->hash = 0; /* Not computed yet. */
}
netdev_flow_mask_init(struct netdev_flow_key *mask,
const struct match *match)
{
- const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks;
- uint64_t *dst = mask->mf.inline_values;
- uint64_t map, mask_map = 0;
+ uint64_t *dst = miniflow_values(&mask->mf);
+ struct flowmap fmap;
uint32_t hash = 0;
- int n;
+ size_t idx;
/* Only check masks that make sense for the flow. */
- map = flow_wc_map(&match->flow);
+ flow_wc_map(&match->flow, &fmap);
+ flowmap_init(&mask->mf.map);
- while (map) {
- uint64_t rm1bit = rightmost_1bit(map);
- int i = raw_ctz(map);
+ FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
+ uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
- if (mask_u64[i]) {
- mask_map |= rm1bit;
- *dst++ = mask_u64[i];
- hash = hash_add64(hash, mask_u64[i]);
+ if (mask_u64) {
+ flowmap_set(&mask->mf.map, idx, 1);
+ *dst++ = mask_u64;
+ hash = hash_add64(hash, mask_u64);
}
- map -= rm1bit;
}
- mask->mf.values_inline = true;
- mask->mf.map = mask_map;
+ map_t map;
- hash = hash_add64(hash, mask_map);
+ FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
+ hash = hash_add64(hash, map);
+ }
- n = dst - mask->mf.inline_values;
+ size_t n = dst - miniflow_get_values(&mask->mf);
mask->hash = hash_finish(hash, n * 8);
mask->len = netdev_flow_key_size(n);
}
-/* Initializes 'dst' as a copy of 'src' masked with 'mask'. */
+/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
const struct flow *flow,
const struct netdev_flow_key *mask)
{
- uint64_t *dst_u64 = dst->mf.inline_values;
- const uint64_t *mask_u64 = mask->mf.inline_values;
+ uint64_t *dst_u64 = miniflow_values(&dst->mf);
+ const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
uint64_t value;
dst->len = mask->len;
- dst->mf.values_inline = true;
- dst->mf.map = mask->mf.map;
+ dst->mf = mask->mf; /* Copy maps. */
- FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) {
+ FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
*dst_u64 = value & *mask_u64++;
hash = hash_add64(hash, *dst_u64++);
}
- dst->hash = hash_finish(hash, (dst_u64 - dst->mf.inline_values) * 8);
+ dst->hash = hash_finish(hash,
+ (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
-/* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
- for (struct mf_for_each_in_map_aux aux__ \
- = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
- mf_get_next_in_map(&aux__, &(VALUE)); \
- )
+/* Iterate through netdev_flow_key TNL u64 values specified by 'FLOWMAP'. */
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
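+/* Example use (see netdev_flow_key_hash_in_mask() below):
+ *
+ *     uint64_t value;
+ *
+ *     NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP (value, key, mask->mf.map) {
+ *         hash = hash_add64(hash, value & *p++);
+ *     }
+ */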
/* Returns a hash value for the bits of 'key' where there are 1-bits in
* 'mask'. */
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
const struct netdev_flow_key *mask)
{
- const uint64_t *p = mask->mf.inline_values;
+ const uint64_t *p = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
- uint64_t key_u64;
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64, key, mask->mf.map) {
- hash = hash_add64(hash, key_u64 & *p++);
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
+ hash = hash_add64(hash, value & *p++);
}
- return hash_finish(hash, (p - mask->mf.inline_values) * 8);
+ return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}
static inline bool
if (ufidp) {
CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
&pmd->flow_table) {
- if (ovs_u128_equal(&netdev_flow->ufid, ufidp)) {
+ if (ovs_u128_equals(&netdev_flow->ufid, ufidp)) {
return netdev_flow;
}
}
struct flow_wildcards wc;
struct dp_netdev_actions *actions;
size_t offset;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &netdev_flow->flow,
+ .mask = &wc.masks,
+ .support = dp_netdev_support,
+ };
miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
/* Key */
offset = key_buf->size;
flow->key = ofpbuf_tail(key_buf);
- odp_flow_key_from_flow(key_buf, &netdev_flow->flow, &wc.masks,
- netdev_flow->flow.in_port.odp_port, true);
+ odp_parms.odp_in_port = netdev_flow->flow.in_port.odp_port;
+ odp_flow_key_from_flow(&odp_parms, key_buf);
flow->key_len = key_buf->size - offset;
/* Mask */
offset = mask_buf->size;
flow->mask = ofpbuf_tail(mask_buf);
- odp_flow_key_from_mask(mask_buf, &wc.masks, &netdev_flow->flow,
- odp_to_u32(wc.masks.in_port.odp_port),
- SIZE_MAX, true);
+ odp_parms.odp_in_port = wc.masks.in_port.odp_port;
+ odp_parms.key_buf = key_buf;
+ odp_flow_key_from_mask(&odp_parms, mask_buf);
flow->mask_len = mask_buf->size - offset;
/* Actions */
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
const struct nlattr *mask_key,
uint32_t mask_key_len, const struct flow *flow,
- struct flow *mask)
-{
- if (mask_key_len) {
- enum odp_key_fitness fitness;
-
- fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
- if (fitness) {
- /* This should not happen: it indicates that
- * odp_flow_key_from_mask() and odp_flow_key_to_mask()
- * disagree on the acceptable form of a mask. Log the problem
- * as an error, with enough details to enable debugging. */
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-
- if (!VLOG_DROP_ERR(&rl)) {
- struct ds s;
-
- ds_init(&s);
- odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
- true);
- VLOG_ERR("internal error parsing flow mask %s (%s)",
- ds_cstr(&s), odp_key_fitness_to_string(fitness));
- ds_destroy(&s);
- }
+ struct flow_wildcards *wc)
+{
+ enum odp_key_fitness fitness;
- return EINVAL;
- }
- } else {
- enum mf_field_id id;
- /* No mask key, unwildcard everything except fields whose
- * prerequisities are not met. */
- memset(mask, 0x0, sizeof *mask);
-
- for (id = 0; id < MFF_N_IDS; ++id) {
- /* Skip registers and metadata. */
- if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
- && id != MFF_METADATA) {
- const struct mf_field *mf = mf_from_id(id);
- if (mf_are_prereqs_ok(mf, flow)) {
- mf_mask_field(mf, mask);
- }
- }
+ fitness = odp_flow_key_to_mask_udpif(mask_key, mask_key_len, key,
+ key_len, wc, flow);
+ if (fitness) {
+ /* This should not happen: it indicates that
+ * odp_flow_key_from_mask() and odp_flow_key_to_mask()
+ * disagree on the acceptable form of a mask. Log the problem
+ * as an error, with enough details to enable debugging. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ if (!VLOG_DROP_ERR(&rl)) {
+ struct ds s;
+
+ ds_init(&s);
+ odp_flow_format(key, key_len, mask_key, mask_key_len, NULL, &s,
+ true);
+ VLOG_ERR("internal error parsing flow mask %s (%s)",
+ ds_cstr(&s), odp_key_fitness_to_string(fitness));
+ ds_destroy(&s);
}
- }
- /* Force unwildcard the in_port.
- *
- * We need to do this even in the case where we unwildcard "everything"
- * above because "everything" only includes the 16-bit OpenFlow port number
- * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
- * port number mask->in_port.odp_port. */
- mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
+ return EINVAL;
+ }
return 0;
}
{
odp_port_t in_port;
- if (odp_flow_key_to_flow(key, key_len, flow)) {
+ if (odp_flow_key_to_flow_udpif(key, key_len, flow)) {
/* This should not happen: it indicates that odp_flow_key_from_flow()
* and odp_flow_key_to_flow() disagree on the acceptable form of a
* flow. Log the problem as an error, with enough details to enable
return EINVAL;
}
+ /* Userspace datapath doesn't support conntrack. */
+ if (flow->ct_state || flow->ct_zone || flow->ct_mark
+ || !ovs_u128_is_zero(&flow->ct_label)) {
+ return EINVAL;
+ }
+
return 0;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct dp_netdev_pmd_thread *pmd;
- int pmd_id = get->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : get->pmd_id;
+ unsigned pmd_id = get->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : get->pmd_id;
int error = 0;
pmd = dp_netdev_get_pmd(dp, pmd_id);
netdev_flow_mask_init(&mask, match);
/* Make sure wc does not have metadata. */
- ovs_assert(!(mask.mf.map & (MINIFLOW_MAP(metadata) | MINIFLOW_MAP(regs))));
+ ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
+ && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
/* Do not allocate extra space. */
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
memset(&flow->stats, 0, sizeof flow->stats);
flow->dead = false;
- *CONST_CAST(int *, &flow->pmd_id) = pmd->core_id;
+ flow->batch = NULL;
+ *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
*CONST_CAST(struct flow *, &flow->flow) = match->flow;
*CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
ovs_refcount_init(&flow->ref_cnt);
struct match match;
struct ds ds = DS_EMPTY_INITIALIZER;
+ match.tun_md.valid = false;
match.flow = flow->flow;
miniflow_expand(&flow->cr.mask->mf, &match.wc.masks);
struct dp_netdev_pmd_thread *pmd;
struct match match;
ovs_u128 ufid;
- int pmd_id = put->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : put->pmd_id;
+ unsigned pmd_id = put->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : put->pmd_id;
int error;
error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
}
error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
put->mask, put->mask_len,
- &match.flow, &match.wc.masks);
+ &match.flow, &match.wc);
if (error) {
return error;
}
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct dp_netdev_pmd_thread *pmd;
- int pmd_id = del->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : del->pmd_id;
+ unsigned pmd_id = del->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : del->pmd_id;
int error = 0;
pmd = dp_netdev_get_pmd(dp, pmd_id);
/* Returns true if the configuration for rx queues or cpu mask
* has changed. */
static bool
-pmd_config_changed(const struct dp_netdev *dp, size_t rxqs, const char *cmask)
+pmd_config_changed(const struct dp_netdev *dp, const char *cmask)
{
- if (dp->n_dpdk_rxqs != rxqs) {
- return true;
- } else {
- if (dp->pmd_cmask != NULL && cmask != NULL) {
- return strcmp(dp->pmd_cmask, cmask);
- } else {
- return (dp->pmd_cmask != NULL || cmask != NULL);
+ struct dp_netdev_port *port;
+
+ CMAP_FOR_EACH (port, node, &dp->ports) {
+ struct netdev *netdev = port->netdev;
+ int requested_n_rxq = netdev_requested_n_rxq(netdev);
+ if (netdev_is_pmd(netdev)
+ && port->latest_requested_n_rxq != requested_n_rxq) {
+ return true;
}
}
+
+ if (dp->pmd_cmask != NULL && cmask != NULL) {
+ return strcmp(dp->pmd_cmask, cmask);
+ } else {
+ return (dp->pmd_cmask != NULL || cmask != NULL);
+ }
}
/* Resets pmd threads if the configuration for 'rxq's or cpu mask changes. */
static int
-dpif_netdev_pmd_set(struct dpif *dpif, unsigned int n_rxqs, const char *cmask)
+dpif_netdev_pmd_set(struct dpif *dpif, const char *cmask)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
- if (pmd_config_changed(dp, n_rxqs, cmask)) {
+ if (pmd_config_changed(dp, cmask)) {
struct dp_netdev_port *port;
dp_netdev_destroy_all_pmds(dp);
CMAP_FOR_EACH (port, node, &dp->ports) {
- if (netdev_is_pmd(port->netdev)) {
+ struct netdev *netdev = port->netdev;
+ int requested_n_rxq = netdev_requested_n_rxq(netdev);
+ if (netdev_is_pmd(port->netdev)
+ && port->latest_requested_n_rxq != requested_n_rxq) {
int i, err;
/* Closes the existing 'rxq's. */
}
/* Sets the new rx queue config. */
- err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(),
- n_rxqs);
+ err = netdev_set_multiq(port->netdev,
+ ovs_numa_get_n_cores() + 1,
+ requested_n_rxq);
if (err && (err != EOPNOTSUPP)) {
VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
" %u", netdev_get_name(port->netdev),
- n_rxqs);
+ requested_n_rxq);
return err;
}
-
+ port->latest_requested_n_rxq = requested_n_rxq;
/* If the set_multiq() above succeeds, reopens the 'rxq's. */
port->rxq = xrealloc(port->rxq, sizeof *port->rxq
* netdev_n_rxq(port->netdev));
}
}
}
- dp->n_dpdk_rxqs = n_rxqs;
-
/* Reconfigures the cpu mask. */
ovs_numa_set_cpu_mask(cmask);
free(dp->pmd_cmask);
struct dp_netdev_port *port,
struct netdev_rxq *rxq)
{
- struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *packets[NETDEV_MAX_BURST];
int error, cnt;
cycles_count_start(pmd);
error = netdev_rxq_recv(rxq, packets, &cnt);
cycles_count_end(pmd, PMD_CYCLES_POLLING);
if (!error) {
- int i;
-
*recirc_depth_get() = 0;
- /* XXX: initialize md in netdev implementation. */
- for (i = 0; i < cnt; i++) {
- packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
- }
cycles_count_start(pmd);
- dp_netdev_input(pmd, packets, cnt);
+ dp_netdev_input(pmd, packets, cnt, port->port_no);
cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
ovs_mutex_unlock(&dp->non_pmd_mutex);
dp_netdev_pmd_unref(non_pmd);
- tnl_arp_cache_run();
+ tnl_neigh_cache_run();
+ tnl_port_map_run();
new_tnl_seq = seq_read(tnl_conf_seq);
if (dp->last_tnl_conf_seq != new_tnl_seq) {
seq_wait(tnl_conf_seq, dp->last_tnl_conf_seq);
}
-struct rxq_poll {
- struct dp_netdev_port *port;
- struct netdev_rxq *rx;
-};
-
static int
pmd_load_queues(struct dp_netdev_pmd_thread *pmd,
struct rxq_poll **ppoll_list, int poll_cnt)
+ OVS_REQUIRES(pmd->poll_mutex)
{
struct rxq_poll *poll_list = *ppoll_list;
- struct dp_netdev_port *port;
- int n_pmds_on_numa, index, i;
+ struct rxq_poll *poll;
+ int i;
- /* Simple scheduler for netdev rx polling. */
for (i = 0; i < poll_cnt; i++) {
port_unref(poll_list[i].port);
}
- poll_cnt = 0;
- n_pmds_on_numa = get_n_pmd_threads_on_numa(pmd->dp, pmd->numa_id);
- index = 0;
+ poll_list = xrealloc(poll_list, pmd->poll_cnt * sizeof *poll_list);
- CMAP_FOR_EACH (port, node, &pmd->dp->ports) {
- /* Calls port_try_ref() to prevent the main thread
- * from deleting the port. */
- if (port_try_ref(port)) {
- if (netdev_is_pmd(port->netdev)
- && netdev_get_numa_id(port->netdev) == pmd->numa_id) {
- int i;
-
- for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
- if ((index % n_pmds_on_numa) == pmd->index) {
- poll_list = xrealloc(poll_list,
- sizeof *poll_list * (poll_cnt + 1));
-
- port_ref(port);
- poll_list[poll_cnt].port = port;
- poll_list[poll_cnt].rx = port->rxq[i];
- poll_cnt++;
- }
- index++;
- }
- }
- /* Unrefs the port_try_ref(). */
- port_unref(port);
- }
+ i = 0;
+ LIST_FOR_EACH (poll, node, &pmd->poll_list) {
+ port_ref(poll->port);
+ poll_list[i++] = *poll;
}
*ppoll_list = poll_list;
- return poll_cnt;
+ return pmd->poll_cnt;
}
static void *
pmd_thread_setaffinity_cpu(pmd->core_id);
reload:
emc_cache_init(&pmd->flow_cache);
+
+ ovs_mutex_lock(&pmd->poll_mutex);
poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
+ ovs_mutex_unlock(&pmd->poll_mutex);
+
+ /* List port/core affinity. */
+ for (i = 0; i < poll_cnt; i++) {
+ VLOG_DBG("Core %u processing port \'%s\' with queue-id %d\n",
+ pmd->core_id, netdev_get_name(poll_list[i].port->netdev),
+ netdev_rxq_get_queue_id(poll_list[i].rx));
+ }
/* Signal here to make sure the pmd finishes
* reloading the updated configuration. */
dp_netdev_pmd_reload_done(pmd);
for (;;) {
- int i;
-
for (i = 0; i < poll_cnt; i++) {
dp_netdev_process_rxq_port(pmd, poll_list[i].port, poll_list[i].rx);
}
lc = 0;
emc_cache_slow_sweep(&pmd->flow_cache);
+ coverage_try_clear();
ovsrcu_quiesce();
atomic_read_relaxed(&pmd->change_seq, &seq);
}
for (i = 0; i < poll_cnt; i++) {
- port_unref(poll_list[i].port);
+ port_unref(poll_list[i].port);
}
dp_netdev_pmd_reload_done(pmd);
dp_netdev_enable_upcall(dp);
}
-void
+static void
dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd)
{
ovs_mutex_lock(&pmd->cond_mutex);
*
* Caller must unrefs the returned reference. */
static struct dp_netdev_pmd_thread *
-dp_netdev_get_pmd(struct dp_netdev *dp, int core_id)
+dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
struct dp_netdev_pmd_thread *pmd;
const struct cmap_node *pnode;
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
- int index, int core_id, int numa_id)
+ int index, unsigned core_id, int numa_id)
{
pmd->dp = dp;
pmd->index = index;
pmd->core_id = core_id;
pmd->numa_id = numa_id;
+ pmd->poll_cnt = 0;
+
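+ /* Each netdev gets ovs_numa_get_n_cores() + 1 tx queues (see
+ * do_add_port()): pmd threads take sequential qids as they are
+ * created, while the non-pmd thread uses the extra queue at
+ * index n_cores. */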
+ atomic_init(&pmd->tx_qid,
+ (core_id == NON_PMD_CORE_ID)
+ ? ovs_numa_get_n_cores()
+ : get_n_pmd_threads(dp));
ovs_refcount_init(&pmd->ref_cnt);
latch_init(&pmd->exit_latch);
xpthread_cond_init(&pmd->cond, NULL);
ovs_mutex_init(&pmd->cond_mutex);
ovs_mutex_init(&pmd->flow_mutex);
+ ovs_mutex_init(&pmd->poll_mutex);
dpcls_init(&pmd->cls);
cmap_init(&pmd->flow_table);
+ list_init(&pmd->poll_list);
/* init the 'flow_cache' since there is no
* actual thread created for NON_PMD_CORE_ID. */
if (core_id == NON_PMD_CORE_ID) {
latch_destroy(&pmd->exit_latch);
xpthread_cond_destroy(&pmd->cond);
ovs_mutex_destroy(&pmd->cond_mutex);
+ ovs_mutex_destroy(&pmd->poll_mutex);
free(pmd);
}
/* Stops the pmd thread, removes it from the 'dp->poll_threads',
* and unrefs the struct. */
static void
-dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
+dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
/* Uninit the 'flow_cache' since there is
* no actual thread uninit it for NON_PMD_CORE_ID. */
ovs_numa_unpin_core(pmd->core_id);
xpthread_join(pmd->thread, NULL);
}
+
+ /* Unref all ports and free poll_list. */
+ dp_netdev_pmd_clear_poll_list(pmd);
+
+ /* Purges the 'pmd''s flows after stopping the thread, but before
+ * destroying the flows, so that the flow stats can be collected. */
+ if (dp->dp_purge_cb) {
+ dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
+ }
cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
dp_netdev_pmd_unref(pmd);
}
dp_netdev_destroy_all_pmds(struct dp_netdev *dp)
{
struct dp_netdev_pmd_thread *pmd;
+ struct dp_netdev_pmd_thread **pmd_list;
+ size_t k = 0, n_pmds;
+
+ n_pmds = cmap_count(&dp->poll_threads);
+ pmd_list = xcalloc(n_pmds, sizeof *pmd_list);
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
- dp_netdev_del_pmd(pmd);
+ /* We cannot call dp_netdev_del_pmd(), since it alters
+ * 'dp->poll_threads' (while we're iterating it) and it
+ * might quiesce. */
+ ovs_assert(k < n_pmds);
+ pmd_list[k++] = pmd;
}
+
+ for (size_t i = 0; i < k; i++) {
+ dp_netdev_del_pmd(dp, pmd_list[i]);
+ }
+ free(pmd_list);
}
-/* Deletes all pmd threads on numa node 'numa_id'. */
+/* Deletes all pmd threads on numa node 'numa_id' and
+ * fixes tx_qids of other threads to keep them sequential. */
static void
dp_netdev_del_pmds_on_numa(struct dp_netdev *dp, int numa_id)
{
struct dp_netdev_pmd_thread *pmd;
+ int n_pmds_on_numa, n_pmds;
+ int *free_idx, k = 0;
+ struct dp_netdev_pmd_thread **pmd_list;
+
+ n_pmds_on_numa = get_n_pmd_threads_on_numa(dp, numa_id);
+ free_idx = xcalloc(n_pmds_on_numa, sizeof *free_idx);
+ pmd_list = xcalloc(n_pmds_on_numa, sizeof *pmd_list);
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ /* We cannot call dp_netdev_del_pmd(), since it alters
+ * 'dp->poll_threads' (while we're iterating it) and it
+ * might quiesce. */
+ if (pmd->numa_id == numa_id) {
+ ovs_assert(k < n_pmds_on_numa);
+ atomic_read_relaxed(&pmd->tx_qid, &free_idx[k]);
+ pmd_list[k] = pmd;
+ k++;
+ }
+ }
+
+ for (int i = 0; i < k; i++) {
+ dp_netdev_del_pmd(dp, pmd_list[i]);
+ }
+
+ n_pmds = get_n_pmd_threads(dp);
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ int old_tx_qid;
+
+ atomic_read_relaxed(&pmd->tx_qid, &old_tx_qid);
+
+ if (old_tx_qid >= n_pmds) {
+ int new_tx_qid = free_idx[--k];
+
+ atomic_store_relaxed(&pmd->tx_qid, new_tx_qid);
+ }
+ }
+
+ free(pmd_list);
+ free(free_idx);
+}
+
+/* Deletes all rx queues from pmd->poll_list. */
+static void
+dp_netdev_pmd_clear_poll_list(struct dp_netdev_pmd_thread *pmd)
+{
+ struct rxq_poll *poll;
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ LIST_FOR_EACH_POP (poll, node, &pmd->poll_list) {
+ port_unref(poll->port);
+ free(poll);
+ }
+ pmd->poll_cnt = 0;
+ ovs_mutex_unlock(&pmd->poll_mutex);
+}
+
+/* Deletes all rx queues of 'port' from the poll_list of 'pmd' and
+ * reloads the pmd thread if the list changed. */
+static void
+dp_netdev_del_port_from_pmd(struct dp_netdev_port *port,
+ struct dp_netdev_pmd_thread *pmd)
+{
+ struct rxq_poll *poll, *next;
+ bool found = false;
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ LIST_FOR_EACH_SAFE (poll, next, node, &pmd->poll_list) {
+ if (poll->port == port) {
+ found = true;
+ port_unref(poll->port);
+ list_remove(&poll->node);
+ pmd->poll_cnt--;
+ free(poll);
+ }
+ }
+ ovs_mutex_unlock(&pmd->poll_mutex);
+ if (found) {
+ dp_netdev_reload_pmd__(pmd);
+ }
+}
+
+/* Deletes all rx queues of 'port' from all pmd threads in 'dp' and
+ * reloads them if needed. */
+static void
+dp_netdev_del_port_from_all_pmds(struct dp_netdev *dp,
+ struct dp_netdev_port *port)
+{
+ int numa_id = netdev_get_numa_id(port->netdev);
+ struct dp_netdev_pmd_thread *pmd;
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
if (pmd->numa_id == numa_id) {
- dp_netdev_del_pmd(pmd);
+ dp_netdev_del_port_from_pmd(port, pmd);
+ }
+ }
+}
+
+/* Returns the PMD thread on numa node 'numa_id' with the fewest rx queues
+ * to poll. Returns NULL if there are no PMD threads on this numa node.
+ * Can be called safely only by the main thread. */
+static struct dp_netdev_pmd_thread *
+dp_netdev_less_loaded_pmd_on_numa(struct dp_netdev *dp, int numa_id)
+{
+ int min_cnt = -1;
+ struct dp_netdev_pmd_thread *pmd, *res = NULL;
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ if (pmd->numa_id == numa_id
+ && (min_cnt > pmd->poll_cnt || res == NULL)) {
+ min_cnt = pmd->poll_cnt;
+ res = pmd;
+ }
+ }
+
+ return res;
+}
+
+/* Adds rx queue to poll_list of PMD thread. */
+static void
+dp_netdev_add_rxq_to_pmd(struct dp_netdev_pmd_thread *pmd,
+ struct dp_netdev_port *port, struct netdev_rxq *rx)
+ OVS_REQUIRES(pmd->poll_mutex)
+{
+ struct rxq_poll *poll = xmalloc(sizeof *poll);
+
+ port_ref(port);
+ poll->port = port;
+ poll->rx = rx;
+
+ list_push_back(&pmd->poll_list, &poll->node);
+ pmd->poll_cnt++;
+}
+
+/* Distributes all rx queues of 'port' among the PMD threads on the port's
+ * numa node and reloads them if needed. */
+static void
+dp_netdev_add_port_to_pmds(struct dp_netdev *dp, struct dp_netdev_port *port)
+{
+ int numa_id = netdev_get_numa_id(port->netdev);
+ struct dp_netdev_pmd_thread *pmd;
+ struct hmapx to_reload;
+ struct hmapx_node *node;
+ int i;
+
+ hmapx_init(&to_reload);
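+ /* 'to_reload' collects every pmd that is handed a queue below, so each
+ * affected thread is reloaded exactly once even if it receives several
+ * queues. */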
+ /* Cannot create pmd threads for an invalid numa node. */
+ ovs_assert(ovs_numa_numa_id_is_valid(numa_id));
+
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ pmd = dp_netdev_less_loaded_pmd_on_numa(dp, numa_id);
+ if (!pmd) {
+ /* There are no pmd threads on this numa node. */
+ dp_netdev_set_pmds_on_numa(dp, numa_id);
+ /* That call also distributes the rx queues, so assignment is done. */
+ break;
}
+
+ ovs_mutex_lock(&pmd->poll_mutex);
+ dp_netdev_add_rxq_to_pmd(pmd, port, port->rxq[i]);
+ ovs_mutex_unlock(&pmd->poll_mutex);
+
+ hmapx_add(&to_reload, pmd);
}
+
+ HMAPX_FOR_EACH (node, &to_reload) {
+ pmd = (struct dp_netdev_pmd_thread *) node->data;
+ dp_netdev_reload_pmd__(pmd);
+ }
+
+ hmapx_destroy(&to_reload);
}
/* Checks the numa node id of 'netdev' and starts pmd threads for
 * that numa node. If pmd threads already exist on the numa node
 * 'netdev' is on, do nothing. Else, creates the
 * pmd threads for the numa node. */
if (!n_pmds) {
- int can_have, n_unpinned, i;
+ int can_have, n_unpinned, i, index = 0;
+ struct dp_netdev_pmd_thread **pmds;
+ struct dp_netdev_port *port;
n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
if (!n_unpinned) {
/* If cpu mask is specified, uses all unpinned cores, otherwise
* tries creating NR_PMD_THREADS pmd threads. */
can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
+ pmds = xzalloc(can_have * sizeof *pmds);
for (i = 0; i < can_have; i++) {
- struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
- int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
+ unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
+ pmds[i] = xzalloc(sizeof **pmds);
+ dp_netdev_configure_pmd(pmds[i], dp, i, core_id, numa_id);
+ }
- dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
- /* Each thread will distribute all devices rx-queues among
- * themselves. */
- pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
+ /* Distributes rx queues of this numa node between new pmd threads. */
+ CMAP_FOR_EACH (port, node, &dp->ports) {
+ if (netdev_is_pmd(port->netdev)
+ && netdev_get_numa_id(port->netdev) == numa_id) {
+ for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ /* Make thread-safety analyser happy. */
+ ovs_mutex_lock(&pmds[index]->poll_mutex);
+ dp_netdev_add_rxq_to_pmd(pmds[index], port, port->rxq[i]);
+ ovs_mutex_unlock(&pmds[index]->poll_mutex);
+ index = (index + 1) % can_have;
+ }
+ }
+ }
+
+ /* Actual start of pmd threads. */
+ for (i = 0; i < can_have; i++) {
+ pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]);
}
+ free(pmds);
VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
}
}
struct ofpbuf *actions, struct ofpbuf *put_actions)
{
struct dp_netdev *dp = pmd->dp;
+ struct flow_tnl orig_tunnel;
+ int err;
if (OVS_UNLIKELY(!dp->upcall_cb)) {
return ENODEV;
}
+ /* Upcall processing expects the Geneve options to be in the translated
+ * format but we need to retain the raw format for datapath use. */
+ orig_tunnel.flags = flow->tunnel.flags;
+ if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
+ orig_tunnel.metadata.present.len = flow->tunnel.metadata.present.len;
+ memcpy(orig_tunnel.metadata.opts.gnv, flow->tunnel.metadata.opts.gnv,
+ flow->tunnel.metadata.present.len);
+ err = tun_metadata_from_geneve_udpif(&orig_tunnel, &orig_tunnel,
+ &flow->tunnel);
+ if (err) {
+ return err;
+ }
+ }
+
if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet_str;
struct ofpbuf key;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = flow,
+ .mask = &wc->masks,
+ .odp_in_port = flow->in_port.odp_port,
+ .support = dp_netdev_support,
+ };
ofpbuf_init(&key, 0);
- odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
- true);
+ odp_flow_key_from_flow(&odp_parms, &key);
packet_str = ofp_packet_to_string(dp_packet_data(packet_),
dp_packet_size(packet_));
ds_destroy(&ds);
}
- return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
- actions, wc, put_actions, dp->upcall_aux);
+ err = dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
+ actions, wc, put_actions, dp->upcall_aux);
+ if (err && err != ENOSPC) {
+ return err;
+ }
+
+ /* Translate tunnel metadata masks to datapath format. */
+ if (wc) {
+ if (wc->masks.tunnel.metadata.present.map) {
+ struct geneve_opt opts[TLV_TOT_OPT_SIZE /
+ sizeof(struct geneve_opt)];
+
+ if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) {
+ tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
+ &wc->masks.tunnel,
+ orig_tunnel.metadata.opts.gnv,
+ orig_tunnel.metadata.present.len,
+ opts);
+ } else {
+ orig_tunnel.metadata.present.len = 0;
+ }
+
+ memset(&wc->masks.tunnel.metadata, 0,
+ sizeof wc->masks.tunnel.metadata);
+ memcpy(&wc->masks.tunnel.metadata.opts.gnv, opts,
+ orig_tunnel.metadata.present.len);
+ }
+ wc->masks.tunnel.metadata.present.len = 0xff;
+ }
+
+ /* Restore tunnel metadata. We need to use the saved options to ensure
+ * that any unknown options are not lost. The generated mask will have
+ * the same structure, matching on types and lengths but wildcarding
+ * option data we don't care about. */
+ if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) {
+ memcpy(&flow->tunnel.metadata.opts.gnv, orig_tunnel.metadata.opts.gnv,
+ orig_tunnel.metadata.present.len);
+ flow->tunnel.metadata.present.len = orig_tunnel.metadata.present.len;
+ flow->tunnel.flags |= FLOW_TNL_F_UDPIF;
+ }
+
+ return err;
}
static inline uint32_t
-dpif_netdev_packet_get_dp_hash(struct dp_packet *packet,
- const struct miniflow *mf)
+dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
+ const struct miniflow *mf)
{
- uint32_t hash;
+ uint32_t hash, recirc_depth;
- hash = dp_packet_get_dp_hash(packet);
- if (OVS_UNLIKELY(!hash)) {
+ if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
+ hash = dp_packet_get_rss_hash(packet);
+ } else {
hash = miniflow_hash_5tuple(mf, 0);
- dp_packet_set_dp_hash(packet, hash);
+ dp_packet_set_rss_hash(packet, hash);
+ }
+
+ /* The RSS hash must account for the recirculation depth to avoid
+ * collisions in the exact match cache. */
+ recirc_depth = *recirc_depth_get_unsafe();
+ if (OVS_UNLIKELY(recirc_depth)) {
+ hash = hash_finish(hash, recirc_depth);
+ dp_packet_set_rss_hash(packet, hash);
}
return hash;
}
struct dp_netdev_flow *flow;
- struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *packets[NETDEV_MAX_BURST];
};
static inline void
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
{
- batch->flow = flow;
+ flow->batch = batch;
+ batch->flow = flow;
batch->packet_count = 0;
batch->byte_count = 0;
batch->tcp_flags = 0;
static inline void
packet_batch_execute(struct packet_batch *batch,
struct dp_netdev_pmd_thread *pmd,
- enum dp_stat_type hit_type,
long long now)
{
struct dp_netdev_actions *actions;
struct dp_netdev_flow *flow = batch->flow;
- dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
+ dp_netdev_flow_used(flow, batch->packet_count, batch->byte_count,
batch->tcp_flags, now);
actions = dp_netdev_flow_get_actions(flow);
dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
actions->actions, actions->size);
-
- dp_netdev_count_packet(pmd, hit_type, batch->packet_count);
}
-static inline bool
+static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
struct dp_netdev_flow *flow, const struct miniflow *mf,
- struct packet_batch *batches, size_t *n_batches,
- size_t max_batches)
-{
- struct packet_batch *batch = NULL;
- int j;
-
- if (OVS_UNLIKELY(!flow)) {
- return false;
- }
- /* XXX: This O(n^2) algortihm makes sense if we're operating under the
- * assumption that the number of distinct flows (and therefore the
- * number of distinct batches) is quite small. If this turns out not
- * to be the case, it may make sense to pre sort based on the
- * netdev_flow pointer. That done we can get the appropriate batching
- * in O(n * log(n)) instead. */
- for (j = *n_batches - 1; j >= 0; j--) {
- if (batches[j].flow == flow) {
- batch = &batches[j];
- packet_batch_update(batch, pkt, mf);
- return true;
- }
- }
- if (OVS_UNLIKELY(*n_batches >= max_batches)) {
- return false;
+ struct packet_batch *batches, size_t *n_batches)
+{
+ struct packet_batch *batch = flow->batch;
+
+ if (OVS_UNLIKELY(!batch)) {
+ batch = &batches[(*n_batches)++];
+ packet_batch_init(batch, flow);
}
- batch = &batches[(*n_batches)++];
- packet_batch_init(batch, flow);
packet_batch_update(batch, pkt, mf);
- return true;
-}
-
-static inline void
-dp_packet_swap(struct dp_packet **a, struct dp_packet **b)
-{
- struct dp_packet *tmp = *a;
- *a = *b;
- *b = tmp;
}
/* Try to process all ('cnt') the 'packets' using only the exact match cache
- * 'flow_cache'. If a flow is not found for a packet 'packets[i]', or if there
- * is no matching batch for a packet's flow, the miniflow is copied into 'keys'
- * and the packet pointer is moved at the beginning of the 'packets' array.
+ * 'pmd->flow_cache'. If a flow is not found for a packet 'packets[i]', the
+ * miniflow is copied into 'keys' and the packet pointer is moved at the
+ * beginning of the 'packets' array.
*
* The function returns the number of packets that needs to be processed in the
* 'packets' array (they have been moved to the beginning of the vector).
+ *
+ * If 'md_is_valid' is false, the metadata in 'packets' is not valid and must be
+ * initialized by this function using 'port_no'.
*/
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets,
- size_t cnt, struct netdev_flow_key *keys, long long now)
+ size_t cnt, struct netdev_flow_key *keys,
+ struct packet_batch batches[], size_t *n_batches,
+ bool md_is_valid, odp_port_t port_no)
{
- struct netdev_flow_key key;
- struct packet_batch batches[4];
struct emc_cache *flow_cache = &pmd->flow_cache;
- size_t n_batches, i;
- size_t notfound_cnt = 0;
+ struct netdev_flow_key *key = &keys[0];
+ size_t i, n_missed = 0, n_dropped = 0;
- n_batches = 0;
- miniflow_initialize(&key.mf, key.buf);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *flow;
+ struct dp_packet *packet = packets[i];
- if (OVS_UNLIKELY(dp_packet_size(packets[i]) < ETH_HEADER_LEN)) {
- dp_packet_delete(packets[i]);
+ if (OVS_UNLIKELY(dp_packet_size(packet) < ETH_HEADER_LEN)) {
+ dp_packet_delete(packet);
+ n_dropped++;
continue;
}
- miniflow_extract(packets[i], &key.mf);
- key.len = 0; /* Not computed yet. */
- key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf);
-
- flow = emc_lookup(flow_cache, &key);
- if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf,
- batches, &n_batches,
- ARRAY_SIZE(batches)))) {
- if (i != notfound_cnt) {
- dp_packet_swap(&packets[i], &packets[notfound_cnt]);
- }
+ if (i != cnt - 1) {
+ /* Prefetch next packet data and metadata. */
+ OVS_PREFETCH(dp_packet_data(packets[i+1]));
+ pkt_metadata_prefetch_init(&packets[i+1]->md);
+ }
- keys[notfound_cnt++] = key;
+ if (!md_is_valid) {
+ pkt_metadata_init(&packet->md, port_no);
+ }
+ miniflow_extract(packet, &key->mf);
+ key->len = 0; /* Not computed yet. */
+ key->hash = dpif_netdev_packet_get_rss_hash(packet, &key->mf);
+
+ flow = emc_lookup(flow_cache, key);
+ if (OVS_LIKELY(flow)) {
+ dp_netdev_queue_batches(packet, flow, &key->mf, batches,
+ n_batches);
+ } else {
+ /* Exact match cache missed. Group missed packets together at
+ * the beginning of the 'packets' array. */
+ packets[n_missed] = packet;
+ /* 'keys[n_missed]' contains the key of the current packet and it
+ * must be returned to the caller. The next key should be extracted
+ * to 'keys[n_missed + 1]'. */
+ key = &keys[++n_missed];
}
}
- for (i = 0; i < n_batches; i++) {
- packet_batch_execute(&batches[i], pmd, DP_STAT_EXACT_HIT, now);
- }
+ dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - n_dropped - n_missed);
- return notfound_cnt;
+ return n_missed;
}
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
struct dp_packet **packets, size_t cnt,
- struct netdev_flow_key *keys, long long now)
+ struct netdev_flow_key *keys,
+ struct packet_batch batches[], size_t *n_batches)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
#else
/* Sparse or MSVC doesn't like variable length array. */
- enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
- struct packet_batch batches[PKT_ARRAY_SIZE];
struct dpcls_rule *rules[PKT_ARRAY_SIZE];
struct dp_netdev *dp = pmd->dp;
struct emc_cache *flow_cache = &pmd->flow_cache;
- size_t n_batches, i;
+ int miss_cnt = 0, lost_cnt = 0;
bool any_miss;
+ size_t i;
for (i = 0; i < cnt; i++) {
/* Key length is needed in all the cases, hash computed on demand. */
- keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
+ keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
}
any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
struct ofpbuf actions, put_actions;
- int miss_cnt = 0, lost_cnt = 0;
ovs_u128 ufid;
ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
miss_cnt++;
+ match.tun_md.valid = false;
miniflow_expand(&keys[i].mf, &match.flow);
ofpbuf_clear(&actions);
continue;
}
+ /* The Netlink encoding of datapath flow keys cannot express
+ * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
+ * tag is interpreted as exact match on the fact that there is no
+ * VLAN. Unless we refactor a lot of code that translates between
+ * Netlink and struct flow representations, we have to do the same
+ * here. */
+ if (!match.wc.masks.vlan_tci) {
+ match.wc.masks.vlan_tci = htons(0xffff);
+ }
+
/* We can't allow the packet batching in the next loop to execute
* the actions. Otherwise, if there are any slow path actions,
* we'll send the packet up twice. */
ofpbuf_uninit(&actions);
ofpbuf_uninit(&put_actions);
fat_rwlock_unlock(&dp->upcall_rwlock);
- dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
} else if (OVS_UNLIKELY(any_miss)) {
- int dropped_cnt = 0;
-
for (i = 0; i < cnt; i++) {
if (OVS_UNLIKELY(!rules[i])) {
dp_packet_delete(packets[i]);
- dropped_cnt++;
+ lost_cnt++;
+ miss_cnt++;
}
}
-
- dp_netdev_count_packet(pmd, DP_STAT_MISS, dropped_cnt);
- dp_netdev_count_packet(pmd, DP_STAT_LOST, dropped_cnt);
}
- n_batches = 0;
for (i = 0; i < cnt; i++) {
struct dp_packet *packet = packets[i];
struct dp_netdev_flow *flow;
flow = dp_netdev_flow_cast(rules[i]);
emc_insert(flow_cache, &keys[i], flow);
- dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches,
- &n_batches, ARRAY_SIZE(batches));
+ dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
}
- for (i = 0; i < n_batches; i++) {
- packet_batch_execute(&batches[i], pmd, DP_STAT_MASKED_HIT, now);
- }
+ dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
+ dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
+ dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}
+/* Packets enter the datapath from a port (or from recirculation) here.
+ *
+ * For performance reasons a caller may choose not to initialize the metadata
+ * in 'packets': in this case 'md_is_valid' is false and this function needs to
+ * initialize it using 'port_no'. If the metadata in 'packets' is already
+ * valid, 'md_is_valid' must be true and 'port_no' will be ignored. */
static void
-dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
- struct dp_packet **packets, int cnt)
+dp_netdev_input__(struct dp_netdev_pmd_thread *pmd,
+ struct dp_packet **packets, int cnt,
+ bool md_is_valid, odp_port_t port_no)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
#else
/* Sparse or MSVC doesn't like variable length array. */
- enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
struct netdev_flow_key keys[PKT_ARRAY_SIZE];
+ struct packet_batch batches[PKT_ARRAY_SIZE];
long long now = time_msec();
- size_t newcnt;
+ size_t newcnt, n_batches, i;
- newcnt = emc_processing(pmd, packets, cnt, keys, now);
+ n_batches = 0;
+ newcnt = emc_processing(pmd, packets, cnt, keys, batches, &n_batches,
+ md_is_valid, port_no);
if (OVS_UNLIKELY(newcnt)) {
- fast_path_processing(pmd, packets, newcnt, keys, now);
+ fast_path_processing(pmd, packets, newcnt, keys, batches, &n_batches);
+ }
+
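+ /* All batches are reset before any is executed: packet_batch_execute()
+ * can recirculate and re-enter dp_netdev_input__(), and the recursive
+ * call must start fresh batches rather than append to ones already
+ * being sent. */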
+ for (i = 0; i < n_batches; i++) {
+ batches[i].flow->batch = NULL;
+ }
+
+ for (i = 0; i < n_batches; i++) {
+ packet_batch_execute(&batches[i], pmd, now);
}
}
+static void
+dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
+ struct dp_packet **packets, int cnt,
+ odp_port_t port_no)
+{
+ dp_netdev_input__(pmd, packets, cnt, false, port_no);
+}
+
+static void
+dp_netdev_recirculate(struct dp_netdev_pmd_thread *pmd,
+ struct dp_packet **packets, int cnt)
+{
+ dp_netdev_input__(pmd, packets, cnt, true, 0);
+}
+
struct dp_netdev_execute_aux {
struct dp_netdev_pmd_thread *pmd;
};
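+/* Registers 'cb' as the callback invoked from dp_netdev_del_pmd() so that
+ * the client can purge (and collect stats of) a pmd's flows before the pmd
+ * is destroyed. */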
+static void
+dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
+ void *aux)
+{
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+ dp->dp_purge_aux = aux;
+ dp->dp_purge_cb = cb;
+}
+
static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
void *aux)
}
static void
-dp_netdev_drop_packets(struct dp_packet ** packets, int cnt, bool may_steal)
+dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
{
if (may_steal) {
int i;
}
static void
-dp_netdev_clone_pkt_batch(struct dp_packet **tnl_pkt,
- struct dp_packet **packets, int cnt)
+dp_netdev_clone_pkt_batch(struct dp_packet **dst_pkts,
+ struct dp_packet **src_pkts, int cnt)
{
int i;
for (i = 0; i < cnt; i++) {
- tnl_pkt[i] = dp_packet_clone(packets[i]);
+ dst_pkts[i] = dp_packet_clone(src_pkts[i]);
}
}
{
struct dp_netdev_execute_aux *aux = aux_;
uint32_t *depth = recirc_depth_get();
- struct dp_netdev_pmd_thread *pmd= aux->pmd;
- struct dp_netdev *dp= pmd->dp;
+ struct dp_netdev_pmd_thread *pmd = aux->pmd;
+ struct dp_netdev *dp = pmd->dp;
int type = nl_attr_type(a);
struct dp_netdev_port *p;
int i;
case OVS_ACTION_ATTR_OUTPUT:
p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
if (OVS_LIKELY(p)) {
- netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
+ int tx_qid;
+
+ atomic_read_relaxed(&pmd->tx_qid, &tx_qid);
+
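+ /* tx_qids are unique per thread (kept sequential across pmd
+ * deletion, see dp_netdev_del_pmds_on_numa()), so concurrent
+ * sends use distinct netdev tx queues without locking. */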
+ netdev_send(p->netdev, tx_qid, packets, cnt, may_steal);
return;
}
break;
case OVS_ACTION_ATTR_TUNNEL_PUSH:
if (*depth < MAX_RECIRC_DEPTH) {
- struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
int err;
if (!may_steal) {
err = push_tnl_action(dp, a, packets, cnt);
if (!err) {
(*depth)++;
- dp_netdev_input(pmd, packets, cnt);
+ dp_netdev_recirculate(pmd, packets, cnt);
(*depth)--;
} else {
dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
p = dp_netdev_lookup_port(dp, portno);
if (p) {
- struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
int err;
if (!may_steal) {
}
(*depth)++;
- dp_netdev_input(pmd, packets, cnt);
+ dp_netdev_recirculate(pmd, packets, cnt);
(*depth)--;
} else {
dp_netdev_drop_packets(tnl_pkt, cnt, !may_steal);
case OVS_ACTION_ATTR_RECIRC:
if (*depth < MAX_RECIRC_DEPTH) {
+ struct dp_packet *recirc_pkts[NETDEV_MAX_BURST];
- (*depth)++;
- for (i = 0; i < cnt; i++) {
- struct dp_packet *recirc_pkt;
-
- recirc_pkt = (may_steal) ? packets[i]
- : dp_packet_clone(packets[i]);
-
- recirc_pkt->md.recirc_id = nl_attr_get_u32(a);
-
- /* Hash is private to each packet */
- recirc_pkt->md.dp_hash = dp_packet_get_dp_hash(packets[i]);
+ if (!may_steal) {
+ dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt);
+ packets = recirc_pkts;
+ }
- dp_netdev_input(pmd, &recirc_pkt, 1);
+ for (i = 0; i < cnt; i++) {
+ packets[i]->md.recirc_id = nl_attr_get_u32(a);
}
+
+ (*depth)++;
+ dp_netdev_recirculate(pmd, packets, cnt);
(*depth)--;
return;
VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
break;
+ case OVS_ACTION_ATTR_CT:
+ /* If a flow with this action is slow-pathed, datapath assistance is
+ * required to implement it. However, we don't support this action
+ * in the userspace datapath. */
+ VLOG_WARN("Cannot execute conntrack action in userspace.");
+ break;
+
case OVS_ACTION_ATTR_PUSH_VLAN:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_PUSH_MPLS:
NULL, /* recv */
NULL, /* recv_wait */
NULL, /* recv_purge */
+ dpif_netdev_register_dp_purge_cb,
dpif_netdev_register_upcall_cb,
dpif_netdev_enable_upcall,
dpif_netdev_disable_upcall,
dpif_netdev_get_datapath_version,
+ NULL, /* ct_dump_start */
+ NULL, /* ct_dump_next */
+ NULL, /* ct_dump_done */
+ NULL, /* ct_flush */
};
static void
dp_register_provider(class);
}
+static void
+dpif_dummy_override(const char *type)
+{
+ int error;
+
+ /*
+ * Ignore EAFNOSUPPORT to allow --enable-dummy=system with
+ * a userland-only build. It's useful for the testsuite.
+ */
+ error = dp_unregister_provider(type);
+ if (error == 0 || error == EAFNOSUPPORT) {
+ dpif_dummy_register__(type);
+ }
+}
+
void
-dpif_dummy_register(bool override)
+dpif_dummy_register(enum dummy_level level)
{
- if (override) {
+ if (level == DUMMY_OVERRIDE_ALL) {
struct sset types;
const char *type;
sset_init(&types);
dp_enumerate_types(&types);
SSET_FOR_EACH (type, &types) {
- if (!dp_unregister_provider(type)) {
- dpif_dummy_register__(type);
- }
+ dpif_dummy_override(type);
}
sset_destroy(&types);
+ } else if (level == DUMMY_OVERRIDE_SYSTEM) {
+ dpif_dummy_override("system");
}
dpif_dummy_register__("dummy");
struct dpcls_subtable *subtable;
CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
+ ovs_assert(cmap_count(&subtable->rules) == 0);
dpcls_destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
}
}
-/* Returns true if 'target' satisifies 'key' in 'mask', that is, if each 1-bit
- * in 'mask' the values in 'key' and 'target' are the same.
- *
- * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
+/* Returns true if 'target' satisfies 'key' in 'mask', that is, if each 1-bit
+ * in 'mask' the values in 'key' and 'target' are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target)
{
- const uint64_t *keyp = rule->flow.mf.inline_values;
- const uint64_t *maskp = rule->mask->mf.inline_values;
- uint64_t target_u64;
+ const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
+ const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
- if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
+ if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
return false;
}
}
#if !defined(__CHECKER__) && !defined(_WIN32)
const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
#else
- enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
+ enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
#endif
map_type maps[N_MAPS];
struct dpcls_subtable *subtable;
}
/* Compute hashes for the remaining keys. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
&subtable->mask);
}
/* Lookup. */
map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
/* Check results. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
struct dpcls_rule *rule;
CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
goto next;
}
}
- ULONG_SET0(map, i); /* Did not match. */
+ ULLONG_SET0(map, i); /* Did not match. */
next:
; /* Keep Sparse happy. */
}