#include <sys/stat.h>
#include <unistd.h>
+#include "bitmap.h"
#include "cmap.h"
#include "csum.h"
#include "dp-packet.h"
#include "fat-rwlock.h"
#include "flow.h"
#include "cmap.h"
+#include "coverage.h"
#include "latch.h"
#include "list.h"
#include "match.h"
-#include "meta-flow.h"
#include "netdev.h"
#include "netdev-dpdk.h"
#include "netdev-vport.h"
#include "sset.h"
#include "timeval.h"
#include "tnl-arp-cache.h"
+#include "tnl-ports.h"
#include "unixctl.h"
#include "util.h"
#include "openvswitch/vlog.h"
uint32_t hash; /* Hash function differs for different users. */
uint32_t len; /* Length of the following miniflow (incl. map). */
struct miniflow mf;
- uint64_t buf[FLOW_MAX_PACKET_U64S - MINI_N_INLINE];
+ uint64_t buf[FLOW_MAX_PACKET_U64S];
};
/* Exact match cache for frequently used flows
upcall_callback *upcall_cb; /* Callback function for executing upcalls. */
void *upcall_aux;
+ /* Callback function for notifying the purging of dp flows (during
+ * pmd thread deletion). */
+ dp_purge_callback *dp_purge_cb;
+ void *dp_purge_aux;
+
/* Stores all 'struct dp_netdev_pmd_thread's. */
struct cmap poll_threads;
{
int i;
- BUILD_ASSERT(offsetof(struct miniflow, inline_values) == sizeof(uint64_t));
-
flow_cache->sweep_idx = 0;
for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
flow_cache->entries[i].flow = NULL;
flow_cache->entries[i].key.hash = 0;
- flow_cache->entries[i].key.len
- = offsetof(struct miniflow, inline_values);
- miniflow_initialize(&flow_cache->entries[i].key.mf,
- flow_cache->entries[i].key.buf);
+ flow_cache->entries[i].key.len = sizeof(struct miniflow);
+ flowmap_init(&flow_cache->entries[i].key.mf.map);
}
}
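/* For context, a sketch (not part of this patch) of how an initialized cache
 * is probed; it mirrors emc_lookup() elsewhere in this file and assumes its
 * helpers: EMC_FOR_EACH_POS_WITH_HASH, emc_entry_alive() and
 * netdev_flow_key_equal(). */
static inline struct dp_netdev_flow *
emc_lookup_sketch(struct emc_cache *cache, const struct netdev_flow_key *key)
{
    struct emc_entry *current_entry;

    EMC_FOR_EACH_POS_WITH_HASH (cache, current_entry, key->hash) {
        if (current_entry->key.hash == key->hash
            && emc_entry_alive(current_entry)
            && netdev_flow_key_equal(&current_entry->key, key)) {
            return current_entry->flow; /* Hit. */
        }
    }
    return NULL; /* Miss: fall back to the dpcls (megaflow) lookup. */
}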
struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
dpcls_remove(&pmd->cls, &flow->cr);
+ flow->cr.mask = NULL; /* Accessing rule's mask after this is not safe. */
+
cmap_remove(&pmd->flow_table, node, dp_netdev_flow_hash(&flow->ufid));
flow->dead = true;
* miniflow_extract(), if the map is different the miniflow is different.
* Therefore we can be faster by comparing the map and the miniflow in a
* single memcmp().
- * _ netdev_flow_key's miniflow has always inline values.
- * - These functions can be inlined by the compiler.
- *
- * The following assertions make sure that what we're doing with miniflow is
- * safe
- */
-BUILD_ASSERT_DECL(offsetof(struct miniflow, inline_values)
- == sizeof(uint64_t));
+ * - These functions can be inlined by the compiler. */
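/* A minimal sketch (not part of this patch) of the single-memcmp comparison
 * described above, assuming 'len' was computed by netdev_flow_key_size() so
 * that it covers the maps and the packed values together. */
static inline bool
netdev_flow_key_equal_sketch(const struct netdev_flow_key *a,
                             const struct netdev_flow_key *b)
{
    /* The maps immediately precede the values, so one memcmp checks both. */
    return a->hash == b->hash && !memcmp(&a->mf, &b->mf, b->len);
}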
-/* Given the number of bits set in the miniflow map, returns the size of the
+/* Given the number of bits set in miniflow's maps, returns the size of the
 * 'netdev_flow_key.mf'. */
-static inline uint32_t
-netdev_flow_key_size(uint32_t flow_u32s)
+static inline size_t
+netdev_flow_key_size(size_t flow_u64s)
{
- return offsetof(struct miniflow, inline_values) +
- MINIFLOW_VALUES_SIZE(flow_u32s);
+ return sizeof(struct miniflow) + MINIFLOW_VALUES_SIZE(flow_u64s);
}
static inline bool
struct dp_packet packet;
uint64_t buf_stub[512 / 8];
- miniflow_initialize(&dst->mf, dst->buf);
-
dp_packet_use_stub(&packet, buf_stub, sizeof buf_stub);
pkt_metadata_from_flow(&packet.md, src);
flow_compose(&packet, src);
miniflow_extract(&packet, &dst->mf);
dp_packet_uninit(&packet);
- dst->len = netdev_flow_key_size(count_1bits(dst->mf.map));
+ dst->len = netdev_flow_key_size(miniflow_n_values(&dst->mf));
dst->hash = 0; /* Not computed yet. */
}
netdev_flow_mask_init(struct netdev_flow_key *mask,
const struct match *match)
{
- const uint64_t *mask_u64 = (const uint64_t *) &match->wc.masks;
- uint64_t *dst = mask->mf.inline_values;
- uint64_t map, mask_map = 0;
+ uint64_t *dst = miniflow_values(&mask->mf);
+ struct flowmap fmap;
uint32_t hash = 0;
- int n;
+ size_t idx;
/* Only check masks that make sense for the flow. */
- map = flow_wc_map(&match->flow);
+ flow_wc_map(&match->flow, &fmap);
+ flowmap_init(&mask->mf.map);
- while (map) {
- uint64_t rm1bit = rightmost_1bit(map);
- int i = raw_ctz(map);
+ FLOWMAP_FOR_EACH_INDEX(idx, fmap) {
+ uint64_t mask_u64 = flow_u64_value(&match->wc.masks, idx);
- if (mask_u64[i]) {
- mask_map |= rm1bit;
- *dst++ = mask_u64[i];
- hash = hash_add64(hash, mask_u64[i]);
+ if (mask_u64) {
+ flowmap_set(&mask->mf.map, idx, 1);
+ *dst++ = mask_u64;
+ hash = hash_add64(hash, mask_u64);
}
- map -= rm1bit;
}
- mask->mf.values_inline = true;
- mask->mf.map = mask_map;
+ map_t map;
- hash = hash_add64(hash, mask_map);
+ FLOWMAP_FOR_EACH_MAP (map, mask->mf.map) {
+ hash = hash_add64(hash, map);
+ }
- n = dst - mask->mf.inline_values;
+ size_t n = dst - miniflow_get_values(&mask->mf);
mask->hash = hash_finish(hash, n * 8);
mask->len = netdev_flow_key_size(n);
}
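/* Usage sketch (illustrative, not part of this patch): build the compressed
 * mask key for a match that only cares about the datapath in_port. */
static void
mask_init_example(void)
{
    struct match match;
    struct netdev_flow_key mask;

    memset(&match, 0, sizeof match);
    match.flow.in_port.odp_port = u32_to_odp(1);
    match.wc.masks.in_port.odp_port = u32_to_odp(UINT32_MAX);
    netdev_flow_mask_init(&mask, &match);
    /* 'mask.mf' now packs only the nonzero mask u64s; 'mask.len' and
     * 'mask.hash' are set to match. */
}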
-/* Initializes 'dst' as a copy of 'src' masked with 'mask'. */
+/* Initializes 'dst' as a copy of 'flow' masked with 'mask'. */
static inline void
netdev_flow_key_init_masked(struct netdev_flow_key *dst,
const struct flow *flow,
const struct netdev_flow_key *mask)
{
- uint64_t *dst_u64 = dst->mf.inline_values;
- const uint64_t *mask_u64 = mask->mf.inline_values;
+ uint64_t *dst_u64 = miniflow_values(&dst->mf);
+ const uint64_t *mask_u64 = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
uint64_t value;
dst->len = mask->len;
- dst->mf.values_inline = true;
- dst->mf.map = mask->mf.map;
+ dst->mf = mask->mf; /* Copy maps. */
- FLOW_FOR_EACH_IN_MAP(value, flow, mask->mf.map) {
+ FLOW_FOR_EACH_IN_MAPS(value, flow, mask->mf.map) {
*dst_u64 = value & *mask_u64++;
hash = hash_add64(hash, *dst_u64++);
}
- dst->hash = hash_finish(hash, (dst_u64 - dst->mf.inline_values) * 8);
+ dst->hash = hash_finish(hash,
+ (dst_u64 - miniflow_get_values(&dst->mf)) * 8);
}
-/* Iterate through all netdev_flow_key u64 values specified by 'MAP' */
-#define NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(VALUE, KEY, MAP) \
- for (struct mf_for_each_in_map_aux aux__ \
- = { (KEY)->mf.inline_values, (KEY)->mf.map, MAP }; \
- mf_get_next_in_map(&aux__, &(VALUE)); \
- )
+/* Iterate through the netdev_flow_key u64 values specified by 'FLOWMAP'. */
+#define NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(VALUE, KEY, FLOWMAP) \
+ MINIFLOW_FOR_EACH_IN_FLOWMAP(VALUE, &(KEY)->mf, FLOWMAP)
/* Returns a hash value for the bits of 'key' where there are 1-bits in
* 'mask'. */
netdev_flow_key_hash_in_mask(const struct netdev_flow_key *key,
const struct netdev_flow_key *mask)
{
- const uint64_t *p = mask->mf.inline_values;
+ const uint64_t *p = miniflow_get_values(&mask->mf);
uint32_t hash = 0;
- uint64_t key_u64;
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(key_u64, key, mask->mf.map) {
- hash = hash_add64(hash, key_u64 & *p++);
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, key, mask->mf.map) {
+ hash = hash_add64(hash, value & *p++);
}
- return hash_finish(hash, (p - mask->mf.inline_values) * 8);
+ return hash_finish(hash, (p - miniflow_get_values(&mask->mf)) * 8);
}
static inline bool
dpif_netdev_mask_from_nlattrs(const struct nlattr *key, uint32_t key_len,
const struct nlattr *mask_key,
uint32_t mask_key_len, const struct flow *flow,
- struct flow *mask)
+ struct flow_wildcards *wc)
{
if (mask_key_len) {
enum odp_key_fitness fitness;
- fitness = odp_flow_key_to_mask(mask_key, mask_key_len, key, key_len,
- mask, flow);
+ fitness = odp_flow_key_to_mask_udpif(mask_key, mask_key_len, key,
+ key_len, &wc->masks, flow);
if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
return EINVAL;
}
} else {
- enum mf_field_id id;
- /* No mask key, unwildcard everything except fields whose
- * prerequisities are not met. */
- memset(mask, 0x0, sizeof *mask);
-
- for (id = 0; id < MFF_N_IDS; ++id) {
- /* Skip registers and metadata. */
- if (!(id >= MFF_REG0 && id < MFF_REG0 + FLOW_N_REGS)
- && id != MFF_METADATA) {
- const struct mf_field *mf = mf_from_id(id);
- if (mf_are_prereqs_ok(mf, flow)) {
- mf_mask_field(mf, mask);
- }
- }
- }
+ flow_wildcards_init_for_packet(wc, flow);
}
- /* Force unwildcard the in_port.
- *
- * We need to do this even in the case where we unwildcard "everything"
- * above because "everything" only includes the 16-bit OpenFlow port number
- * mask->in_port.ofp_port, which only covers half of the 32-bit datapath
- * port number mask->in_port.odp_port. */
- mask->in_port.odp_port = u32_to_odp(UINT32_MAX);
-
return 0;
}
{
odp_port_t in_port;
- if (odp_flow_key_to_flow(key, key_len, flow)) {
+ if (odp_flow_key_to_flow_udpif(key, key_len, flow)) {
/* This should not happen: it indicates that odp_flow_key_from_flow()
* and odp_flow_key_to_flow() disagree on the acceptable form of a
* flow. Log the problem as an error, with enough details to enable
return EINVAL;
}
+ /* Userspace datapath doesn't support conntrack. */
+ if (flow->ct_state || flow->ct_zone) {
+ return EINVAL;
+ }
+
return 0;
}
netdev_flow_mask_init(&mask, match);
/* Make sure the mask covers neither metadata nor registers. */
- ovs_assert(!(mask.mf.map & (MINIFLOW_MAP(metadata) | MINIFLOW_MAP(regs))));
+ ovs_assert(!FLOWMAP_HAS_FIELD(&mask.mf.map, metadata)
+ && !FLOWMAP_HAS_FIELD(&mask.mf.map, regs));
/* Do not allocate extra space. */
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
}
error = dpif_netdev_mask_from_nlattrs(put->key, put->key_len,
put->mask, put->mask_len,
- &match.flow, &match.wc.masks);
+ &match.flow, &match.wc);
if (error) {
return error;
}
dp_netdev_pmd_unref(non_pmd);
tnl_arp_cache_run();
+ tnl_port_map_run();
new_tnl_seq = seq_read(tnl_conf_seq);
if (dp->last_tnl_conf_seq != new_tnl_seq) {
lc = 0;
emc_cache_slow_sweep(&pmd->flow_cache);
+ coverage_try_clear();
ovsrcu_quiesce();
atomic_read_relaxed(&pmd->change_seq, &seq);
/* Stops the pmd thread, removes it from the 'dp->poll_threads',
* and unrefs the struct. */
static void
-dp_netdev_del_pmd(struct dp_netdev_pmd_thread *pmd)
+dp_netdev_del_pmd(struct dp_netdev *dp, struct dp_netdev_pmd_thread *pmd)
{
/* Uninit the 'flow_cache' since there is
* no actual thread to uninit it for NON_PMD_CORE_ID. */
ovs_numa_unpin_core(pmd->core_id);
xpthread_join(pmd->thread, NULL);
}
+ /* Purge the pmd's flows after stopping the thread, but before
+ * destroying the flows, so that the flow stats can be collected. */
+ if (dp->dp_purge_cb) {
+ dp->dp_purge_cb(dp->dp_purge_aux, pmd->core_id);
+ }
cmap_remove(&pmd->dp->poll_threads, &pmd->node, hash_int(pmd->core_id, 0));
dp_netdev_pmd_unref(pmd);
}
struct dp_netdev_pmd_thread *pmd;
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
- dp_netdev_del_pmd(pmd);
+ dp_netdev_del_pmd(dp, pmd);
}
}
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
if (pmd->numa_id == numa_id) {
- dp_netdev_del_pmd(pmd);
+ dp_netdev_del_pmd(dp, pmd);
}
}
}
* pmd threads for the numa node. */
if (!n_pmds) {
int can_have, n_unpinned, i;
+ struct dp_netdev_pmd_thread **pmds;
n_unpinned = ovs_numa_get_n_unpinned_cores_on_numa(numa_id);
if (!n_unpinned) {
/* If cpu mask is specified, uses all unpinned cores, otherwise
* tries creating NR_PMD_THREADS pmd threads. */
can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
+ pmds = xzalloc(can_have * sizeof *pmds);
for (i = 0; i < can_have; i++) {
- struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
-
- dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
+ pmds[i] = xzalloc(sizeof **pmds);
+ dp_netdev_configure_pmd(pmds[i], dp, i, core_id, numa_id);
+ }
+ /* The pmd thread code needs to see all the other configured pmd
+ * threads on the same numa node. That's why we call
+ * 'dp_netdev_configure_pmd()' on all the threads first and only then
+ * start them. */
+ for (i = 0; i < can_have; i++) {
/* The threads will distribute the devices' rx-queues among
* themselves. */
- pmd->thread = ovs_thread_create("pmd", pmd_thread_main, pmd);
+ pmds[i]->thread = ovs_thread_create("pmd", pmd_thread_main, pmds[i]);
}
+ free(pmds);
VLOG_INFO("Created %d pmd threads on numa node %d", can_have, numa_id);
}
}
struct ofpbuf *actions, struct ofpbuf *put_actions)
{
struct dp_netdev *dp = pmd->dp;
+ struct flow_tnl orig_tunnel;
+ int err;
if (OVS_UNLIKELY(!dp->upcall_cb)) {
return ENODEV;
}
+ /* Upcall processing expects the Geneve options to be in the translated
+ * format, but we need to retain the raw format for datapath use. */
+ orig_tunnel.flags = flow->tunnel.flags;
+ if (flow->tunnel.flags & FLOW_TNL_F_UDPIF) {
+ orig_tunnel.metadata.present.len = flow->tunnel.metadata.present.len;
+ memcpy(orig_tunnel.metadata.opts.gnv, flow->tunnel.metadata.opts.gnv,
+ flow->tunnel.metadata.present.len);
+ err = tun_metadata_from_geneve_udpif(&orig_tunnel, &orig_tunnel,
+ &flow->tunnel);
+ if (err) {
+ return err;
+ }
+ }
+
if (OVS_UNLIKELY(!VLOG_DROP_DBG(&upcall_rl))) {
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet_str;
ds_destroy(&ds);
}
- return dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
- actions, wc, put_actions, dp->upcall_aux);
+ err = dp->upcall_cb(packet_, flow, ufid, pmd->core_id, type, userdata,
+ actions, wc, put_actions, dp->upcall_aux);
+ if (err && err != ENOSPC) {
+ return err;
+ }
+
+ /* Translate tunnel metadata masks to datapath format. */
+ if (wc) {
+ if (wc->masks.tunnel.metadata.present.map) {
+ struct geneve_opt opts[GENEVE_TOT_OPT_SIZE /
+ sizeof(struct geneve_opt)];
+
+ tun_metadata_to_geneve_udpif_mask(&flow->tunnel,
+ &wc->masks.tunnel,
+ orig_tunnel.metadata.opts.gnv,
+ orig_tunnel.metadata.present.len,
+ opts);
+
+ memset(&wc->masks.tunnel.metadata, 0,
+ sizeof wc->masks.tunnel.metadata);
+ memcpy(&wc->masks.tunnel.metadata.opts.gnv, opts,
+ orig_tunnel.metadata.present.len);
+ }
+ wc->masks.tunnel.metadata.present.len = 0xff;
+ }
+
+ /* Restore tunnel metadata. We need to use the saved options to ensure
+ * that any unknown options are not lost. The generated mask will have
+ * the same structure, matching on types and lengths but wildcarding
+ * option data we don't care about. */
+ if (orig_tunnel.flags & FLOW_TNL_F_UDPIF) {
+ memcpy(&flow->tunnel.metadata.opts.gnv, orig_tunnel.metadata.opts.gnv,
+ orig_tunnel.metadata.present.len);
+ flow->tunnel.metadata.present.len = orig_tunnel.metadata.present.len;
+ flow->tunnel.flags |= FLOW_TNL_F_UDPIF;
+ }
+
+ return err;
}
static inline uint32_t
{
uint32_t hash, recirc_depth;
- hash = dp_packet_get_rss_hash(packet);
- if (OVS_UNLIKELY(!hash)) {
+ if (OVS_LIKELY(dp_packet_rss_valid(packet))) {
+ hash = dp_packet_get_rss_hash(packet);
+ } else {
hash = miniflow_hash_5tuple(mf, 0);
dp_packet_set_rss_hash(packet, hash);
}
struct netdev_flow_key key;
size_t i, notfound_cnt = 0;
- miniflow_initialize(&key.mf, key.buf);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *flow;
for (i = 0; i < cnt; i++) {
/* Key length is needed in all the cases, hash computed on demand. */
- keys[i].len = netdev_flow_key_size(count_1bits(keys[i].mf.map));
+ keys[i].len = netdev_flow_key_size(miniflow_n_values(&keys[i].mf));
}
any_miss = !dpcls_lookup(&pmd->cls, keys, rules, cnt);
if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
continue;
}
+ /* The Netlink encoding of datapath flow keys cannot express
+ * wildcarding the presence of a VLAN tag. Instead, a missing VLAN
+ * tag is interpreted as exact match on the fact that there is no
+ * VLAN. Unless we refactor a lot of code that translates between
+ * Netlink and struct flow representations, we have to do the same
+ * here. */
+ if (!match.wc.masks.vlan_tci) {
+ match.wc.masks.vlan_tci = htons(0xffff);
+ }
+
/* We can't allow the packet batching in the next loop to execute
* the actions. Otherwise, if there are any slow path actions,
* we'll send the packet up twice. */
struct dp_netdev_pmd_thread *pmd;
};
+static void
+dpif_netdev_register_dp_purge_cb(struct dpif *dpif, dp_purge_callback *cb,
+ void *aux)
+{
+ struct dp_netdev *dp = get_dp_netdev(dpif);
+ dp->dp_purge_aux = aux;
+ dp->dp_purge_cb = cb;
+}
+
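/* Usage sketch (hypothetical client code, not part of this patch): a dpif
 * user such as ofproto-dpif could register a purge callback so that flow
 * stats are collected before a pmd thread's flows are destroyed.
 * 'collect_flow_stats' and 'aux' are illustrative placeholders. */
static void
example_dp_purge_cb(void *aux, unsigned pmd_id)
{
    /* Gather stats for the flows owned by the pmd identified by 'pmd_id'
     * before dp_netdev_del_pmd() purges and destroys them. */
    collect_flow_stats(aux, pmd_id);
}

/* Registered once at setup time, e.g.:
 *     dpif_register_dp_purge_cb(dpif, example_dp_purge_cb, aux); */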
static void
dpif_netdev_register_upcall_cb(struct dpif *dpif, upcall_callback *cb,
void *aux)
VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
break;
+ case OVS_ACTION_ATTR_CT:
+ /* If a flow with this action is slow-pathed, datapath assistance is
+ * required to implement it. However, we don't support this action
+ * in the userspace datapath. */
+ VLOG_WARN("Cannot execute conntrack action in userspace.");
+ break;
+
case OVS_ACTION_ATTR_PUSH_VLAN:
case OVS_ACTION_ATTR_POP_VLAN:
case OVS_ACTION_ATTR_PUSH_MPLS:
NULL, /* recv */
NULL, /* recv_wait */
NULL, /* recv_purge */
+ dpif_netdev_register_dp_purge_cb,
dpif_netdev_register_upcall_cb,
dpif_netdev_enable_upcall,
dpif_netdev_disable_upcall,
struct dpcls_subtable *subtable;
CMAP_FOR_EACH (subtable, cmap_node, &cls->subtables_map) {
+ ovs_assert(cmap_count(&subtable->rules) == 0);
dpcls_destroy_subtable(cls, subtable);
}
cmap_destroy(&cls->subtables_map);
}
}
-/* Returns true if 'target' satisifies 'key' in 'mask', that is, if each 1-bit
- * in 'mask' the values in 'key' and 'target' are the same.
- *
- * Note: 'key' and 'mask' have the same mask, and 'key' is already masked. */
+/* Returns true if 'target' satisfies 'key' in 'mask', that is, if for each
+ * 1-bit in 'mask' the values in 'key' and 'target' are the same. */
static inline bool
dpcls_rule_matches_key(const struct dpcls_rule *rule,
const struct netdev_flow_key *target)
{
- const uint64_t *keyp = rule->flow.mf.inline_values;
- const uint64_t *maskp = rule->mask->mf.inline_values;
- uint64_t target_u64;
+ const uint64_t *keyp = miniflow_get_values(&rule->flow.mf);
+ const uint64_t *maskp = miniflow_get_values(&rule->mask->mf);
+ uint64_t value;
- NETDEV_FLOW_KEY_FOR_EACH_IN_MAP(target_u64, target, rule->flow.mf.map) {
- if (OVS_UNLIKELY((target_u64 & *maskp++) != *keyp++)) {
+ NETDEV_FLOW_KEY_FOR_EACH_IN_FLOWMAP(value, target, rule->flow.mf.map) {
+ if (OVS_UNLIKELY((value & *maskp++) != *keyp++)) {
return false;
}
}