* If dp_netdev_input is not called from a pmd thread, a mutex is used.
*/
-#define EM_FLOW_HASH_SHIFT 10
+#define EM_FLOW_HASH_SHIFT 13
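+/* A shift of 13 gives each pmd thread an exact match cache of
+ * 1u << 13 == 8192 entries. */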
#define EM_FLOW_HASH_ENTRIES (1u << EM_FLOW_HASH_SHIFT)
#define EM_FLOW_HASH_MASK (EM_FLOW_HASH_ENTRIES - 1)
#define EM_FLOW_HASH_SEGS 2
odp_port_t);
enum dp_stat_type {
- DP_STAT_HIT, /* Packets that matched in the flow table. */
+ DP_STAT_EXACT_HIT, /* Packets that had an exact match (emc). */
+ DP_STAT_MASKED_HIT, /* Packets that matched in the flow table. */
DP_STAT_MISS, /* Packets that did not match. */
DP_STAT_LOST, /* Packets not passed up to the client. */
DP_N_STATS
};
+enum pmd_cycles_counter_type {
+ PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */
+ PMD_CYCLES_PROCESSING, /* Cycles spent processing packets. */
+ PMD_N_CYCLES
+};
+
/* A port in a netdev-based datapath. */
struct dp_netdev_port {
- struct cmap_node node; /* Node in dp_netdev's 'ports'. */
odp_port_t port_no;
struct netdev *netdev;
+ struct cmap_node node; /* Node in dp_netdev's 'ports'. */
struct netdev_saved_flags *sf;
struct netdev_rxq **rxq;
struct ovs_refcount ref_cnt;
/* Contained by struct dp_netdev_flow's 'stats' member. */
struct dp_netdev_flow_stats {
- long long int used; /* Last used time, in monotonic msecs. */
- long long int packet_count; /* Number of packets matched. */
- long long int byte_count; /* Number of bytes matched. */
- uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
+ atomic_llong used; /* Last used time, in monotonic msecs. */
+ atomic_ullong packet_count; /* Number of packets matched. */
+ atomic_ullong byte_count; /* Number of bytes matched. */
+ atomic_uint16_t tcp_flags; /* Bitwise-OR of seen tcp_flags values. */
};
/* A flow in 'dp_netdev_pmd_thread's 'flow_table'.
* requires synchronization, as noted in more detail below.
*/
struct dp_netdev_flow {
- bool dead;
-
+ const struct flow flow; /* Unmasked flow that created this entry. */
/* Hash table index by unmasked flow. */
const struct cmap_node node; /* In owning dp_netdev_pmd_thread's */
/* 'flow_table'. */
const ovs_u128 ufid; /* Unique flow identifier. */
- const struct flow flow; /* Unmasked flow that created this entry. */
- const int pmd_id; /* The 'core_id' of pmd thread owning this */
+ const unsigned pmd_id; /* The 'core_id' of pmd thread owning this */
/* flow. */
/* Number of references.
* reference. */
struct ovs_refcount ref_cnt;
+ bool dead;
+
/* Statistics. */
struct dp_netdev_flow_stats stats;
/* Actions. */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
+ /* While processing a group of input packets, the datapath uses the next
+ * member to store a pointer to the output batch for the flow. It is
+ * reset after the batch has been sent out (see dp_netdev_queue_batches(),
+ * packet_batch_init() and packet_batch_execute()). */
+ struct packet_batch *batch;
+
/* Packet classification. */
struct dpcls_rule cr; /* In owning dp_netdev's 'cls'. */
/* 'cr' must be the last member. */
struct dp_netdev_actions {
/* These members are immutable: they do not change during the struct's
* lifetime. */
- struct nlattr *actions; /* Sequence of OVS_ACTION_ATTR_* attributes. */
unsigned int size; /* Size of 'actions', in bytes. */
+ struct nlattr actions[]; /* Sequence of OVS_ACTION_ATTR_* attributes. */
};
struct dp_netdev_actions *dp_netdev_actions_create(const struct nlattr *,
/* Contained by struct dp_netdev_pmd_thread's 'stats' member. */
struct dp_netdev_pmd_stats {
/* Indexed by DP_STAT_*. */
- unsigned long long int n[DP_N_STATS];
+ atomic_ullong n[DP_N_STATS];
+};
+
+/* Contained by struct dp_netdev_pmd_thread's 'cycle' member. */
+struct dp_netdev_pmd_cycles {
+ /* Indexed by PMD_CYCLES_*. */
+ atomic_ullong n[PMD_N_CYCLES];
};
/* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
/* Statistics. */
struct dp_netdev_pmd_stats stats;
+ /* Cycles counters. */
+ struct dp_netdev_pmd_cycles cycles;
+
+ /* Used to count cycles. See 'cycles_count_end()'. */
+ unsigned long long last_cycles;
+
struct latch exit_latch; /* For terminating the pmd thread. */
atomic_uint change_seq; /* For reloading pmd ports. */
pthread_t thread;
int index; /* Idx of this pmd thread among pmd*/
/* threads on same numa node. */
- int core_id; /* CPU core id of this pmd thread. */
+ unsigned core_id; /* CPU core id of this pmd thread. */
int numa_id; /* numa node id of this pmd thread. */
+ int tx_qid; /* Queue id used by this pmd thread to
+ * send packets on all netdevs. */
+
+ /* Only a pmd thread can write on its own 'cycles' and 'stats'.
+ * The main thread keeps 'stats_zero' and 'cycles_zero' as base
+ * values and subtracts them from 'stats' and 'cycles' before
+ * reporting to the user. */
+ unsigned long long stats_zero[DP_N_STATS];
+ uint64_t cycles_zero[PMD_N_CYCLES];
};
#define PMD_INITIAL_SEQ 1
void dp_netdev_pmd_reload_done(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev *dp, int index,
- int core_id, int numa_id);
+ unsigned core_id, int numa_id);
static void dp_netdev_destroy_pmd(struct dp_netdev_pmd_thread *pmd);
static void dp_netdev_set_nonpmd(struct dp_netdev *dp);
static struct dp_netdev_pmd_thread *dp_netdev_get_pmd(struct dp_netdev *dp,
- int core_id);
+ unsigned core_id);
static struct dp_netdev_pmd_thread *
dp_netdev_pmd_get_next(struct dp_netdev *dp, struct cmap_position *pos);
static void dp_netdev_destroy_all_pmds(struct dp_netdev *dp);
flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
+/* Returns true if 'dpif' is a netdev or dummy dpif, false otherwise. */
+bool
+dpif_is_netdev(const struct dpif *dpif)
+{
+ return dpif->dpif_class->open == dpif_netdev_open;
+}
+
static struct dpif_netdev *
dpif_netdev_cast(const struct dpif *dpif)
{
- ovs_assert(dpif->dpif_class->open == dpif_netdev_open);
+ ovs_assert(dpif_is_netdev(dpif));
return CONTAINER_OF(dpif, struct dpif_netdev, dpif);
}
{
return dpif_netdev_cast(dpif)->dp;
}
+\f
+enum pmd_info_type {
+ PMD_INFO_SHOW_STATS, /* Show how CPU cycles are spent. */
+ PMD_INFO_CLEAR_STATS /* Set the cycles count to 0. */
+};
+
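+/* Appends to 'reply' a summary of the given pmd thread's 'stats' and
+ * 'cycles' counters, after subtracting the '*_zero' baselines saved by
+ * the last "pmd-stats-clear". */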
+static void
+pmd_info_show_stats(struct ds *reply,
+ struct dp_netdev_pmd_thread *pmd,
+ unsigned long long stats[DP_N_STATS],
+ uint64_t cycles[PMD_N_CYCLES])
+{
+ unsigned long long total_packets = 0;
+ uint64_t total_cycles = 0;
+ int i;
+
+ /* These loops subtract the reference values ('*_zero') from the counters.
+ * Since loads and stores are relaxed, it might be possible for a '*_zero'
+ * value to be more recent than the current value we're reading from the
+ * counter. This is not a big problem, since these numbers are not
+ * supposed to be too accurate, but we should at least make sure that
+ * the result is not negative. */
+ for (i = 0; i < DP_N_STATS; i++) {
+ if (stats[i] > pmd->stats_zero[i]) {
+ stats[i] -= pmd->stats_zero[i];
+ } else {
+ stats[i] = 0;
+ }
+
+ if (i != DP_STAT_LOST) {
+ /* Lost packets are already included in DP_STAT_MISS. */
+ total_packets += stats[i];
+ }
+ }
+
+ for (i = 0; i < PMD_N_CYCLES; i++) {
+ if (cycles[i] > pmd->cycles_zero[i]) {
+ cycles[i] -= pmd->cycles_zero[i];
+ } else {
+ cycles[i] = 0;
+ }
+
+ total_cycles += cycles[i];
+ }
+
+ ds_put_cstr(reply, (pmd->core_id == NON_PMD_CORE_ID)
+ ? "main thread" : "pmd thread");
+
+ if (pmd->numa_id != OVS_NUMA_UNSPEC) {
+ ds_put_format(reply, " numa_id %d", pmd->numa_id);
+ }
+ if (pmd->core_id != OVS_CORE_UNSPEC && pmd->core_id != NON_PMD_CORE_ID) {
+ ds_put_format(reply, " core_id %u", pmd->core_id);
+ }
+ ds_put_cstr(reply, ":\n");
+
+ ds_put_format(reply,
+ "\temc hits:%llu\n\tmegaflow hits:%llu\n"
+ "\tmiss:%llu\n\tlost:%llu\n",
+ stats[DP_STAT_EXACT_HIT], stats[DP_STAT_MASKED_HIT],
+ stats[DP_STAT_MISS], stats[DP_STAT_LOST]);
+
+ if (total_cycles == 0) {
+ return;
+ }
+
+ ds_put_format(reply,
+ "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
+ "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
+ cycles[PMD_CYCLES_POLLING],
+ cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
+ cycles[PMD_CYCLES_PROCESSING],
+ cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);
+
+ if (total_packets == 0) {
+ return;
+ }
+
+ ds_put_format(reply,
+ "\tavg cycles per packet: %.02f (%"PRIu64"/%llu)\n",
+ total_cycles / (double)total_packets,
+ total_cycles, total_packets);
+
+ ds_put_format(reply,
+ "\tavg processing cycles per packet: "
+ "%.02f (%"PRIu64"/%llu)\n",
+ cycles[PMD_CYCLES_PROCESSING] / (double)total_packets,
+ cycles[PMD_CYCLES_PROCESSING], total_packets);
+}
+
+static void
+pmd_info_clear_stats(struct ds *reply OVS_UNUSED,
+ struct dp_netdev_pmd_thread *pmd,
+ unsigned long long stats[DP_N_STATS],
+ uint64_t cycles[PMD_N_CYCLES])
+{
+ int i;
+
+ /* We cannot write 'stats' and 'cycles' (because they're written by other
+ * threads) and we shouldn't change 'stats' (because they're used to count
+ * datapath stats, which must not be cleared here). Instead, we save the
+ * current values and subtract them from the values to be displayed in the
+ * future. */
+ for (i = 0; i < DP_N_STATS; i++) {
+ pmd->stats_zero[i] = stats[i];
+ }
+ for (i = 0; i < PMD_N_CYCLES; i++) {
+ pmd->cycles_zero[i] = cycles[i];
+ }
+}
+
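+/* Handles the 'dpif-netdev/pmd-stats-show' and
+ * 'dpif-netdev/pmd-stats-clear' unixctl commands; 'aux' selects which of
+ * the two operations to perform on each pmd thread's counters. */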
+static void
+dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[],
+ void *aux)
+{
+ struct ds reply = DS_EMPTY_INITIALIZER;
+ struct dp_netdev_pmd_thread *pmd;
+ struct dp_netdev *dp = NULL;
+ enum pmd_info_type type = *(enum pmd_info_type *) aux;
+
+ ovs_mutex_lock(&dp_netdev_mutex);
+
+ if (argc == 2) {
+ dp = shash_find_data(&dp_netdevs, argv[1]);
+ } else if (shash_count(&dp_netdevs) == 1) {
+ /* There's only one datapath. */
+ dp = shash_first(&dp_netdevs)->data;
+ }
+
+ if (!dp) {
+ ovs_mutex_unlock(&dp_netdev_mutex);
+ unixctl_command_reply_error(conn,
+ "please specify an existing datapath");
+ return;
+ }
+
+ CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ unsigned long long stats[DP_N_STATS];
+ uint64_t cycles[PMD_N_CYCLES];
+ int i;
+
+ /* Read the current stats and cycle counters. */
+ for (i = 0; i < ARRAY_SIZE(stats); i++) {
+ atomic_read_relaxed(&pmd->stats.n[i], &stats[i]);
+ }
+ for (i = 0; i < ARRAY_SIZE(cycles); i++) {
+ atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]);
+ }
+
+ if (type == PMD_INFO_CLEAR_STATS) {
+ pmd_info_clear_stats(&reply, pmd, stats, cycles);
+ } else if (type == PMD_INFO_SHOW_STATS) {
+ pmd_info_show_stats(&reply, pmd, stats, cycles);
+ }
+ }
+
+ ovs_mutex_unlock(&dp_netdev_mutex);
+
+ unixctl_command_reply(conn, ds_cstr(&reply));
+ ds_destroy(&reply);
+}
+\f
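+/* Registers the unixctl commands that show and clear the pmd stats and
+ * cycle counters. */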
+static int
+dpif_netdev_init(void)
+{
+ static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS,
+ clear_aux = PMD_INFO_CLEAR_STATS;
+
+ unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]",
+ 0, 1, dpif_netdev_pmd_info,
+ (void *)&show_aux);
+ unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]",
+ 0, 1, dpif_netdev_pmd_info,
+ (void *)&clear_aux);
+ return 0;
+}
static int
dpif_netdev_enumerate(struct sset *all_dps,
ovs_mutex_init_recursive(&dp->non_pmd_mutex);
ovsthread_key_create(&dp->per_pmd_key, NULL);
- /* Reserves the core NON_PMD_CORE_ID for all non-pmd threads. */
- ovs_numa_try_pin_core_specific(NON_PMD_CORE_ID);
dp_netdev_set_nonpmd(dp);
dp->n_dpdk_rxqs = NR_QUEUE;
return 0;
}
+/* Add 'n' to the atomic variable 'var' non-atomically and using relaxed
+ * load/store semantics. While the increment is not atomic, the load and
+ * store operations are, making it impossible to read inconsistent values.
+ *
+ * This is used to update thread-local stats counters. */
+static void
+non_atomic_ullong_add(atomic_ullong *var, unsigned long long n)
+{
+ unsigned long long tmp;
+
+ atomic_read_relaxed(var, &tmp);
+ tmp += n;
+ atomic_store_relaxed(var, tmp);
+}
+
static int
dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
{
stats->n_flows = stats->n_hit = stats->n_missed = stats->n_lost = 0;
CMAP_FOR_EACH (pmd, node, &dp->poll_threads) {
+ unsigned long long n;
stats->n_flows += cmap_count(&pmd->flow_table);
- stats->n_hit += pmd->stats.n[DP_STAT_HIT];
- stats->n_missed += pmd->stats.n[DP_STAT_MISS];
- stats->n_lost += pmd->stats.n[DP_STAT_LOST];
+
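+ /* Both exact match cache hits and masked (megaflow) hits count as
+ * datapath hits. */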
+ atomic_read_relaxed(&pmd->stats.n[DP_STAT_MASKED_HIT], &n);
+ stats->n_hit += n;
+ atomic_read_relaxed(&pmd->stats.n[DP_STAT_EXACT_HIT], &n);
+ stats->n_hit += n;
+ atomic_read_relaxed(&pmd->stats.n[DP_STAT_MISS], &n);
+ stats->n_missed += n;
+ atomic_read_relaxed(&pmd->stats.n[DP_STAT_LOST], &n);
+ stats->n_lost += n;
}
stats->n_masks = UINT32_MAX;
stats->n_mask_hit = UINT64_MAX;
int error;
int i;
- /* XXX reject devices already in some dp_netdev. */
+ /* Reject devices already in 'dp'. */
+ if (!get_port_by_name(dp, devname, &port)) {
+ return EEXIST;
+ }
/* Open and validate network device. */
open_type = dpif_netdev_port_open_type(dp->class, type);
return ENOENT;
}
/* There can only be ovs_numa_get_n_cores() pmd threads,
- * so creates a txq for each. */
- error = netdev_set_multiq(netdev, n_cores, dp->n_dpdk_rxqs);
+ * so we create a txq for each, plus one extra for the non pmd
+ * threads. */
+ error = netdev_set_multiq(netdev, n_cores + 1, dp->n_dpdk_rxqs);
if (error && (error != EOPNOTSUPP)) {
VLOG_ERR("%s, cannot set multiq", devname);
return errno;
if (ufidp) {
CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, dp_netdev_flow_hash(ufidp),
&pmd->flow_table) {
- if (ovs_u128_equal(&netdev_flow->ufid, ufidp)) {
+ if (ovs_u128_equals(&netdev_flow->ufid, ufidp)) {
return netdev_flow;
}
}
}
static void
-get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow,
+get_dpif_flow_stats(const struct dp_netdev_flow *netdev_flow_,
struct dpif_flow_stats *stats)
{
- stats->n_packets = netdev_flow->stats.packet_count;
- stats->n_bytes = netdev_flow->stats.byte_count;
- stats->used = netdev_flow->stats.used;
- stats->tcp_flags = netdev_flow->stats.tcp_flags;
+ struct dp_netdev_flow *netdev_flow;
+ unsigned long long n;
+ long long used;
+ uint16_t flags;
+
+ netdev_flow = CONST_CAST(struct dp_netdev_flow *, netdev_flow_);
+
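+ /* The relaxed reads below may race with updates from the owning pmd
+ * thread, so the reported statistics are a best effort snapshot. */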
+ atomic_read_relaxed(&netdev_flow->stats.packet_count, &n);
+ stats->n_packets = n;
+ atomic_read_relaxed(&netdev_flow->stats.byte_count, &n);
+ stats->n_bytes = n;
+ atomic_read_relaxed(&netdev_flow->stats.used, &used);
+ stats->used = used;
+ atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
+ stats->tcp_flags = flags;
}
/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
struct flow_wildcards wc;
struct dp_netdev_actions *actions;
size_t offset;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = &netdev_flow->flow,
+ .mask = &wc.masks,
+ .recirc = true,
+ .max_mpls_depth = SIZE_MAX,
+ };
miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
/* Key */
offset = key_buf->size;
flow->key = ofpbuf_tail(key_buf);
- odp_flow_key_from_flow(key_buf, &netdev_flow->flow, &wc.masks,
- netdev_flow->flow.in_port.odp_port, true);
+ odp_parms.odp_in_port = netdev_flow->flow.in_port.odp_port;
+ odp_flow_key_from_flow(&odp_parms, key_buf);
flow->key_len = key_buf->size - offset;
/* Mask */
offset = mask_buf->size;
flow->mask = ofpbuf_tail(mask_buf);
- odp_flow_key_from_mask(mask_buf, &wc.masks, &netdev_flow->flow,
- odp_to_u32(wc.masks.in_port.odp_port),
- SIZE_MAX, true);
+ odp_parms.odp_in_port = wc.masks.in_port.odp_port;
+ odp_parms.key_buf = key_buf;
+ odp_flow_key_from_mask(&odp_parms, mask_buf);
flow->mask_len = mask_buf->size - offset;
/* Actions */
if (mask_key_len) {
enum odp_key_fitness fitness;
- fitness = odp_flow_key_to_mask(mask_key, mask_key_len, mask, flow);
+ fitness = odp_flow_key_to_mask(mask_key, mask_key_len, key, key_len,
+ mask, flow);
if (fitness) {
/* This should not happen: it indicates that
* odp_flow_key_from_mask() and odp_flow_key_to_mask()
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct dp_netdev_pmd_thread *pmd;
- int pmd_id = get->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : get->pmd_id;
+ unsigned pmd_id = get->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : get->pmd_id;
int error = 0;
pmd = dp_netdev_get_pmd(dp, pmd_id);
flow = xmalloc(sizeof *flow - sizeof flow->cr.flow.mf + mask.len);
memset(&flow->stats, 0, sizeof flow->stats);
flow->dead = false;
- *CONST_CAST(int *, &flow->pmd_id) = pmd->core_id;
+ flow->batch = NULL;
+ *CONST_CAST(unsigned *, &flow->pmd_id) = pmd->core_id;
*CONST_CAST(struct flow *, &flow->flow) = match->flow;
*CONST_CAST(ovs_u128 *, &flow->ufid) = *ufid;
ovs_refcount_init(&flow->ref_cnt);
struct dp_netdev_pmd_thread *pmd;
struct match match;
ovs_u128 ufid;
- int pmd_id = put->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : put->pmd_id;
+ unsigned pmd_id = put->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : put->pmd_id;
int error;
error = dpif_netdev_flow_from_nlattrs(put->key, put->key_len, &match.flow);
get_dpif_flow_stats(netdev_flow, put->stats);
}
if (put->flags & DPIF_FP_ZERO_STATS) {
- memset(&netdev_flow->stats, 0, sizeof netdev_flow->stats);
+ /* XXX: The userspace datapath uses thread local statistics
+ * (for flows), which should be updated only by the owning
+ * thread. Since we cannot write on stats memory here,
+ * we choose not to support this flag. Please note:
+ * - This feature is currently used only by dpctl commands with
+ * option --clear.
+ * - Should the need arise, this operation can be implemented
+ * by keeping a base value (to be updated here) for each
+ * counter, and subtracting it before outputting the stats. */
+ error = EOPNOTSUPP;
}
ovsrcu_postpone(dp_netdev_actions_free, old_actions);
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
struct dp_netdev_pmd_thread *pmd;
- int pmd_id = del->pmd_id == PMD_ID_NULL ? NON_PMD_CORE_ID : del->pmd_id;
+ unsigned pmd_id = del->pmd_id == PMD_ID_NULL
+ ? NON_PMD_CORE_ID : del->pmd_id;
int error = 0;
pmd = dp_netdev_get_pmd(dp, pmd_id);
}
/* Sets the new rx queue config. */
- err = netdev_set_multiq(port->netdev, ovs_numa_get_n_cores(),
+ err = netdev_set_multiq(port->netdev,
+ ovs_numa_get_n_cores() + 1,
n_rxqs);
if (err && (err != EOPNOTSUPP)) {
VLOG_ERR("Failed to set dpdk interface %s rx_queue to:"
}
\f
-/* Creates and returns a new 'struct dp_netdev_actions', with a reference count
- * of 1, whose actions are a copy of from the 'ofpacts_len' bytes of
- * 'ofpacts'. */
+/* Creates and returns a new 'struct dp_netdev_actions', whose actions are
+ * a copy of the 'ofpacts_len' bytes of 'ofpacts'. */
struct dp_netdev_actions *
dp_netdev_actions_create(const struct nlattr *actions, size_t size)
{
struct dp_netdev_actions *netdev_actions;
- netdev_actions = xmalloc(sizeof *netdev_actions);
- netdev_actions->actions = xmemdup(actions, size);
+ netdev_actions = xmalloc(sizeof *netdev_actions + size);
+ memcpy(netdev_actions->actions, actions, size);
netdev_actions->size = size;
return netdev_actions;
static void
dp_netdev_actions_free(struct dp_netdev_actions *actions)
{
- free(actions->actions);
free(actions);
}
\f
+static inline unsigned long long
+cycles_counter(void)
+{
+#ifdef DPDK_NETDEV
+ return rte_get_tsc_cycles();
+#else
+ return 0;
+#endif
+}
+
+/* Fake mutex to make sure that the calls to cycles_count_* are balanced. */
+extern struct ovs_mutex cycles_counter_fake_mutex;
+
+/* Start counting cycles. Must be followed by 'cycles_count_end()'. */
+static inline void
+cycles_count_start(struct dp_netdev_pmd_thread *pmd)
+ OVS_ACQUIRES(&cycles_counter_fake_mutex)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ pmd->last_cycles = cycles_counter();
+}
+
+/* Stop counting cycles and add them to the counter 'type'. */
+static inline void
+cycles_count_end(struct dp_netdev_pmd_thread *pmd,
+ enum pmd_cycles_counter_type type)
+ OVS_RELEASES(&cycles_counter_fake_mutex)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ unsigned long long interval = cycles_counter() - pmd->last_cycles;
+
+ non_atomic_ullong_add(&pmd->cycles.n[type], interval);
+}
static void
dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev_port *port,
struct netdev_rxq *rxq)
{
- struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *packets[NETDEV_MAX_BURST];
int error, cnt;
+ cycles_count_start(pmd);
error = netdev_rxq_recv(rxq, packets, &cnt);
+ cycles_count_end(pmd, PMD_CYCLES_POLLING);
if (!error) {
int i;
/* XXX: initialize md in netdev implementation. */
for (i = 0; i < cnt; i++) {
- packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
+ pkt_metadata_init(&packets[i]->md, port->port_no);
}
+ cycles_count_start(pmd);
dp_netdev_input(pmd, packets, cnt);
+ cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
emc_cache_init(&pmd->flow_cache);
poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
+ /* List the port/core affinity. */
+ for (i = 0; i < poll_cnt; i++) {
+ VLOG_INFO("Core %u processing port '%s'", pmd->core_id,
+ netdev_get_name(poll_list[i].port->netdev));
+ }
+
/* Signal here to make sure the pmd finishes
* reloading the updated configuration. */
dp_netdev_pmd_reload_done(pmd);
*
* Caller must unrefs the returned reference. */
static struct dp_netdev_pmd_thread *
-dp_netdev_get_pmd(struct dp_netdev *dp, int core_id)
+dp_netdev_get_pmd(struct dp_netdev *dp, unsigned core_id)
{
struct dp_netdev_pmd_thread *pmd;
const struct cmap_node *pnode;
return next;
}
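+/* Each pmd thread sends packets on the tx queue whose id matches its core
+ * id; all the non pmd threads share the one extra queue,
+ * 'ovs_numa_get_n_cores()'. */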
+static int
+core_id_to_qid(unsigned core_id)
+{
+ if (core_id != NON_PMD_CORE_ID) {
+ return core_id;
+ } else {
+ return ovs_numa_get_n_cores();
+ }
+}
+
/* Configures the 'pmd' based on the input argument. */
static void
dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
- int index, int core_id, int numa_id)
+ int index, unsigned core_id, int numa_id)
{
pmd->dp = dp;
pmd->index = index;
pmd->core_id = core_id;
+ pmd->tx_qid = core_id_to_qid(core_id);
pmd->numa_id = numa_id;
ovs_refcount_init(&pmd->ref_cnt);
can_have = dp->pmd_cmask ? n_unpinned : MIN(n_unpinned, NR_PMD_THREADS);
for (i = 0; i < can_have; i++) {
struct dp_netdev_pmd_thread *pmd = xzalloc(sizeof *pmd);
- int core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
+ unsigned core_id = ovs_numa_get_unpinned_core_on_numa(numa_id);
dp_netdev_configure_pmd(pmd, dp, i, core_id, numa_id);
/* Each thread will distribute all devices rx-queues among
static void
dp_netdev_flow_used(struct dp_netdev_flow *netdev_flow, int cnt, int size,
- uint16_t tcp_flags)
+ uint16_t tcp_flags, long long now)
{
- long long int now = time_msec();
+ uint16_t flags;
- netdev_flow->stats.used = MAX(now, netdev_flow->stats.used);
- netdev_flow->stats.packet_count += cnt;
- netdev_flow->stats.byte_count += size;
- netdev_flow->stats.tcp_flags |= tcp_flags;
+ atomic_store_relaxed(&netdev_flow->stats.used, now);
+ non_atomic_ullong_add(&netdev_flow->stats.packet_count, cnt);
+ non_atomic_ullong_add(&netdev_flow->stats.byte_count, size);
+ atomic_read_relaxed(&netdev_flow->stats.tcp_flags, &flags);
+ flags |= tcp_flags;
+ atomic_store_relaxed(&netdev_flow->stats.tcp_flags, flags);
}
static void
dp_netdev_count_packet(struct dp_netdev_pmd_thread *pmd,
enum dp_stat_type type, int cnt)
{
- pmd->stats.n[type] += cnt;
+ non_atomic_ullong_add(&pmd->stats.n[type], cnt);
}
static int
{
struct dp_netdev *dp = pmd->dp;
- if (type == DPIF_UC_MISS) {
- dp_netdev_count_packet(pmd, DP_STAT_MISS, 1);
- }
-
if (OVS_UNLIKELY(!dp->upcall_cb)) {
return ENODEV;
}
struct ds ds = DS_EMPTY_INITIALIZER;
char *packet_str;
struct ofpbuf key;
+ struct odp_flow_key_parms odp_parms = {
+ .flow = flow,
+ .mask = &wc->masks,
+ .odp_in_port = flow->in_port.odp_port,
+ .recirc = true,
+ };
ofpbuf_init(&key, 0);
- odp_flow_key_from_flow(&key, flow, &wc->masks, flow->in_port.odp_port,
- true);
+ odp_flow_key_from_flow(&odp_parms, &key);
packet_str = ofp_packet_to_string(dp_packet_data(packet_),
dp_packet_size(packet_));
}
static inline uint32_t
-dpif_netdev_packet_get_dp_hash(struct dp_packet *packet,
- const struct miniflow *mf)
+dpif_netdev_packet_get_rss_hash(struct dp_packet *packet,
+ const struct miniflow *mf)
{
- uint32_t hash;
+ uint32_t hash, recirc_depth;
- hash = dp_packet_get_dp_hash(packet);
+ hash = dp_packet_get_rss_hash(packet);
if (OVS_UNLIKELY(!hash)) {
hash = miniflow_hash_5tuple(mf, 0);
- dp_packet_set_dp_hash(packet, hash);
+ dp_packet_set_rss_hash(packet, hash);
+ }
+
+ /* The RSS hash must account for the recirculation depth to avoid
+ * collisions in the exact match cache. */
+ recirc_depth = *recirc_depth_get_unsafe();
+ if (OVS_UNLIKELY(recirc_depth)) {
+ hash = hash_finish(hash, recirc_depth);
+ dp_packet_set_rss_hash(packet, hash);
}
return hash;
}
struct dp_netdev_flow *flow;
- struct dp_packet *packets[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *packets[NETDEV_MAX_BURST];
};
static inline void
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
{
- batch->flow = flow;
+ flow->batch = batch;
+ batch->flow = flow;
batch->packet_count = 0;
batch->byte_count = 0;
batch->tcp_flags = 0;
static inline void
packet_batch_execute(struct packet_batch *batch,
- struct dp_netdev_pmd_thread *pmd)
+ struct dp_netdev_pmd_thread *pmd,
+ long long now)
{
struct dp_netdev_actions *actions;
struct dp_netdev_flow *flow = batch->flow;
- dp_netdev_flow_used(batch->flow, batch->packet_count, batch->byte_count,
- batch->tcp_flags);
+ dp_netdev_flow_used(flow, batch->packet_count, batch->byte_count,
+ batch->tcp_flags, now);
actions = dp_netdev_flow_get_actions(flow);
dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
actions->actions, actions->size);
-
- dp_netdev_count_packet(pmd, DP_STAT_HIT, batch->packet_count);
}
-static inline bool
+static inline void
dp_netdev_queue_batches(struct dp_packet *pkt,
struct dp_netdev_flow *flow, const struct miniflow *mf,
- struct packet_batch *batches, size_t *n_batches,
- size_t max_batches)
-{
- struct packet_batch *batch = NULL;
- int j;
-
- if (OVS_UNLIKELY(!flow)) {
- return false;
- }
- /* XXX: This O(n^2) algortihm makes sense if we're operating under the
- * assumption that the number of distinct flows (and therefore the
- * number of distinct batches) is quite small. If this turns out not
- * to be the case, it may make sense to pre sort based on the
- * netdev_flow pointer. That done we can get the appropriate batching
- * in O(n * log(n)) instead. */
- for (j = *n_batches - 1; j >= 0; j--) {
- if (batches[j].flow == flow) {
- batch = &batches[j];
- packet_batch_update(batch, pkt, mf);
- return true;
- }
- }
- if (OVS_UNLIKELY(*n_batches >= max_batches)) {
- return false;
+ struct packet_batch *batches, size_t *n_batches)
+{
+ struct packet_batch *batch = flow->batch;
+
+ if (OVS_LIKELY(batch)) {
+ packet_batch_update(batch, pkt, mf);
+ return;
}
batch = &batches[(*n_batches)++];
packet_batch_init(batch, flow);
packet_batch_update(batch, pkt, mf);
- return true;
}
static inline void
}
/* Try to process all ('cnt') the 'packets' using only the exact match cache
- * 'flow_cache'. If a flow is not found for a packet 'packets[i]', or if there
- * is no matching batch for a packet's flow, the miniflow is copied into 'keys'
- * and the packet pointer is moved at the beginning of the 'packets' array.
+ * 'flow_cache'. If a flow is not found for a packet 'packets[i]', the
+ * miniflow is copied into 'keys' and the packet pointer is moved at the
+ * beginning of the 'packets' array.
*
* The function returns the number of packets that needs to be processed in the
* 'packets' array (they have been moved to the beginning of the vector).
*/
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dp_packet **packets,
- size_t cnt, struct netdev_flow_key *keys)
+ size_t cnt, struct netdev_flow_key *keys,
+ struct packet_batch batches[], size_t *n_batches)
{
- struct netdev_flow_key key;
- struct packet_batch batches[4];
struct emc_cache *flow_cache = &pmd->flow_cache;
- size_t n_batches, i;
- size_t notfound_cnt = 0;
+ struct netdev_flow_key key;
+ size_t i, notfound_cnt = 0;
- n_batches = 0;
miniflow_initialize(&key.mf, key.buf);
for (i = 0; i < cnt; i++) {
struct dp_netdev_flow *flow;
continue;
}
+ if (i != cnt - 1) {
+ /* Prefetch the next packet's data. */
+ OVS_PREFETCH(dp_packet_data(packets[i+1]));
+ }
+
miniflow_extract(packets[i], &key.mf);
key.len = 0; /* Not computed yet. */
- key.hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.mf);
+ key.hash = dpif_netdev_packet_get_rss_hash(packets[i], &key.mf);
flow = emc_lookup(flow_cache, &key);
- if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], flow, &key.mf,
- batches, &n_batches,
- ARRAY_SIZE(batches)))) {
+ if (OVS_LIKELY(flow)) {
+ dp_netdev_queue_batches(packets[i], flow, &key.mf, batches,
+ n_batches);
+ } else {
if (i != notfound_cnt) {
dp_packet_swap(&packets[i], &packets[notfound_cnt]);
}
}
}
- for (i = 0; i < n_batches; i++) {
- packet_batch_execute(&batches[i], pmd);
- }
+ dp_netdev_count_packet(pmd, DP_STAT_EXACT_HIT, cnt - notfound_cnt);
return notfound_cnt;
}
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
struct dp_packet **packets, size_t cnt,
- struct netdev_flow_key *keys)
+ struct netdev_flow_key *keys,
+ struct packet_batch batches[], size_t *n_batches)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
#else
/* Sparse or MSVC doesn't like variable length array. */
- enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
- struct packet_batch batches[PKT_ARRAY_SIZE];
struct dpcls_rule *rules[PKT_ARRAY_SIZE];
struct dp_netdev *dp = pmd->dp;
struct emc_cache *flow_cache = &pmd->flow_cache;
- size_t n_batches, i;
+ int miss_cnt = 0, lost_cnt = 0;
bool any_miss;
+ size_t i;
for (i = 0; i < cnt; i++) {
/* Key length is needed in all the cases, hash computed on demand. */
continue;
}
+ miss_cnt++;
+
miniflow_expand(&keys[i].mf, &match.flow);
ofpbuf_clear(&actions);
&ufid, DPIF_UC_MISS, NULL, &actions,
&put_actions);
if (OVS_UNLIKELY(error && error != ENOSPC)) {
+ dp_packet_delete(packets[i]);
+ lost_cnt++;
continue;
}
ofpbuf_uninit(&actions);
ofpbuf_uninit(&put_actions);
fat_rwlock_unlock(&dp->upcall_rwlock);
+ dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
} else if (OVS_UNLIKELY(any_miss)) {
- int dropped_cnt = 0;
-
for (i = 0; i < cnt; i++) {
if (OVS_UNLIKELY(!rules[i])) {
dp_packet_delete(packets[i]);
- dropped_cnt++;
+ lost_cnt++;
+ miss_cnt++;
}
}
-
- dp_netdev_count_packet(pmd, DP_STAT_LOST, dropped_cnt);
}
- n_batches = 0;
for (i = 0; i < cnt; i++) {
struct dp_packet *packet = packets[i];
struct dp_netdev_flow *flow;
flow = dp_netdev_flow_cast(rules[i]);
emc_insert(flow_cache, &keys[i], flow);
- dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches,
- &n_batches, ARRAY_SIZE(batches));
+ dp_netdev_queue_batches(packet, flow, &keys[i].mf, batches, n_batches);
}
- for (i = 0; i < n_batches; i++) {
- packet_batch_execute(&batches[i], pmd);
- }
+ dp_netdev_count_packet(pmd, DP_STAT_MASKED_HIT, cnt - miss_cnt);
+ dp_netdev_count_packet(pmd, DP_STAT_MISS, miss_cnt);
+ dp_netdev_count_packet(pmd, DP_STAT_LOST, lost_cnt);
}
static void
const size_t PKT_ARRAY_SIZE = cnt;
#else
/* Sparse or MSVC doesn't like variable length array. */
- enum { PKT_ARRAY_SIZE = NETDEV_MAX_RX_BATCH };
+ enum { PKT_ARRAY_SIZE = NETDEV_MAX_BURST };
#endif
struct netdev_flow_key keys[PKT_ARRAY_SIZE];
- size_t newcnt;
+ struct packet_batch batches[PKT_ARRAY_SIZE];
+ long long now = time_msec();
+ size_t newcnt, n_batches, i;
- newcnt = emc_processing(pmd, packets, cnt, keys);
+ n_batches = 0;
+ newcnt = emc_processing(pmd, packets, cnt, keys, batches, &n_batches);
if (OVS_UNLIKELY(newcnt)) {
- fast_path_processing(pmd, packets, newcnt, keys);
+ fast_path_processing(pmd, packets, newcnt, keys, batches, &n_batches);
+ }
+
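+ /* All the flow batches need to be reset before any call to
+ * packet_batch_execute(), since it may recurse into dp_netdev_input()
+ * (e.g. via the recirculation action) and 'flow->batch' could be
+ * reused for a new batch. */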
+ for (i = 0; i < n_batches; i++) {
+ batches[i].flow->batch = NULL;
+ }
+
+ for (i = 0; i < n_batches; i++) {
+ packet_batch_execute(&batches[i], pmd, now);
}
}
}
static void
-dp_netdev_drop_packets(struct dp_packet ** packets, int cnt, bool may_steal)
+dp_netdev_drop_packets(struct dp_packet **packets, int cnt, bool may_steal)
{
if (may_steal) {
int i;
}
static void
-dp_netdev_clone_pkt_batch(struct dp_packet **tnl_pkt,
- struct dp_packet **packets, int cnt)
+dp_netdev_clone_pkt_batch(struct dp_packet **dst_pkts,
+ struct dp_packet **src_pkts, int cnt)
{
int i;
for (i = 0; i < cnt; i++) {
- tnl_pkt[i] = dp_packet_clone(packets[i]);
+ dst_pkts[i] = dp_packet_clone(src_pkts[i]);
}
}
{
struct dp_netdev_execute_aux *aux = aux_;
uint32_t *depth = recirc_depth_get();
- struct dp_netdev_pmd_thread *pmd= aux->pmd;
- struct dp_netdev *dp= pmd->dp;
+ struct dp_netdev_pmd_thread *pmd = aux->pmd;
+ struct dp_netdev *dp = pmd->dp;
int type = nl_attr_type(a);
struct dp_netdev_port *p;
int i;
case OVS_ACTION_ATTR_OUTPUT:
p = dp_netdev_lookup_port(dp, u32_to_odp(nl_attr_get_u32(a)));
if (OVS_LIKELY(p)) {
- netdev_send(p->netdev, pmd->core_id, packets, cnt, may_steal);
+ netdev_send(p->netdev, pmd->tx_qid, packets, cnt, may_steal);
return;
}
break;
case OVS_ACTION_ATTR_TUNNEL_PUSH:
if (*depth < MAX_RECIRC_DEPTH) {
- struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
int err;
if (!may_steal) {
p = dp_netdev_lookup_port(dp, portno);
if (p) {
- struct dp_packet *tnl_pkt[NETDEV_MAX_RX_BATCH];
+ struct dp_packet *tnl_pkt[NETDEV_MAX_BURST];
int err;
if (!may_steal) {
case OVS_ACTION_ATTR_RECIRC:
if (*depth < MAX_RECIRC_DEPTH) {
+ struct dp_packet *recirc_pkts[NETDEV_MAX_BURST];
- (*depth)++;
- for (i = 0; i < cnt; i++) {
- struct dp_packet *recirc_pkt;
-
- recirc_pkt = (may_steal) ? packets[i]
- : dp_packet_clone(packets[i]);
-
- recirc_pkt->md.recirc_id = nl_attr_get_u32(a);
-
- /* Hash is private to each packet */
- recirc_pkt->md.dp_hash = dp_packet_get_dp_hash(packets[i]);
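+ /* Recirculate the packets as a single batch rather than one at a
+ * time, cloning the batch first if the caller keeps ownership. */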
+ if (!may_steal) {
+ dp_netdev_clone_pkt_batch(recirc_pkts, packets, cnt);
+ packets = recirc_pkts;
+ }
- dp_netdev_input(pmd, &recirc_pkt, 1);
+ for (i = 0; i < cnt; i++) {
+ packets[i]->md.recirc_id = nl_attr_get_u32(a);
}
+
+ (*depth)++;
+ dp_netdev_input(pmd, packets, cnt);
(*depth)--;
return;
const struct dpif_class dpif_netdev_class = {
"netdev",
+ dpif_netdev_init,
dpif_netdev_enumerate,
dpif_netdev_port_open_type,
dpif_netdev_open,
dp_register_provider(class);
}
+static void
+dpif_dummy_override(const char *type)
+{
+ if (!dp_unregister_provider(type)) {
+ dpif_dummy_register__(type);
+ }
+}
+
void
-dpif_dummy_register(bool override)
+dpif_dummy_register(enum dummy_level level)
{
- if (override) {
+ if (level == DUMMY_OVERRIDE_ALL) {
struct sset types;
const char *type;
sset_init(&types);
dp_enumerate_types(&types);
SSET_FOR_EACH (type, &types) {
- if (!dp_unregister_provider(type)) {
- dpif_dummy_register__(type);
- }
+ dpif_dummy_override(type);
}
sset_destroy(&types);
+ } else if (level == DUMMY_OVERRIDE_SYSTEM) {
+ dpif_dummy_override("system");
}
dpif_dummy_register__("dummy");
#if !defined(__CHECKER__) && !defined(_WIN32)
const int N_MAPS = DIV_ROUND_UP(cnt, MAP_BITS);
#else
- enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_RX_BATCH, MAP_BITS) };
+ enum { N_MAPS = DIV_ROUND_UP(NETDEV_MAX_BURST, MAP_BITS) };
#endif
map_type maps[N_MAPS];
struct dpcls_subtable *subtable;
}
/* Compute hashes for the remaining keys. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
hashes[i] = netdev_flow_key_hash_in_mask(&mkeys[i],
&subtable->mask);
}
/* Lookup. */
map = cmap_find_batch(&subtable->rules, map, hashes, nodes);
/* Check results. */
- ULONG_FOR_EACH_1(i, map) {
+ ULLONG_FOR_EACH_1(i, map) {
struct dpcls_rule *rule;
CMAP_NODE_FOR_EACH (rule, cmap_node, nodes[i]) {
goto next;
}
}
- ULONG_SET0(map, i); /* Did not match. */
+ ULLONG_SET0(map, i); /* Did not match. */
next:
; /* Keep Sparse happy. */
}