*/
#include <config.h>
-#include "dpif.h"
+#include "dpif-netdev.h"
#include <ctype.h>
#include <errno.h>
#include "dpif-provider.h"
#include "dummy.h"
#include "dynamic-string.h"
+#include "fat-rwlock.h"
#include "flow.h"
-#include "hmap.h"
+#include "cmap.h"
#include "latch.h"
#include "list.h"
#include "meta-flow.h"
#define NETDEV_RULE_PRIORITY 0x8000
#define FLOW_DUMP_MAX_BATCH 50
-#define NR_THREADS 1
+#define NR_PMD_THREADS 1
/* Use per-thread recirc_depth to prevent recirculation loops. */
#define MAX_RECIRC_DEPTH 5
DEFINE_STATIC_PER_THREAD_DATA(uint32_t, recirc_depth, 0)
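+/* An illustrative sketch (not part of this patch) of how the per-thread
+ * depth counter generated above guards a recirculation:
+ *
+ *     uint32_t *depth = recirc_depth_get();
+ *     if (*depth < MAX_RECIRC_DEPTH) {
+ *         (*depth)++;
+ *         dp_netdev_input(dp, &packet, 1, &md);
+ *         (*depth)--;
+ *     } else {
+ *         dpif_packet_delete(packet);
+ *     }
+ */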
* dp_netdev_mutex (global)
* port_mutex
* flow_mutex
- * cls.rwlock
* queue_rwlock
*/
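+/* For example, a writer that needs both the flow table and the upcall
+ * queues must acquire the locks in the order listed above (illustrative
+ * sketch only):
+ *
+ *     ovs_mutex_lock(&dp->flow_mutex);
+ *     fat_rwlock_wrlock(&dp->queue_rwlock);
+ *     ...
+ *     fat_rwlock_unlock(&dp->queue_rwlock);
+ *     ovs_mutex_unlock(&dp->flow_mutex);
+ */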
struct dp_netdev {
/* Flows.
*
- * Readers of 'cls' and 'flow_table' must take a 'cls->rwlock' read lock.
- *
- * Writers of 'cls' and 'flow_table' must take the 'flow_mutex' and then
- * the 'cls->rwlock' write lock. (The outer 'flow_mutex' allows writers to
- * atomically perform multiple operations on 'cls' and 'flow_table'.)
+ * Writers of 'flow_table' must take the 'flow_mutex'. Corresponding
+ * changes to 'cls' must be made while still holding the 'flow_mutex'.
*/
struct ovs_mutex flow_mutex;
- struct classifier cls; /* Classifier. Protected by cls.rwlock. */
- struct hmap flow_table OVS_GUARDED; /* Flow table. */
+ struct classifier cls;
+ struct cmap flow_table OVS_GUARDED; /* Flow table. */
/* Queues.
*
/* There are fields in the flow structure that we never use. Therefore we can
 * save a few words of memory. */
-#define NETDEV_KEY_BUF_SIZE_U32 (FLOW_U32S \
+#define NETDEV_KEY_BUF_SIZE_U32 (FLOW_U32S - MINI_N_INLINE \
- FLOW_U32_SIZE(regs) \
- FLOW_U32_SIZE(metadata) \
)
const struct cls_rule cr; /* In owning dp_netdev's 'cls'. */
    /* Hash table indexed by unmasked flow. */
- const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
+ const struct cmap_node node; /* In owning dp_netdev's 'flow_table'. */
const struct flow flow; /* The flow that created this entry. */
/* Statistics.
OVS_REQ_WRLOCK(dp->queue_rwlock);
static int dpif_netdev_open(const struct dpif_class *, const char *name,
bool create, struct dpif **);
-static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf **,
- int cnt, int queue_no, int type,
+static int dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *,
+ int queue_no, int type,
const struct miniflow *,
const struct nlattr *userdata);
static void dp_netdev_execute_actions(struct dp_netdev *dp,
ovs_mutex_init(&dp->flow_mutex);
classifier_init(&dp->cls, NULL);
- hmap_init(&dp->flow_table);
+ cmap_init(&dp->flow_table);
fat_rwlock_init(&dp->queue_rwlock);
fat_rwlock_destroy(&dp->queue_rwlock);
classifier_destroy(&dp->cls);
- hmap_destroy(&dp->flow_table);
+ cmap_destroy(&dp->flow_table);
ovs_mutex_destroy(&dp->flow_mutex);
seq_destroy(dp->port_seq);
cmap_destroy(&dp->ports);
/* Take dp_netdev_mutex so that, if dp->ref_cnt falls to zero, we can't
* get a new reference to 'dp' through the 'dp_netdevs' shash. */
ovs_mutex_lock(&dp_netdev_mutex);
- if (ovs_refcount_unref(&dp->ref_cnt) == 1) {
+ if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
dp_netdev_free(dp);
}
ovs_mutex_unlock(&dp_netdev_mutex);
struct dp_netdev *dp = get_dp_netdev(dpif);
if (!atomic_flag_test_and_set(&dp->destroyed)) {
- if (ovs_refcount_unref(&dp->ref_cnt) == 1) {
+ if (ovs_refcount_unref_relaxed(&dp->ref_cnt) == 1) {
/* Can't happen: 'dpif' still owns a reference to 'dp'. */
OVS_NOT_REACHED();
}
struct dp_netdev_stats *bucket;
size_t i;
- fat_rwlock_rdlock(&dp->cls.rwlock);
- stats->n_flows = hmap_count(&dp->flow_table);
- fat_rwlock_unlock(&dp->cls.rwlock);
+ stats->n_flows = cmap_count(&dp->flow_table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
OVSTHREAD_STATS_FOR_EACH_BUCKET (bucket, i, &dp->stats) {
if (netdev_is_pmd(netdev)) {
dp->pmd_count++;
- dp_netdev_set_pmd_threads(dp, NR_THREADS);
+ dp_netdev_set_pmd_threads(dp, NR_PMD_THREADS);
dp_netdev_reload_pmd_threads(dp);
}
ovs_refcount_init(&port->ref_cnt);
static void
port_unref(struct dp_netdev_port *port)
{
- if (port && ovs_refcount_unref(&port->ref_cnt) == 1) {
+ if (port && ovs_refcount_unref_relaxed(&port->ref_cnt) == 1) {
ovsrcu_postpone(port_destroy__, port);
}
}
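+/* With 'flow_table' converted to a cmap, removal no longer excludes
+ * concurrent readers: another thread may still be using a flow it looked
+ * up, so the flow's memory is reclaimed via ovsrcu_postpone() below, after
+ * every thread has quiesced, rather than freed in place. */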
static void
dp_netdev_remove_flow(struct dp_netdev *dp, struct dp_netdev_flow *flow)
- OVS_REQ_WRLOCK(dp->cls.rwlock)
OVS_REQUIRES(dp->flow_mutex)
{
struct cls_rule *cr = CONST_CAST(struct cls_rule *, &flow->cr);
- struct hmap_node *node = CONST_CAST(struct hmap_node *, &flow->node);
+ struct cmap_node *node = CONST_CAST(struct cmap_node *, &flow->node);
classifier_remove(&dp->cls, cr);
- hmap_remove(&dp->flow_table, node);
+ cmap_remove(&dp->flow_table, node, flow_hash(&flow->flow, 0));
ovsrcu_postpone(dp_netdev_flow_free, flow);
}
static void
dp_netdev_flow_flush(struct dp_netdev *dp)
{
- struct dp_netdev_flow *netdev_flow, *next;
+ struct dp_netdev_flow *netdev_flow;
ovs_mutex_lock(&dp->flow_mutex);
- fat_rwlock_wrlock(&dp->cls.rwlock);
- HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
+ CMAP_FOR_EACH_SAFE (netdev_flow, node, &dp->flow_table) {
dp_netdev_remove_flow(dp, netdev_flow);
}
- fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&dp->flow_mutex);
}
static struct dp_netdev_flow *
dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct miniflow *key)
- OVS_EXCLUDED(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
struct cls_rule *rule;
- fat_rwlock_rdlock(&dp->cls.rwlock);
- rule = classifier_lookup_miniflow_first(&dp->cls, key);
+ classifier_lookup_miniflow_batch(&dp->cls, &key, &rule, 1);
netdev_flow = dp_netdev_flow_cast(rule);
- fat_rwlock_unlock(&dp->cls.rwlock);
return netdev_flow;
}
static struct dp_netdev_flow *
dp_netdev_find_flow(const struct dp_netdev *dp, const struct flow *flow)
- OVS_REQ_RDLOCK(dp->cls.rwlock)
{
struct dp_netdev_flow *netdev_flow;
- HMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
+ CMAP_FOR_EACH_WITH_HASH (netdev_flow, node, flow_hash(flow, 0),
&dp->flow_table) {
if (flow_equal(&netdev_flow->flow, flow)) {
return netdev_flow;
static int
dpif_netdev_flow_get(const struct dpif *dpif,
const struct nlattr *nl_key, size_t nl_key_len,
- struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
+ struct ofpbuf **bufp,
+ struct nlattr **maskp, size_t *mask_len,
+ struct nlattr **actionsp, size_t *actions_len,
+ struct dpif_flow_stats *stats)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
return error;
}
- fat_rwlock_rdlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_find_flow(dp, &key);
- fat_rwlock_unlock(&dp->cls.rwlock);
if (netdev_flow) {
if (stats) {
get_dpif_flow_stats(netdev_flow, stats);
}
+ if (maskp) {
+ struct flow_wildcards wc;
+
+ *bufp = ofpbuf_new(sizeof(struct odputil_keybuf));
+ minimask_expand(&netdev_flow->cr.match.mask, &wc);
+ odp_flow_key_from_mask(*bufp, &wc.masks, &netdev_flow->flow,
+ odp_to_u32(wc.masks.in_port.odp_port),
+ SIZE_MAX, true);
+ *maskp = ofpbuf_data(*bufp);
+ *mask_len = ofpbuf_size(*bufp);
+ }
if (actionsp) {
struct dp_netdev_actions *actions;
actions = dp_netdev_flow_get_actions(netdev_flow);
- *actionsp = ofpbuf_clone_data(actions->actions, actions->size);
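+        /* The returned 'actions' point into RCU-protected data and remain
+         * valid only until the caller quiesces, so the caller must use them
+         * without quiescing. */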
+ *actionsp = actions->actions;
+ *actions_len = actions->size;
}
} else {
error = ENOENT;
match_init(&match, flow, wc);
cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
&match, NETDEV_RULE_PRIORITY);
- fat_rwlock_wrlock(&dp->cls.rwlock);
+ cmap_insert(&dp->flow_table,
+ CONST_CAST(struct cmap_node *, &netdev_flow->node),
+ flow_hash(flow, 0));
classifier_insert(&dp->cls,
CONST_CAST(struct cls_rule *, &netdev_flow->cr));
- hmap_insert(&dp->flow_table,
- CONST_CAST(struct hmap_node *, &netdev_flow->node),
- flow_hash(flow, 0));
- fat_rwlock_unlock(&dp->cls.rwlock);
return 0;
}
netdev_flow = dp_netdev_lookup_flow(dp, &miniflow);
if (!netdev_flow) {
if (put->flags & DPIF_FP_CREATE) {
- if (hmap_count(&dp->flow_table) < MAX_FLOWS) {
+ if (cmap_count(&dp->flow_table) < MAX_FLOWS) {
if (put->stats) {
memset(put->stats, 0, sizeof *put->stats);
}
}
ovs_mutex_lock(&dp->flow_mutex);
- fat_rwlock_wrlock(&dp->cls.rwlock);
netdev_flow = dp_netdev_find_flow(dp, &key);
if (netdev_flow) {
if (del->stats) {
} else {
error = ENOENT;
}
- fat_rwlock_unlock(&dp->cls.rwlock);
ovs_mutex_unlock(&dp->flow_mutex);
return error;
struct dpif_netdev_flow_dump {
struct dpif_flow_dump up;
- uint32_t bucket;
- uint32_t offset;
+ struct cmap_position pos;
int status;
struct ovs_mutex mutex;
};
dump = xmalloc(sizeof *dump);
dpif_flow_dump_init(&dump->up, dpif_);
- dump->bucket = 0;
- dump->offset = 0;
+ memset(&dump->pos, 0, sizeof dump->pos);
dump->status = 0;
ovs_mutex_init(&dump->mutex);
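+/* The dump now records a 'struct cmap_position' instead of the old
+ * bucket/offset pair. An illustrative sketch (not part of this patch) of a
+ * resumable iteration over a cmap:
+ *
+ *     struct cmap_position pos;
+ *     struct cmap_node *node;
+ *
+ *     memset(&pos, 0, sizeof pos);
+ *     while ((node = cmap_next_position(&cmap, &pos))) {
+ *         ... CONTAINER_OF(node, struct dp_netdev_flow, node) ...
+ *     }
+ */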
free(thread);
}
-/* XXX the caller must use 'actions' without quiescing */
static int
dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
struct dpif_flow *flows, int max_flows)
ovs_mutex_lock(&dump->mutex);
if (!dump->status) {
- fat_rwlock_rdlock(&dp->cls.rwlock);
for (n_flows = 0; n_flows < MIN(max_flows, FLOW_DUMP_MAX_BATCH);
n_flows++) {
- struct hmap_node *node;
+ struct cmap_node *node;
- node = hmap_at_position(&dp->flow_table, &dump->bucket,
- &dump->offset);
+ node = cmap_next_position(&dp->flow_table, &dump->pos);
if (!node) {
dump->status = EOF;
break;
netdev_flows[n_flows] = CONTAINER_OF(node, struct dp_netdev_flow,
node);
}
- fat_rwlock_unlock(&dp->cls.rwlock);
}
ovs_mutex_unlock(&dump->mutex);
static inline void
packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow,
- struct dpif_packet *packet, struct pkt_metadata *md,
- const struct miniflow *mf)
+ struct pkt_metadata *md)
{
batch->flow = flow;
batch->md = *md;
- batch->packets[0] = packet;
batch->packet_count = 0;
batch->byte_count = 0;
batch->tcp_flags = 0;
-
- packet_batch_update(batch, packet, mf);
}
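+/* packet_batch_update() (unchanged by this patch; shown here only as a
+ * sketch of the expected shape) appends one packet to the batch and
+ * accumulates the per-batch statistics:
+ *
+ *     batch->tcp_flags |= miniflow_get_tcp_flags(mf);
+ *     batch->packets[batch->packet_count++] = packet;
+ *     batch->byte_count += ofpbuf_size(&packet->ofpbuf);
+ */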
static inline void
dp_netdev_input(struct dp_netdev *dp, struct dpif_packet **packets, int cnt,
struct pkt_metadata *md)
{
- struct packet_batch batch;
-
- struct netdev_flow_key key;
+ struct packet_batch batches[NETDEV_MAX_RX_BATCH];
+ struct netdev_flow_key keys[NETDEV_MAX_RX_BATCH];
+    const struct miniflow *mfs[NETDEV_MAX_RX_BATCH]; /* NULL for bad packets. */
+ struct cls_rule *rules[NETDEV_MAX_RX_BATCH];
+ size_t n_batches, i;
- int i;
+ for (i = 0; i < cnt; i++) {
+ if (OVS_UNLIKELY(ofpbuf_size(&packets[i]->ofpbuf) < ETH_HEADER_LEN)) {
+ dpif_packet_delete(packets[i]);
+ mfs[i] = NULL;
+ continue;
+ }
- batch.flow = NULL;
+ miniflow_initialize(&keys[i].flow, keys[i].buf);
+ miniflow_extract(&packets[i]->ofpbuf, md, &keys[i].flow);
+ mfs[i] = &keys[i].flow;
+ }
- miniflow_initialize(&key.flow, key.buf);
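+    /* Look all extracted keys up in a single classifier call. Slots left
+     * NULL above (malformed packets) produce no rule. */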
+ classifier_lookup_miniflow_batch(&dp->cls, mfs, rules, cnt);
+ n_batches = 0;
for (i = 0; i < cnt; i++) {
- struct dp_netdev_flow *netdev_flow;
- struct ofpbuf *buf = &packets[i]->ofpbuf;
+ struct dp_netdev_flow *flow;
+ struct packet_batch *batch;
+ size_t j;
- if (ofpbuf_size(buf) < ETH_HEADER_LEN) {
- dpif_packet_delete(packets[i]);
+ if (OVS_UNLIKELY(!mfs[i])) {
continue;
}
- miniflow_extract(buf, md, &key.flow);
+ if (OVS_UNLIKELY(!rules[i])) {
- netdev_flow = dp_netdev_lookup_flow(dp, &key.flow);
+ dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
- if (netdev_flow) {
- if (!batch.flow) {
- packet_batch_init(&batch, netdev_flow, packets[i], md,
- &key.flow);
- } else if (batch.flow == netdev_flow) {
- packet_batch_update(&batch, packets[i], &key.flow);
- } else {
- packet_batch_execute(&batch, dp);
- packet_batch_init(&batch, netdev_flow, packets[i], md,
- &key.flow);
+ if (OVS_LIKELY(dp->handler_queues)) {
+ uint32_t hash = miniflow_hash_5tuple(mfs[i], 0);
+ struct ofpbuf *buf = &packets[i]->ofpbuf;
+
+ dp_netdev_output_userspace(dp, buf, hash % dp->n_handlers,
+ DPIF_UC_MISS, mfs[i], NULL);
}
- } else {
- /* Packet's flow not in datapath */
- dp_netdev_count_packet(dp, DP_STAT_MISS, 1);
- if (dp->handler_queues) {
- /* Upcall */
- dp_netdev_output_userspace(dp, &buf, 1,
- miniflow_hash_5tuple(&key.flow, 0)
- % dp->n_handlers,
- DPIF_UC_MISS, &key.flow, NULL);
- } else {
- /* No upcall queue. Freeing the packet */
- dpif_packet_delete(packets[i]);
+ dpif_packet_delete(packets[i]);
+ continue;
+ }
+
+        /* XXX: This O(n^2) algorithm makes sense if we're operating under the
+         * assumption that the number of distinct flows (and therefore the
+         * number of distinct batches) is quite small. If this turns out not
+         * to be the case, it may make sense to pre-sort based on the
+         * netdev_flow pointer. That done, we can get the appropriate batching
+         * in O(n * log(n)) instead. */
+ batch = NULL;
+ flow = dp_netdev_flow_cast(rules[i]);
+ for (j = 0; j < n_batches; j++) {
+ if (batches[j].flow == flow) {
+ batch = &batches[j];
+ break;
}
}
+
+ if (!batch) {
+ batch = &batches[n_batches++];
+ packet_batch_init(batch, flow, md);
+ }
+ packet_batch_update(batch, packets[i], mfs[i]);
}
- if (batch.flow) {
- packet_batch_execute(&batch, dp);
+ for (i = 0; i < n_batches; i++) {
+ packet_batch_execute(&batches[i], dp);
}
}
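+/* An illustrative sketch of the O(n log n) alternative mentioned in the XXX
+ * comment above. The names 'struct packet_entry' and compare_flow() are
+ * hypothetical, not part of this patch:
+ *
+ *     struct packet_entry {
+ *         struct dp_netdev_flow *flow;
+ *         struct dpif_packet *packet;
+ *         const struct miniflow *mf;
+ *     };
+ *
+ *     static int
+ *     compare_flow(const void *a_, const void *b_)
+ *     {
+ *         const struct packet_entry *a = a_, *b = b_;
+ *         return a->flow < b->flow ? -1 : a->flow > b->flow;
+ *     }
+ *
+ *     qsort(entries, n, sizeof *entries, compare_flow);
+ *     for (i = 0; i < n; i++) {
+ *         if (!i || entries[i].flow != entries[i - 1].flow) {
+ *             batch = &batches[n_batches++];
+ *             packet_batch_init(batch, entries[i].flow, md);
+ *         }
+ *         packet_batch_update(batch, entries[i].packet, entries[i].mf);
+ *     }
+ */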
struct ofpbuf *buf = &u->buf;
size_t buf_size;
struct flow flow;
+ void *data;
upcall->type = type;
if (userdata) {
buf_size += NLA_ALIGN(userdata->nla_len);
}
+ buf_size += ofpbuf_size(packet);
ofpbuf_init(buf, buf_size);
/* Put ODP flow. */
NLA_ALIGN(userdata->nla_len));
}
- upcall->packet = *packet;
+    /* We have to copy the packet, because we cannot send DPDK mbufs to a
+     * non-pmd thread. Once upcall processing is done in the pmd thread,
+     * this copy can be avoided. */
+ data = ofpbuf_put(buf, ofpbuf_data(packet), ofpbuf_size(packet));
+ ofpbuf_use_stub(&upcall->packet, data, ofpbuf_size(packet));
+ ofpbuf_set_size(&upcall->packet, ofpbuf_size(packet));
seq_change(q->seq);
return 0;
} else {
- ofpbuf_delete(packet);
return ENOBUFS;
}
-
}
static int
-dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf **packets,
- int cnt, int queue_no, int type,
+dp_netdev_output_userspace(struct dp_netdev *dp, struct ofpbuf *packet,
+ int queue_no, int type,
const struct miniflow *key,
const struct nlattr *userdata)
{
struct dp_netdev_queue *q;
int error;
- int i;
fat_rwlock_rdlock(&dp->queue_rwlock);
q = &dp->handler_queues[queue_no];
ovs_mutex_lock(&q->mutex);
- for (i = 0; i < cnt; i++) {
- struct ofpbuf *packet = packets[i];
-
- error = dp_netdev_queue_userspace_packet(q, packet, type, key,
- userdata);
- if (error == ENOBUFS) {
- dp_netdev_count_packet(dp, DP_STAT_LOST, 1);
- }
+ error = dp_netdev_queue_userspace_packet(q, packet, type, key,
+ userdata);
+ if (error == ENOBUFS) {
+ dp_netdev_count_packet(dp, DP_STAT_LOST, 1);
}
ovs_mutex_unlock(&q->mutex);
fat_rwlock_unlock(&dp->queue_rwlock);
miniflow_initialize(&key.flow, key.buf);
for (i = 0; i < cnt; i++) {
- struct ofpbuf *packet, *userspace_packet;
+ struct ofpbuf *packet;
packet = &packets[i]->ofpbuf;
miniflow_extract(packet, md, &key.flow);
- userspace_packet = may_steal ? packet : ofpbuf_clone(packet);
-
- dp_netdev_output_userspace(aux->dp, &userspace_packet, 1,
+ dp_netdev_output_userspace(aux->dp, packet,
miniflow_hash_5tuple(&key.flow, 0)
% aux->dp->n_handlers,
DPIF_UC_ACTION, &key.flow,
userdata);
+ if (may_steal) {
+ dpif_packet_delete(packets[i]);
+ }
}
break;
}