*
* 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
* flow from being deleted from 'cls' (that's 'cls->rwlock') and it doesn't
- * protect members of 'flow' from modification (that's 'flow->mutex').
- *
- * 'flow->mutex' protects the members of 'flow' from modification. It doesn't
- * protect the flow from being deleted from 'cls' (that's 'cls->rwlock') and it
- * doesn't prevent the flow from being freed (that's 'flow->ref_cnt').
+ * protect members of 'flow' from modification.
*
* Some members, marked 'const', are immutable. Accessing other members
* requires synchronization, as noted in more detail below.
const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
const struct flow flow; /* The flow that created this entry. */
- /* Protects members marked OVS_GUARDED.
- *
- * Acquire after datapath's flow_mutex. */
- struct ovs_mutex mutex OVS_ACQ_AFTER(dp_netdev_mutex);
-
    /* Statistics.
     *
-     * Reading or writing these members requires 'mutex'. */
+     * XXX(review): 'flow->mutex' is removed elsewhere in this change, so
+     * this rule is stale; document the new synchronization for 'stats'. */
struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */
- /* Actions.
- *
- * Reading 'actions' requires 'mutex'.
- * Writing 'actions' requires 'mutex' and (to allow for transactions) the
- * datapath's flow_mutex. */
+ /* Actions. */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
};
* Thread-safety
* =============
*
- * A struct dp_netdev_actions 'actions' may be accessed without a risk of being
- * freed by code that holds a read-lock or write-lock on 'flow->mutex' (where
- * 'flow' is the dp_netdev_flow for which 'flow->actions == actions') or that
- * owns a reference to 'actions->ref_cnt' (or both). */
+ * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
/* These members are immutable: they do not change during the struct's
* lifetime. */
pthread_t thread;
int id;
atomic_uint change_seq;
- char *name;
};
/* Interface to netdev-based datapath. */
port_unref(struct dp_netdev_port *port)
{
if (port && ovs_refcount_unref(&port->ref_cnt) == 1) {
+ int n_rxq;
int i;
netdev_close(port->netdev);
netdev_restore_flags(port->sf);
- for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ n_rxq = netdev_n_rxq(port->netdev);
+ for (i = 0; i < n_rxq; i++) {
netdev_rxq_close(port->rxq[i]);
}
free(port->type);
cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
- ovs_mutex_destroy(&flow->mutex);
free(flow);
}
netdev_flow = xzalloc(sizeof *netdev_flow);
*CONST_CAST(struct flow *, &netdev_flow->flow) = *flow;
- ovs_mutex_init(&netdev_flow->mutex);
-
ovsthread_stats_init(&netdev_flow->stats);
ovsrcu_set(&netdev_flow->actions,
return error;
}
-struct dp_netdev_flow_state {
-    struct dp_netdev_actions *actions;
-    struct odputil_keybuf keybuf;
-    struct odputil_keybuf maskbuf;
-    struct dpif_flow_stats stats;
-};
-
-struct dp_netdev_flow_iter {
+/* A flow dump operation, possibly shared by several dump threads.  Embeds
+ * the generic 'struct dpif_flow_dump' as 'up' so that
+ * dpif_netdev_flow_dump_cast() can recover it with CONTAINER_OF. */
+struct dpif_netdev_flow_dump {
+    struct dpif_flow_dump up;
+
+    /* Iteration position in the datapath's 'flow_table'; advanced under
+     * 'mutex' by dpif_netdev_flow_dump_next(). */
    uint32_t bucket;
    uint32_t offset;
+
+    /* 0 while flows remain; EOF once the table has been fully traversed.
+     * Read and written only under 'mutex'. */
    int status;
+
+    /* Serializes access to the position and status above, since multiple
+     * dump threads may share one dump. */
    struct ovs_mutex mutex;
};
+/* Returns the 'struct dpif_netdev_flow_dump' that embeds 'dump'. */
-static void
-dpif_netdev_flow_dump_state_init(void **statep)
+static struct dpif_netdev_flow_dump *
+dpif_netdev_flow_dump_cast(struct dpif_flow_dump *dump)
{
-    struct dp_netdev_flow_state *state;
-
-    *statep = state = xmalloc(sizeof *state);
-    state->actions = NULL;
+    return CONTAINER_OF(dump, struct dpif_netdev_flow_dump, up);
}
+/* dpif 'flow_dump_create' callback: allocates a new flow dump for 'dpif_',
+ * positioned at the start of the datapath's flow table.  The caller owns
+ * the result and releases it with dpif_netdev_flow_dump_destroy(). */
-static void
-dpif_netdev_flow_dump_state_uninit(void *state_)
+static struct dpif_flow_dump *
+dpif_netdev_flow_dump_create(const struct dpif *dpif_)
{
-    struct dp_netdev_flow_state *state = state_;
+    struct dpif_netdev_flow_dump *dump;
-    free(state);
+    dump = xmalloc(sizeof *dump);
+    dpif_flow_dump_init(&dump->up, dpif_);
+    dump->bucket = 0;
+    dump->offset = 0;
+    dump->status = 0;
+    ovs_mutex_init(&dump->mutex);
+
+    return &dump->up;
}
+/* dpif 'flow_dump_destroy' callback: frees 'dump_' (created by
+ * dpif_netdev_flow_dump_create()).  Always returns 0. */
static int
-dpif_netdev_flow_dump_start(const struct dpif *dpif OVS_UNUSED, void **iterp)
+dpif_netdev_flow_dump_destroy(struct dpif_flow_dump *dump_)
{
-    struct dp_netdev_flow_iter *iter;
+    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
-    *iterp = iter = xmalloc(sizeof *iter);
-    iter->bucket = 0;
-    iter->offset = 0;
-    iter->status = 0;
-    ovs_mutex_init(&iter->mutex);
+    ovs_mutex_destroy(&dump->mutex);
+    free(dump);
    return 0;
}
+/* Per-thread state for a flow dump.  The key/mask buffers back the pointers
+ * filled into 'struct dpif_flow' by dpif_netdev_flow_dump_next(), so each
+ * dump thread can serialize flows without locking. */
+struct dpif_netdev_flow_dump_thread {
+    struct dpif_flow_dump_thread up;
+    /* The dump operation this thread participates in. */
+    struct dpif_netdev_flow_dump *dump;
+    /* Scratch buffers holding the most recently dumped flow's key and
+     * mask; valid until the next dpif_netdev_flow_dump_next() call. */
+    struct odputil_keybuf keybuf;
+    struct odputil_keybuf maskbuf;
+};
+
+/* Returns the 'struct dpif_netdev_flow_dump_thread' that embeds 'thread'. */
+static struct dpif_netdev_flow_dump_thread *
+dpif_netdev_flow_dump_thread_cast(struct dpif_flow_dump_thread *thread)
+{
+    return CONTAINER_OF(thread, struct dpif_netdev_flow_dump_thread, up);
+}
+
+/* dpif 'flow_dump_thread_create' callback: allocates per-thread dump state
+ * attached to 'dump_'.  The caller releases it with
+ * dpif_netdev_flow_dump_thread_destroy(). */
+static struct dpif_flow_dump_thread *
+dpif_netdev_flow_dump_thread_create(struct dpif_flow_dump *dump_)
+{
+    struct dpif_netdev_flow_dump *dump = dpif_netdev_flow_dump_cast(dump_);
+    struct dpif_netdev_flow_dump_thread *thread;
+
+    thread = xmalloc(sizeof *thread);
+    dpif_flow_dump_thread_init(&thread->up, &dump->up);
+    thread->dump = dump;
+    return &thread->up;
+}
+
+/* dpif 'flow_dump_thread_destroy' callback: frees 'thread_' (created by
+ * dpif_netdev_flow_dump_thread_create()).  Does not affect the dump that
+ * 'thread_' was attached to. */
+static void
+dpif_netdev_flow_dump_thread_destroy(struct dpif_flow_dump_thread *thread_)
+{
+    struct dpif_netdev_flow_dump_thread *thread
+        = dpif_netdev_flow_dump_thread_cast(thread_);
+
+    free(thread);
+}
+
/* XXX the caller must use 'actions' without quiescing */
+/* dpif 'flow_dump_next' callback: dumps at most one flow into 'f' per call
+ * (the 'max_flows' hint is ignored).  Returns 1 when a flow was dumped, or
+ * 0 at EOF or once an error has been recorded in 'dump->status'.  The key,
+ * mask and stats placed in 'f' point into 'thread_', so they remain valid
+ * only until the next call with the same 'thread_'. */
static int
-dpif_netdev_flow_dump_next(const struct dpif *dpif, void *iter_, void *state_,
-                           const struct nlattr **key, size_t *key_len,
-                           const struct nlattr **mask, size_t *mask_len,
-                           const struct nlattr **actions, size_t *actions_len,
-                           const struct dpif_flow_stats **stats)
-{
-    struct dp_netdev_flow_iter *iter = iter_;
-    struct dp_netdev_flow_state *state = state_;
-    struct dp_netdev *dp = get_dp_netdev(dpif);
+dpif_netdev_flow_dump_next(struct dpif_flow_dump_thread *thread_,
+                           struct dpif_flow *f, int max_flows OVS_UNUSED)
+{
+    struct dpif_netdev_flow_dump_thread *thread
+        = dpif_netdev_flow_dump_thread_cast(thread_);
+    struct dpif_netdev_flow_dump *dump = thread->dump;
+    struct dpif_netdev *dpif = dpif_netdev_cast(thread->up.dpif);
+    struct dp_netdev *dp = get_dp_netdev(&dpif->dpif);
    struct dp_netdev_flow *netdev_flow;
    struct flow_wildcards wc;
+    struct dp_netdev_actions *dp_actions;
+    struct ofpbuf buf;
    int error;
+    /* Advance the shared iteration position under 'dump->mutex'; only the
+     * position and status are shared between dump threads, everything
+     * below operates on thread-local state. */
-    ovs_mutex_lock(&iter->mutex);
-    error = iter->status;
+    ovs_mutex_lock(&dump->mutex);
+    error = dump->status;
    if (!error) {
        struct hmap_node *node;
        fat_rwlock_rdlock(&dp->cls.rwlock);
-        node = hmap_at_position(&dp->flow_table, &iter->bucket, &iter->offset);
+        node = hmap_at_position(&dp->flow_table, &dump->bucket, &dump->offset);
        if (node) {
            netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
        }
        fat_rwlock_unlock(&dp->cls.rwlock);
        if (!node) {
-            iter->status = error = EOF;
+            dump->status = error = EOF;
        }
    }
-    ovs_mutex_unlock(&iter->mutex);
+    ovs_mutex_unlock(&dump->mutex);
    if (error) {
-        return error;
+        return 0;
    }
+    /* NOTE(review): 'netdev_flow' is dereferenced below after the classifier
+     * lock was dropped above; presumably safe because the caller must not
+     * quiesce (see XXX above) -- confirm against the flow's RCU lifetime. */
    minimask_expand(&netdev_flow->cr.match.mask, &wc);
-    if (key) {
-        struct ofpbuf buf;
-
-        ofpbuf_use_stack(&buf, &state->keybuf, sizeof state->keybuf);
-        odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
-                               netdev_flow->flow.in_port.odp_port);
-
-        *key = ofpbuf_data(&buf);
-        *key_len = ofpbuf_size(&buf);
-    }
-
-    if (key && mask) {
-        struct ofpbuf buf;
-
-        ofpbuf_use_stack(&buf, &state->maskbuf, sizeof state->maskbuf);
-        odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
-                               odp_to_u32(wc.masks.in_port.odp_port),
-                               SIZE_MAX);
+    /* Key. */
+    ofpbuf_use_stack(&buf, &thread->keybuf, sizeof thread->keybuf);
+    odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
+                           netdev_flow->flow.in_port.odp_port, true);
+    f->key = ofpbuf_data(&buf);
+    f->key_len = ofpbuf_size(&buf);
-        *mask = ofpbuf_data(&buf);
-        *mask_len = ofpbuf_size(&buf);
-    }
+    /* Mask. */
+    ofpbuf_use_stack(&buf, &thread->maskbuf, sizeof thread->maskbuf);
+    odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
+                           odp_to_u32(wc.masks.in_port.odp_port),
+                           SIZE_MAX, true);
+    f->mask = ofpbuf_data(&buf);
+    f->mask_len = ofpbuf_size(&buf);
-    if (actions || stats) {
-        state->actions = NULL;
+    /* Actions. */
+    dp_actions = dp_netdev_flow_get_actions(netdev_flow);
+    f->actions = dp_actions->actions;
+    f->actions_len = dp_actions->size;
-        if (actions) {
-            state->actions = dp_netdev_flow_get_actions(netdev_flow);
-            *actions = state->actions->actions;
-            *actions_len = state->actions->size;
-        }
+    /* Stats. */
+    get_dpif_flow_stats(netdev_flow, &f->stats);
-        if (stats) {
-            get_dpif_flow_stats(netdev_flow, &state->stats);
-            *stats = &state->stats;
-        }
-    }
-
-    return 0;
-}
-
-static int
-dpif_netdev_flow_dump_done(const struct dpif *dpif OVS_UNUSED, void *iter_)
-{
-    struct dp_netdev_flow_iter *iter = iter_;
-
-    ovs_mutex_destroy(&iter->mutex);
-    free(iter);
-    return 0;
+    return 1;
}
static int
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct pkt_metadata *md = &execute->md;
- struct miniflow key;
- uint32_t buf[FLOW_U32S];
+ struct {
+ struct miniflow flow;
+ uint32_t buf[FLOW_U32S];
+ } key;
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
}
/* Extract flow key. */
- miniflow_initialize(&key, buf);
- miniflow_extract(execute->packet, md, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(execute->packet, md, &key.flow);
ovs_rwlock_rdlock(&dp->port_rwlock);
- dp_netdev_execute_actions(dp, &key, execute->packet, false, md,
+ dp_netdev_execute_actions(dp, &key.flow, execute->packet, false, md,
execute->actions, execute->actions_len);
ovs_rwlock_unlock(&dp->port_rwlock);
int poll_cnt;
int i;
- f->name = xasprintf("pmd_%u", ovsthread_id_self());
- set_subprogram_name("%s", f->name);
poll_cnt = 0;
poll_list = NULL;
}
free(poll_list);
- free(f->name);
return NULL;
}
/* Each thread will distribute all devices rx-queues among
* themselves. */
- xpthread_create(&f->thread, NULL, pmd_thread_main, f);
+ f->thread = ovs_thread_create("pmd", pmd_thread_main, f);
}
}
OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_flow *netdev_flow;
- struct miniflow key;
- uint32_t buf[FLOW_U32S];
+ struct {
+ struct miniflow flow;
+ uint32_t buf[FLOW_U32S];
+ } key;
if (ofpbuf_size(packet) < ETH_HEADER_LEN) {
ofpbuf_delete(packet);
return;
}
- miniflow_initialize(&key, buf);
- miniflow_extract(packet, md, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(packet, md, &key.flow);
- netdev_flow = dp_netdev_lookup_flow(dp, &key);
+ netdev_flow = dp_netdev_lookup_flow(dp, &key.flow);
if (netdev_flow) {
struct dp_netdev_actions *actions;
- dp_netdev_flow_used(netdev_flow, packet, &key);
+ dp_netdev_flow_used(netdev_flow, packet, &key.flow);
actions = dp_netdev_flow_get_actions(netdev_flow);
- dp_netdev_execute_actions(dp, &key, packet, true, md,
+ dp_netdev_execute_actions(dp, &key.flow, packet, true, md,
actions->actions, actions->size);
dp_netdev_count_packet(dp, DP_STAT_HIT);
} else if (dp->handler_queues) {
dp_netdev_count_packet(dp, DP_STAT_MISS);
dp_netdev_output_userspace(dp, packet,
- miniflow_hash_5tuple(&key, 0)
+ miniflow_hash_5tuple(&key.flow, 0)
% dp->n_handlers,
- DPIF_UC_MISS, &key, NULL);
+ DPIF_UC_MISS, &key.flow, NULL);
ofpbuf_delete(packet);
}
}
/* Put ODP flow. */
miniflow_expand(key, &flow);
- odp_flow_key_from_flow(buf, &flow, NULL, flow.in_port.odp_port);
+ odp_flow_key_from_flow(buf, &flow, NULL, flow.in_port.odp_port, true);
upcall->key = ofpbuf_data(buf);
upcall->key_len = ofpbuf_size(buf);
if (hash_act->hash_alg == OVS_HASH_ALG_L4) {
/* Hash need not be symmetric, nor does it need to include
* L2 fields. */
- hash = miniflow_hash_5tuple(aux->key, hash_act->hash_bias);
+ hash = miniflow_hash_5tuple(aux->key, hash_act->hash_basis);
if (!hash) {
hash = 1; /* 0 is not valid */
}
dpif_netdev_flow_put,
dpif_netdev_flow_del,
dpif_netdev_flow_flush,
- dpif_netdev_flow_dump_state_init,
- dpif_netdev_flow_dump_start,
+ dpif_netdev_flow_dump_create,
+ dpif_netdev_flow_dump_destroy,
+ dpif_netdev_flow_dump_thread_create,
+ dpif_netdev_flow_dump_thread_destroy,
dpif_netdev_flow_dump_next,
- NULL,
- dpif_netdev_flow_dump_done,
- dpif_netdev_flow_dump_state_uninit,
dpif_netdev_execute,
NULL, /* operate */
dpif_netdev_recv_set,