*
* 'flow->ref_cnt' protects 'flow' from being freed. It doesn't protect the
* flow from being deleted from 'cls' (that's 'cls->rwlock') and it doesn't
- * protect members of 'flow' from modification (that's 'flow->mutex').
- *
- * 'flow->mutex' protects the members of 'flow' from modification. It doesn't
- * protect the flow from being deleted from 'cls' (that's 'cls->rwlock') and it
- * doesn't prevent the flow from being freed (that's 'flow->ref_cnt').
+ * protect members of 'flow' from modification.
*
* Some members, marked 'const', are immutable. Accessing other members
* requires synchronization, as noted in more detail below.
const struct hmap_node node; /* In owning dp_netdev's 'flow_table'. */
const struct flow flow; /* The flow that created this entry. */
- /* Protects members marked OVS_GUARDED.
- *
- * Acquire after datapath's flow_mutex. */
- struct ovs_mutex mutex OVS_ACQ_AFTER(dp_netdev_mutex);
-
 /* Statistics.
  *
- * Reading or writing these members requires 'mutex'. */
+ * Synchronization is handled by 'ovsthread_stats' itself; the per-flow
+ * 'mutex' is removed by this change and can no longer be the guard. */
 struct ovsthread_stats stats; /* Contains "struct dp_netdev_flow_stats". */
- /* Actions.
- *
- * Reading 'actions' requires 'mutex'.
- * Writing 'actions' requires 'mutex' and (to allow for transactions) the
- * datapath's flow_mutex. */
+    /* Actions.  An RCU-protected pointer (see OVSRCU_TYPE below), read
+     * via dp_netdev_flow_get_actions(). */
OVSRCU_TYPE(struct dp_netdev_actions *) actions;
};
* Thread-safety
* =============
*
- * A struct dp_netdev_actions 'actions' may be accessed without a risk of being
- * freed by code that holds a read-lock or write-lock on 'flow->mutex' (where
- * 'flow' is the dp_netdev_flow for which 'flow->actions == actions') or that
- * owns a reference to 'actions->ref_cnt' (or both). */
+ * A struct dp_netdev_actions 'actions' is protected with RCU. */
struct dp_netdev_actions {
/* These members are immutable: they do not change during the struct's
* lifetime. */
port_unref(struct dp_netdev_port *port)
{
if (port && ovs_refcount_unref(&port->ref_cnt) == 1) {
+ int n_rxq = netdev_n_rxq(port->netdev);
int i;
netdev_close(port->netdev);
netdev_restore_flags(port->sf);
- for (i = 0; i < netdev_n_rxq(port->netdev); i++) {
+ for (i = 0; i < n_rxq; i++) {
netdev_rxq_close(port->rxq[i]);
}
+ free(port->rxq);
free(port->type);
free(port);
}
cls_rule_destroy(CONST_CAST(struct cls_rule *, &flow->cr));
dp_netdev_actions_free(dp_netdev_flow_get_actions(flow));
- ovs_mutex_destroy(&flow->mutex);
free(flow);
}
static int
dpif_netdev_flow_get(const struct dpif *dpif,
const struct nlattr *nl_key, size_t nl_key_len,
- struct ofpbuf **actionsp, struct dpif_flow_stats *stats)
+ struct ofpbuf **bufp,
+ struct nlattr **maskp, size_t *mask_len,
+ struct nlattr **actionsp, size_t *actions_len,
+ struct dpif_flow_stats *stats)
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_flow *netdev_flow;
get_dpif_flow_stats(netdev_flow, stats);
}
- if (actionsp) {
+ if (maskp || actionsp) {
struct dp_netdev_actions *actions;
+ size_t len = 0;
actions = dp_netdev_flow_get_actions(netdev_flow);
- *actionsp = ofpbuf_clone_data(actions->actions, actions->size);
+ len += maskp ? sizeof(struct odputil_keybuf) : 0;
+ len += actionsp ? actions->size : 0;
+
+ *bufp = ofpbuf_new(len);
+ if (maskp) {
+ struct flow_wildcards wc;
+
+ minimask_expand(&netdev_flow->cr.match.mask, &wc);
+ odp_flow_key_from_mask(*bufp, &wc.masks, &netdev_flow->flow,
+ odp_to_u32(wc.masks.in_port.odp_port),
+ SIZE_MAX);
+ *maskp = ofpbuf_data(*bufp);
+ *mask_len = ofpbuf_size(*bufp);
+ }
+        if (actionsp) {
+            /* Reuse the 'actions' fetched above rather than re-declaring a
+             * shadowing copy and calling dp_netdev_flow_get_actions() a
+             * second time. */
+            *actionsp = ofpbuf_put(*bufp, actions->actions, actions->size);
+            *actions_len = actions->size;
+        }
}
} else {
error = ENOENT;
netdev_flow = xzalloc(sizeof *netdev_flow);
*CONST_CAST(struct flow *, &netdev_flow->flow) = *flow;
- ovs_mutex_init(&netdev_flow->mutex);
-
ovsthread_stats_init(&netdev_flow->stats);
ovsrcu_set(&netdev_flow->actions,
}
}
ovs_mutex_unlock(&dp->flow_mutex);
+ miniflow_destroy(&miniflow);
return error;
}
}
struct dp_netdev_flow_state {
- struct dp_netdev_actions *actions;
struct odputil_keybuf keybuf;
struct odputil_keybuf maskbuf;
struct dpif_flow_stats stats;
struct dp_netdev_flow_state *state;
*statep = state = xmalloc(sizeof *state);
- state->actions = NULL;
}
static void
}
if (actions || stats) {
- state->actions = NULL;
-
if (actions) {
- state->actions = dp_netdev_flow_get_actions(netdev_flow);
- *actions = state->actions->actions;
- *actions_len = state->actions->size;
+ struct dp_netdev_actions *dp_actions =
+ dp_netdev_flow_get_actions(netdev_flow);
+
+ *actions = dp_actions->actions;
+ *actions_len = dp_actions->size;
}
if (stats) {
{
struct dp_netdev *dp = get_dp_netdev(dpif);
struct pkt_metadata *md = &execute->md;
- struct miniflow key;
- uint32_t buf[FLOW_U32S];
+ struct {
+ struct miniflow flow;
+ uint32_t buf[FLOW_U32S];
+ } key;
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
}
/* Extract flow key. */
- miniflow_initialize(&key, buf);
- miniflow_extract(execute->packet, md, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(execute->packet, md, &key.flow);
ovs_rwlock_rdlock(&dp->port_rwlock);
- dp_netdev_execute_actions(dp, &key, execute->packet, false, md,
+ dp_netdev_execute_actions(dp, &key.flow, execute->packet, false, md,
execute->actions, execute->actions_len);
ovs_rwlock_unlock(&dp->port_rwlock);
OVS_REQ_RDLOCK(dp->port_rwlock)
{
struct dp_netdev_flow *netdev_flow;
- struct miniflow key;
- uint32_t buf[FLOW_U32S];
+ struct {
+ struct miniflow flow;
+ uint32_t buf[FLOW_U32S];
+ } key;
if (ofpbuf_size(packet) < ETH_HEADER_LEN) {
ofpbuf_delete(packet);
return;
}
- miniflow_initialize(&key, buf);
- miniflow_extract(packet, md, &key);
+ miniflow_initialize(&key.flow, key.buf);
+ miniflow_extract(packet, md, &key.flow);
- netdev_flow = dp_netdev_lookup_flow(dp, &key);
+ netdev_flow = dp_netdev_lookup_flow(dp, &key.flow);
if (netdev_flow) {
struct dp_netdev_actions *actions;
- dp_netdev_flow_used(netdev_flow, packet, &key);
+ dp_netdev_flow_used(netdev_flow, packet, &key.flow);
actions = dp_netdev_flow_get_actions(netdev_flow);
- dp_netdev_execute_actions(dp, &key, packet, true, md,
+ dp_netdev_execute_actions(dp, &key.flow, packet, true, md,
actions->actions, actions->size);
dp_netdev_count_packet(dp, DP_STAT_HIT);
} else if (dp->handler_queues) {
dp_netdev_count_packet(dp, DP_STAT_MISS);
dp_netdev_output_userspace(dp, packet,
- miniflow_hash_5tuple(&key, 0)
+ miniflow_hash_5tuple(&key.flow, 0)
% dp->n_handlers,
- DPIF_UC_MISS, &key, NULL);
+ DPIF_UC_MISS, &key.flow, NULL);
ofpbuf_delete(packet);
}
}
struct ofpbuf *buf = &u->buf;
size_t buf_size;
struct flow flow;
+ void *data;
upcall->type = type;
NLA_ALIGN(userdata->nla_len));
}
- ofpbuf_set_data(&upcall->packet,
- ofpbuf_put(buf, ofpbuf_data(packet), ofpbuf_size(packet)));
+ data = ofpbuf_put(buf, ofpbuf_data(packet), ofpbuf_size(packet));
+ ofpbuf_use_stub(&upcall->packet, data, ofpbuf_size(packet));
ofpbuf_set_size(&upcall->packet, ofpbuf_size(packet));
seq_change(q->seq);
p = dp_netdev_lookup_port(aux->dp, u32_to_odp(nl_attr_get_u32(a)));
if (p) {
netdev_send(p->netdev, packet, may_steal);
+ } else if (may_steal) {
+ ofpbuf_delete(packet);
}
+
break;
case OVS_ACTION_ATTR_USERSPACE: {
break;
} else {
+ if (may_steal) {
+ ofpbuf_delete(packet);
+ }
VLOG_WARN("Packet dropped. Max recirculation depth exceeded.");
}
break;