}
}
+/* Converts to the dpif_flow format, using 'key_buf' and 'mask_buf' for
+ * storing the netlink-formatted key/mask. 'key_buf' may be the same as
+ * 'mask_buf'. Actions will be returned without copying, by relying on RCU to
+ * protect them. */
static void
-dp_netdev_flow_to_dpif_flow(const struct dp_netdev_flow *netdev_flow,
- struct ofpbuf *buffer, struct dpif_flow *flow)
+dp_netdev_flow_to_dpif_flow(const struct dpif *dpif,
+ const struct dp_netdev_flow *netdev_flow,
+ struct ofpbuf *key_buf, struct ofpbuf *mask_buf,
+ struct dpif_flow *flow)
{
struct flow_wildcards wc;
struct dp_netdev_actions *actions;
+ size_t offset;
miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
- odp_flow_key_from_mask(buffer, &wc.masks, &netdev_flow->flow,
+
+ /* Key. */
+ offset = ofpbuf_size(key_buf);
+ flow->key = ofpbuf_tail(key_buf);
+ odp_flow_key_from_flow(key_buf, &netdev_flow->flow, &wc.masks,
+ netdev_flow->flow.in_port.odp_port, true);
+ flow->key_len = ofpbuf_size(key_buf) - offset;
+
+ /* Mask. */
+ offset = ofpbuf_size(mask_buf);
+ flow->mask = ofpbuf_tail(mask_buf);
+ odp_flow_key_from_mask(mask_buf, &wc.masks, &netdev_flow->flow,
odp_to_u32(wc.masks.in_port.odp_port),
SIZE_MAX, true);
- flow->mask = ofpbuf_data(buffer);
- flow->mask_len = ofpbuf_size(buffer);
+ flow->mask_len = ofpbuf_size(mask_buf) - offset;
+
+ /* Actions. */
actions = dp_netdev_flow_get_actions(netdev_flow);
flow->actions = actions->actions;
flow->actions_len = actions->size;
+
+ /* Flow ID. */
+ dpif_flow_hash(dpif, &netdev_flow->flow, sizeof netdev_flow->flow,
+ &flow->ufid);
get_dpif_flow_stats(netdev_flow, &flow->stats);
}
netdev_flow = dp_netdev_find_flow(dp, &key);
if (netdev_flow) {
- dp_netdev_flow_to_dpif_flow(netdev_flow, get->buffer, get->flow);
+ dp_netdev_flow_to_dpif_flow(dpif, netdev_flow, get->buffer,
+ get->buffer, get->flow);
} else {
error = ENOENT;
}
struct odputil_keybuf *keybuf = &thread->keybuf[i];
struct dp_netdev_flow *netdev_flow = netdev_flows[i];
struct dpif_flow *f = &flows[i];
- struct dp_netdev_actions *dp_actions;
- struct flow_wildcards wc;
- struct ofpbuf buf;
-
- miniflow_expand(&netdev_flow->cr.mask->mf, &wc.masks);
-
- /* Key. */
- ofpbuf_use_stack(&buf, keybuf, sizeof *keybuf);
- odp_flow_key_from_flow(&buf, &netdev_flow->flow, &wc.masks,
- netdev_flow->flow.in_port.odp_port, true);
- f->key = ofpbuf_data(&buf);
- f->key_len = ofpbuf_size(&buf);
-
- /* Mask. */
- ofpbuf_use_stack(&buf, maskbuf, sizeof *maskbuf);
- odp_flow_key_from_mask(&buf, &wc.masks, &netdev_flow->flow,
- odp_to_u32(wc.masks.in_port.odp_port),
- SIZE_MAX, true);
- f->mask = ofpbuf_data(&buf);
- f->mask_len = ofpbuf_size(&buf);
-
- /* Actions. */
- dp_actions = dp_netdev_flow_get_actions(netdev_flow);
- f->actions = dp_actions->actions;
- f->actions_len = dp_actions->size;
+ struct ofpbuf key, mask;
- /* Stats. */
- get_dpif_flow_stats(netdev_flow, &f->stats);
+ ofpbuf_use_stack(&key, keybuf, sizeof *keybuf);
+ ofpbuf_use_stack(&mask, maskbuf, sizeof *maskbuf);
+ dp_netdev_flow_to_dpif_flow(&dpif->dpif, netdev_flow, &key, &mask, f);
}
return n_flows;
* the 'non_pmd_mutex'. */
if (pmd->core_id == NON_PMD_CORE_ID) {
ovs_mutex_lock(&dp->non_pmd_mutex);
+ ovs_mutex_lock(&dp->port_mutex);
}
dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions,
execute->actions_len);
if (pmd->core_id == NON_PMD_CORE_ID) {
+ ovs_mutex_unlock(&dp->port_mutex);
ovs_mutex_unlock(&dp->non_pmd_mutex);
}
static int
dp_netdev_upcall(struct dp_netdev *dp, struct dpif_packet *packet_,
- struct flow *flow, struct flow_wildcards *wc,
+ struct flow *flow, struct flow_wildcards *wc, ovs_u128 *ufid,
enum dpif_upcall_type type, const struct nlattr *userdata,
struct ofpbuf *actions, struct ofpbuf *put_actions)
{
ds_destroy(&ds);
}
- return dp->upcall_cb(packet, flow, type, userdata, actions, wc,
+ return dp->upcall_cb(packet, flow, ufid, type, userdata, actions, wc,
put_actions, dp->upcall_aux);
}
if (OVS_UNLIKELY(any_miss) && !fat_rwlock_tryrdlock(&dp->upcall_rwlock)) {
uint64_t actions_stub[512 / 8], slow_stub[512 / 8];
struct ofpbuf actions, put_actions;
+ ovs_u128 ufid;
ofpbuf_use_stub(&actions, actions_stub, sizeof actions_stub);
ofpbuf_use_stub(&put_actions, slow_stub, sizeof slow_stub);
ofpbuf_clear(&actions);
ofpbuf_clear(&put_actions);
+ dpif_flow_hash(dp->dpif, &match.flow, sizeof match.flow, &ufid);
error = dp_netdev_upcall(dp, packets[i], &match.flow, &match.wc,
- DPIF_UC_MISS, NULL, &actions,
+ &ufid, DPIF_UC_MISS, NULL, &actions,
&put_actions);
if (OVS_UNLIKELY(error && error != ENOSPC)) {
continue;
const struct nlattr *userdata;
struct ofpbuf actions;
struct flow flow;
+ ovs_u128 ufid;
userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
ofpbuf_init(&actions, 0);
ofpbuf_clear(&actions);
flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow);
- error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
- DPIF_UC_ACTION, userdata, &actions,
+ dpif_flow_hash(dp->dpif, &flow, sizeof flow, &ufid);
+ error = dp_netdev_upcall(dp, packets[i], &flow, NULL, &ufid,
+ DPIF_UC_ACTION, userdata, &actions,
NULL);
if (!error || error == ENOSPC) {
dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,