Today dpif-netdev has a single metadata for a given batch, since one
batch belongs to one port, but soon packets from a single tunnel port
can belong to different ports, so we need to have per-packet metadata.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **, int c,
bool create, struct dpif **);
static void dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **, int c,
- bool may_steal, struct pkt_metadata *,
const struct nlattr *actions,
size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
const struct nlattr *actions,
size_t actions_len);
static void dp_netdev_input(struct dp_netdev_pmd_thread *,
- struct dpif_packet **, int cnt,
- struct pkt_metadata *);
+ struct dpif_packet **, int cnt);
+
static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev *dp, int index,
static void dp_netdev_disable_upcall(struct dp_netdev *);
static void dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd,
struct dp_netdev *dp, int index,
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_pmd_thread *pmd;
struct dpif_packet packet, *pp;
struct dp_netdev *dp = get_dp_netdev(dpif);
struct dp_netdev_pmd_thread *pmd;
struct dpif_packet packet, *pp;
- struct pkt_metadata *md = &execute->md;
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
if (ofpbuf_size(execute->packet) < ETH_HEADER_LEN ||
ofpbuf_size(execute->packet) > UINT16_MAX) {
}
packet.ofpbuf = *execute->packet;
}
packet.ofpbuf = *execute->packet;
+ packet.md = execute->md;
pp = &packet;
/* Tries finding the 'pmd'. If NULL is returned, that means
pp = &packet;
/* Tries finding the 'pmd'. If NULL is returned, that means
if (pmd->core_id == NON_PMD_CORE_ID) {
ovs_mutex_lock(&dp->non_pmd_mutex);
}
if (pmd->core_id == NON_PMD_CORE_ID) {
ovs_mutex_lock(&dp->non_pmd_mutex);
}
- dp_netdev_execute_actions(pmd, &pp, 1, false, md, execute->actions,
+ dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions,
execute->actions_len);
if (pmd->core_id == NON_PMD_CORE_ID) {
ovs_mutex_unlock(&dp->non_pmd_mutex);
execute->actions_len);
if (pmd->core_id == NON_PMD_CORE_ID) {
ovs_mutex_unlock(&dp->non_pmd_mutex);
* reallocate the ofpbuf memory. We need to pass those changes to the
* caller */
*execute->packet = packet.ofpbuf;
* reallocate the ofpbuf memory. We need to pass those changes to the
* caller */
*execute->packet = packet.ofpbuf;
+ execute->md = packet.md;
error = netdev_rxq_recv(rxq, packets, &cnt);
if (!error) {
error = netdev_rxq_recv(rxq, packets, &cnt);
if (!error) {
- struct pkt_metadata md = PKT_METADATA_INITIALIZER(port->port_no);
- dp_netdev_input(pmd, packets, cnt, &md);
+
+ /* XXX: initialize md in netdev implementation. */
+ for (i = 0; i < cnt; i++) {
+ packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
+ }
+ dp_netdev_input(pmd, packets, cnt);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
} else if (error != EAGAIN && error != EOPNOTSUPP) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
struct dp_netdev_flow *flow;
struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
struct dp_netdev_flow *flow;
struct dpif_packet *packets[NETDEV_MAX_RX_BATCH];
- struct pkt_metadata md;
-packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow,
- struct pkt_metadata *md)
+packet_batch_init(struct packet_batch *batch, struct dp_netdev_flow *flow)
batch->packet_count = 0;
batch->byte_count = 0;
batch->packet_count = 0;
batch->byte_count = 0;
actions = dp_netdev_flow_get_actions(flow);
dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
actions = dp_netdev_flow_get_actions(flow);
dp_netdev_execute_actions(pmd, batch->packets, batch->packet_count, true,
- &batch->md, actions->actions, actions->size);
+ actions->actions, actions->size);
dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
}
static inline bool
dp_netdev_count_packet(pmd->dp, DP_STAT_HIT, batch->packet_count);
}
static inline bool
-dp_netdev_queue_batches(struct dpif_packet *pkt, struct pkt_metadata *md,
+dp_netdev_queue_batches(struct dpif_packet *pkt,
struct dp_netdev_flow *flow, const struct miniflow *mf,
struct packet_batch *batches, size_t *n_batches,
size_t max_batches)
struct dp_netdev_flow *flow, const struct miniflow *mf,
struct packet_batch *batches, size_t *n_batches,
size_t max_batches)
}
batch = &batches[(*n_batches)++];
}
batch = &batches[(*n_batches)++];
- packet_batch_init(batch, flow, md);
+ packet_batch_init(batch, flow);
packet_batch_update(batch, pkt, mf);
return true;
}
packet_batch_update(batch, pkt, mf);
return true;
}
*/
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets,
*/
static inline size_t
emc_processing(struct dp_netdev_pmd_thread *pmd, struct dpif_packet **packets,
- size_t cnt, struct pkt_metadata *md,
- struct netdev_flow_key *keys)
+ size_t cnt, struct netdev_flow_key *keys)
{
struct netdev_flow_key key;
struct packet_batch batches[4];
{
struct netdev_flow_key key;
struct packet_batch batches[4];
- miniflow_extract(&packets[i]->ofpbuf, md, &key.flow);
+ miniflow_extract(&packets[i]->ofpbuf, &packets[i]->md, &key.flow);
hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.flow);
flow = emc_lookup(flow_cache, &key.flow, hash);
hash = dpif_netdev_packet_get_dp_hash(packets[i], &key.flow);
flow = emc_lookup(flow_cache, &key.flow, hash);
- if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i], md,
+ if (OVS_UNLIKELY(!dp_netdev_queue_batches(packets[i],
flow, &key.flow,
batches, &n_batches,
ARRAY_SIZE(batches)))) {
flow, &key.flow,
batches, &n_batches,
ARRAY_SIZE(batches)))) {
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **packets, size_t cnt,
static inline void
fast_path_processing(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **packets, size_t cnt,
- struct pkt_metadata *md, struct netdev_flow_key *keys)
+ struct netdev_flow_key *keys)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
/* We can't allow the packet batching in the next loop to execute
* the actions. Otherwise, if there are any slow path actions,
* we'll send the packet up twice. */
/* We can't allow the packet batching in the next loop to execute
* the actions. Otherwise, if there are any slow path actions,
* we'll send the packet up twice. */
- dp_netdev_execute_actions(pmd, &packets[i], 1, true, md,
+ dp_netdev_execute_actions(pmd, &packets[i], 1, true,
ofpbuf_data(&actions),
ofpbuf_size(&actions));
ofpbuf_data(&actions),
ofpbuf_size(&actions));
flow = dp_netdev_flow_cast(rules[i]);
emc_insert(flow_cache, mfs[i], dpif_packet_get_dp_hash(packet),
flow);
flow = dp_netdev_flow_cast(rules[i]);
emc_insert(flow_cache, mfs[i], dpif_packet_get_dp_hash(packet),
flow);
- dp_netdev_queue_batches(packet, md, flow, mfs[i], batches, &n_batches,
+ dp_netdev_queue_batches(packet, flow, mfs[i], batches, &n_batches,
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
static void
dp_netdev_input(struct dp_netdev_pmd_thread *pmd,
- struct dpif_packet **packets, int cnt, struct pkt_metadata *md)
+ struct dpif_packet **packets, int cnt)
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
{
#if !defined(__CHECKER__) && !defined(_WIN32)
const size_t PKT_ARRAY_SIZE = cnt;
struct netdev_flow_key keys[PKT_ARRAY_SIZE];
size_t newcnt;
struct netdev_flow_key keys[PKT_ARRAY_SIZE];
size_t newcnt;
- newcnt = emc_processing(pmd, packets, cnt, md, keys);
+ newcnt = emc_processing(pmd, packets, cnt, keys);
if (OVS_UNLIKELY(newcnt)) {
if (OVS_UNLIKELY(newcnt)) {
- fast_path_processing(pmd, packets, newcnt, md, keys);
+ fast_path_processing(pmd, packets, newcnt, keys);
static void
dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
static void
dp_execute_cb(void *aux_, struct dpif_packet **packets, int cnt,
- struct pkt_metadata *md,
const struct nlattr *a, bool may_steal)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
const struct nlattr *a, bool may_steal)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
- flow_extract(&packets[i]->ofpbuf, md, &flow);
+ flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow);
error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
DPIF_UC_ACTION, userdata, &actions,
NULL);
if (!error || error == ENOSPC) {
dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
error = dp_netdev_upcall(dp, packets[i], &flow, NULL,
DPIF_UC_ACTION, userdata, &actions,
NULL);
if (!error || error == ENOSPC) {
dp_netdev_execute_actions(pmd, &packets[i], 1, may_steal,
- md, ofpbuf_data(&actions),
ofpbuf_size(&actions));
} else if (may_steal) {
dpif_packet_delete(packets[i]);
ofpbuf_size(&actions));
} else if (may_steal) {
dpif_packet_delete(packets[i]);
hash = 1; /* 0 is not valid */
}
hash = 1; /* 0 is not valid */
}
- if (i == 0) {
- md->dp_hash = hash;
- }
dpif_packet_set_dp_hash(packets[i], hash);
}
return;
dpif_packet_set_dp_hash(packets[i], hash);
}
return;
(*depth)++;
for (i = 0; i < cnt; i++) {
struct dpif_packet *recirc_pkt;
(*depth)++;
for (i = 0; i < cnt; i++) {
struct dpif_packet *recirc_pkt;
- struct pkt_metadata recirc_md = *md;
recirc_pkt = (may_steal) ? packets[i]
: dpif_packet_clone(packets[i]);
recirc_pkt = (may_steal) ? packets[i]
: dpif_packet_clone(packets[i]);
- recirc_md.recirc_id = nl_attr_get_u32(a);
+ recirc_pkt->md.recirc_id = nl_attr_get_u32(a);
/* Hash is private to each packet */
/* Hash is private to each packet */
- recirc_md.dp_hash = dpif_packet_get_dp_hash(packets[i]);
+ recirc_pkt->md.dp_hash = dpif_packet_get_dp_hash(packets[i]);
- dp_netdev_input(pmd, &recirc_pkt, 1,
- &recirc_md);
+ dp_netdev_input(pmd, &recirc_pkt, 1);
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **packets, int cnt,
static void
dp_netdev_execute_actions(struct dp_netdev_pmd_thread *pmd,
struct dpif_packet **packets, int cnt,
- bool may_steal, struct pkt_metadata *md,
const struct nlattr *actions, size_t actions_len)
{
const struct nlattr *actions, size_t actions_len)
{
- struct dp_netdev_execute_aux aux = {pmd};
+ struct dp_netdev_execute_aux aux = { pmd };
- odp_execute_actions(&aux, packets, cnt, may_steal, md, actions,
+ odp_execute_actions(&aux, packets, cnt, may_steal, actions,
actions_len, dp_execute_cb);
}
actions_len, dp_execute_cb);
}
* meaningful. */
static void
dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
* meaningful. */
static void
dpif_execute_helper_cb(void *aux_, struct dpif_packet **packets, int cnt,
- struct pkt_metadata *md,
const struct nlattr *action, bool may_steal OVS_UNUSED)
{
struct dpif_execute_helper_aux *aux = aux_;
int type = nl_attr_type(action);
const struct nlattr *action, bool may_steal OVS_UNUSED)
{
struct dpif_execute_helper_aux *aux = aux_;
int type = nl_attr_type(action);
- struct ofpbuf * packet = &packets[0]->ofpbuf;
+ struct ofpbuf *packet = &packets[0]->ofpbuf;
+ struct pkt_metadata *md = &packets[0]->md;
COVERAGE_INC(dpif_execute_with_help);
packet.ofpbuf = *execute->packet;
COVERAGE_INC(dpif_execute_with_help);
packet.ofpbuf = *execute->packet;
+ packet.md = execute->md;
- odp_execute_actions(&aux, &pp, 1, false, &execute->md, execute->actions,
+ odp_execute_actions(&aux, &pp, 1, false, execute->actions,
execute->actions_len, dpif_execute_helper_cb);
/* Even though may_steal is set to false, some actions could modify or
* reallocate the ofpbuf memory. We need to pass those changes to the
* caller */
*execute->packet = packet.ofpbuf;
execute->actions_len, dpif_execute_helper_cb);
/* Even though may_steal is set to false, some actions could modify or
* reallocate the ofpbuf memory. We need to pass those changes to the
* caller */
*execute->packet = packet.ofpbuf;
+ execute->md = packet.md;
-odp_execute_set_action(struct dpif_packet *packet, const struct nlattr *a,
- struct pkt_metadata *md)
+odp_execute_set_action(struct dpif_packet *packet, const struct nlattr *a)
{
enum ovs_key_attr type = nl_attr_type(a);
const struct ovs_key_ipv4 *ipv4_key;
{
enum ovs_key_attr type = nl_attr_type(a);
const struct ovs_key_ipv4 *ipv4_key;
const struct ovs_key_tcp *tcp_key;
const struct ovs_key_udp *udp_key;
const struct ovs_key_sctp *sctp_key;
const struct ovs_key_tcp *tcp_key;
const struct ovs_key_udp *udp_key;
const struct ovs_key_sctp *sctp_key;
+ struct pkt_metadata *md = &packet->md;
switch (type) {
case OVS_KEY_ATTR_PRIORITY:
switch (type) {
case OVS_KEY_ATTR_PRIORITY:
static void
odp_execute_masked_set_action(struct dpif_packet *packet,
static void
odp_execute_masked_set_action(struct dpif_packet *packet,
- const struct nlattr *a, struct pkt_metadata *md)
+ const struct nlattr *a)
+ struct pkt_metadata *md = &packet->md;
enum ovs_key_attr type = nl_attr_type(a);
struct mpls_hdr *mh;
enum ovs_key_attr type = nl_attr_type(a);
struct mpls_hdr *mh;
static void
odp_execute_sample(void *dp, struct dpif_packet *packet, bool steal,
static void
odp_execute_sample(void *dp, struct dpif_packet *packet, bool steal,
- struct pkt_metadata *md, const struct nlattr *action,
+ const struct nlattr *action,
odp_execute_cb dp_execute_action)
{
const struct nlattr *subactions = NULL;
odp_execute_cb dp_execute_action)
{
const struct nlattr *subactions = NULL;
- odp_execute_actions(dp, &packet, 1, steal, md, nl_attr_get(subactions),
+ odp_execute_actions(dp, &packet, 1, steal, nl_attr_get(subactions),
nl_attr_get_size(subactions), dp_execute_action);
}
void
nl_attr_get_size(subactions), dp_execute_action);
}
void
-odp_execute_actions(void *dp, struct dpif_packet **packets, int cnt,
- bool steal, struct pkt_metadata *md,
+odp_execute_actions(void *dp, struct dpif_packet **packets, int cnt, bool steal,
const struct nlattr *actions, size_t actions_len,
odp_execute_cb dp_execute_action)
{
const struct nlattr *actions, size_t actions_len,
odp_execute_cb dp_execute_action)
{
* not need it any more. */
bool may_steal = steal && last_action;
* not need it any more. */
bool may_steal = steal && last_action;
- dp_execute_action(dp, packets, cnt, md, a, may_steal);
+ dp_execute_action(dp, packets, cnt, a, may_steal);
if (last_action) {
/* We do not need to free the packets. dp_execute_actions()
if (last_action) {
/* We do not need to free the packets. dp_execute_actions()
uint32_t hash;
for (i = 0; i < cnt; i++) {
uint32_t hash;
for (i = 0; i < cnt; i++) {
- struct ofpbuf *buf = &packets[i]->ofpbuf;
-
- flow_extract(buf, md, &flow);
+ flow_extract(&packets[i]->ofpbuf, &packets[i]->md, &flow);
hash = flow_hash_5tuple(&flow, hash_act->hash_basis);
hash = flow_hash_5tuple(&flow, hash_act->hash_basis);
- /* The hash of the first packet is in shared metadata */
- if (i == 0) {
- md->dp_hash = hash ? hash : 1;
- }
-
/* We also store the hash value with each packet */
dpif_packet_set_dp_hash(packets[i], hash ? hash : 1);
}
/* We also store the hash value with each packet */
dpif_packet_set_dp_hash(packets[i], hash ? hash : 1);
}
case OVS_ACTION_ATTR_SET:
for (i = 0; i < cnt; i++) {
case OVS_ACTION_ATTR_SET:
for (i = 0; i < cnt; i++) {
- odp_execute_set_action(packets[i], nl_attr_get(a), md);
+ odp_execute_set_action(packets[i], nl_attr_get(a));
}
break;
case OVS_ACTION_ATTR_SET_MASKED:
for (i = 0; i < cnt; i++) {
}
break;
case OVS_ACTION_ATTR_SET_MASKED:
for (i = 0; i < cnt; i++) {
- odp_execute_masked_set_action(packets[i], nl_attr_get(a), md);
+ odp_execute_masked_set_action(packets[i], nl_attr_get(a));
}
break;
case OVS_ACTION_ATTR_SAMPLE:
for (i = 0; i < cnt; i++) {
}
break;
case OVS_ACTION_ATTR_SAMPLE:
for (i = 0; i < cnt; i++) {
- odp_execute_sample(dp, packets[i], steal && last_action, md, a,
+ odp_execute_sample(dp, packets[i], steal && last_action, a,
struct pkt_metadata;
typedef void (*odp_execute_cb)(void *dp, struct dpif_packet **packets, int cnt,
struct pkt_metadata;
typedef void (*odp_execute_cb)(void *dp, struct dpif_packet **packets, int cnt,
const struct nlattr *action, bool may_steal);
/* Actions that need to be executed in the context of a datapath are handed
const struct nlattr *action, bool may_steal);
/* Actions that need to be executed in the context of a datapath are handed
* actions OVS_ACTION_ATTR_OUTPUT and OVS_ACTION_ATTR_USERSPACE so
* 'dp_execute_action' needs to handle only these. */
void odp_execute_actions(void *dp, struct dpif_packet **packets, int cnt,
* actions OVS_ACTION_ATTR_OUTPUT and OVS_ACTION_ATTR_USERSPACE so
* 'dp_execute_action' needs to handle only these. */
void odp_execute_actions(void *dp, struct dpif_packet **packets, int cnt,
- bool steal, struct pkt_metadata *,
const struct nlattr *actions, size_t actions_len,
odp_execute_cb dp_execute_action);
#endif
const struct nlattr *actions, size_t actions_len,
odp_execute_cb dp_execute_action);
#endif
ofpbuf_init(b, size + headroom);
ofpbuf_reserve(b, headroom);
ofpbuf_init(b, size + headroom);
ofpbuf_reserve(b, headroom);
+ p->md = PKT_METADATA_INITIALIZER(0);
size_t headroom = ofpbuf_headroom(b);
ofpbuf_init(&p->ofpbuf, ofpbuf_size(b) + headroom);
size_t headroom = ofpbuf_headroom(b);
ofpbuf_init(&p->ofpbuf, ofpbuf_size(b) + headroom);
+ p->md = PKT_METADATA_INITIALIZER(0);
ofpbuf_reserve(&p->ofpbuf, headroom);
ofpbuf_put(&p->ofpbuf, ofpbuf_data(b), ofpbuf_size(b));
ofpbuf_reserve(&p->ofpbuf, headroom);
ofpbuf_put(&p->ofpbuf, ofpbuf_data(b), ofpbuf_size(b));
struct dpif_packet *newp;
newp = dpif_packet_clone_from_ofpbuf(&p->ofpbuf);
struct dpif_packet *newp;
newp = dpif_packet_clone_from_ofpbuf(&p->ofpbuf);
+ memcpy(&newp->md, &p->md, sizeof p->md);
dpif_packet_set_dp_hash(newp, dpif_packet_get_dp_hash(p));
dpif_packet_set_dp_hash(newp, dpif_packet_get_dp_hash(p));
#ifndef DPDK_NETDEV
uint32_t dp_hash; /* Packet hash. */
#endif
#ifndef DPDK_NETDEV
uint32_t dp_hash; /* Packet hash. */
#endif
+ struct pkt_metadata md;
};
struct dpif_packet *dpif_packet_new_with_headroom(size_t size,
};
struct dpif_packet *dpif_packet_new_with_headroom(size_t size,
{
struct ofproto_packet_in *pin;
struct dpif_packet *packet;
{
struct ofproto_packet_in *pin;
struct dpif_packet *packet;
- struct pkt_metadata md = PKT_METADATA_INITIALIZER(0);
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
&ctx->xout->wc,
ctx->xbridge->masked_set_action);
&ctx->xout->wc,
ctx->xbridge->masked_set_action);
- odp_execute_actions(NULL, &packet, 1, false, &md,
+ odp_execute_actions(NULL, &packet, 1, false,
ofpbuf_data(ctx->xout->odp_actions),
ofpbuf_size(ctx->xout->odp_actions), NULL);
ofpbuf_data(ctx->xout->odp_actions),
ofpbuf_size(ctx->xout->odp_actions), NULL);