bool xout_initialized; /* True if 'xout' is initialized and must be uninitialized later. */
struct xlate_out xout; /* Result of xlate_actions(). */
+ struct ofpbuf odp_actions; /* Datapath actions from xlate_actions(). */
struct flow_wildcards wc; /* Dependencies that megaflow must match. */
struct ofpbuf put_actions; /* Actions 'put' in the fastpath. */
const struct nlattr *key; /* Datapath flow key. */
size_t key_len; /* Datapath flow key length. */
const struct nlattr *out_tun_key; /* Datapath output tunnel key. */
+
+ uint64_t odp_actions_stub[1024 / 8]; /* Stub for odp_actions. */
};
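The stub keeps the common case allocation-free: 'odp_actions' begins life
backed by 'odp_actions_stub' and falls back to malloc() only if translation
produces more than 1 kB of actions. A minimal sketch of that lifecycle,
assuming only the existing ofpbuf API:

    uint64_t stub[1024 / 8];                          /* 1 kB of inline storage. */
    struct ofpbuf buf;

    ofpbuf_use_stub(&buf, stub, sizeof stub);         /* No allocation yet. */
    nl_msg_put_u32(&buf, OVS_ACTION_ATTR_RECIRC, 1);  /* Appends into 'stub'. */
    /* Appending past 1 kB silently migrates the data to the heap, so
     * callers never branch on which storage is in use. */
    ofpbuf_uninit(&buf);                              /* Frees heap storage, if any. */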
/* 'udpif_key's are responsible for tracking the little bit of state udpif
pkt_metadata_from_flow(&dupcall->packet.md, flow);
flow_extract(&dupcall->packet, flow);
- error = process_upcall(udpif, upcall, NULL, &upcall->wc);
+ error = process_upcall(udpif, upcall,
+ &upcall->odp_actions, &upcall->wc);
if (error) {
goto cleanup;
}
upcall->pmd_id = pmd_id;
upcall->type = type;
upcall->userdata = userdata;
+ ofpbuf_use_stub(&upcall->odp_actions, upcall->odp_actions_stub,
+ sizeof upcall->odp_actions_stub);
ofpbuf_init(&upcall->put_actions, 0);
upcall->xout_initialized = false;
stats.tcp_flags = ntohs(upcall->flow->tcp_flags);
xlate_in_init(&xin, upcall->ofproto, upcall->flow, upcall->in_port, NULL,
- stats.tcp_flags, upcall->packet, wc);
- xin.odp_actions = odp_actions;
+ stats.tcp_flags, upcall->packet, wc, odp_actions);
if (upcall->type == DPIF_UC_MISS) {
xin.resubmit_stats = &stats;
if (!upcall->xout.slow) {
ofpbuf_use_const(&upcall->put_actions,
- upcall->xout.odp_actions->data,
- upcall->xout.odp_actions->size);
+ odp_actions->data, odp_actions->size);
} else {
ofpbuf_init(&upcall->put_actions, 0);
compose_slow_path(udpif, &upcall->xout, upcall->flow,
if (upcall->xout_initialized) {
xlate_out_uninit(&upcall->xout);
}
+ ofpbuf_uninit(&upcall->odp_actions);
ofpbuf_uninit(&upcall->put_actions);
if (upcall->ukey) {
if (!upcall->ukey_persists) {
* actions were composed assuming that the packet contained no
* VLAN. So, we must remove the VLAN header from the packet before
* trying to execute the actions. */
- if (upcall->xout.odp_actions->size) {
+ if (upcall->odp_actions.size) {
eth_pop_vlan(CONST_CAST(struct dp_packet *, upcall->packet));
}
op->dop.u.flow_put.actions_len = ukey->actions->size;
}
- if (upcall->xout.odp_actions->size) {
+ if (upcall->odp_actions.size) {
op = &ops[n_ops++];
op->ukey = NULL;
op->dop.type = DPIF_OP_EXECUTE;
op->dop.u.execute.packet = CONST_CAST(struct dp_packet *, packet);
odp_key_to_pkt_metadata(upcall->key, upcall->key_len,
&op->dop.u.execute.packet->md);
- op->dop.u.execute.actions = upcall->xout.odp_actions->data;
- op->dop.u.execute.actions_len = upcall->xout.odp_actions->size;
+ op->dop.u.execute.actions = upcall->odp_actions.data;
+ op->dop.u.execute.actions_len = upcall->odp_actions.size;
op->dop.u.execute.needs_help = (upcall->xout.slow & SLOW_ACTION) != 0;
op->dop.u.execute.probe = false;
}
const struct dpif_flow_stats *stats, uint64_t reval_seq)
OVS_REQUIRES(ukey->mutex)
{
- uint64_t slow_path_buf[128 / 8];
+ uint64_t odp_actions_stub[1024 / 8];
+ struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
+
struct xlate_out xout, *xoutp;
struct netflow *netflow;
struct ofproto_dpif *ofproto;
struct dpif_flow_stats push;
- struct ofpbuf xout_actions;
struct flow flow, dp_mask;
struct flow_wildcards wc;
uint64_t *dp64, *xout64;
}
xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL, push.tcp_flags,
- NULL, need_revalidate ? &wc : NULL);
+ NULL, need_revalidate ? &wc : NULL, &odp_actions);
if (push.n_packets) {
xin.resubmit_stats = &push;
xin.may_learn = true;
goto exit;
}
- if (!xout.slow) {
- ofpbuf_use_const(&xout_actions, xout.odp_actions->data,
- xout.odp_actions->size);
- } else {
- ofpbuf_use_stack(&xout_actions, slow_path_buf, sizeof slow_path_buf);
+ if (xout.slow) {
+ ofpbuf_clear(&odp_actions);
compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
- &xout_actions);
+ &odp_actions);
}
- if (!ofpbuf_equal(&xout_actions, ukey->actions)) {
+ if (!ofpbuf_equal(&odp_actions, ukey->actions)) {
goto exit;
}
netflow_flow_clear(netflow, &flow);
}
xlate_out_uninit(xoutp);
+ ofpbuf_uninit(&odp_actions);
return ok;
}
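OFPBUF_STUB_INITIALIZER is the declarative equivalent of ofpbuf_use_stub(),
so the revalidator can declare and initialize the buffer in one step and then
reuse it: xlate_actions() fills it, and for slow-path flows ofpbuf_clear()
resets it before compose_slow_path() writes the replacement actions that are
compared against the installed flow. A sketch, using names from the hunk
above:

    uint64_t odp_actions_stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);

    xlate_actions(&xin, &xout);              /* Fills 'odp_actions' via 'xin'. */
    if (xout.slow) {
        ofpbuf_clear(&odp_actions);          /* Reuse the same storage. */
        compose_slow_path(udpif, &xout, &flow, flow.in_port.odp_port,
                          &odp_actions);
    }
    bool ok = ofpbuf_equal(&odp_actions, ukey->actions);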
struct xlate_in xin;
xlate_in_init(&xin, ofproto, &flow, ofp_in_port, NULL,
- push->tcp_flags, NULL, NULL);
+ push->tcp_flags, NULL, NULL, NULL);
xin.resubmit_stats = push->n_packets ? push : NULL;
xin.may_learn = push->n_packets > 0;
xlate_actions_for_side_effects(&xin);
* caller really wants wildcards. */
struct flow_wildcards *wc;
+ /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
+ * this is the same pointer. When 'xin->odp_actions' is null, this points
+ * to a scratch ofpbuf. This allows code to add actions to
+ * 'ctx->odp_actions' without worrying about whether the caller really
+ * wants actions. */
+ struct ofpbuf *odp_actions;
+
/* Resubmit statistics, via xlate_table_action(). */
int recurse; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
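In practice the fallback described in the comment above means translation
code never null-checks its output buffer. A caller that wants the datapath
actions passes its own buffer through xlate_in_init(); a caller that only
wants side effects passes null pointers and the actions land in the scratch
buffer. A hedged sketch of the two call styles ('flow', 'wc', and the scalar
arguments assumed set up by the caller):

    uint64_t stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);

    /* Caller that wants the actions: */
    xlate_in_init(&xin, ofproto, &flow, in_port, NULL, tcp_flags,
                  packet, &wc, &odp_actions);

    /* Caller that only wants side effects (stats, learning): */
    xlate_in_init(&xin, ofproto, &flow, in_port, NULL, tcp_flags,
                  NULL, NULL, NULL);
    xlate_actions_for_side_effects(&xin);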
"%s, which is reserved exclusively for mirroring",
ctx->xbridge->name, in_xbundle->name);
}
- ofpbuf_clear(ctx->xout->odp_actions);
+ ofpbuf_clear(ctx->odp_actions);
return;
}
add_sflow_action(struct xlate_ctx *ctx)
{
ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
&ctx->xin->flow, ODPP_NONE);
ctx->sflow_odp_port = 0;
ctx->sflow_n_outputs = 0;
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
- compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, ODPP_NONE);
}
static void
add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
{
- compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, port);
}
return;
}
- cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset,
+ cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
sizeof cookie->sflow);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
}
tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
- odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
+ odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
return 0;
}
/* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
* the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->xout->odp_actions->size;
+ size_t old_size = ctx->odp_actions->size;
mirror_mask_t old_mirrors = ctx->xout->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
- ctx->xout->odp_actions->size = old_size;
+ ctx->odp_actions->size = old_size;
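+ /* Restoring the old size discards the actions emitted while learning,
+ * so the packet is ultimately dropped. */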
/* Undo changes that may have been done for recirculation. */
if (exit_recirculates(ctx)) {
tnl_push_pop_send = true;
} else {
xlate_report(ctx, "output to kernel tunnel");
- commit_odp_tunnel_action(flow, &ctx->base_flow,
- ctx->xout->odp_actions);
+ commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
flow->tunnel = flow_tnl; /* Restore tunnel metadata */
}
} else {
bool use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
wc, use_masked);
if (xr) {
struct ovs_action_hash *act_hash;
/* Hash action. */
- act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions,
+ act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
OVS_ACTION_ATTR_HASH,
sizeof *act_hash);
act_hash->hash_alg = xr->hash_alg;
act_hash->hash_basis = xr->hash_basis;
/* Recirc action. */
- nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
xr->recirc_id);
} else {
}
if (odp_tnl_port != ODPP_NONE) {
- nl_msg_put_odp_port(ctx->xout->odp_actions,
+ nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_TUNNEL_POP,
odp_tnl_port);
} else {
/* Tunnel push-pop action is not compatible with
* IPFIX action. */
add_ipfix_output_action(ctx, out_port);
- nl_msg_put_odp_port(ctx->xout->odp_actions,
+ nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_OUTPUT,
out_port);
}
MAX_RESUBMIT_RECURSION);
} else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
- } else if (ctx->xout->odp_actions->size > UINT16_MAX) {
+ } else if (ctx->odp_actions->size > UINT16_MAX) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
} else if (ctx->stack.size >= 65536) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
ctx->wc, use_masked);
odp_execute_actions(NULL, &packet, 1, false,
- ctx->xout->odp_actions->data,
- ctx->xout->odp_actions->size, NULL);
+ ctx->odp_actions->data, ctx->odp_actions->size, NULL);
pin = xmalloc(sizeof *pin);
pin->up.packet_len = dp_packet_size(packet);
use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
ctx->wc, use_masked);
recirc_metadata_from_flow(&md, &ctx->xin->flow);
* fail all revalidations as zero is not a valid recirculation ID. */
}
- nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
/* Undo changes done by recirculation. */
ctx->action_set.size = ctx->recirc_action_offset;
bool use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
ctx->wc, use_masked);
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
}
ctx->exit = true;
- ofpbuf_clear(ctx->xout->odp_actions);
+ ofpbuf_clear(ctx->odp_actions);
}
}
use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
ctx->wc, use_masked);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_sample_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, probability, &cookie,
sizeof cookie.flow_sample, ODPP_NONE,
false);
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
const struct flow *flow, ofp_port_t in_port,
struct rule_dpif *rule, uint16_t tcp_flags,
- const struct dp_packet *packet, struct flow_wildcards *wc)
+ const struct dp_packet *packet, struct flow_wildcards *wc,
+ struct ofpbuf *odp_actions)
{
xin->ofproto = ofproto;
xin->flow = *flow;
xin->report_hook = NULL;
xin->resubmit_stats = NULL;
xin->wc = wc;
- xin->odp_actions = NULL;
+ xin->odp_actions = odp_actions;
/* Do recirc lookup. */
xin->recirc = flow->recirc_id
xlate_out_uninit(struct xlate_out *xout)
{
if (xout) {
- if (xout->odp_actions == &xout->odp_actions_buf) {
- ofpbuf_uninit(xout->odp_actions);
- }
xlate_out_free_recircs(xout);
}
}
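Because xlate_out_uninit() no longer frees an action buffer, ownership rests
entirely with the caller, which must pair its own init and uninit. A minimal
sketch of the resulting call-site pattern (local names illustrative):

    uint64_t stub[1024 / 8];
    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(stub);
    struct xlate_in xin;
    struct xlate_out xout;

    xlate_in_init(&xin, ofproto, &flow, in_port, NULL, tcp_flags,
                  packet, &wc, &odp_actions);
    xlate_actions(&xin, &xout);
    /* ... use odp_actions.data and odp_actions.size ... */
    xlate_out_uninit(&xout);      /* No longer touches the actions. */
    ofpbuf_uninit(&odp_actions);  /* The caller frees them instead. */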
const struct nlattr *a;
unsigned int left;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions->data,
- ctx->xout->odp_actions->size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
+ ctx->odp_actions->size) {
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
&& nl_attr_get_odp_port(a) == local_odp_port) {
return true;
union mf_subvalue stack_stub[1024 / sizeof(union mf_subvalue)];
uint64_t action_set_stub[1024 / 8];
struct flow_wildcards scratch_wc;
+ uint64_t actions_stub[256 / 8];
+ struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
struct xlate_ctx ctx = {
.xin = xin,
.xout = xout,
.stack = OFPBUF_STUB_INITIALIZER(stack_stub),
.rule = xin->rule,
.wc = xin->wc ? xin->wc : &scratch_wc,
+ .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
.recurse = 0,
.resubmits = 0,
.action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
};
memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
enum slow_path_reason special;
const struct ofpact *ofpacts;
* kernel does. If we wish to maintain the original values an action
* needs to be generated. */
- xout->odp_actions = xin->odp_actions;
- if (!xout->odp_actions) {
- xout->odp_actions = &xout->odp_actions_buf;
- ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
- sizeof xout->odp_actions_stub);
- }
- ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);
-
if (xin->wc) {
flow_wildcards_init_catchall(ctx.wc);
memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port);
xin->ofpacts_len > 0
? "actions"
: "rule");
- return;
+ goto exit;
}
/* Set the bridge for post-recirculation processing if needed. */
/* Drop the packet if the bridge cannot be found. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
- return;
+ goto exit;
}
ctx.xbridge = new_bridge;
}
VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
flow->recirc_id);
- return;
+ goto exit;
}
/* The bridge is now known so obtain its table version. */
ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
if (!xin->recirc) {
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions->size;
+ sample_actions_len = ctx.odp_actions->size;
} else {
sample_actions_len = 0;
}
if (in_port && (!xport_stp_forward_state(in_port) ||
!xport_rstp_forward_state(in_port))) {
/* Drop all actions added by do_xlate_actions() above. */
- ctx.xout->odp_actions->size = sample_actions_len;
+ ctx.odp_actions->size = sample_actions_len;
/* Undo changes that may have been done for recirculation. */
if (exit_recirculates(&ctx)) {
}
}
- if (nl_attr_oversized(ctx.xout->odp_actions->size)) {
+ if (nl_attr_oversized(ctx.odp_actions->size)) {
/* These datapath actions are too big for a Netlink attribute, so we
* can't hand them to the kernel directly. dpif_execute() can execute
* them one by one with help, so just mark the result as SLOW_ACTION to
* prevent the flow from being installed. */
COVERAGE_INC(xlate_actions_oversize);
ctx.xout->slow |= SLOW_ACTION;
- } else if (too_many_output_actions(ctx.xout->odp_actions)) {
+ } else if (too_many_output_actions(ctx.odp_actions)) {
COVERAGE_INC(xlate_actions_too_many_output);
ctx.xout->slow |= SLOW_ACTION;
}
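The limit exists because a Netlink attribute's 16-bit length field also
covers its 4-byte header; presumably nl_attr_oversized() reduces to that one
comparison (a sketch, not the library's verbatim code):

    bool
    nl_attr_oversized(size_t payload_size)
    {
        /* The largest encodable payload leaves room for the header. */
        return payload_size > UINT16_MAX - NLA_HDRLEN;
    }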
}
}
- ofpbuf_uninit(&ctx.stack);
- ofpbuf_uninit(&ctx.action_set);
-
if (xin->wc) {
/* Clear the metadata and register wildcard masks, because we won't
* use non-header fields as part of the cache. */
ctx.wc->masks.vlan_tci |= htons(VLAN_CFI);
}
}
+
+exit:
+ ofpbuf_uninit(&ctx.stack);
+ ofpbuf_uninit(&ctx.action_set);
+ ofpbuf_uninit(&scratch_actions);
}
/* Sends 'packet' out 'ofport'.
rule_dpif_credit_stats(rule, &stats);
}
+ uint64_t odp_actions_stub[1024 / 8];
+ struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
- stats.tcp_flags, packet, NULL);
+ stats.tcp_flags, packet, NULL, &odp_actions);
xin.ofpacts = ofpacts;
xin.ofpacts_len = ofpacts_len;
xin.resubmit_stats = &stats;
xlate_actions(&xin, &xout);
- execute.actions = xout.odp_actions->data;
- execute.actions_len = xout.odp_actions->size;
+ execute.actions = odp_actions.data;
+ execute.actions_len = odp_actions.size;
pkt_metadata_from_flow(&packet->md, flow);
execute.packet = packet;
error = dpif_execute(ofproto->backer->dpif, &execute);
xlate_out_uninit(&xout);
+ ofpbuf_uninit(&odp_actions);
return error;
}
const struct flow *key;
struct flow flow;
struct ds *result;
+ struct flow_wildcards wc; /* Dependencies that megaflow must match. */
+ struct ofpbuf odp_actions; /* Datapath actions from xlate_actions(). */
};
static void
trace_format_odp(struct ds *result, int level, const char *title,
struct trace_ctx *trace)
{
- struct ofpbuf *odp_actions = trace->xout.odp_actions;
+ struct ofpbuf *odp_actions = &trace->odp_actions;
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
- match_init(&match, trace->key, &trace->xout.wc);
+ match_init(&match, trace->key, &trace->wc);
match_format(&match, result, OFP_DEFAULT_PRIORITY);
ds_put_char(result, '\n');
}
flow_format(ds, flow);
ds_put_char(ds, '\n');
+ ofpbuf_init(&trace.odp_actions, 0);
+
trace.result = ds;
trace.key = flow; /* Original flow key, used for megaflow. */
trace.flow = *flow; /* May be modified by actions. */
xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
- ntohs(flow->tcp_flags), packet, &trace.wc);
+ ntohs(flow->tcp_flags), packet, &trace.wc,
+ &trace.odp_actions);
trace.xin.ofpacts = ofpacts;
trace.xin.ofpacts_len = ofpacts_len;
trace.xin.resubmit_hook = trace_resubmit;
trace_format_megaflow(ds, 0, "Megaflow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, trace.xout.odp_actions->data,
- trace.xout.odp_actions->size);
+ format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size);
if (trace.xout.slow) {
enum slow_path_reason slow;
}
xlate_out_uninit(&trace.xout);
+ ofpbuf_uninit(&trace.odp_actions);
}
/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list