uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint32_t sflow_n_outputs; /* Number of output ports. */
odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
- uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
+ mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
/* These are used for non-bond recirculation. The recirculation IDs are
* stored in xout and must be associated with a datapath flow (ukey),
uint16_t vlan;
uint16_t vid;
- mirrors = ctx->xout->mirrors;
- ctx->xout->mirrors = 0;
+ mirrors = ctx->mirrors;
+ ctx->mirrors = 0;
in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
ctx->xin->packet != NULL, NULL);
}
mirrors &= ~dup_mirrors;
- ctx->xout->mirrors |= dup_mirrors;
+ ctx->mirrors |= dup_mirrors;
if (out) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
}
}
-/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
- * the number of packets out of UINT32_MAX to sample. The given
- * cookie is passed back in the callback for each sampled packet.
+/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
+ * 'probability' is the number of packets out of UINT32_MAX to sample. The
+ * 'cookie' (of length 'cookie_size' bytes) is passed back in the callback for
+ * each sampled packet. 'tunnel_out_port', if not ODPP_NONE, is added as the
+ * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions' is
+ * true, an OVS_USERSPACE_ATTR_ACTIONS attribute is added.
*/
static size_t
-compose_sample_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
+compose_sample_action(struct xlate_ctx *ctx,
const uint32_t probability,
const union user_action_cookie *cookie,
const size_t cookie_size,
const odp_port_t tunnel_out_port,
bool include_actions)
{
-    size_t sample_offset, actions_offset;
-    odp_port_t odp_port;
-    int cookie_offset;
-    uint32_t pid;
-    sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
-    nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
-    actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
-    odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
-    pid = dpif_port_get_pid(xbridge->dpif, odp_port,
-                            flow_hash_5tuple(flow, 0));
-    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
-                                             tunnel_out_port,
-                                             include_actions,
-                                             odp_actions);
-    nl_msg_end_nested(odp_actions, actions_offset);
-    nl_msg_end_nested(odp_actions, sample_offset);
+    size_t sample_offset = nl_msg_start_nested(ctx->odp_actions,
+                                               OVS_ACTION_ATTR_SAMPLE);
+    nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
+    size_t actions_offset = nl_msg_start_nested(ctx->odp_actions,
+                                                OVS_SAMPLE_ATTR_ACTIONS);
+    odp_port_t odp_port = ofp_port_to_odp_port(
+        ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
+    uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
+                                     flow_hash_5tuple(&ctx->xin->flow, 0));
+    int cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+                                                 tunnel_out_port,
+                                                 include_actions,
+                                                 ctx->odp_actions);
+    nl_msg_end_nested(ctx->odp_actions, actions_offset);
+    nl_msg_end_nested(ctx->odp_actions, sample_offset);
return cookie_offset;
}
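/* Illustrative sketch (not from the original patch): a hypothetical caller of
 * the function above, assuming a ~50% sampling rate and an sFlow cookie. The
 * two nl_msg_start_nested()/nl_msg_end_nested() pairs close in reverse order,
 * so the composed datapath action has the shape
 * sample(probability, actions(userspace(...))):
 *
 *     union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
 *     size_t off = compose_sample_action(ctx, UINT32_MAX / 2, &cookie,
 *                                        sizeof cookie.sflow, ODPP_NONE,
 *                                        true);
 */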
-static void
-compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
- odp_port_t odp_port, unsigned int n_outputs,
- union user_action_cookie *cookie)
-{
- int ifindex;
-
- cookie->type = USER_ACTION_COOKIE_SFLOW;
- cookie->sflow.vlan_tci = vlan_tci;
-
- /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
- * port information") for the interpretation of cookie->output. */
- switch (n_outputs) {
- case 0:
- /* 0x40000000 | 256 means "packet dropped for unknown reason". */
- cookie->sflow.output = 0x40000000 | 256;
- break;
-
- case 1:
- ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
- if (ifindex) {
- cookie->sflow.output = ifindex;
- break;
- }
- /* Fall through. */
- default:
- /* 0x80000000 means "multiple output ports. */
- cookie->sflow.output = 0x80000000 | n_outputs;
- break;
- }
-}
-
-/* Compose SAMPLE action for sFlow bridge sampling. */
+/* If sFlow is not enabled, returns 0 without doing anything.
+ *
+ * If sFlow is enabled, appends a template "sample" action to the ODP actions
+ * in 'ctx'. This action is a template because some of the information needed
+ * to fill it out is not available until flow translation is complete. In this
+ * case, this function returns an offset, which is always nonzero, to pass
+ * later to fix_sflow_action() to fill in the rest of the template. */
static size_t
-compose_sflow_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
- odp_port_t odp_port)
+compose_sflow_action(struct xlate_ctx *ctx)
{
- uint32_t probability;
- union user_action_cookie cookie;
-
- if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
+ struct dpif_sflow *sflow = ctx->xbridge->sflow;
+ if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return 0;
}
- probability = dpif_sflow_get_probability(xbridge->sflow);
- compose_sflow_cookie(xbridge, htons(0), odp_port,
- odp_port == ODPP_NONE ? 0 : 1, &cookie);
-
- return compose_sample_action(xbridge, odp_actions, flow, probability,
+ union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
+ return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
&cookie, sizeof cookie.sflow, ODPP_NONE,
true);
}
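/* Illustrative sketch (not from the original patch): the template/fixup
 * protocol described above, as the translation code later in this patch uses
 * it. The returned offset is kept across translation and, if nonzero, passed
 * to fix_sflow_action() once ctx->sflow_odp_port and ctx->sflow_n_outputs are
 * known:
 *
 *     unsigned int user_cookie_offset = compose_sflow_action(&ctx);
 *     ...translate, filling in ctx.sflow_odp_port and ctx.sflow_n_outputs...
 *     if (user_cookie_offset) {
 *         fix_sflow_action(&ctx, user_cookie_offset);
 *     }
 */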
+/* If IPFIX is enabled, appends a "sample" action implementing IPFIX to
+ * 'ctx->odp_actions'. */
static void
-compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
- uint32_t obs_domain_id, uint32_t obs_point_id,
- union user_action_cookie *cookie)
-{
- cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
- cookie->flow_sample.probability = probability;
- cookie->flow_sample.collector_set_id = collector_set_id;
- cookie->flow_sample.obs_domain_id = obs_domain_id;
- cookie->flow_sample.obs_point_id = obs_point_id;
-}
-
-static void
-compose_ipfix_cookie(union user_action_cookie *cookie,
- odp_port_t output_odp_port)
+compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
{
- cookie->type = USER_ACTION_COOKIE_IPFIX;
- cookie->ipfix.output_odp_port = output_odp_port;
-}
-
-/* Compose SAMPLE action for IPFIX bridge sampling. */
-static void
-compose_ipfix_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
- odp_port_t output_odp_port)
-{
- uint32_t probability;
- union user_action_cookie cookie;
+ struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
odp_port_t tunnel_out_port = ODPP_NONE;
- if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
+ if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return;
}
/* For input case, output_odp_port is ODPP_NONE, which is an invalid port
* number. */
if (output_odp_port == ODPP_NONE &&
- !dpif_ipfix_get_bridge_exporter_input_sampling(xbridge->ipfix)) {
+ !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
return;
}
    /* For output case, output_odp_port is valid. */
if (output_odp_port != ODPP_NONE) {
- if (!dpif_ipfix_get_bridge_exporter_output_sampling(xbridge->ipfix)) {
+ if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
return;
}
/* If tunnel sampling is enabled, put an additional option attribute:
* OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
*/
- if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(xbridge->ipfix) &&
- dpif_ipfix_get_tunnel_port(xbridge->ipfix, output_odp_port) ) {
+ if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
+            dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
tunnel_out_port = output_odp_port;
}
}
- probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
- compose_ipfix_cookie(&cookie, output_odp_port);
-
- compose_sample_action(xbridge, odp_actions, flow, probability,
+ union user_action_cookie cookie = {
+ .ipfix = {
+ .type = USER_ACTION_COOKIE_IPFIX,
+ .output_odp_port = output_odp_port,
+ }
+ };
+ compose_sample_action(ctx,
+ dpif_ipfix_get_bridge_exporter_probability(ipfix),
&cookie, sizeof cookie.ipfix, tunnel_out_port,
false);
}
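/* Editorial note (not from the original patch): once the 'ipfix' and in_port
 * checks have passed, the gating above is equivalent to this sketch:
 *
 *     bool input_case = output_odp_port == ODPP_NONE;
 *     bool sample = input_case
 *         ? dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)
 *         : dpif_ipfix_get_bridge_exporter_output_sampling(ipfix);
 *
 * with 'tunnel_out_port' additionally set in the output case when tunnel
 * sampling is enabled and 'output_odp_port' is a known tunnel port. */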
-/* SAMPLE action for sFlow must be first action in any given list of
- * actions. At this point we do not have all information required to
- * build it. So try to build sample action as complete as possible. */
-static void
-add_sflow_action(struct xlate_ctx *ctx)
-{
- ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
- ctx->odp_actions,
- &ctx->xin->flow, ODPP_NONE);
- ctx->sflow_odp_port = 0;
- ctx->sflow_n_outputs = 0;
-}
-
-/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
- * of actions, eventually after the SAMPLE action for sFlow. */
-static void
-add_ipfix_action(struct xlate_ctx *ctx)
-{
- compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, ODPP_NONE);
-}
-
-static void
-add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
-{
- compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, port);
-}
-
-/* Fix SAMPLE action according to data collected while composing ODP actions.
- * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
- * USERSPACE action's user-cookie which is required for sflow. */
+/* Fix "sample" action according to data collected while composing ODP actions,
+ * as described in compose_sflow_action().
+ *
+ * 'user_cookie_offset' must be the offset returned by
+ * compose_sflow_action(). */
static void
-fix_sflow_action(struct xlate_ctx *ctx)
+fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
{
const struct flow *base = &ctx->base_flow;
union user_action_cookie *cookie;
- if (!ctx->user_cookie_offset) {
- return;
- }
-
- cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
+ cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset,
sizeof cookie->sflow);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
- compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
- ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
+ cookie->type = USER_ACTION_COOKIE_SFLOW;
+ cookie->sflow.vlan_tci = base->vlan_tci;
+
+ /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
+ * port information") for the interpretation of cookie->output. */
+ switch (ctx->sflow_n_outputs) {
+ case 0:
+ /* 0x40000000 | 256 means "packet dropped for unknown reason". */
+ cookie->sflow.output = 0x40000000 | 256;
+ break;
+
+ case 1:
+ cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
+ ctx->xbridge->sflow, ctx->sflow_odp_port);
+ if (cookie->sflow.output) {
+ break;
+ }
+ /* Fall through. */
+ default:
+ /* 0x80000000 means "multiple output ports". */
+ cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
+ break;
+ }
}
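/* Worked examples (not from the original patch) of the sFlow output encoding
 * above, following the sFlow v5 text cited in the comment; the ifindex value
 * is hypothetical:
 *
 *     0 outputs            -> 0x40000000 | 256 == 0x40000100 (dropped,
 *                             reason unknown)
 *     1 output, ifindex 7  -> 7 (the ifindex itself)
 *     1 output, ifindex 0  -> 0x80000000 | 1 == 0x80000001 (fall through)
 *     3 outputs            -> 0x80000000 | 3 == 0x80000003 (multiple ports)
 */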
-static enum slow_path_reason
-process_special(struct xlate_ctx *ctx, const struct flow *flow,
- const struct xport *xport, const struct dp_packet *packet)
+static bool
+process_special(struct xlate_ctx *ctx, const struct xport *xport)
{
+ const struct flow *flow = &ctx->xin->flow;
struct flow_wildcards *wc = ctx->wc;
const struct xbridge *xbridge = ctx->xbridge;
+ const struct dp_packet *packet = ctx->xin->packet;
+ enum slow_path_reason slow;
if (!xport) {
- return 0;
+ slow = 0;
} else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
if (packet) {
cfm_process_heartbeat(xport->cfm, packet);
}
- return SLOW_CFM;
+ slow = SLOW_CFM;
} else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
        if (packet) {
            bfd_process_packet(xport->bfd, flow, packet);
            /* If POLL received, immediately send FINAL back. */
            if (bfd_should_send_packet(xport->bfd)) {
                ofproto_dpif_monitor_port_send_soon(xport->ofport);
            }
        }
- return SLOW_BFD;
+ slow = SLOW_BFD;
} else if (xport->xbundle && xport->xbundle->lacp
&& flow->dl_type == htons(ETH_TYPE_LACP)) {
if (packet) {
lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
}
- return SLOW_LACP;
+ slow = SLOW_LACP;
} else if ((xbridge->stp || xbridge->rstp) &&
stp_should_process_flow(flow, wc)) {
        if (packet) {
            xbridge->stp
                ? stp_process_packet(xport, packet)
                : rstp_process_packet(xport, packet);
        }
- return SLOW_STP;
+ slow = SLOW_STP;
} else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
if (packet) {
lldp_process_packet(xport->lldp, packet);
}
- return SLOW_LLDP;
+ slow = SLOW_LLDP;
} else {
- return 0;
+ slow = 0;
+ }
+
+ if (slow) {
+ ctx->xout->slow |= slow;
+ return true;
+ } else {
+ return false;
}
}
}
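/* Illustrative sketch (not from the original patch): with process_special()
 * now returning a bool and setting ctx->xout->slow itself, callers collapse
 * the old two-step pattern into a single test, as the hunks below do:
 *
 *     if (!process_special(ctx, xport) && may_receive(xport, ctx)) {
 *         ...continue translation...
 *     }
 */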
if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
- ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
- xport->xbundle);
+ ctx->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
+ xport->xbundle);
}
if (xport->peer) {
struct flow old_flow = ctx->xin->flow;
bool old_was_mpls = ctx->was_mpls;
cls_version_t old_version = ctx->tables_version;
- enum slow_path_reason special;
struct ofpbuf old_stack = ctx->stack;
union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf old_action_set = ctx->action_set;
ctx->tables_version
= ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
- special = process_special(ctx, &ctx->xin->flow, peer,
- ctx->xin->packet);
- if (special) {
- ctx->xout->slow |= special;
- } else if (may_receive(peer, ctx)) {
+ if (!process_special(ctx, peer) && may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
if (ctx->action_set.size) {
* the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ctx->odp_actions->size;
- mirror_mask_t old_mirrors = ctx->xout->mirrors;
+ mirror_mask_t old_mirrors = ctx->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
- ctx->xout->mirrors = old_mirrors;
+ ctx->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
ctx->odp_actions->size = old_size;
} else {
/* Tunnel push-pop action is not compatible with
* IPFIX action. */
- add_ipfix_output_action(ctx, out_port);
+ compose_ipfix_action(ctx, out_port);
nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_OUTPUT,
out_port);
xlate_sample_action(struct xlate_ctx *ctx,
const struct ofpact_sample *os)
{
- union user_action_cookie cookie;
/* Scale the probability from 16-bit to 32-bit while representing
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
ctx->odp_actions,
ctx->wc, use_masked);
- compose_flow_sample_cookie(os->probability, os->collector_set_id,
- os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, probability, &cookie,
- sizeof cookie.flow_sample, ODPP_NONE,
- false);
+ union user_action_cookie cookie = {
+ .flow_sample = {
+ .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
+ .probability = os->probability,
+ .collector_set_id = os->collector_set_id,
+ .obs_domain_id = os->obs_domain_id,
+ .obs_point_id = os->obs_point_id,
+ }
+ };
+ compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
+ ODPP_NONE, false);
}
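/* Editorial note (not from the original patch): the scaling above preserves
 * the sampled fraction because (p << 16) | p == p * 0x10001 for any 16-bit
 * 'p', and 0xffff * 0x10001 == 0xffffffff. For example:
 *
 *     p = 0xffff (100%) -> 0xffffffff == UINT32_MAX
 *     p = 0x8000 (~50%) -> 0x80008000, about UINT32_MAX / 2
 */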
static bool
#endif
}
+static void
+xlate_wc_init(struct xlate_ctx *ctx)
+{
+ flow_wildcards_init_catchall(ctx->wc);
+
+ /* Some fields we consider to always be examined. */
+ memset(&ctx->wc->masks.in_port, 0xff, sizeof ctx->wc->masks.in_port);
+ memset(&ctx->wc->masks.dl_type, 0xff, sizeof ctx->wc->masks.dl_type);
+ if (is_ip_any(&ctx->xin->flow)) {
+ ctx->wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ }
+
+ if (ctx->xbridge->support.odp.recirc) {
+ /* Always exactly match recirc_id when datapath supports
+ * recirculation. */
+ ctx->wc->masks.recirc_id = UINT32_MAX;
+ }
+
+ if (ctx->xbridge->netflow) {
+ netflow_mask_wc(&ctx->xin->flow, ctx->wc);
+ }
+
+ tnl_wc_init(&ctx->xin->flow, ctx->wc);
+}
+
+static void
+xlate_wc_finish(struct xlate_ctx *ctx)
+{
+ /* Clear the metadata and register wildcard masks, because we won't
+ * use non-header fields as part of the cache. */
+ flow_wildcards_clear_non_packet_fields(ctx->wc);
+
+ /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
+ * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
+ * represent these fields. The datapath interface, on the other hand,
+ * represents them with just 8 bits each. This means that if the high
+ * 8 bits of the masks for these fields somehow become set, then they
+ * will get chopped off by a round trip through the datapath, and
+ * revalidation will spot that as an inconsistency and delete the flow.
+ * Avoid the problem here by making sure that only the low 8 bits of
+ * either field can be unwildcarded for ICMP.
+ */
+ if (is_icmpv4(&ctx->xin->flow) || is_icmpv6(&ctx->xin->flow)) {
+ ctx->wc->masks.tp_src &= htons(UINT8_MAX);
+ ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
+ }
+ /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
+ if (ctx->wc->masks.vlan_tci) {
+ ctx->wc->masks.vlan_tci |= htons(VLAN_CFI);
+ }
+}
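/* Illustrative example (not from the original patch) of the ICMP clamping
 * above: struct flow keeps the 8-bit ICMP type in the low byte of the 16-bit
 * 'tp_src', so only the low byte of the mask survives a datapath round trip:
 *
 *     wc->masks.tp_src = htons(0xffff);
 *     wc->masks.tp_src &= htons(UINT8_MAX);    -> htons(0x00ff)
 */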
+
/* Translates the flow, actions, or rule in 'xin' into datapath actions in
* 'xout'.
* The caller must take responsibility for eventually freeing 'xout', with
.has_normal = false,
.has_fin_timeout = false,
.nf_output_iface = NF_OUT_DROP,
- .mirrors = 0,
.n_recircs = 0,
};
.orig_skb_priority = flow->skb_priority,
.sflow_n_outputs = 0,
.sflow_odp_port = 0,
- .user_cookie_offset = 0,
.exit = false,
+ .mirrors = 0,
.recirc_action_offset = -1,
.last_unroll_offset = -1,
.action_set_has_group = false,
.action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
};
+
+ /* 'base_flow' reflects the packet as it came in, but we need it to reflect
+ * the packet as the datapath will treat it for output actions:
+ *
+ * - Our datapath doesn't retain tunneling information without us
+ * re-setting it, so clear the tunnel data.
+ *
+ * - For VLAN splinters, a higher layer may pretend that the packet
+ * came in on 'flow->in_port.ofp_port' with 'flow->vlan_tci'
+ * attached, because that's how we want to treat it from an OpenFlow
+ * perspective. But from the datapath's perspective it actually came
+ * in on a VLAN device without any VLAN attached. So here we put the
+ * datapath's view of the VLAN information in 'base_flow' to ensure
+ * correct treatment.
+ */
memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ if (flow->in_port.ofp_port
+ != vsp_realdev_to_vlandev(xbridge->ofproto,
+ flow->in_port.ofp_port,
+ flow->vlan_tci)) {
+ ctx.base_flow.vlan_tci = 0;
+ }
+
ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
+ if (xin->wc) {
+ xlate_wc_init(&ctx);
+ }
- enum slow_path_reason special;
struct xport *in_port;
- struct flow orig_flow;
- bool tnl_may_send;
- bool is_icmp;
COVERAGE_INC(xlate_actions);
- /* Flow initialization rules:
- * - 'base_flow' must match the kernel's view of the packet at the
- * time that action processing starts. 'flow' represents any
- * transformations we wish to make through actions.
- * - By default 'base_flow' and 'flow' are the same since the input
- * packet matches the output before any actions are applied.
- * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
- * of the received packet as seen by the kernel. If we later output
- * to another device without any modifications this will cause us to
- * insert a new tag since the original one was stripped off by the
- * VLAN device.
- * - Tunnel metadata as received is retained in 'flow'. This allows
- * tunnel metadata matching also in later tables.
- * Since a kernel action for setting the tunnel metadata will only be
- * generated with actual tunnel output, changing the tunnel metadata
- * values in 'flow' (such as tun_id) will only have effect with a later
- * tunnel output action.
- * - Tunnel 'base_flow' is completely cleared since that is what the
- * kernel does. If we wish to maintain the original values an action
- * needs to be generated. */
-
- if (xin->wc) {
- flow_wildcards_init_catchall(ctx.wc);
- memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port);
- memset(&ctx.wc->masks.dl_type, 0xff, sizeof ctx.wc->masks.dl_type);
- if (is_ip_any(flow)) {
- ctx.wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
- }
- if (xbridge->support.odp.recirc) {
- /* Always exactly match recirc_id when datapath supports
- * recirculation. */
- ctx.wc->masks.recirc_id = UINT32_MAX;
- }
- if (xbridge->netflow) {
- netflow_mask_wc(flow, ctx.wc);
- }
- }
- is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
-
- tnl_may_send = tnl_xlate_init(flow, xin->wc);
-
/* The in_port of the original packet before recirculation. */
in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
+ struct flow orig_flow;
if (mbridge_has_mirrors(xbridge->mbridge)) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
/* Do not perform special processing on recirculated packets,
* as recirculated packets are not really received by the bridge. */
- if (!xin->recirc &&
- (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
- ctx.xout->slow |= special;
- } else {
- size_t sample_actions_len;
-
- if (flow->in_port.ofp_port
- != vsp_realdev_to_vlandev(xbridge->ofproto,
- flow->in_port.ofp_port,
- flow->vlan_tci)) {
- ctx.base_flow.vlan_tci = 0;
- }
-
+ if (xin->recirc || !process_special(&ctx, in_port)) {
/* Sampling is done only for packets really received by the bridge. */
+ unsigned int user_cookie_offset = 0;
if (!xin->recirc) {
- add_sflow_action(&ctx);
- add_ipfix_action(&ctx);
- sample_actions_len = ctx.odp_actions->size;
- } else {
- sample_actions_len = 0;
+ user_cookie_offset = compose_sflow_action(&ctx);
+ compose_ipfix_action(&ctx, ODPP_NONE);
}
+ size_t sample_actions_len = ctx.odp_actions->size;
- if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
+ if (tnl_process_ecn(flow)
+ && (!in_port || may_receive(in_port, &ctx))) {
const struct ofpact *ofpacts;
size_t ofpacts_len;
compose_output_action(&ctx, OFPP_LOCAL, NULL);
}
- if (!xin->recirc) {
- fix_sflow_action(&ctx);
+ if (user_cookie_offset) {
+ fix_sflow_action(&ctx, user_cookie_offset);
}
/* Only mirror fully processed packets. */
if (!exit_recirculates(&ctx)
/* Update mirror stats only for packets really received by the bridge. */
if (!xin->recirc && mbridge_has_mirrors(xbridge->mbridge)) {
if (ctx.xin->resubmit_stats) {
- mirror_update_stats(xbridge->mbridge, xout->mirrors,
+ mirror_update_stats(xbridge->mbridge, ctx.mirrors,
ctx.xin->resubmit_stats->n_packets,
ctx.xin->resubmit_stats->n_bytes);
}
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
- entry->u.mirror.mirrors = xout->mirrors;
+ entry->u.mirror.mirrors = ctx.mirrors;
}
}
}
if (xin->wc) {
- /* Clear the metadata and register wildcard masks, because we won't
- * use non-header fields as part of the cache. */
- flow_wildcards_clear_non_packet_fields(ctx.wc);
-
- /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
- * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
- * represent these fields. The datapath interface, on the other hand,
- * represents them with just 8 bits each. This means that if the high
- * 8 bits of the masks for these fields somehow become set, then they
- * will get chopped off by a round trip through the datapath, and
- * revalidation will spot that as an inconsistency and delete the flow.
- * Avoid the problem here by making sure that only the low 8 bits of
- * either field can be unwildcarded for ICMP.
- */
- if (is_icmp) {
- ctx.wc->masks.tp_src &= htons(UINT8_MAX);
- ctx.wc->masks.tp_dst &= htons(UINT8_MAX);
- }
- /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
- if (ctx.wc->masks.vlan_tci) {
- ctx.wc->masks.vlan_tci |= htons(VLAN_CFI);
- }
+ xlate_wc_finish(&ctx);
}
exit: