#include <sys/socket.h>
#include <netinet/in.h>
-#include "tnl-arp-cache.h"
+#include "tnl-neigh-cache.h"
#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "ofproto/ofproto-dpif-sflow.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
+#include "packets.h"
#include "ovs-router.h"
#include "tnl-ports.h"
#include "tunnel.h"
* which might lead to an infinite loop. This could happen easily
* if a tunnel is marked as 'ip_remote=flow', and the flow does not
* actually set the tun_dst field. */
- ovs_be32 orig_tunnel_ip_dst;
+ struct in6_addr orig_tunnel_ipv6_dst;
/* Stack for the push and pop actions. Each stack element is of type
* "union mf_subvalue". */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint32_t sflow_n_outputs; /* Number of output ports. */
odp_port_t sflow_odp_port; /* Output port for composing sFlow action. */
- uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
+ ofp_port_t nf_output_iface; /* Output interface index for NetFlow. */
bool exit; /* No further actions should be processed. */
+ mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
/* These are used for non-bond recirculation. The recirculation IDs are
* stored in xout and must be associated with a datapath flow (ukey),
* the MPLS label stack that was originally present. */
bool was_mpls;
+ /* True if conntrack has been performed on this packet during processing
+ * on the current bridge. This is used to determine whether conntrack
+ * state from the datapath should be honored after recirculation. */
+ bool conntracked;
+
+ /* Pointer to an embedded NAT action in a conntrack action, or NULL. */
+ struct ofpact_nat *ct_nat_action;
+
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
* datapath actions. */
bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
struct ofpbuf action_set; /* Action set. */
+
+ enum xlate_error error; /* Translation failed. */
};
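+/* Returns a string describing 'error', suitable for log and trace messages. */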
+const char *xlate_strerror(enum xlate_error error)
+{
+ switch (error) {
+ case XLATE_OK:
+ return "OK";
+ case XLATE_BRIDGE_NOT_FOUND:
+ return "Bridge not found";
+ case XLATE_RECURSION_TOO_DEEP:
+ return "Recursion too deep";
+ case XLATE_TOO_MANY_RESUBMITS:
+ return "Too many resubmits";
+ case XLATE_STACK_TOO_DEEP:
+ return "Stack too deep";
+ case XLATE_NO_RECIRCULATION_CONTEXT:
+ return "No recirculation context";
+ case XLATE_RECIRCULATION_CONFLICT:
+ return "Recirculation conflict";
+ case XLATE_TOO_MANY_MPLS_LABELS:
+ return "Too many MPLS labels";
+ }
+ return "Unknown error";
+}
+
static void xlate_action_set(struct xlate_ctx *ctx);
+static void xlate_commit_actions(struct xlate_ctx *ctx);
static void
ctx_trigger_recirculation(struct xlate_ctx *ctx)
XC_NORMAL,
XC_FIN_TIMEOUT,
XC_GROUP,
- XC_TNL_ARP,
+ XC_TNL_NEIGH,
};
/* xlate_cache entries hold enough information to perform the side effects of
} group;
struct {
char br_name[IFNAMSIZ];
- ovs_be32 d_ip;
- } tnl_arp_cache;
+ struct in6_addr d_ipv6;
+ } tnl_neigh_cache;
} u;
};
}
}
+static struct vlog_rate_limit error_report_rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
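+/* Reports a translation error: if a trace hook is attached (as during
+ * "ofproto/trace"), sends the message to it; otherwise rate-limits the
+ * message to the error log. */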
+#define XLATE_REPORT_ERROR(CTX, ...) \
+ do { \
+ if (OVS_UNLIKELY((CTX)->xin->report_hook)) { \
+ xlate_report(CTX, __VA_ARGS__); \
+ } else { \
+ VLOG_ERR_RL(&error_report_rl, __VA_ARGS__); \
+ } \
+ } while (0)
+
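+/* If a trace hook is attached, formats 'ofpacts' and reports them under
+ * 'title'. */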
+static inline void
+xlate_report_actions(struct xlate_ctx *ctx, const char *title,
+ const struct ofpact *ofpacts, size_t ofpacts_len)
+{
+ if (OVS_UNLIKELY(ctx->xin->report_hook)) {
+ struct ds s = DS_EMPTY_INITIALIZER;
+ ofpacts_format(ofpacts, ofpacts_len, &s);
+ xlate_report(ctx, "%s: %s", title, ds_cstr(&s));
+ ds_destroy(&s);
+ }
+}
+
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
{
}
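+/* Mirrors the packet to each of 'mirrors' that it has not already been
+ * mirrored to during this translation, using 'xbundle' to determine the
+ * packet's VLAN. */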
static void
-add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
+mirror_packet(struct xlate_ctx *ctx, struct xbundle *xbundle,
+ mirror_mask_t mirrors)
{
- const struct xbridge *xbridge = ctx->xbridge;
- mirror_mask_t mirrors;
- struct xbundle *in_xbundle;
- uint16_t vlan;
- uint16_t vid;
-
- mirrors = ctx->xout->mirrors;
- ctx->xout->mirrors = 0;
-
- in_xbundle = lookup_input_bundle(xbridge, orig_flow->in_port.ofp_port,
- ctx->xin->packet != NULL, NULL);
- if (!in_xbundle) {
+ bool warn = ctx->xin->packet != NULL;
+ uint16_t vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
+ if (!input_vid_is_valid(vid, xbundle, warn)) {
return;
}
- mirrors |= xbundle_mirror_src(xbridge, in_xbundle);
+ uint16_t vlan = input_vid_to_vlan(xbundle, vid);
- /* Drop frames on bundles reserved for mirroring. */
- if (xbundle_mirror_out(xbridge, in_xbundle)) {
- if (ctx->xin->packet != NULL) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
- "%s, which is reserved exclusively for mirroring",
- ctx->xbridge->name, in_xbundle->name);
- }
- ofpbuf_clear(ctx->odp_actions);
- return;
- }
+ const struct xbridge *xbridge = ctx->xbridge;
- /* Check VLAN. */
- vid = vlan_tci_to_vid(orig_flow->vlan_tci);
- if (!input_vid_is_valid(vid, in_xbundle, ctx->xin->packet != NULL)) {
+ /* Don't mirror to destinations that we've already mirrored to. */
+ mirrors &= ~ctx->mirrors;
+ if (!mirrors) {
return;
}
- vlan = input_vid_to_vlan(in_xbundle, vid);
- if (!mirrors) {
- return;
+ /* Record these mirrors so that we don't mirror to them again. */
+ ctx->mirrors |= mirrors;
+
+ if (ctx->xin->resubmit_stats) {
+ mirror_update_stats(xbridge->mbridge, mirrors,
+ ctx->xin->resubmit_stats->n_packets,
+ ctx->xin->resubmit_stats->n_bytes);
}
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
- /* Restore the original packet before adding the mirror actions. */
- ctx->xin->flow = *orig_flow;
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_MIRROR);
+ entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
+ entry->u.mirror.mirrors = mirrors;
+ }
while (mirrors) {
+ const unsigned long *vlans;
mirror_mask_t dup_mirrors;
struct ofbundle *out;
- const unsigned long *vlans;
- bool vlan_mirrored;
- bool has_mirror;
int out_vlan;
- has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
- &vlans, &dup_mirrors, &out, &out_vlan);
+ bool has_mirror = mirror_get(xbridge->mbridge, raw_ctz(mirrors),
+ &vlans, &dup_mirrors, &out, &out_vlan);
ovs_assert(has_mirror);
if (vlans) {
ctx->wc->masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
}
- vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
- if (!vlan_mirrored) {
+ if (vlans && !bitmap_is_set(vlans, vlan)) {
mirrors = zero_rightmost_1bit(mirrors);
continue;
}
mirrors &= ~dup_mirrors;
- ctx->xout->mirrors |= dup_mirrors;
+ ctx->mirrors |= dup_mirrors;
if (out) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
struct xbundle *out_xbundle = xbundle_lookup(xcfg, out);
output_normal(ctx, out_xbundle, vlan);
}
} else if (vlan != out_vlan
- && !eth_addr_is_reserved(orig_flow->dl_dst)) {
+ && !eth_addr_is_reserved(ctx->xin->flow.dl_dst)) {
struct xbundle *xbundle;
LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
}
}
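+/* Mirrors the packet, if appropriate, to the mirrors that take the packet's
+ * input bundle as a source. */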
+static void
+mirror_ingress_packet(struct xlate_ctx *ctx)
+{
+ if (mbridge_has_mirrors(ctx->xbridge->mbridge)) {
+ bool warn = ctx->xin->packet != NULL;
+ struct xbundle *xbundle = lookup_input_bundle(
+ ctx->xbridge, ctx->xin->flow.in_port.ofp_port, warn, NULL);
+ if (xbundle) {
+ mirror_packet(ctx, xbundle,
+ xbundle_mirror_src(ctx->xbridge, xbundle));
+ }
+ }
+}
+
/* Given 'vid', the VID obtained from the 802.1Q header that was received as
* part of a packet (specify 0 if there was no 802.1Q header), and 'in_xbundle',
* the bundle on which the packet was received, returns the VLAN to which the
output_normal(ctx, xbundle, vlan);
}
}
- ctx->xout->nf_output_iface = NF_OUT_FLOOD;
+ ctx->nf_output_iface = NF_OUT_FLOOD;
}
static void
uint16_t vlan;
uint16_t vid;
- ctx->xout->has_normal = true;
-
memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
wc->masks.vlan_tci |= htons(VLAN_VID_MASK | VLAN_CFI);
}
}
-/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
- * the number of packets out of UINT32_MAX to sample. The given
- * cookie is passed back in the callback for each sampled packet.
+/* Appends a "sample" action for sFlow or IPFIX to 'ctx->odp_actions'. The
+ * 'probability' is the number of packets out of UINT32_MAX to sample. The
+ * 'cookie' (of length 'cookie_size' bytes) is passed back in the callback for
+ * each sampled packet. 'tunnel_out_port', if not ODPP_NONE, is added as the
+ * OVS_USERSPACE_ATTR_EGRESS_TUN_PORT attribute. If 'include_actions', an
+ * OVS_USERSPACE_ATTR_ACTIONS attribute is added.
*/
static size_t
-compose_sample_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
+compose_sample_action(struct xlate_ctx *ctx,
const uint32_t probability,
const union user_action_cookie *cookie,
const size_t cookie_size,
const odp_port_t tunnel_out_port,
bool include_actions)
{
- size_t sample_offset, actions_offset;
- odp_port_t odp_port;
- int cookie_offset;
- uint32_t pid;
+ size_t sample_offset = nl_msg_start_nested(ctx->odp_actions,
+ OVS_ACTION_ATTR_SAMPLE);
- sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
+ nl_msg_put_u32(ctx->odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
- nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
+ size_t actions_offset = nl_msg_start_nested(ctx->odp_actions,
+ OVS_SAMPLE_ATTR_ACTIONS);
- actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
+ odp_port_t odp_port = ofp_port_to_odp_port(
+ ctx->xbridge, ctx->xin->flow.in_port.ofp_port);
+ uint32_t pid = dpif_port_get_pid(ctx->xbridge->dpif, odp_port,
+ flow_hash_5tuple(&ctx->xin->flow, 0));
+ int cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+ tunnel_out_port,
+ include_actions,
+ ctx->odp_actions);
- odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
- pid = dpif_port_get_pid(xbridge->dpif, odp_port,
- flow_hash_5tuple(flow, 0));
- cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
- tunnel_out_port,
- include_actions,
- odp_actions);
+ nl_msg_end_nested(ctx->odp_actions, actions_offset);
+ nl_msg_end_nested(ctx->odp_actions, sample_offset);
- nl_msg_end_nested(odp_actions, actions_offset);
- nl_msg_end_nested(odp_actions, sample_offset);
return cookie_offset;
}
-static void
-compose_sflow_cookie(const struct xbridge *xbridge, ovs_be16 vlan_tci,
- odp_port_t odp_port, unsigned int n_outputs,
- union user_action_cookie *cookie)
-{
- int ifindex;
-
- cookie->type = USER_ACTION_COOKIE_SFLOW;
- cookie->sflow.vlan_tci = vlan_tci;
-
- /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
- * port information") for the interpretation of cookie->output. */
- switch (n_outputs) {
- case 0:
- /* 0x40000000 | 256 means "packet dropped for unknown reason". */
- cookie->sflow.output = 0x40000000 | 256;
- break;
-
- case 1:
- ifindex = dpif_sflow_odp_port_to_ifindex(xbridge->sflow, odp_port);
- if (ifindex) {
- cookie->sflow.output = ifindex;
- break;
- }
- /* Fall through. */
- default:
- /* 0x80000000 means "multiple output ports. */
- cookie->sflow.output = 0x80000000 | n_outputs;
- break;
- }
-}
-
-/* Compose SAMPLE action for sFlow bridge sampling. */
+/* If sFlow is not enabled, returns 0 without doing anything.
+ *
+ * If sFlow is enabled, appends a template "sample" action to the ODP actions
+ * in 'ctx'. This action is a template because some of the information needed
+ * to fill it out is not available until flow translation is complete. In this
+ * case, this function returns an offset, which is always nonzero, to pass
+ * later to fix_sflow_action() to fill in the rest of the template. */
static size_t
-compose_sflow_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
- odp_port_t odp_port)
+compose_sflow_action(struct xlate_ctx *ctx)
{
- uint32_t probability;
- union user_action_cookie cookie;
-
- if (!xbridge->sflow || flow->in_port.ofp_port == OFPP_NONE) {
+ struct dpif_sflow *sflow = ctx->xbridge->sflow;
+ if (!sflow || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return 0;
}
- probability = dpif_sflow_get_probability(xbridge->sflow);
- compose_sflow_cookie(xbridge, htons(0), odp_port,
- odp_port == ODPP_NONE ? 0 : 1, &cookie);
-
- return compose_sample_action(xbridge, odp_actions, flow, probability,
+ union user_action_cookie cookie = { .type = USER_ACTION_COOKIE_SFLOW };
+ return compose_sample_action(ctx, dpif_sflow_get_probability(sflow),
&cookie, sizeof cookie.sflow, ODPP_NONE,
true);
}
+/* If IPFIX is enabled, this appends a "sample" action to implement IPFIX to
+ * 'ctx->odp_actions'. */
static void
-compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
- uint32_t obs_domain_id, uint32_t obs_point_id,
- union user_action_cookie *cookie)
-{
- cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
- cookie->flow_sample.probability = probability;
- cookie->flow_sample.collector_set_id = collector_set_id;
- cookie->flow_sample.obs_domain_id = obs_domain_id;
- cookie->flow_sample.obs_point_id = obs_point_id;
-}
-
-static void
-compose_ipfix_cookie(union user_action_cookie *cookie,
- odp_port_t output_odp_port)
+compose_ipfix_action(struct xlate_ctx *ctx, odp_port_t output_odp_port)
{
- cookie->type = USER_ACTION_COOKIE_IPFIX;
- cookie->ipfix.output_odp_port = output_odp_port;
-}
-
-/* Compose SAMPLE action for IPFIX bridge sampling. */
-static void
-compose_ipfix_action(const struct xbridge *xbridge,
- struct ofpbuf *odp_actions,
- const struct flow *flow,
- odp_port_t output_odp_port)
-{
- uint32_t probability;
- union user_action_cookie cookie;
+ struct dpif_ipfix *ipfix = ctx->xbridge->ipfix;
odp_port_t tunnel_out_port = ODPP_NONE;
- if (!xbridge->ipfix || flow->in_port.ofp_port == OFPP_NONE) {
+ if (!ipfix || ctx->xin->flow.in_port.ofp_port == OFPP_NONE) {
return;
}
/* For input case, output_odp_port is ODPP_NONE, which is an invalid port
* number. */
if (output_odp_port == ODPP_NONE &&
- !dpif_ipfix_get_bridge_exporter_input_sampling(xbridge->ipfix)) {
+ !dpif_ipfix_get_bridge_exporter_input_sampling(ipfix)) {
return;
}
    /* For output case, output_odp_port is valid. */
if (output_odp_port != ODPP_NONE) {
- if (!dpif_ipfix_get_bridge_exporter_output_sampling(xbridge->ipfix)) {
+ if (!dpif_ipfix_get_bridge_exporter_output_sampling(ipfix)) {
return;
}
/* If tunnel sampling is enabled, put an additional option attribute:
* OVS_USERSPACE_ATTR_TUNNEL_OUT_PORT
*/
- if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(xbridge->ipfix) &&
- dpif_ipfix_get_tunnel_port(xbridge->ipfix, output_odp_port) ) {
+ if (dpif_ipfix_get_bridge_exporter_tunnel_sampling(ipfix) &&
+        dpif_ipfix_get_tunnel_port(ipfix, output_odp_port)) {
tunnel_out_port = output_odp_port;
}
}
- probability = dpif_ipfix_get_bridge_exporter_probability(xbridge->ipfix);
- compose_ipfix_cookie(&cookie, output_odp_port);
-
- compose_sample_action(xbridge, odp_actions, flow, probability,
+ union user_action_cookie cookie = {
+ .ipfix = {
+ .type = USER_ACTION_COOKIE_IPFIX,
+ .output_odp_port = output_odp_port,
+ }
+ };
+ compose_sample_action(ctx,
+ dpif_ipfix_get_bridge_exporter_probability(ipfix),
&cookie, sizeof cookie.ipfix, tunnel_out_port,
false);
}
-/* SAMPLE action for sFlow must be first action in any given list of
- * actions. At this point we do not have all information required to
- * build it. So try to build sample action as complete as possible. */
-static void
-add_sflow_action(struct xlate_ctx *ctx)
-{
- ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
- ctx->odp_actions,
- &ctx->xin->flow, ODPP_NONE);
- ctx->sflow_odp_port = 0;
- ctx->sflow_n_outputs = 0;
-}
-
-/* SAMPLE action for IPFIX must be 1st or 2nd action in any given list
- * of actions, eventually after the SAMPLE action for sFlow. */
-static void
-add_ipfix_action(struct xlate_ctx *ctx)
-{
- compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, ODPP_NONE);
-}
-
-static void
-add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
-{
- compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, port);
-}
-
-/* Fix SAMPLE action according to data collected while composing ODP actions.
- * We need to fix SAMPLE actions OVS_SAMPLE_ATTR_ACTIONS attribute, i.e. nested
- * USERSPACE action's user-cookie which is required for sflow. */
+/* Fix "sample" action according to data collected while composing ODP actions,
+ * as described in compose_sflow_action().
+ *
+ * 'user_cookie_offset' must be the offset returned by
+ * compose_sflow_action(). */
static void
-fix_sflow_action(struct xlate_ctx *ctx)
+fix_sflow_action(struct xlate_ctx *ctx, unsigned int user_cookie_offset)
{
const struct flow *base = &ctx->base_flow;
union user_action_cookie *cookie;
- if (!ctx->user_cookie_offset) {
- return;
- }
-
- cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
+ cookie = ofpbuf_at(ctx->odp_actions, user_cookie_offset,
sizeof cookie->sflow);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
- compose_sflow_cookie(ctx->xbridge, base->vlan_tci,
- ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
+ cookie->type = USER_ACTION_COOKIE_SFLOW;
+ cookie->sflow.vlan_tci = base->vlan_tci;
+
+ /* See http://www.sflow.org/sflow_version_5.txt (search for "Input/output
+ * port information") for the interpretation of cookie->output. */
+ switch (ctx->sflow_n_outputs) {
+ case 0:
+ /* 0x40000000 | 256 means "packet dropped for unknown reason". */
+ cookie->sflow.output = 0x40000000 | 256;
+ break;
+
+ case 1:
+ cookie->sflow.output = dpif_sflow_odp_port_to_ifindex(
+ ctx->xbridge->sflow, ctx->sflow_odp_port);
+ if (cookie->sflow.output) {
+ break;
+ }
+ /* Fall through. */
+ default:
+        /* 0x80000000 means "multiple output ports". */
+ cookie->sflow.output = 0x80000000 | ctx->sflow_n_outputs;
+ break;
+ }
}
static bool
static int
tnl_route_lookup_flow(const struct flow *oflow,
- ovs_be32 *ip, struct xport **out_port)
+ struct in6_addr *ip, struct xport **out_port)
{
char out_dev[IFNAMSIZ];
struct xbridge *xbridge;
struct xlate_cfg *xcfg;
- ovs_be32 gw;
+ struct in6_addr gw;
+ struct in6_addr dst;
- if (!ovs_router_lookup(oflow->tunnel.ip_dst, out_dev, &gw)) {
+ dst = flow_tnl_dst(&oflow->tunnel);
+ if (!ovs_router_lookup(&dst, out_dev, &gw)) {
return -ENOENT;
}
- if (gw) {
+ if (ipv6_addr_is_set(&gw) &&
+ (!IN6_IS_ADDR_V4MAPPED(&gw) || in6_addr_get_mapped_ipv4(&gw))) {
*ip = gw;
} else {
- *ip = oflow->tunnel.ip_dst;
+ *ip = dst;
}
xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
}
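+/* Translates 'packet' through the OpenFlow tables of 'out_dev''s bridge, as
+ * if the packet had been received on 'out_dev', carrying along the current
+ * recursion and resubmit counts. */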
static int
-xlate_flood_packet(struct xbridge *xbridge, struct dp_packet *packet)
+compose_table_xlate(struct xlate_ctx *ctx, const struct xport *out_dev,
+ struct dp_packet *packet)
{
+ struct xbridge *xbridge = out_dev->xbridge;
struct ofpact_output output;
struct flow flow;
ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
- /* Use OFPP_NONE as the in_port to avoid special packet processing. */
flow_extract(packet, &flow);
- flow.in_port.ofp_port = OFPP_NONE;
- output.port = OFPP_FLOOD;
+ flow.in_port.ofp_port = out_dev->ofp_port;
+ output.port = OFPP_TABLE;
output.max_len = 0;
- return ofproto_dpif_execute_actions(xbridge->ofproto, &flow, NULL,
- &output.ofpact, sizeof output,
- packet);
+ return ofproto_dpif_execute_actions__(xbridge->ofproto, &flow, NULL,
+ &output.ofpact, sizeof output,
+ ctx->recurse, ctx->resubmits, packet);
}
static void
-tnl_send_arp_request(const struct xport *out_dev, const uint8_t eth_src[ETH_ADDR_LEN],
+tnl_send_nd_request(struct xlate_ctx *ctx, const struct xport *out_dev,
+ const struct eth_addr eth_src,
+ struct in6_addr * ipv6_src, struct in6_addr * ipv6_dst)
+{
+ struct dp_packet packet;
+
+ dp_packet_init(&packet, 0);
+ compose_nd(&packet, eth_src, ipv6_src, ipv6_dst);
+ compose_table_xlate(ctx, out_dev, &packet);
+ dp_packet_uninit(&packet);
+}
+
+static void
+tnl_send_arp_request(struct xlate_ctx *ctx, const struct xport *out_dev,
+ const struct eth_addr eth_src,
ovs_be32 ip_src, ovs_be32 ip_dst)
{
- struct xbridge *xbridge = out_dev->xbridge;
struct dp_packet packet;
dp_packet_init(&packet, 0);
compose_arp(&packet, ARP_OP_REQUEST,
eth_src, eth_addr_zero, true, ip_src, ip_dst);
- xlate_flood_packet(xbridge, &packet);
+ compose_table_xlate(ctx, out_dev, &packet);
dp_packet_uninit(&packet);
}
{
struct ovs_action_push_tnl tnl_push_data;
struct xport *out_dev = NULL;
- ovs_be32 s_ip, d_ip = 0;
- uint8_t smac[ETH_ADDR_LEN];
- uint8_t dmac[ETH_ADDR_LEN];
+ ovs_be32 s_ip = 0, d_ip = 0;
+ struct in6_addr s_ip6 = in6addr_any;
+ struct in6_addr d_ip6 = in6addr_any;
+ struct eth_addr smac;
+ struct eth_addr dmac;
int err;
+ char buf_sip6[INET6_ADDRSTRLEN];
+ char buf_dip6[INET6_ADDRSTRLEN];
- err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
+ err = tnl_route_lookup_flow(flow, &d_ip6, &out_dev);
if (err) {
xlate_report(ctx, "native tunnel routing failed");
return err;
}
- xlate_report(ctx, "tunneling to "IP_FMT" via %s",
- IP_ARGS(d_ip), netdev_get_name(out_dev->netdev));
+
+ xlate_report(ctx, "tunneling to %s via %s",
+ ipv6_string_mapped(buf_dip6, &d_ip6),
+ netdev_get_name(out_dev->netdev));
/* Use mac addr of bridge port of the peer. */
- err = netdev_get_etheraddr(out_dev->netdev, smac);
+ err = netdev_get_etheraddr(out_dev->netdev, &smac);
if (err) {
xlate_report(ctx, "tunnel output device lacks Ethernet address");
return err;
}
- err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
- if (err) {
- xlate_report(ctx, "tunnel output device lacks IPv4 address");
- return err;
+ d_ip = in6_addr_get_mapped_ipv4(&d_ip6);
+ if (d_ip) {
+ err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
+ if (err) {
+ xlate_report(ctx, "tunnel output device lacks IPv4 address");
+ return err;
+ }
+ in6_addr_set_mapped_ipv4(&s_ip6, s_ip);
+ } else {
+ err = netdev_get_in6(out_dev->netdev, &s_ip6);
+ if (err) {
+ xlate_report(ctx, "tunnel output device lacks IPv6 address");
+ return err;
+ }
}
- err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
+ err = tnl_neigh_lookup(out_dev->xbridge->name, &d_ip6, &dmac);
if (err) {
- xlate_report(ctx, "ARP cache miss for "IP_FMT" on bridge %s, "
- "sending ARP request",
- IP_ARGS(d_ip), out_dev->xbridge->name);
- tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
+ xlate_report(ctx, "neighbor cache miss for %s on bridge %s, "
+ "sending %s request",
+ buf_dip6, out_dev->xbridge->name, d_ip ? "ARP" : "ND");
+ if (d_ip) {
+ tnl_send_arp_request(ctx, out_dev, smac, s_ip, d_ip);
+ } else {
+ tnl_send_nd_request(ctx, out_dev, smac, &s_ip6, &d_ip6);
+ }
return err;
}
+
if (ctx->xin->xcache) {
struct xc_entry *entry;
- entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_ARP);
- ovs_strlcpy(entry->u.tnl_arp_cache.br_name, out_dev->xbridge->name,
- sizeof entry->u.tnl_arp_cache.br_name);
- entry->u.tnl_arp_cache.d_ip = d_ip;
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_NEIGH);
+ ovs_strlcpy(entry->u.tnl_neigh_cache.br_name, out_dev->xbridge->name,
+ sizeof entry->u.tnl_neigh_cache.br_name);
+ entry->u.tnl_neigh_cache.d_ipv6 = d_ip6;
}
- xlate_report(ctx, "tunneling from "ETH_ADDR_FMT" "IP_FMT
- " to "ETH_ADDR_FMT" "IP_FMT,
- ETH_ADDR_ARGS(smac), IP_ARGS(s_ip),
- ETH_ADDR_ARGS(dmac), IP_ARGS(d_ip));
+ xlate_report(ctx, "tunneling from "ETH_ADDR_FMT" %s"
+ " to "ETH_ADDR_FMT" %s",
+ ETH_ADDR_ARGS(smac), ipv6_string_mapped(buf_sip6, &s_ip6),
+ ETH_ADDR_ARGS(dmac), buf_dip6);
+
err = tnl_port_build_header(xport->ofport, flow,
- dmac, smac, s_ip, &tnl_push_data);
+ dmac, smac, &s_ip6, &tnl_push_data);
if (err) {
return err;
}
return 0;
}
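+/* Commits any pending modifications to the flow into 'ctx->odp_actions' as
+ * datapath set actions, adding slow-path flags as necessary. */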
+static void
+xlate_commit_actions(struct xlate_ctx *ctx)
+{
+ bool use_masked = ctx->xbridge->support.masked_set_action;
+
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ ctx->odp_actions, ctx->wc,
+ use_masked);
+}
+
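+/* Resets 'flow''s conntrack metadata, as for a packet that has never visited
+ * the connection tracker. */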
+static void
+clear_conntrack(struct flow *flow)
+{
+ flow->ct_state = 0;
+ flow->ct_zone = 0;
+ flow->ct_mark = 0;
+ memset(&flow->ct_label, 0, sizeof flow->ct_label);
+}
+
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
const struct xlate_bond_recirc *xr, bool check_stp)
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 35);
memset(&flow_tnl, 0, sizeof flow_tnl);
if (!xport) {
}
}
- if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
- ctx->xout->mirrors |= xbundle_mirror_dst(xport->xbundle->xbridge,
- xport->xbundle);
- }
-
if (xport->peer) {
const struct xport *peer = xport->peer;
struct flow old_flow = ctx->xin->flow;
+ bool old_conntrack = ctx->conntracked;
bool old_was_mpls = ctx->was_mpls;
cls_version_t old_version = ctx->tables_version;
struct ofpbuf old_stack = ctx->stack;
memset(&flow->tunnel, 0, sizeof flow->tunnel);
memset(flow->regs, 0, sizeof flow->regs);
flow->actset_output = OFPP_UNSET;
+ ctx->conntracked = false;
+ clear_conntrack(flow);
/* The bridge is now known so obtain its table version. */
ctx->tables_version
* the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ctx->odp_actions->size;
- mirror_mask_t old_mirrors = ctx->xout->mirrors;
+ mirror_mask_t old_mirrors = ctx->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
- ctx->xout->mirrors = old_mirrors;
+ ctx->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
ctx->odp_actions->size = old_size;
* bridge. */
ctx->was_mpls = old_was_mpls;
+ /* The peer bridge's conntrack execution should have no effect on the
+ * original bridge. */
+ ctx->conntracked = old_conntrack;
+
/* The fact that the peer bridge exits (for any reason) does not mean
* that the original bridge should exit. Specifically, if the peer
* bridge recirculates (which typically modifies the packet), the
* recirculated packet! */
ctx->exit = false;
+ /* Peer bridge errors do not propagate back. */
+ ctx->error = XLATE_OK;
+
if (ctx->xin->resubmit_stats) {
netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
netdev_vport_inc_rx(peer->netdev, ctx->xin->resubmit_stats);
}
if (xport->is_tunnel) {
+ struct in6_addr dst;
/* Save tunnel metadata so that changes made due to
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
}
- if (flow->tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
+ dst = flow_tnl_dst(&flow->tunnel);
+ if (ipv6_addr_equals(&dst, &ctx->orig_tunnel_ipv6_dst)) {
xlate_report(ctx, "Not tunneling to our own address");
goto out; /* restore flow_nw_tos */
}
}
if (out_port != ODPP_NONE) {
- bool use_masked = ctx->xbridge->support.masked_set_action;
-
- ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->odp_actions,
- wc, use_masked);
+ xlate_commit_actions(ctx);
if (xr) {
struct ovs_action_hash *act_hash;
} else {
/* Tunnel push-pop action is not compatible with
* IPFIX action. */
- add_ipfix_output_action(ctx, out_port);
+ compose_ipfix_action(ctx, out_port);
nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_OUTPUT,
out_port);
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
- ctx->xout->nf_output_iface = ofp_port;
+ ctx->nf_output_iface = ofp_port;
+ }
+
+ if (mbridge_has_mirrors(ctx->xbridge->mbridge) && xport->xbundle) {
+ mirror_packet(ctx, xport->xbundle,
+ xbundle_mirror_dst(xport->xbundle->xbridge,
+ xport->xbundle));
}
out:
static bool
xlate_resubmit_resource_check(struct xlate_ctx *ctx)
{
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-
if (ctx->recurse >= MAX_RESUBMIT_RECURSION + MAX_INTERNAL_RESUBMITS) {
- VLOG_ERR_RL(&rl, "resubmit actions recursed over %d times",
- MAX_RESUBMIT_RECURSION);
+ XLATE_REPORT_ERROR(ctx, "resubmit actions recursed over %d times",
+ MAX_RESUBMIT_RECURSION);
+ ctx->error = XLATE_RECURSION_TOO_DEEP;
} else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
- VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
+ XLATE_REPORT_ERROR(ctx, "over %d resubmit actions", MAX_RESUBMITS);
+ ctx->error = XLATE_TOO_MANY_RESUBMITS;
} else if (ctx->odp_actions->size > UINT16_MAX) {
- VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
+ XLATE_REPORT_ERROR(ctx, "resubmits yielded over 64 kB of actions");
+ /* NOT an error, as we'll be slow-pathing the flow in this case? */
+ ctx->exit = true; /* XXX: translation still terminated! */
} else if (ctx->stack.size >= 65536) {
- VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
+ XLATE_REPORT_ERROR(ctx, "resubmits yielded over 64 kB of stack");
+ ctx->error = XLATE_STACK_TOO_DEEP;
} else {
return true;
}
rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
ctx->tables_version,
&ctx->xin->flow, ctx->xin->wc,
- ctx->xin->xcache != NULL,
ctx->xin->resubmit_stats,
&ctx->table_id, in_port,
may_packet_in, honor_table_miss);
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
entry->u.rule = rule;
+ rule_dpif_ref(rule);
}
xlate_recursively(ctx, rule);
}
ctx->table_id = old_table_id;
return;
}
-
- ctx->exit = true;
}
static void
}
basis = hash_bytes(&value, mf->n_bytes, basis);
+ /* For tunnels, hash in whether the field is present. */
+ if (mf_is_tun_metadata(mf)) {
+ basis = hash_boolean(mf_is_set(mf, &ctx->xin->flow), basis);
+ }
+
mf_mask_field(mf, &ctx->wc->masks);
}
}
}
}
- ctx->xout->nf_output_iface = NF_OUT_FLOOD;
+ ctx->nf_output_iface = NF_OUT_FLOOD;
}
static void
{
struct ofproto_packet_in *pin;
struct dp_packet *packet;
- bool use_masked;
ctx->xout->slow |= SLOW_CONTROLLER;
+ xlate_commit_actions(ctx);
if (!ctx->xin->packet) {
return;
}
packet = dp_packet_clone(ctx->xin->packet);
- use_masked = ctx->xbridge->support.masked_set_action;
- ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->odp_actions,
- ctx->wc, use_masked);
-
odp_execute_actions(NULL, &packet, 1, false,
ctx->odp_actions->data, ctx->odp_actions->size, NULL);
dp_packet_delete(packet);
}
-/* Called only when ctx->recirc_action_offset is set. */
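+/* Appends an OVS_ACTION_ATTR_RECIRC action to 'ctx->odp_actions', with a
+ * recirculation ID allocated for the current translation state, so that
+ * processing resumes in 'table' after recirculation. */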
static void
-compose_recirculate_action(struct xlate_ctx *ctx)
+compose_recirculate_action__(struct xlate_ctx *ctx, uint8_t table)
{
struct recirc_metadata md;
- bool use_masked;
uint32_t id;
- use_masked = ctx->xbridge->support.masked_set_action;
- ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->odp_actions,
- ctx->wc, use_masked);
-
recirc_metadata_from_flow(&md, &ctx->xin->flow);
ovs_assert(ctx->recirc_action_offset >= 0);
- /* Only allocate recirculation ID if we have a packet. */
- if (ctx->xin->packet) {
- /* Allocate a unique recirc id for the given metadata state in the
- * flow. The life-cycle of this recirc id is managed by associating it
- * with the udpif key ('ukey') created for each new datapath flow. */
- id = recirc_alloc_id_ctx(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
- ctx->recirc_action_offset,
- ctx->action_set.size, ctx->action_set.data);
- if (!id) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_ERR_RL(&rl, "Failed to allocate recirculation id");
- return;
- }
- xlate_out_add_recirc(ctx->xout, id);
- } else {
- /* Look up an existing recirc id for the given metadata state in the
- * flow. No new reference is taken, as the ID is RCU protected and is
- * only required temporarily for verification. */
- id = recirc_find_id(ctx->xbridge->ofproto, 0, &md, &ctx->stack,
- ctx->recirc_action_offset,
- ctx->action_set.size, ctx->action_set.data);
- /* We let zero 'id' to be used in the RECIRC action below, which will
- * fail all revalidations as zero is not a valid recirculation ID. */
+ struct recirc_state state = {
+ .table_id = table,
+ .ofproto = ctx->xbridge->ofproto,
+ .metadata = md,
+ .stack = &ctx->stack,
+ .mirrors = ctx->mirrors,
+ .conntracked = ctx->conntracked,
+ .action_set_len = ctx->recirc_action_offset,
+ .ofpacts_len = ctx->action_set.size,
+ .ofpacts = ctx->action_set.data,
+ };
+
+ /* Allocate a unique recirc id for the given metadata state in the
+ * flow. An existing id, with a new reference to the corresponding
+ * recirculation context, will be returned if possible.
+ * The life-cycle of this recirc id is managed by associating it
+ * with the udpif key ('ukey') created for each new datapath flow. */
+ id = recirc_alloc_id_ctx(&state);
+ if (!id) {
+ XLATE_REPORT_ERROR(ctx, "Failed to allocate recirculation id");
+ ctx->error = XLATE_NO_RECIRCULATION_CONTEXT;
+ return;
}
+ recirc_refs_add(&ctx->xout->recircs, id);
nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
ctx->last_unroll_offset = -1;
}
+/* Called only when ctx->recirc_action_offset is set. */
+static void
+compose_recirculate_action(struct xlate_ctx *ctx)
+{
+ xlate_commit_actions(ctx);
+ compose_recirculate_action__(ctx, 0);
+}
+
+/* Fork the pipeline here. The current packet will continue processing the
+ * current action list. A clone of the current packet will recirculate, skip
+ * the remainder of the current action list and asynchronously resume pipeline
+ * processing in 'table' with the current metadata and action set. */
+static void
+compose_recirculate_and_fork(struct xlate_ctx *ctx, uint8_t table)
+{
+ ctx->recirc_action_offset = ctx->action_set.size;
+ compose_recirculate_action__(ctx, table);
+}
+
static void
compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
n = flow_count_mpls_labels(flow, ctx->wc);
if (!n) {
- bool use_masked = ctx->xbridge->support.masked_set_action;
-
- ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->odp_actions,
- ctx->wc, use_masked);
+ xlate_commit_actions(ctx);
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+ XLATE_REPORT_ERROR(ctx, "bridge %s: dropping packet on which an "
"MPLS push action can't be performed as it would "
"have more MPLS LSEs than the %d supported.",
ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
}
- ctx->exit = true;
+ ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
return;
}
}
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
- VLOG_WARN_RL(&rl, "bridge %s: dropping packet on which an "
+ XLATE_REPORT_ERROR(ctx, "bridge %s: dropping packet on which an "
"MPLS pop action can't be performed as it has "
"more MPLS LSEs than the %d supported.",
ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
}
- ctx->exit = true;
+ ctx->error = XLATE_TOO_MANY_MPLS_LABELS;
ofpbuf_clear(ctx->odp_actions);
}
}
xlate_output_action(struct xlate_ctx *ctx,
ofp_port_t port, uint16_t max_len, bool may_packet_in)
{
- ofp_port_t prev_nf_output_iface = ctx->xout->nf_output_iface;
+ ofp_port_t prev_nf_output_iface = ctx->nf_output_iface;
- ctx->xout->nf_output_iface = NF_OUT_DROP;
+ ctx->nf_output_iface = NF_OUT_DROP;
switch (port) {
case OFPP_IN_PORT:
}
if (prev_nf_output_iface == NF_OUT_FLOOD) {
- ctx->xout->nf_output_iface = NF_OUT_FLOOD;
- } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
- ctx->xout->nf_output_iface = prev_nf_output_iface;
+ ctx->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = prev_nf_output_iface;
} else if (prev_nf_output_iface != NF_OUT_DROP &&
- ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
- ctx->xout->nf_output_iface = NF_OUT_MULTI;
+ ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
}
}
ctx->xin->flow.skb_priority = flow_priority;
/* Update NetFlow output port. */
- if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
- ctx->xout->nf_output_iface = ofp_port;
- } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
- ctx->xout->nf_output_iface = NF_OUT_MULTI;
+ if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = ofp_port;
+ } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
}
}
static void
xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
{
- ctx->xout->has_learn = true;
learn_mask(learn, ctx->wc);
if (ctx->xin->xcache) {
xlate_sample_action(struct xlate_ctx *ctx,
const struct ofpact_sample *os)
{
- union user_action_cookie cookie;
/* Scale the probability from 16-bit to 32-bit while representing
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
- bool use_masked;
if (!ctx->xbridge->support.variable_length_userdata) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
return;
}
- use_masked = ctx->xbridge->support.masked_set_action;
- ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->odp_actions,
- ctx->wc, use_masked);
-
- compose_flow_sample_cookie(os->probability, os->collector_set_id,
- os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->xbridge, ctx->odp_actions,
- &ctx->xin->flow, probability, &cookie,
- sizeof cookie.flow_sample, ODPP_NONE,
- false);
+ xlate_commit_actions(ctx);
+
+ union user_action_cookie cookie = {
+ .flow_sample = {
+ .type = USER_ACTION_COOKIE_FLOW_SAMPLE,
+ .probability = os->probability,
+ .collector_set_id = os->collector_set_id,
+ .obs_domain_id = os->obs_domain_id,
+ .obs_point_id = os->obs_point_id,
+ }
+ };
+ compose_sample_action(ctx, probability, &cookie, sizeof cookie.flow_sample,
+ ODPP_NONE, false);
}
static bool
}
ofpbuf_put(&ctx->action_set, on->actions, on_len);
- ofpact_pad(&ctx->action_set);
}
static void
case OFPACT_WRITE_ACTIONS:
case OFPACT_METER:
case OFPACT_SAMPLE:
+ case OFPACT_DEBUG_RECIRC:
+ case OFPACT_CT:
+ case OFPACT_NAT:
break;
/* These need not be copied for restoration. */
CHECK_MPLS_RECIRCULATION(); \
}
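+/* Appends a masked OVS_CT_ATTR_MARK attribute to 'odp_actions' if the ct_mark
+ * is matched in 'wc' and its value in 'flow' differs from 'base_flow'. */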
+static void
+put_ct_mark(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+ struct {
+ uint32_t key;
+ uint32_t mask;
+ } odp_attr;
+
+ odp_attr.key = flow->ct_mark;
+ odp_attr.mask = wc->masks.ct_mark;
+
+ if (odp_attr.mask && odp_attr.key != base_flow->ct_mark) {
+ nl_msg_put_unspec(odp_actions, OVS_CT_ATTR_MARK, &odp_attr,
+ sizeof(odp_attr));
+ }
+}
+
+static void
+put_ct_label(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+ if (!ovs_u128_is_zero(&wc->masks.ct_label)
+ && !ovs_u128_equals(&flow->ct_label, &base_flow->ct_label)) {
+ struct {
+ ovs_u128 key;
+ ovs_u128 mask;
+ } *odp_ct_label;
+
+ odp_ct_label = nl_msg_put_unspec_uninit(odp_actions,
+ OVS_CT_ATTR_LABELS,
+ sizeof(*odp_ct_label));
+ odp_ct_label->key = flow->ct_label;
+ odp_ct_label->mask = wc->masks.ct_label;
+ }
+}
+
+static void
+put_ct_helper(struct ofpbuf *odp_actions, struct ofpact_conntrack *ofc)
+{
+ if (ofc->alg) {
+ if (ofc->alg == IPPORT_FTP) {
+ nl_msg_put_string(odp_actions, OVS_CT_ATTR_HELPER, "ftp");
+ } else {
+ VLOG_WARN("Cannot serialize ct_helper %d\n", ofc->alg);
+ }
+ }
+}
+
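+/* Serializes the NAT action embedded in the current conntrack action, if any,
+ * as a nested OVS_CT_ATTR_NAT attribute carrying OVS_NAT_ATTR_* members for
+ * the flags and the address and port ranges. */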
+static void
+put_ct_nat(struct xlate_ctx *ctx)
+{
+ struct ofpact_nat *ofn = ctx->ct_nat_action;
+ size_t nat_offset;
+
+ if (!ofn) {
+ return;
+ }
+
+ nat_offset = nl_msg_start_nested(ctx->odp_actions, OVS_CT_ATTR_NAT);
+ if (ofn->flags & NX_NAT_F_SRC || ofn->flags & NX_NAT_F_DST) {
+ nl_msg_put_flag(ctx->odp_actions, ofn->flags & NX_NAT_F_SRC
+ ? OVS_NAT_ATTR_SRC : OVS_NAT_ATTR_DST);
+ if (ofn->flags & NX_NAT_F_PERSISTENT) {
+ nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PERSISTENT);
+ }
+ if (ofn->flags & NX_NAT_F_PROTO_HASH) {
+ nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_HASH);
+ } else if (ofn->flags & NX_NAT_F_PROTO_RANDOM) {
+ nl_msg_put_flag(ctx->odp_actions, OVS_NAT_ATTR_PROTO_RANDOM);
+ }
+ if (ofn->range_af == AF_INET) {
+ nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
+ ofn->range.addr.ipv4.min);
+ if (ofn->range.addr.ipv4.max &&
+ (ntohl(ofn->range.addr.ipv4.max)
+ > ntohl(ofn->range.addr.ipv4.min))) {
+ nl_msg_put_be32(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
+ ofn->range.addr.ipv4.max);
+ }
+ } else if (ofn->range_af == AF_INET6) {
+ nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MIN,
+ &ofn->range.addr.ipv6.min,
+ sizeof ofn->range.addr.ipv6.min);
+ if (!ipv6_mask_is_any(&ofn->range.addr.ipv6.max) &&
+ memcmp(&ofn->range.addr.ipv6.max, &ofn->range.addr.ipv6.min,
+ sizeof ofn->range.addr.ipv6.max) > 0) {
+ nl_msg_put_unspec(ctx->odp_actions, OVS_NAT_ATTR_IP_MAX,
+ &ofn->range.addr.ipv6.max,
+ sizeof ofn->range.addr.ipv6.max);
+ }
+ }
+ if (ofn->range_af != AF_UNSPEC && ofn->range.proto.min) {
+ nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MIN,
+ ofn->range.proto.min);
+ if (ofn->range.proto.max &&
+ ofn->range.proto.max > ofn->range.proto.min) {
+ nl_msg_put_u16(ctx->odp_actions, OVS_NAT_ATTR_PROTO_MAX,
+ ofn->range.proto.max);
+ }
+ }
+ }
+ nl_msg_end_nested(ctx->odp_actions, nat_offset);
+}
+
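+/* Composes an OVS_ACTION_ATTR_CT datapath action from 'ofc', first committing
+ * pending flow changes so that the connection tracker sees the packet as it
+ * will be output. */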
+static void
+compose_conntrack_action(struct xlate_ctx *ctx, struct ofpact_conntrack *ofc)
+{
+ ovs_u128 old_ct_label = ctx->base_flow.ct_label;
+ uint32_t old_ct_mark = ctx->base_flow.ct_mark;
+ size_t ct_offset;
+ uint16_t zone;
+
+ /* Ensure that any prior actions are applied before composing the new
+ * conntrack action. */
+ xlate_commit_actions(ctx);
+
+ /* Process nested actions first, to populate the key. */
+ ctx->ct_nat_action = NULL;
+ do_xlate_actions(ofc->actions, ofpact_ct_get_action_len(ofc), ctx);
+
+ if (ofc->zone_src.field) {
+ zone = mf_get_subfield(&ofc->zone_src, &ctx->xin->flow);
+ } else {
+ zone = ofc->zone_imm;
+ }
+
+ ct_offset = nl_msg_start_nested(ctx->odp_actions, OVS_ACTION_ATTR_CT);
+ if (ofc->flags & NX_CT_F_COMMIT) {
+ nl_msg_put_flag(ctx->odp_actions, OVS_CT_ATTR_COMMIT);
+ }
+ nl_msg_put_u16(ctx->odp_actions, OVS_CT_ATTR_ZONE, zone);
+ put_ct_mark(&ctx->xin->flow, &ctx->base_flow, ctx->odp_actions, ctx->wc);
+ put_ct_label(&ctx->xin->flow, &ctx->base_flow, ctx->odp_actions, ctx->wc);
+ put_ct_helper(ctx->odp_actions, ofc);
+ put_ct_nat(ctx);
+ ctx->ct_nat_action = NULL;
+ nl_msg_end_nested(ctx->odp_actions, ct_offset);
+
+ /* Restore the original ct fields in the key. These should only be exposed
+ * after recirculation to another table. */
+ ctx->base_flow.ct_mark = old_ct_mark;
+ ctx->base_flow.ct_label = old_ct_label;
+
+ if (ofc->recirc_table == NX_CT_RECIRC_NONE) {
+ /* If we do not recirculate as part of this action, hide the results of
+ * connection tracking from subsequent recirculations. */
+ ctx->conntracked = false;
+ } else {
+ /* Use ct_* fields from datapath during recirculation upcall. */
+ ctx->conntracked = true;
+ compose_recirculate_and_fork(ctx, ofc->recirc_table);
+ }
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx)
const struct ofpact *a;
if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
- tnl_arp_snoop(flow, wc, ctx->xbridge->name);
+ tnl_neigh_snoop(flow, wc, ctx->xbridge->name);
}
/* dl_type already in the mask, not set below. */
const struct ofpact_set_field *set_field;
const struct mf_field *mf;
+ if (ctx->error) {
+ break;
+ }
+
if (ctx->exit) {
/* Check if need to store the remaining actions for later
* execution. */
break;
case OFPACT_SET_ETH_SRC:
- memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
- memcpy(flow->dl_src, ofpact_get_SET_ETH_SRC(a)->mac, ETH_ADDR_LEN);
+ WC_MASK_FIELD(wc, dl_src);
+ flow->dl_src = ofpact_get_SET_ETH_SRC(a)->mac;
break;
case OFPACT_SET_ETH_DST:
- memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- memcpy(flow->dl_dst, ofpact_get_SET_ETH_DST(a)->mac, ETH_ADDR_LEN);
+ WC_MASK_FIELD(wc, dl_dst);
+ flow->dl_dst = ofpact_get_SET_ETH_DST(a)->mac;
break;
case OFPACT_SET_IPV4_SRC:
&& !eth_type_mpls(flow->dl_type)) {
break;
}
- /* A flow may wildcard nw_frag. Do nothing if setting a trasport
+ /* A flow may wildcard nw_frag. Do nothing if setting a transport
* header field on a packet that does not have them. */
- mf_mask_field_and_prereqs(mf, &wc->masks);
+ mf_mask_field_and_prereqs(mf, wc);
if (mf_are_prereqs_ok(mf, flow)) {
mf_set_flow_value_masked(mf, &set_field->value,
&set_field->mask, flow);
case OFPACT_FIN_TIMEOUT:
CHECK_MPLS_RECIRCULATION();
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- ctx->xout->has_fin_timeout = true;
xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
break;
case OFPACT_SAMPLE:
xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
break;
+
+ case OFPACT_CT:
+ CHECK_MPLS_RECIRCULATION();
+ compose_conntrack_action(ctx, ofpact_get_CT(a));
+ break;
+
+ case OFPACT_NAT:
+ /* This will be processed by compose_conntrack_action(). */
+ ctx->ct_nat_action = ofpact_get_NAT(a);
+ break;
+
+ case OFPACT_DEBUG_RECIRC:
+ ctx_trigger_recirculation(ctx);
+ a = ofpact_next(a);
+ break;
}
/* Check if need to store this and the remaining actions for later
* execution. */
- if (ctx->exit && ctx_first_recirculation_action(ctx)) {
+ if (!ctx->error && ctx->exit && ctx_first_recirculation_action(ctx)) {
recirc_unroll_actions(a, OFPACT_ALIGN(ofpacts_len -
((uint8_t *)a -
(uint8_t *)ofpacts)),
xin->resubmit_hook = NULL;
xin->report_hook = NULL;
xin->resubmit_stats = NULL;
+ xin->recurse = 0;
+ xin->resubmits = 0;
xin->wc = wc;
xin->odp_actions = odp_actions;
xlate_out_uninit(struct xlate_out *xout)
{
if (xout) {
- xlate_out_free_recircs(xout);
+ recirc_refs_unref(&xout->recircs);
}
}
xlate_actions_for_side_effects(struct xlate_in *xin)
{
struct xlate_out xout;
+ enum xlate_error error;
+
+ error = xlate_actions(xin, &xout);
+ if (error) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ VLOG_WARN_RL(&rl, "xlate_actions failed (%s)!", xlate_strerror(error));
+ }
- xlate_actions(xin, &xout);
xlate_out_uninit(&xout);
}
\f
stream = fopen(filename, "r");
if (!stream) {
- VLOG_WARN("%s: open failed (%s)", filename, ovs_strerror(errno));
+ VLOG_INFO("%s: open failed (%s)", filename, ovs_strerror(errno));
} else {
if (fscanf(stream, "%d", &n) != 1) {
VLOG_WARN("%s: read error", filename);
#endif
}
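+/* Initializes the wildcard masks in 'ctx->wc' with the fields that every
+ * translation examines, regardless of the flow or the actions. */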
+static void
+xlate_wc_init(struct xlate_ctx *ctx)
+{
+ flow_wildcards_init_catchall(ctx->wc);
+
+ /* Some fields we consider to always be examined. */
+ WC_MASK_FIELD(ctx->wc, in_port);
+ WC_MASK_FIELD(ctx->wc, dl_type);
+ if (is_ip_any(&ctx->xin->flow)) {
+ WC_MASK_FIELD_MASK(ctx->wc, nw_frag, FLOW_NW_FRAG_MASK);
+ }
+
+ if (ctx->xbridge->support.odp.recirc) {
+ /* Always exactly match recirc_id when datapath supports
+ * recirculation. */
+ WC_MASK_FIELD(ctx->wc, recirc_id);
+ }
+
+ if (ctx->xbridge->netflow) {
+ netflow_mask_wc(&ctx->xin->flow, ctx->wc);
+ }
+
+ tnl_wc_init(&ctx->xin->flow, ctx->wc);
+}
+
+static void
+xlate_wc_finish(struct xlate_ctx *ctx)
+{
+ /* Clear the metadata and register wildcard masks, because we won't
+ * use non-header fields as part of the cache. */
+ flow_wildcards_clear_non_packet_fields(ctx->wc);
+
+ /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
+ * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
+ * represent these fields. The datapath interface, on the other hand,
+ * represents them with just 8 bits each. This means that if the high
+ * 8 bits of the masks for these fields somehow become set, then they
+ * will get chopped off by a round trip through the datapath, and
+ * revalidation will spot that as an inconsistency and delete the flow.
+ * Avoid the problem here by making sure that only the low 8 bits of
+ * either field can be unwildcarded for ICMP.
+ */
+ if (is_icmpv4(&ctx->xin->flow) || is_icmpv6(&ctx->xin->flow)) {
+ ctx->wc->masks.tp_src &= htons(UINT8_MAX);
+ ctx->wc->masks.tp_dst &= htons(UINT8_MAX);
+ }
+ /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
+ if (ctx->wc->masks.vlan_tci) {
+ ctx->wc->masks.vlan_tci |= htons(VLAN_CFI);
+ }
+}
+
/* Translates the flow, actions, or rule in 'xin' into datapath actions in
* 'xout'.
* The caller must take responsibility for eventually freeing 'xout', with
- * xlate_out_uninit(). */
-void
+ * xlate_out_uninit().
+ * Returns 'XLATE_OK' if translation was successful. In case of an error an
+ * empty set of actions will be returned in 'xin->odp_actions' (if non-NULL),
+ * so that most callers may ignore the return value and transparently install a
+ * drop flow when the translation fails. */
+enum xlate_error
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
*xout = (struct xlate_out) {
.slow = 0,
.fail_open = false,
- .has_learn = false,
- .has_normal = false,
- .has_fin_timeout = false,
- .nf_output_iface = NF_OUT_DROP,
- .mirrors = 0,
- .n_recircs = 0,
+ .recircs = RECIRC_REFS_EMPTY_INITIALIZER,
};
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
if (!xbridge) {
- return;
+ return XLATE_BRIDGE_NOT_FOUND;
}
struct flow *flow = &xin->flow;
.xin = xin,
.xout = xout,
.base_flow = *flow,
- .orig_tunnel_ip_dst = flow->tunnel.ip_dst,
+ .orig_tunnel_ipv6_dst = flow_tnl_dst(&flow->tunnel),
.xbridge = xbridge,
.stack = OFPBUF_STUB_INITIALIZER(stack_stub),
.rule = xin->rule,
.wc = xin->wc ? xin->wc : &scratch_wc,
.odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
- .recurse = 0,
- .resubmits = 0,
+ .recurse = xin->recurse,
+ .resubmits = xin->resubmits,
.in_group = false,
.in_action_set = false,
.orig_skb_priority = flow->skb_priority,
.sflow_n_outputs = 0,
.sflow_odp_port = 0,
- .user_cookie_offset = 0,
+ .nf_output_iface = NF_OUT_DROP,
.exit = false,
+ .error = XLATE_OK,
+ .mirrors = 0,
.recirc_action_offset = -1,
.last_unroll_offset = -1,
.was_mpls = false,
+ .conntracked = false,
+
+ .ct_nat_action = NULL,
.action_set_has_group = false,
.action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
};
- memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
- ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
- struct xport *in_port;
- bool tnl_may_send;
-
- COVERAGE_INC(xlate_actions);
-
- /* Flow initialization rules:
- * - 'base_flow' must match the kernel's view of the packet at the
- * time that action processing starts. 'flow' represents any
- * transformations we wish to make through actions.
- * - By default 'base_flow' and 'flow' are the same since the input
- * packet matches the output before any actions are applied.
- * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
- * of the received packet as seen by the kernel. If we later output
- * to another device without any modifications this will cause us to
- * insert a new tag since the original one was stripped off by the
- * VLAN device.
- * - Tunnel metadata as received is retained in 'flow'. This allows
- * tunnel metadata matching also in later tables.
- * Since a kernel action for setting the tunnel metadata will only be
- * generated with actual tunnel output, changing the tunnel metadata
- * values in 'flow' (such as tun_id) will only have effect with a later
- * tunnel output action.
- * - Tunnel 'base_flow' is completely cleared since that is what the
- * kernel does. If we wish to maintain the original values an action
- * needs to be generated. */
+ /* 'base_flow' reflects the packet as it came in, but we need it to reflect
+ * the packet as the datapath will treat it for output actions:
+ *
+ * - Our datapath doesn't retain tunneling information without us
+ * re-setting it, so clear the tunnel data.
+ *
+ * - For VLAN splinters, a higher layer may pretend that the packet
+ * came in on 'flow->in_port.ofp_port' with 'flow->vlan_tci'
+ * attached, because that's how we want to treat it from an OpenFlow
+ * perspective. But from the datapath's perspective it actually came
+ * in on a VLAN device without any VLAN attached. So here we put the
+ * datapath's view of the VLAN information in 'base_flow' to ensure
+ * correct treatment.
+ */
+ memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ if (flow->in_port.ofp_port
+ != vsp_realdev_to_vlandev(xbridge->ofproto,
+ flow->in_port.ofp_port,
+ flow->vlan_tci)) {
+ ctx.base_flow.vlan_tci = 0;
+ }
+ ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
if (xin->wc) {
- flow_wildcards_init_catchall(ctx.wc);
- memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port);
- memset(&ctx.wc->masks.dl_type, 0xff, sizeof ctx.wc->masks.dl_type);
- if (is_ip_any(flow)) {
- ctx.wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
- }
- if (xbridge->support.odp.recirc) {
- /* Always exactly match recirc_id when datapath supports
- * recirculation. */
- ctx.wc->masks.recirc_id = UINT32_MAX;
- }
- if (xbridge->netflow) {
- netflow_mask_wc(flow, ctx.wc);
- }
- tnl_wc_init(flow, xin->wc);
+ xlate_wc_init(&ctx);
}
- tnl_may_send = tnl_process_ecn(flow);
-
- /* The in_port of the original packet before recirculation. */
- in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
+ COVERAGE_INC(xlate_actions);
if (xin->recirc) {
- const struct recirc_id_node *recirc = xin->recirc;
+ const struct recirc_state *state = &xin->recirc->state;
+
+ xlate_report(&ctx, "Restoring state post-recirculation:");
if (xin->ofpacts_len > 0 || ctx.rule) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ const char *conflict = xin->ofpacts_len ? "actions" : "rule";
- VLOG_WARN_RL(&rl, "Recirculation conflict (%s)!",
- xin->ofpacts_len > 0
- ? "actions"
- : "rule");
+ VLOG_WARN_RL(&rl, "Recirculation conflict (%s)!", conflict);
+ xlate_report(&ctx, "- Recirculation conflict (%s)!", conflict);
+ ctx.error = XLATE_RECIRCULATION_CONFLICT;
goto exit;
}
/* Set the bridge for post-recirculation processing if needed. */
- if (ctx.xbridge->ofproto != recirc->ofproto) {
+ if (ctx.xbridge->ofproto != state->ofproto) {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
- const struct xbridge *new_bridge = xbridge_lookup(xcfg,
- recirc->ofproto);
+ const struct xbridge *new_bridge
+ = xbridge_lookup(xcfg, state->ofproto);
if (OVS_UNLIKELY(!new_bridge)) {
/* Drop the packet if the bridge cannot be found. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
+ xlate_report(&ctx, "- Recirculation bridge no longer exists.");
+ ctx.error = XLATE_BRIDGE_NOT_FOUND;
goto exit;
}
ctx.xbridge = new_bridge;
/* Set the post-recirculation table id. Note: A table lookup is done
* only if there are no post-recirculation actions. */
- ctx.table_id = recirc->table_id;
+ ctx.table_id = state->table_id;
+ xlate_report(&ctx, "- Resuming from table %"PRIu8, ctx.table_id);
+
+ if (!state->conntracked) {
+ clear_conntrack(flow);
+ }
/* Restore pipeline metadata. May change flow's in_port and other
* metadata to the values that existed when recirculation was
* triggered. */
- recirc_metadata_to_flow(&recirc->metadata, flow);
+ recirc_metadata_to_flow(&state->metadata, flow);
/* Restore stack, if any. */
- if (recirc->stack) {
- ofpbuf_put(&ctx.stack, recirc->stack->data, recirc->stack->size);
+ if (state->stack) {
+ ofpbuf_put(&ctx.stack, state->stack->data, state->stack->size);
}
+ /* Restore mirror state. */
+ ctx.mirrors = state->mirrors;
+
/* Restore action set, if any. */
- if (recirc->action_set_len) {
+ if (state->action_set_len) {
const struct ofpact *a;
- ofpbuf_put(&ctx.action_set, recirc->ofpacts,
- recirc->action_set_len);
+ xlate_report_actions(&ctx, "- Restoring action set",
+ state->ofpacts, state->action_set_len);
- OFPACT_FOR_EACH(a, recirc->ofpacts, recirc->action_set_len) {
+ ofpbuf_put(&ctx.action_set, state->ofpacts, state->action_set_len);
+
+ OFPACT_FOR_EACH(a, state->ofpacts, state->action_set_len) {
if (a->type == OFPACT_GROUP) {
ctx.action_set_has_group = true;
break;
/* Restore recirculation actions. If there are no actions, processing
* will start with a lookup in the table set above. */
- if (recirc->ofpacts_len > recirc->action_set_len) {
- xin->ofpacts_len = recirc->ofpacts_len - recirc->action_set_len;
- xin->ofpacts = recirc->ofpacts +
- recirc->action_set_len / sizeof *recirc->ofpacts;
+ if (state->ofpacts_len > state->action_set_len) {
+ xin->ofpacts_len = state->ofpacts_len - state->action_set_len;
+ xin->ofpacts = state->ofpacts +
+ state->action_set_len / sizeof *state->ofpacts;
+
+ xlate_report_actions(&ctx, "- Restoring actions",
+ xin->ofpacts, xin->ofpacts_len);
}
} else if (OVS_UNLIKELY(flow->recirc_id)) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
flow->recirc_id);
+ ctx.error = XLATE_NO_RECIRCULATION_CONTEXT;
goto exit;
}
/* The bridge is now known so obtain its table version. */
if (!xin->ofpacts && !ctx.rule) {
ctx.rule = rule_dpif_lookup_from_table(
ctx.xbridge->ofproto, ctx.tables_version, flow, xin->wc,
- ctx.xin->xcache != NULL, ctx.xin->resubmit_stats, &ctx.table_id,
+ ctx.xin->resubmit_stats, &ctx.table_id,
flow->in_port.ofp_port, true, true);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
entry->u.rule = ctx.rule;
+ rule_dpif_ref(ctx.rule);
}
if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
- struct flow orig_flow;
- if (mbridge_has_mirrors(xbridge->mbridge)) {
- /* Do this conditionally because the copy is expensive enough that it
- * shows up in profiles. */
- orig_flow = *flow;
- }
+ /* Get the proximate input port of the packet. (If xin->recirc,
+ * flow->in_port is the ultimate input port of the packet.) */
+ struct xport *in_port = get_ofp_port(xbridge,
+ ctx.base_flow.in_port.ofp_port);
/* Tunnel stats only for non-recirculated packets. */
if (!xin->recirc && in_port && in_port->is_tunnel) {
}
}
- /* Do not perform special processing on recirculated packets,
- * as recirculated packets are not really received by the bridge. */
- if (xin->recirc || !process_special(&ctx, in_port)) {
- if (flow->in_port.ofp_port
- != vsp_realdev_to_vlandev(xbridge->ofproto,
- flow->in_port.ofp_port,
- flow->vlan_tci)) {
- ctx.base_flow.vlan_tci = 0;
+ if (!xin->recirc && process_special(&ctx, in_port)) {
+ /* process_special() did all the processing for this packet.
+ *
+ * We do not perform special processing on recirculated packets, as
+     * recirculated packets are not really received by the bridge. */
+ } else if (in_port && in_port->xbundle
+ && xbundle_mirror_out(xbridge, in_port->xbundle)) {
+ if (ctx.xin->packet != NULL) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
+ "%s, which is reserved exclusively for mirroring",
+ ctx.xbridge->name, in_port->xbundle->name);
}
-
+ } else {
/* Sampling is done only for packets really received by the bridge. */
+ unsigned int user_cookie_offset = 0;
if (!xin->recirc) {
- add_sflow_action(&ctx);
- add_ipfix_action(&ctx);
+ user_cookie_offset = compose_sflow_action(&ctx);
+ compose_ipfix_action(&ctx, ODPP_NONE);
}
size_t sample_actions_len = ctx.odp_actions->size;
- if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
+ if (tnl_process_ecn(flow)
+ && (!in_port || may_receive(in_port, &ctx))) {
const struct ofpact *ofpacts;
size_t ofpacts_len;
OVS_NOT_REACHED();
}
+ mirror_ingress_packet(&ctx);
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
+ if (ctx.error) {
+ goto exit;
+ }
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
compose_output_action(&ctx, OFPP_LOCAL, NULL);
}
- if (!xin->recirc) {
- fix_sflow_action(&ctx);
- }
- /* Only mirror fully processed packets. */
- if (!exit_recirculates(&ctx)
- && mbridge_has_mirrors(xbridge->mbridge)) {
- add_mirror_actions(&ctx, &orig_flow);
+ if (user_cookie_offset) {
+ fix_sflow_action(&ctx, user_cookie_offset);
}
}
ctx.xout->slow |= SLOW_ACTION;
}
- /* Update mirror stats only for packets really received by the bridge. */
- if (!xin->recirc && mbridge_has_mirrors(xbridge->mbridge)) {
- if (ctx.xin->resubmit_stats) {
- mirror_update_stats(xbridge->mbridge, xout->mirrors,
- ctx.xin->resubmit_stats->n_packets,
- ctx.xin->resubmit_stats->n_bytes);
- }
- if (ctx.xin->xcache) {
- struct xc_entry *entry;
-
- entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
- entry->u.mirror.mbridge = mbridge_ref(xbridge->mbridge);
- entry->u.mirror.mirrors = xout->mirrors;
- }
- }
-
/* Do netflow only for packets really received by the bridge and not sent
* to the controller. We consider packets sent to the controller to be
* part of the control plane rather than the data plane. */
if (!xin->recirc && xbridge->netflow && !(xout->slow & SLOW_CONTROLLER)) {
if (ctx.xin->resubmit_stats) {
netflow_flow_update(xbridge->netflow, flow,
- xout->nf_output_iface,
+ ctx.nf_output_iface,
ctx.xin->resubmit_stats);
}
if (ctx.xin->xcache) {
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
entry->u.nf.netflow = netflow_ref(xbridge->netflow);
entry->u.nf.flow = xmemdup(flow, sizeof *flow);
- entry->u.nf.iface = xout->nf_output_iface;
+ entry->u.nf.iface = ctx.nf_output_iface;
}
}
if (xin->wc) {
- /* Clear the metadata and register wildcard masks, because we won't
- * use non-header fields as part of the cache. */
- flow_wildcards_clear_non_packet_fields(ctx.wc);
-
- /* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
- * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
- * represent these fields. The datapath interface, on the other hand,
- * represents them with just 8 bits each. This means that if the high
- * 8 bits of the masks for these fields somehow become set, then they
- * will get chopped off by a round trip through the datapath, and
- * revalidation will spot that as an inconsistency and delete the flow.
- * Avoid the problem here by making sure that only the low 8 bits of
- * either field can be unwildcarded for ICMP.
- */
- if (is_icmpv4(flow) || is_icmpv6(flow)) {
- ctx.wc->masks.tp_src &= htons(UINT8_MAX);
- ctx.wc->masks.tp_dst &= htons(UINT8_MAX);
- }
- /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
- if (ctx.wc->masks.vlan_tci) {
- ctx.wc->masks.vlan_tci |= htons(VLAN_CFI);
- }
+ xlate_wc_finish(&ctx);
}
exit:
ofpbuf_uninit(&ctx.stack);
ofpbuf_uninit(&ctx.action_set);
ofpbuf_uninit(&scratch_actions);
+
+ /* Make sure we return a "drop flow" in case of an error. */
+ if (ctx.error) {
+ xout->slow = 0;
+ if (xin->odp_actions) {
+ ofpbuf_clear(xin->odp_actions);
+ }
+ }
+ return ctx.error;
}
/* Sends 'packet' out 'ofport'.
{
struct xc_entry *entry;
struct ofpbuf entries = xcache->entries;
- uint8_t dmac[ETH_ADDR_LEN];
+ struct eth_addr dmac;
if (!stats->n_packets) {
return;
group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
stats);
break;
- case XC_TNL_ARP:
- /* Lookup arp to avoid arp timeout. */
- tnl_arp_lookup(entry->u.tnl_arp_cache.br_name, entry->u.tnl_arp_cache.d_ip, dmac);
+ case XC_TNL_NEIGH:
+ /* Lookup neighbor to avoid timeout. */
+ tnl_neigh_lookup(entry->u.tnl_neigh_cache.br_name,
+ &entry->u.tnl_neigh_cache.d_ipv6, &dmac);
break;
default:
OVS_NOT_REACHED();
case XC_GROUP:
group_dpif_unref(entry->u.group.group);
break;
- case XC_TNL_ARP:
+ case XC_TNL_NEIGH:
break;
default:
OVS_NOT_REACHED();