bool has_in_band; /* Bridge has in-band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
- /* True if the datapath supports recirculation. */
- bool enable_recirc;
-
- /* True if the datapath supports variable-length
- * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
- * False if the datapath supports only 8-byte (or shorter) userdata. */
- bool variable_length_userdata;
-
- /* Number of MPLS label stack entries that the datapath supports
- * in matches. */
- size_t max_mpls_depth;
-
- /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
- * actions. */
- bool masked_set_action;
+ /* Datapath feature support. */
+ struct dpif_backer_support support;
};
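
/* The change above replaces four separate feature arguments with a single
 * 'struct dpif_backer_support'. A minimal self-contained sketch of the
 * pattern, with hypothetical names ('struct backer_support', bridge_set()),
 * not the OVS API: */
#include <stdbool.h>
#include <stddef.h>

struct backer_support {
    bool recirc;
    bool variable_length_userdata;
    size_t max_mpls_depth;
    bool masked_set_action;
};

struct bridge {
    struct backer_support support;  /* Copied, not aliased, so the caller's
                                     * struct may change independently. */
};

static void
bridge_set(struct bridge *br, const struct backer_support *support)
{
    br->support = *support;         /* One assignment replaces four. */
}

int
main(void)
{
    struct backer_support support = { true, true, 3, true };
    struct bridge br;

    bridge_set(&br, &support);
    return br.support.max_mpls_depth == 3 ? 0 : 1;
}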
struct xbundle {
const struct xbridge *xbridge;
+ /* Flow tables version at the beginning of the translation. */
+ cls_version_t tables_version;
+
/* Flow at the last commit. */
struct flow base_flow;
/* Stack for the push and pop actions. Each stack element is of type
* "union mf_subvalue". */
- union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf stack;
/* The rule that we are currently translating, or NULL. */
struct rule_dpif *rule;
+ /* Flow translation populates this with wildcards relevant in translation.
+ * When 'xin->wc' is nonnull, this is the same pointer. When 'xin->wc' is
+ * null, this is a pointer to uninitialized scratch memory. This allows
+ * code to blindly write to 'ctx->wc' without worrying about whether the
+ * caller really wants wildcards. */
+ struct flow_wildcards *wc;
+
+ /* Output buffer for datapath actions. When 'xin->odp_actions' is nonnull,
+ * this is the same pointer. When 'xin->odp_actions' is null, this points
+ * to a scratch ofpbuf. This allows code to add actions to
+ * 'ctx->odp_actions' without worrying about whether the caller really
+ * wants actions. */
+ struct ofpbuf *odp_actions;
+
/* Resubmit statistics, via xlate_table_action(). */
int recurse; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
* datapath actions. */
bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
struct ofpbuf action_set; /* Action set. */
- uint64_t action_set_stub[1024 / 8];
};
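
/* The 'wc' and 'odp_actions' members above always point somewhere writable:
 * either the caller's buffers or local scratch. A minimal self-contained
 * sketch of that pattern, with hypothetical names ('struct masks',
 * translate()), not the OVS API: */
#include <stddef.h>
#include <string.h>

struct masks {
    unsigned char bits[16];
};

struct tctx {
    struct masks *wc;               /* Never null during translation. */
};

static void
translate(struct masks *caller_wc)
{
    struct masks scratch;           /* Deliberately uninitialized: it is
                                     * never read when the caller passed
                                     * NULL. */
    struct tctx ctx = {
        .wc = caller_wc ? caller_wc : &scratch,
    };

    if (caller_wc) {
        memset(ctx.wc, 0, sizeof *ctx.wc);   /* Init only real output. */
    }

    ctx.wc->bits[0] = 0xff;         /* Inner code writes blindly, with no
                                     * null checks. */
}

int
main(void)
{
    struct masks m;

    translate(&m);                  /* Caller wants wildcards. */
    translate(NULL);                /* Caller does not; writes are lost. */
    return 0;
}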
static void xlate_action_set(struct xlate_ctx *ctx);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
-static inline void xlate_report(struct xlate_ctx *, const char *);
+static inline void xlate_report(struct xlate_ctx *, const char *, ...)
+ OVS_PRINTF_FORMAT(2, 3);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss);
const struct dpif_ipfix *,
const struct netflow *,
bool forward_bpdu, bool has_in_band,
- bool enable_recirc,
- bool variable_length_userdata,
- size_t max_mpls_depth,
- bool masked_set_action);
+ const struct dpif_backer_support *);
static void xlate_xbundle_set(struct xbundle *xbundle,
enum port_vlan_mode vlan_mode, int vlan,
unsigned long *trunks, bool use_priority_tags,
static void xlate_xcfg_free(struct xlate_cfg *);
static inline void
-xlate_report(struct xlate_ctx *ctx, const char *s)
+xlate_report(struct xlate_ctx *ctx, const char *format, ...)
{
if (OVS_UNLIKELY(ctx->xin->report_hook)) {
- ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
+ va_list args;
+
+ va_start(args, format);
+ ctx->xin->report_hook(ctx->xin, ctx->recurse, format, args);
+ va_end(args);
}
}
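
/* A self-contained sketch of the variadic forwarding used by xlate_report()
 * above; the names (report(), report_hook) are hypothetical. The format
 * attribute gives compile-time checking of the format string, and the hook
 * receives a va_list so the wrapper never has to format anything itself: */
#include <stdarg.h>
#include <stdio.h>

typedef void report_hook_fn(int depth, const char *format, va_list args);

static report_hook_fn *report_hook;     /* Null when tracing is off. */

static void report(int depth, const char *format, ...)
    __attribute__((format(printf, 2, 3)));

static void
report(int depth, const char *format, ...)
{
    if (report_hook) {
        va_list args;

        va_start(args, format);
        report_hook(depth, format, args);
        va_end(args);
    }
}

static void
print_hook(int depth, const char *format, va_list args)
{
    printf("%*s", depth * 4, "");       /* Indent by nesting depth. */
    vprintf(format, args);
    putchar('\n');
}

int
main(void)
{
    report(0, "dropped %d packets", 2);   /* No-op: no hook installed. */
    report_hook = print_hook;
    report(1, "resubmit to table %d", 3); /* Printed via the hook. */
    return 0;
}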
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
bool forward_bpdu, bool has_in_band,
- bool enable_recirc,
- bool variable_length_userdata,
- size_t max_mpls_depth,
- bool masked_set_action)
+ const struct dpif_backer_support *support)
{
if (xbridge->ml != ml) {
mac_learning_unref(xbridge->ml);
xbridge->dpif = dpif;
xbridge->forward_bpdu = forward_bpdu;
xbridge->has_in_band = has_in_band;
- xbridge->enable_recirc = enable_recirc;
- xbridge->variable_length_userdata = variable_length_userdata;
- xbridge->max_mpls_depth = max_mpls_depth;
- xbridge->masked_set_action = masked_set_action;
+ xbridge->support = *support;
}
static void
xbridge->dpif, xbridge->ml, xbridge->stp,
xbridge->rstp, xbridge->ms, xbridge->mbridge,
xbridge->sflow, xbridge->ipfix, xbridge->netflow,
- xbridge->forward_bpdu,
- xbridge->has_in_band, xbridge->enable_recirc,
- xbridge->variable_length_userdata,
- xbridge->max_mpls_depth, xbridge->masked_set_action);
+ xbridge->forward_bpdu, xbridge->has_in_band,
+ &xbridge->support);
LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
xlate_xbundle_copy(new_xbridge, xbundle);
}
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow,
- bool forward_bpdu, bool has_in_band, bool enable_recirc,
- bool variable_length_userdata, size_t max_mpls_depth,
- bool masked_set_action)
+ bool forward_bpdu, bool has_in_band,
+ const struct dpif_backer_support *support)
{
struct xbridge *xbridge;
xbridge->name = xstrdup(name);
xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
- netflow, forward_bpdu, has_in_band, enable_recirc,
- variable_length_userdata, max_mpls_depth,
- masked_set_action);
+ netflow, forward_bpdu, has_in_band, support);
}
static void
"%s, which is reserved exclusively for mirroring",
ctx->xbridge->name, in_xbundle->name);
}
- ofpbuf_clear(ctx->xout->odp_actions);
+ ofpbuf_clear(ctx->odp_actions);
return;
}
while (mirrors) {
mirror_mask_t dup_mirrors;
struct ofbundle *out;
- unsigned long *vlans;
+ const unsigned long *vlans;
bool vlan_mirrored;
bool has_mirror;
int out_vlan;
ovs_assert(has_mirror);
if (vlans) {
- ctx->xout->wc.masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
+ ctx->wc->masks.vlan_tci |= htons(VLAN_CFI | VLAN_VID_MASK);
}
vlan_mirrored = !vlans || bitmap_is_set(vlans, vlan);
- free(vlans);
if (!vlan_mirrored) {
mirrors = zero_rightmost_1bit(mirrors);
bundle_node);
} else {
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
- struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow_wildcards *wc = ctx->wc;
struct ofport_dpif *ofport;
- if (ctx->xbridge->enable_recirc) {
+ if (ctx->xbridge->support.odp.recirc) {
use_recirc = bond_may_recirc(
out_xbundle->bond, &xr.recirc_id, &xr.hash_basis);
mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
if (mac
&& mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
- && (!is_gratuitous_arp(flow, &ctx->xout->wc)
+ && (!is_gratuitous_arp(flow, ctx->wc)
|| mac_entry_is_grat_arp_locked(mac))) {
ovs_rwlock_unlock(&xbridge->ml->rwlock);
xlate_report(ctx, "SLB bond thinks this packet looped back, "
/* Updates multicast snooping table 'ms' given that a packet matching 'flow'
* was received on 'in_xbundle' in 'vlan' and is either Report or Query. */
static void
-update_mcast_snooping_table__(const struct xbridge *xbridge,
- const struct flow *flow,
- struct mcast_snooping *ms,
- ovs_be32 ip4, int vlan,
- struct xbundle *in_xbundle)
+update_mcast_snooping_table4__(const struct xbridge *xbridge,
+ const struct flow *flow,
+ struct mcast_snooping *ms, int vlan,
+ struct xbundle *in_xbundle,
+ const struct dp_packet *packet)
OVS_REQ_WRLOCK(ms->rwlock)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
+ int count;
+ ovs_be32 ip4 = flow->igmp_group_ip4;
switch (ntohs(flow->tp_src)) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
- if (mcast_snooping_add_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
+ if (mcast_snooping_add_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
VLOG_DBG_RL(&rl, "bridge %s: multicast snooping learned that "
IP_FMT" is on port %s in VLAN %d",
xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
}
break;
case IGMP_HOST_LEAVE_MESSAGE:
- if (mcast_snooping_leave_group(ms, ip4, vlan, in_xbundle->ofbundle)) {
+ if (mcast_snooping_leave_group4(ms, ip4, vlan, in_xbundle->ofbundle)) {
VLOG_DBG_RL(&rl, "bridge %s: multicast snooping leaving "
IP_FMT" is on port %s in VLAN %d",
xbridge->name, IP_ARGS(ip4), in_xbundle->name, vlan);
}
break;
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ if ((count = mcast_snooping_add_report(ms, packet, vlan,
+ in_xbundle->ofbundle))) {
+ VLOG_DBG_RL(&rl, "bridge %s: multicast snooping processed %d "
+ "addresses on port %s in VLAN %d",
+ xbridge->name, count, in_xbundle->name, vlan);
+ }
+ break;
+ }
+}
+
+static void
+update_mcast_snooping_table6__(const struct xbridge *xbridge,
+ const struct flow *flow,
+ struct mcast_snooping *ms, int vlan,
+ struct xbundle *in_xbundle,
+ const struct dp_packet *packet)
+ OVS_REQ_WRLOCK(ms->rwlock)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 30);
+ int count;
+
+ switch (ntohs(flow->tp_src)) {
+ case MLD_QUERY:
+ if (!ipv6_addr_equals(&flow->ipv6_src, &in6addr_any)
+ && mcast_snooping_add_mrouter(ms, vlan, in_xbundle->ofbundle)) {
+ VLOG_DBG_RL(&rl, "bridge %s: multicast snooping query on port %s "
+ "in VLAN %d",
+ xbridge->name, in_xbundle->name, vlan);
+ }
+ break;
+ case MLD_REPORT:
+ case MLD_DONE:
+ case MLD2_REPORT:
+ count = mcast_snooping_add_mld(ms, packet, vlan, in_xbundle->ofbundle);
+ if (count) {
+ VLOG_DBG_RL(&rl, "bridge %s: multicast snooping processed %d "
+ "addresses on port %s in VLAN %d",
+ xbridge->name, count, in_xbundle->name, vlan);
+ }
+ break;
}
}
static void
update_mcast_snooping_table(const struct xbridge *xbridge,
const struct flow *flow, int vlan,
- struct xbundle *in_xbundle)
+ struct xbundle *in_xbundle,
+ const struct dp_packet *packet)
{
struct mcast_snooping *ms = xbridge->ms;
struct xlate_cfg *xcfg;
}
if (!mcast_xbundle || mcast_xbundle != in_xbundle) {
- update_mcast_snooping_table__(xbridge, flow, ms, flow->igmp_group_ip4,
- vlan, in_xbundle);
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ update_mcast_snooping_table4__(xbridge, flow, ms, vlan,
+ in_xbundle, packet);
+ } else {
+ update_mcast_snooping_table6__(xbridge, flow, ms, vlan,
+ in_xbundle, packet);
+ }
}
ovs_rwlock_unlock(&ms->rwlock);
}
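
/* A self-contained sketch (hypothetical names) of the locking shape above:
 * update_mcast_snooping_table() takes the write lock once, then dispatches
 * on the Ethernet type to a per-family helper that assumes the lock is
 * already held: */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct snoop_table {
    pthread_rwlock_t rwlock;
    unsigned int n_v4, n_v6;
};

static void
update_v4(struct snoop_table *t)    /* Caller holds t->rwlock for writing. */
{
    t->n_v4++;
}

static void
update_v6(struct snoop_table *t)    /* Caller holds t->rwlock for writing. */
{
    t->n_v6++;
}

static void
update(struct snoop_table *t, uint16_t dl_type)
{
    pthread_rwlock_wrlock(&t->rwlock);
    if (dl_type == 0x0800) {        /* ETH_TYPE_IP */
        update_v4(t);
    } else {                        /* ETH_TYPE_IPV6 */
        update_v6(t);
    }
    pthread_rwlock_unlock(&t->rwlock);
}

int
main(void)
{
    struct snoop_table t = { PTHREAD_RWLOCK_INITIALIZER, 0, 0 };

    update(&t, 0x0800);
    update(&t, 0x86dd);
    printf("v4=%u v6=%u\n", t.n_v4, t.n_v6);
    return 0;
}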
static void
xlate_normal(struct xlate_ctx *ctx)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow_wildcards *wc = ctx->wc;
struct flow *flow = &ctx->xin->flow;
struct xbundle *in_xbundle;
struct xport *in_port;
if (mcast_snooping_enabled(ctx->xbridge->ms)
&& !eth_addr_is_broadcast(flow->dl_dst)
&& eth_addr_is_multicast(flow->dl_dst)
- && flow->dl_type == htons(ETH_TYPE_IP)) {
+ && is_ip_any(flow)) {
struct mcast_snooping *ms = ctx->xbridge->ms;
- struct mcast_group *grp;
+ struct mcast_group *grp = NULL;
- if (flow->nw_proto == IPPROTO_IGMP) {
- if (ctx->xin->may_learn) {
- if (mcast_snooping_is_membership(flow->tp_src) ||
- mcast_snooping_is_query(flow->tp_src)) {
+ if (is_igmp(flow)) {
+ if (mcast_snooping_is_membership(flow->tp_src) ||
+ mcast_snooping_is_query(flow->tp_src)) {
+ if (ctx->xin->may_learn) {
update_mcast_snooping_table(ctx->xbridge, flow, vlan,
- in_xbundle);
- }
+ in_xbundle, ctx->xin->packet);
+ }
+ /* IGMP packets need to take the slow path so that they are
+ * processed for mdb updates.  Otherwise, snooping entries could
+ * expire even though hosts keep sending reports. */
+ ctx->xout->slow |= SLOW_ACTION;
}
if (mcast_snooping_is_membership(flow->tp_src)) {
xlate_normal_flood(ctx, in_xbundle, vlan);
}
return;
+ } else if (is_mld(flow)) {
+ ctx->xout->slow |= SLOW_ACTION;
+ if (ctx->xin->may_learn) {
+ update_mcast_snooping_table(ctx->xbridge, flow, vlan,
+ in_xbundle, ctx->xin->packet);
+ }
+ if (is_mld_report(flow)) {
+ ovs_rwlock_rdlock(&ms->rwlock);
+ xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
+ xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
+ ovs_rwlock_unlock(&ms->rwlock);
+ } else {
+ xlate_report(ctx, "MLD query, flooding");
+ xlate_normal_flood(ctx, in_xbundle, vlan);
+ }
} else {
- if (ip_is_local_multicast(flow->nw_dst)) {
+ if ((flow->dl_type == htons(ETH_TYPE_IP)
+ && ip_is_local_multicast(flow->nw_dst))
+ || (flow->dl_type == htons(ETH_TYPE_IPV6)
+ && ipv6_is_all_hosts(&flow->ipv6_dst))) {
/* RFC4541: section 2.1.2, item 2: Packets with a dst IP
 * address in the 224.0.0.x range which are not IGMP must
 * be forwarded on all ports; the IPv6 all-hosts group
 * (ff02::1) is handled the same way. */
/* forwarding to group base ports */
ovs_rwlock_rdlock(&ms->rwlock);
- grp = mcast_snooping_lookup(ms, flow->nw_dst, vlan);
+ if (flow->dl_type == htons(ETH_TYPE_IP)) {
+ grp = mcast_snooping_lookup4(ms, flow->nw_dst, vlan);
+ } else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
+ grp = mcast_snooping_lookup(ms, &flow->ipv6_dst, vlan);
+ }
if (grp) {
xlate_normal_mcast_send_group(ctx, ms, grp, in_xbundle, vlan);
xlate_normal_mcast_send_fports(ctx, ms, in_xbundle, vlan);
const uint32_t probability,
const union user_action_cookie *cookie,
const size_t cookie_size,
- const odp_port_t tunnel_out_port)
+ const odp_port_t tunnel_out_port,
+ bool include_actions)
{
size_t sample_offset, actions_offset;
odp_port_t odp_port;
pid = dpif_port_get_pid(xbridge->dpif, odp_port,
flow_hash_5tuple(flow, 0));
cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
- tunnel_out_port, odp_actions);
+ tunnel_out_port,
+ include_actions,
+ odp_actions);
nl_msg_end_nested(odp_actions, actions_offset);
nl_msg_end_nested(odp_actions, sample_offset);
odp_port == ODPP_NONE ? 0 : 1, &cookie);
return compose_sample_action(xbridge, odp_actions, flow, probability,
- &cookie, sizeof cookie.sflow, ODPP_NONE);
+ &cookie, sizeof cookie.sflow, ODPP_NONE,
+ true);
}
static void
compose_ipfix_cookie(&cookie, output_odp_port);
compose_sample_action(xbridge, odp_actions, flow, probability,
- &cookie, sizeof cookie.ipfix, tunnel_out_port);
+ &cookie, sizeof cookie.ipfix, tunnel_out_port,
+ false);
}
/* SAMPLE action for sFlow must be first action in any given list of
add_sflow_action(struct xlate_ctx *ctx)
{
ctx->user_cookie_offset = compose_sflow_action(ctx->xbridge,
- ctx->xout->odp_actions,
+ ctx->odp_actions,
&ctx->xin->flow, ODPP_NONE);
ctx->sflow_odp_port = 0;
ctx->sflow_n_outputs = 0;
static void
add_ipfix_action(struct xlate_ctx *ctx)
{
- compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, ODPP_NONE);
}
static void
add_ipfix_output_action(struct xlate_ctx *ctx, odp_port_t port)
{
- compose_ipfix_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_ipfix_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, port);
}
return;
}
- cookie = ofpbuf_at(ctx->xout->odp_actions, ctx->user_cookie_offset,
+ cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
sizeof cookie->sflow);
ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
-static enum slow_path_reason
-process_special(struct xlate_ctx *ctx, const struct flow *flow,
- const struct xport *xport, const struct dp_packet *packet)
+static bool
+process_special(struct xlate_ctx *ctx, const struct xport *xport)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
+ const struct flow *flow = &ctx->xin->flow;
+ struct flow_wildcards *wc = ctx->wc;
const struct xbridge *xbridge = ctx->xbridge;
+ const struct dp_packet *packet = ctx->xin->packet;
+ enum slow_path_reason slow;
if (!xport) {
- return 0;
+ slow = 0;
} else if (xport->cfm && cfm_should_process_flow(xport->cfm, flow, wc)) {
if (packet) {
cfm_process_heartbeat(xport->cfm, packet);
}
- return SLOW_CFM;
+ slow = SLOW_CFM;
} else if (xport->bfd && bfd_should_process_flow(xport->bfd, flow, wc)) {
if (packet) {
bfd_process_packet(xport->bfd, flow, packet);
ofproto_dpif_monitor_port_send_soon(xport->ofport);
}
}
- return SLOW_BFD;
+ slow = SLOW_BFD;
} else if (xport->xbundle && xport->xbundle->lacp
&& flow->dl_type == htons(ETH_TYPE_LACP)) {
if (packet) {
lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
}
- return SLOW_LACP;
+ slow = SLOW_LACP;
} else if ((xbridge->stp || xbridge->rstp) &&
stp_should_process_flow(flow, wc)) {
if (packet) {
? stp_process_packet(xport, packet)
: rstp_process_packet(xport, packet);
}
- return SLOW_STP;
- } else if (xport->lldp && lldp_should_process_flow(flow)) {
+ slow = SLOW_STP;
+ } else if (xport->lldp && lldp_should_process_flow(xport->lldp, flow)) {
if (packet) {
lldp_process_packet(xport->lldp, packet);
}
- return SLOW_LLDP;
+ slow = SLOW_LLDP;
} else {
- return 0;
+ slow = 0;
+ }
+
+ if (slow) {
+ ctx->xout->slow |= slow;
+ return true;
+ } else {
+ return false;
}
}
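
/* The rewrite of process_special() above changes its contract: instead of
 * returning a slow-path reason for every caller to merge into 'xout->slow',
 * it records the reason itself and returns whether the packet was special.
 * A minimal sketch with hypothetical names: */
#include <stdbool.h>
#include <stdio.h>

enum slow_reason {
    SLOW_NONE = 0,
    SLOW_CFM = 1 << 0,
    SLOW_LACP = 1 << 1,
};

struct result {
    enum slow_reason slow;          /* Accumulated bitmap of reasons. */
};

static bool
special(struct result *xout, bool is_cfm, bool is_lacp)
{
    enum slow_reason slow = is_cfm ? SLOW_CFM
                          : is_lacp ? SLOW_LACP
                          : SLOW_NONE;

    if (slow) {
        xout->slow |= slow;         /* Recorded here, not by the caller. */
        return true;
    }
    return false;
}

int
main(void)
{
    struct result xout = { SLOW_NONE };

    if (!special(&xout, true, false)) {
        puts("translate normally");
    }
    printf("slow=%#x\n", xout.slow);
    return 0;
}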
struct dp_packet packet;
dp_packet_init(&packet, 0);
- compose_arp(&packet, eth_src, ip_src, ip_dst);
+ compose_arp(&packet, ARP_OP_REQUEST,
+ eth_src, eth_addr_zero, true, ip_src, ip_dst);
xlate_flood_packet(xbridge, &packet);
dp_packet_uninit(&packet);
}
static int
-build_tunnel_send(const struct xlate_ctx *ctx, const struct xport *xport,
+build_tunnel_send(struct xlate_ctx *ctx, const struct xport *xport,
const struct flow *flow, odp_port_t tunnel_odp_port)
{
struct ovs_action_push_tnl tnl_push_data;
err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
if (err) {
+ xlate_report(ctx, "native tunnel routing failed");
return err;
}
+ xlate_report(ctx, "tunneling to "IP_FMT" via %s",
+ IP_ARGS(d_ip), netdev_get_name(out_dev->netdev));
/* Use mac addr of bridge port of the peer. */
err = netdev_get_etheraddr(out_dev->netdev, smac);
if (err) {
+ xlate_report(ctx, "tunnel output device lacks Ethernet address");
return err;
}
err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
if (err) {
+ xlate_report(ctx, "tunnel output device lacks IPv4 address");
return err;
}
err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
if (err) {
+ xlate_report(ctx, "ARP cache miss for "IP_FMT" on bridge %s, "
+ "sending ARP request",
+ IP_ARGS(d_ip), out_dev->xbridge->name);
tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
return err;
}
sizeof entry->u.tnl_arp_cache.br_name);
entry->u.tnl_arp_cache.d_ip = d_ip;
}
+
+ xlate_report(ctx, "tunneling from "ETH_ADDR_FMT" "IP_FMT
+ " to "ETH_ADDR_FMT" "IP_FMT,
+ ETH_ADDR_ARGS(smac), IP_ARGS(s_ip),
+ ETH_ADDR_ARGS(dmac), IP_ARGS(d_ip));
err = tnl_port_build_header(xport->ofport, flow,
dmac, smac, s_ip, &tnl_push_data);
if (err) {
}
tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
- odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
+ odp_put_tnl_push_action(ctx->odp_actions, &tnl_push_data);
return 0;
}
const struct xlate_bond_recirc *xr, bool check_stp)
{
const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
- struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow_wildcards *wc = ctx->wc;
struct flow *flow = &ctx->xin->flow;
struct flow_tnl flow_tnl;
ovs_be16 flow_vlan_tci;
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 31);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 33);
memset(&flow_tnl, 0, sizeof flow_tnl);
if (!xport) {
const struct xport *peer = xport->peer;
struct flow old_flow = ctx->xin->flow;
bool old_was_mpls = ctx->was_mpls;
- enum slow_path_reason special;
+ cls_version_t old_version = ctx->tables_version;
struct ofpbuf old_stack = ctx->stack;
union mf_subvalue new_stack[1024 / sizeof(union mf_subvalue)];
struct ofpbuf old_action_set = ctx->action_set;
memset(flow->regs, 0, sizeof flow->regs);
flow->actset_output = OFPP_UNSET;
- special = process_special(ctx, &ctx->xin->flow, peer,
- ctx->xin->packet);
- if (special) {
- ctx->xout->slow |= special;
- } else if (may_receive(peer, ctx)) {
+ /* The bridge is now known so obtain its table version. */
+ ctx->tables_version
+ = ofproto_dpif_get_tables_version(ctx->xbridge->ofproto);
+
+ if (!process_special(ctx, peer) && may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
if (ctx->action_set.size) {
/* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
* the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
- size_t old_size = ctx->xout->odp_actions->size;
+ size_t old_size = ctx->odp_actions->size;
mirror_mask_t old_mirrors = ctx->xout->mirrors;
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
- ctx->xout->odp_actions->size = old_size;
+ ctx->odp_actions->size = old_size;
/* Undo changes that may have been done for recirculation. */
if (exit_recirculates(ctx)) {
ofpbuf_uninit(&ctx->stack);
ctx->stack = old_stack;
+ /* Restore calling bridge's lookup version. */
+ ctx->tables_version = old_version;
+
/* The peer bridge popping MPLS should have no effect on the original
* bridge. */
ctx->was_mpls = old_was_mpls;
* matches, while explicit set actions on tunnel metadata are.
*/
flow_tnl = flow->tunnel;
- odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
+ odp_port = tnl_port_send(xport->ofport, flow, ctx->wc);
if (odp_port == ODPP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
goto out; /* restore flow_nw_tos */
}
out_port = odp_port;
if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
+ xlate_report(ctx, "output to native tunnel");
tnl_push_pop_send = true;
} else {
- commit_odp_tunnel_action(flow, &ctx->base_flow,
- ctx->xout->odp_actions);
+ xlate_report(ctx, "output to kernel tunnel");
+ commit_odp_tunnel_action(flow, &ctx->base_flow, ctx->odp_actions);
flow->tunnel = flow_tnl; /* Restore tunnel metadata */
}
} else {
}
if (out_port != ODPP_NONE) {
+ bool use_masked = ctx->xbridge->support.masked_set_action;
+
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- wc,
- ctx->xbridge->masked_set_action);
+ ctx->odp_actions,
+ wc, use_masked);
if (xr) {
struct ovs_action_hash *act_hash;
/* Hash action. */
- act_hash = nl_msg_put_unspec_uninit(ctx->xout->odp_actions,
+ act_hash = nl_msg_put_unspec_uninit(ctx->odp_actions,
OVS_ACTION_ATTR_HASH,
sizeof *act_hash);
act_hash->hash_alg = xr->hash_alg;
act_hash->hash_basis = xr->hash_basis;
/* Recirc action. */
- nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC,
xr->recirc_id);
} else {
}
if (odp_tnl_port != ODPP_NONE) {
- nl_msg_put_odp_port(ctx->xout->odp_actions,
+ nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_TUNNEL_POP,
odp_tnl_port);
} else {
/* Tunnel push-pop action is not compatible with
* IPFIX action. */
add_ipfix_output_action(ctx, out_port);
- nl_msg_put_odp_port(ctx->xout->odp_actions,
+ nl_msg_put_odp_port(ctx->odp_actions,
OVS_ACTION_ATTR_OUTPUT,
out_port);
}
MAX_RESUBMIT_RECURSION);
} else if (ctx->resubmits >= MAX_RESUBMITS + MAX_INTERNAL_RESUBMITS) {
VLOG_ERR_RL(&rl, "over %d resubmit actions", MAX_RESUBMITS);
- } else if (ctx->xout->odp_actions->size > UINT16_MAX) {
+ } else if (ctx->odp_actions->size > UINT16_MAX) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of actions");
} else if (ctx->stack.size >= 65536) {
VLOG_ERR_RL(&rl, "resubmits yielded over 64 kB of stack");
return;
}
if (xlate_resubmit_resource_check(ctx)) {
- struct flow_wildcards *wc;
uint8_t old_table_id = ctx->table_id;
struct rule_dpif *rule;
ctx->table_id = table_id;
- wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
- &ctx->xin->flow, wc,
+ ctx->tables_version,
+ &ctx->xin->flow, ctx->xin->wc,
ctx->xin->xcache != NULL,
ctx->xin->resubmit_stats,
&ctx->table_id, in_port,
static void
xlate_default_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow_wildcards *wc = ctx->wc;
struct ofputil_bucket *bucket;
uint32_t basis;
xlate_hash_fields_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
struct mf_bitmap hash_fields = MF_BITMAP_INITIALIZER;
- struct flow_wildcards *wc = &ctx->xout->wc;
const struct field_array *fields;
struct ofputil_bucket *bucket;
uint32_t basis;
}
basis = hash_bytes(&value, mf->n_bytes, basis);
- mf_mask_field(mf, &wc->masks);
+ mf_mask_field(mf, &ctx->wc->masks);
}
}
static void
xlate_group_action__(struct xlate_ctx *ctx, struct group_dpif *group)
{
+ bool was_in_group = ctx->in_group;
ctx->in_group = true;
switch (group_dpif_get_type(group)) {
}
group_dpif_unref(group);
- ctx->in_group = false;
-}
-
-static bool
-xlate_group_resource_check(struct xlate_ctx *ctx)
-{
- if (!xlate_resubmit_resource_check(ctx)) {
- return false;
- } else if (ctx->in_group) {
- /* Prevent nested translation of OpenFlow groups.
- *
- * OpenFlow allows this restriction. We enforce this restriction only
- * because, with the current architecture, we would otherwise have to
- * take a possibly recursive read lock on the ofgroup rwlock, which is
- * unsafe given that POSIX allows taking a read lock to block if there
- * is a thread blocked on taking the write lock. Other solutions
- * without this restriction are also possible, but seem unwarranted
- * given the current limited use of groups. */
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
-
- VLOG_ERR_RL(&rl, "cannot recursively translate OpenFlow group");
- return false;
- } else {
- return true;
- }
+ ctx->in_group = was_in_group;
}
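
/* Saving and restoring 'in_group' above is what makes nested group
 * translation safe: each level puts back the value it found instead of
 * clearing the flag on exit. A self-contained sketch with hypothetical
 * names: */
#include <stdbool.h>
#include <stdio.h>

struct gctx {
    bool in_group;
    int depth;
};

static void
enter_group(struct gctx *ctx)
{
    bool was_in_group = ctx->in_group;

    ctx->in_group = true;
    if (ctx->depth < 2) {
        ctx->depth++;
        enter_group(ctx);           /* Nesting is now permitted. */
        ctx->depth--;
    }

    ctx->in_group = was_in_group;   /* Restore, do not clear: an outer
                                     * level may still be in a group. */
}

int
main(void)
{
    struct gctx ctx = { false, 0 };

    enter_group(&ctx);
    printf("after: in_group=%d\n", ctx.in_group);   /* Prints 0. */
    return 0;
}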
static bool
xlate_group_action(struct xlate_ctx *ctx, uint32_t group_id)
{
- if (xlate_group_resource_check(ctx)) {
+ if (xlate_resubmit_resource_check(ctx)) {
struct group_dpif *group;
bool got_group;
{
struct ofproto_packet_in *pin;
struct dp_packet *packet;
+ bool use_masked;
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
packet = dp_packet_clone(ctx->xin->packet);
+ use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ ctx->odp_actions,
+ ctx->wc, use_masked);
odp_execute_actions(NULL, &packet, 1, false,
- ctx->xout->odp_actions->data,
- ctx->xout->odp_actions->size, NULL);
+ ctx->odp_actions->data, ctx->odp_actions->size, NULL);
pin = xmalloc(sizeof *pin);
pin->up.packet_len = dp_packet_size(packet);
pin->up.table_id = ctx->table_id;
pin->up.cookie = ctx->rule_cookie;
- flow_get_metadata(&ctx->xin->flow, &pin->up.fmd);
+ flow_get_metadata(&ctx->xin->flow, &pin->up.flow_metadata);
pin->controller_id = controller_id;
pin->send_len = len;
compose_recirculate_action(struct xlate_ctx *ctx)
{
struct recirc_metadata md;
+ bool use_masked;
uint32_t id;
+ use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ ctx->odp_actions,
+ ctx->wc, use_masked);
recirc_metadata_from_flow(&md, &ctx->xin->flow);
* fail all revalidations as zero is not a valid recirculation ID. */
}
- nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
+ nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
/* Undo changes done by recirculation. */
ctx->action_set.size = ctx->recirc_action_offset;
static void
compose_mpls_push_action(struct xlate_ctx *ctx, struct ofpact_push_mpls *mpls)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
struct flow *flow = &ctx->xin->flow;
int n;
ovs_assert(eth_type_mpls(mpls->ethertype));
- n = flow_count_mpls_labels(flow, wc);
+ n = flow_count_mpls_labels(flow, ctx->wc);
if (!n) {
+ bool use_masked = ctx->xbridge->support.masked_set_action;
+
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ ctx->odp_actions,
+ ctx->wc, use_masked);
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
return;
}
- flow_push_mpls(flow, n, mpls->ethertype, wc);
+ flow_push_mpls(flow, n, mpls->ethertype, ctx->wc);
}
static void
compose_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
struct flow *flow = &ctx->xin->flow;
- int n = flow_count_mpls_labels(flow, wc);
+ int n = flow_count_mpls_labels(flow, ctx->wc);
- if (flow_pop_mpls(flow, n, eth_type, wc)) {
- if (ctx->xbridge->enable_recirc) {
+ if (flow_pop_mpls(flow, n, eth_type, ctx->wc)) {
+ if (ctx->xbridge->support.odp.recirc) {
ctx->was_mpls = true;
}
} else if (n >= FLOW_MAX_MPLS_LABELS) {
ctx->xbridge->name, FLOW_MAX_MPLS_LABELS);
}
ctx->exit = true;
- ofpbuf_clear(ctx->xout->odp_actions);
+ ofpbuf_clear(ctx->odp_actions);
}
}
return false;
}
- ctx->xout->wc.masks.nw_ttl = 0xff;
+ ctx->wc->masks.nw_ttl = 0xff;
if (flow->nw_ttl > 1) {
flow->nw_ttl--;
return false;
compose_set_mpls_label_action(struct xlate_ctx *ctx, ovs_be32 label)
{
if (eth_type_mpls(ctx->xin->flow.dl_type)) {
- ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
+ ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_LABEL_MASK);
set_mpls_lse_label(&ctx->xin->flow.mpls_lse[0], label);
}
}
compose_set_mpls_tc_action(struct xlate_ctx *ctx, uint8_t tc)
{
if (eth_type_mpls(ctx->xin->flow.dl_type)) {
- ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
+ ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TC_MASK);
set_mpls_lse_tc(&ctx->xin->flow.mpls_lse[0], tc);
}
}
compose_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
{
if (eth_type_mpls(ctx->xin->flow.dl_type)) {
- ctx->xout->wc.masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+ ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse[0], ttl);
}
}
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
struct flow *flow = &ctx->xin->flow;
- struct flow_wildcards *wc = &ctx->xout->wc;
if (eth_type_mpls(flow->dl_type)) {
uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
- wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
+ ctx->wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
if (ttl > 1) {
ttl--;
set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
union mf_subvalue value;
memset(&value, 0xff, sizeof value);
- mf_write_subfield_flow(&or->src, &value, &ctx->xout->wc.masks);
+ mf_write_subfield_flow(&or->src, &value, &ctx->wc->masks);
xlate_output_action(ctx, u16_to_ofp(port),
or->max_len, false);
}
{
ofp_port_t port;
- port = bundle_execute(bundle, &ctx->xin->flow, &ctx->xout->wc,
- slave_enabled_cb,
+ port = bundle_execute(bundle, &ctx->xin->flow, ctx->wc, slave_enabled_cb,
CONST_CAST(struct xbridge *, ctx->xbridge));
if (bundle->dst.field) {
- nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow,
- &ctx->xout->wc);
+ nxm_reg_load(&bundle->dst, ofp_to_u16(port), &ctx->xin->flow, ctx->wc);
} else {
xlate_output_action(ctx, port, 0, false);
}
xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
{
ctx->xout->has_learn = true;
- learn_mask(learn, &ctx->xout->wc);
+ learn_mask(learn, ctx->wc);
if (ctx->xin->xcache) {
struct xc_entry *entry;
/* Scale the probability from 16-bit to 32-bit while representing
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
+ bool use_masked;
- if (!ctx->xbridge->variable_length_userdata) {
+ if (!ctx->xbridge->support.variable_length_userdata) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_ERR_RL(&rl, "ignoring NXAST_SAMPLE action because datapath "
return;
}
+ use_masked = ctx->xbridge->support.masked_set_action;
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- ctx->xout->odp_actions,
- &ctx->xout->wc,
- ctx->xbridge->masked_set_action);
+ ctx->odp_actions,
+ ctx->wc, use_masked);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
- compose_sample_action(ctx->xbridge, ctx->xout->odp_actions,
+ compose_sample_action(ctx->xbridge, ctx->odp_actions,
&ctx->xin->flow, probability, &cookie,
- sizeof cookie.flow_sample, ODPP_NONE);
+ sizeof cookie.flow_sample, ODPP_NONE,
+ false);
}
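
/* The 16- to 32-bit probability scaling above relies on an identity:
 * (x << 16) | x equals x * 65537, and UINT32_MAX is 65535 * 65537, so the
 * scaled value keeps exactly the same ratio to UINT32_MAX that x has to
 * UINT16_MAX. A self-contained check: */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
    for (uint32_t x = 0; x <= UINT16_MAX; x++) {
        uint32_t scaled = (x << 16) | x;

        assert(scaled == x * UINT32_C(65537));
        /* Equal cross-products mean equal ratios:
         * scaled / UINT32_MAX == x / UINT16_MAX. */
        assert((uint64_t) scaled * UINT16_MAX == (uint64_t) x * UINT32_MAX);
    }
    return 0;
}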
static bool
} else if (inner->type == OFPACT_GROUP) {
ctx->xin->flow.actset_output = OFPP_UNSET;
ctx->action_set_has_group = true;
+ break;
}
}
}
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
struct xlate_ctx *ctx)
{
- struct flow_wildcards *wc = &ctx->xout->wc;
+ struct flow_wildcards *wc = ctx->wc;
struct flow *flow = &ctx->xin->flow;
const struct ofpact *a;
xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
const struct flow *flow, ofp_port_t in_port,
struct rule_dpif *rule, uint16_t tcp_flags,
- const struct dp_packet *packet)
+ const struct dp_packet *packet, struct flow_wildcards *wc,
+ struct ofpbuf *odp_actions)
{
xin->ofproto = ofproto;
xin->flow = *flow;
xin->resubmit_hook = NULL;
xin->report_hook = NULL;
xin->resubmit_stats = NULL;
- xin->skip_wildcards = false;
- xin->odp_actions = NULL;
+ xin->wc = wc;
+ xin->odp_actions = odp_actions;
/* Do recirc lookup. */
xin->recirc = flow->recirc_id
xlate_out_uninit(struct xlate_out *xout)
{
if (xout) {
- if (xout->odp_actions == &xout->odp_actions_buf) {
- ofpbuf_uninit(xout->odp_actions);
- }
xlate_out_free_recircs(xout);
}
}
xlate_actions(xin, &xout);
xlate_out_uninit(&xout);
}
-
-void
-xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
-{
- dst->wc = src->wc;
- dst->slow = src->slow;
- dst->has_learn = src->has_learn;
- dst->has_normal = src->has_normal;
- dst->has_fin_timeout = src->has_fin_timeout;
- dst->nf_output_iface = src->nf_output_iface;
- dst->mirrors = src->mirrors;
-
- dst->odp_actions = &dst->odp_actions_buf;
- ofpbuf_use_stub(dst->odp_actions, dst->odp_actions_stub,
- sizeof dst->odp_actions_stub);
- ofpbuf_put(dst->odp_actions, src->odp_actions->data, src->odp_actions->size);
-}
\f
static struct skb_priority_to_dscp *
get_skb_priority(const struct xport *xport, uint32_t skb_priority)
const struct nlattr *a;
unsigned int left;
- NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->xout->odp_actions->data,
- ctx->xout->odp_actions->size) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ctx->odp_actions->data,
+ ctx->odp_actions->size) {
if (nl_attr_type(a) == OVS_ACTION_ATTR_OUTPUT
&& nl_attr_get_odp_port(a) == local_odp_port) {
return true;
void
xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
+ *xout = (struct xlate_out) {
+ .slow = 0,
+ .fail_open = false,
+ .has_learn = false,
+ .has_normal = false,
+ .has_fin_timeout = false,
+ .nf_output_iface = NF_OUT_DROP,
+ .mirrors = 0,
+ .n_recircs = 0,
+ };
+
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
- struct flow_wildcards *wc = NULL;
+ struct xbridge *xbridge = xbridge_lookup(xcfg, xin->ofproto);
+ if (!xbridge) {
+ return;
+ }
+
struct flow *flow = &xin->flow;
- struct rule_dpif *rule = NULL;
- enum slow_path_reason special;
- const struct ofpact *ofpacts;
- struct xbridge *xbridge;
+ union mf_subvalue stack_stub[1024 / sizeof(union mf_subvalue)];
+ uint64_t action_set_stub[1024 / 8];
+ struct flow_wildcards scratch_wc;
+ uint64_t actions_stub[256 / 8];
+ struct ofpbuf scratch_actions = OFPBUF_STUB_INITIALIZER(actions_stub);
+ struct xlate_ctx ctx = {
+ .xin = xin,
+ .xout = xout,
+ .base_flow = *flow,
+ .orig_tunnel_ip_dst = flow->tunnel.ip_dst,
+ .xbridge = xbridge,
+ .stack = OFPBUF_STUB_INITIALIZER(stack_stub),
+ .rule = xin->rule,
+ .wc = xin->wc ? xin->wc : &scratch_wc,
+ .odp_actions = xin->odp_actions ? xin->odp_actions : &scratch_actions,
+
+ .recurse = 0,
+ .resubmits = 0,
+ .in_group = false,
+ .in_action_set = false,
+
+ .table_id = 0,
+ .rule_cookie = OVS_BE64_MAX,
+ .orig_skb_priority = flow->skb_priority,
+ .sflow_n_outputs = 0,
+ .sflow_odp_port = 0,
+ .user_cookie_offset = 0,
+ .exit = false,
+
+ .recirc_action_offset = -1,
+ .last_unroll_offset = -1,
+
+ .was_mpls = false,
+
+ .action_set_has_group = false,
+ .action_set = OFPBUF_STUB_INITIALIZER(action_set_stub),
+ };
+ memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ ofpbuf_reserve(ctx.odp_actions, NL_A_U32_SIZE);
+
struct xport *in_port;
struct flow orig_flow;
- struct xlate_ctx ctx;
- size_t ofpacts_len;
bool tnl_may_send;
- bool is_icmp;
COVERAGE_INC(xlate_actions);
* kernel does. If we wish to maintain the original values an action
* needs to be generated. */
- ctx.xin = xin;
- ctx.xout = xout;
- ctx.xout->slow = 0;
- ctx.xout->has_learn = false;
- ctx.xout->has_normal = false;
- ctx.xout->has_fin_timeout = false;
- ctx.xout->nf_output_iface = NF_OUT_DROP;
- ctx.xout->mirrors = 0;
- ctx.xout->n_recircs = 0;
-
- xout->odp_actions = xin->odp_actions;
- if (!xout->odp_actions) {
- xout->odp_actions = &xout->odp_actions_buf;
- ofpbuf_use_stub(xout->odp_actions, xout->odp_actions_stub,
- sizeof xout->odp_actions_stub);
- }
- ofpbuf_reserve(xout->odp_actions, NL_A_U32_SIZE);
-
- xbridge = xbridge_lookup(xcfg, xin->ofproto);
- if (!xbridge) {
- return;
- }
- /* 'ctx.xbridge' may be changed by action processing, whereas 'xbridge'
- * will remain set on the original input bridge. */
- ctx.xbridge = xbridge;
- ctx.rule = xin->rule;
-
- ctx.base_flow = *flow;
- memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
- ctx.orig_tunnel_ip_dst = flow->tunnel.ip_dst;
-
- if (!xin->skip_wildcards) {
- wc = &xout->wc;
- flow_wildcards_init_catchall(wc);
- memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
- memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+ if (xin->wc) {
+ flow_wildcards_init_catchall(ctx.wc);
+ memset(&ctx.wc->masks.in_port, 0xff, sizeof ctx.wc->masks.in_port);
+ memset(&ctx.wc->masks.dl_type, 0xff, sizeof ctx.wc->masks.dl_type);
if (is_ip_any(flow)) {
- wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ ctx.wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
}
- if (xbridge->enable_recirc) {
+ if (xbridge->support.odp.recirc) {
/* Always exactly match recirc_id when datapath supports
* recirculation. */
- wc->masks.recirc_id = UINT32_MAX;
+ ctx.wc->masks.recirc_id = UINT32_MAX;
}
if (xbridge->netflow) {
- netflow_mask_wc(flow, wc);
+ netflow_mask_wc(flow, ctx.wc);
}
}
- is_icmp = is_icmpv4(flow) || is_icmpv6(flow);
-
- tnl_may_send = tnl_xlate_init(&ctx.base_flow, flow, wc);
- ctx.recurse = 0;
- ctx.resubmits = 0;
- ctx.in_group = false;
- ctx.in_action_set = false;
- ctx.orig_skb_priority = flow->skb_priority;
- ctx.table_id = 0;
- ctx.rule_cookie = OVS_BE64_MAX;
- ctx.exit = false;
- ctx.was_mpls = false;
- ctx.recirc_action_offset = -1;
- ctx.last_unroll_offset = -1;
-
- ctx.action_set_has_group = false;
- ofpbuf_use_stub(&ctx.action_set,
- ctx.action_set_stub, sizeof ctx.action_set_stub);
-
- ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+ tnl_may_send = tnl_xlate_init(flow, xin->wc);
/* The in_port of the original packet before recirculation. */
in_port = get_ofp_port(xbridge, flow->in_port.ofp_port);
xin->ofpacts_len > 0
? "actions"
: "rule");
- return;
+ goto exit;
}
/* Set the bridge for post-recirculation processing if needed. */
/* Drop the packet if the bridge cannot be found. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "Recirculation bridge no longer exists.");
- return;
+ goto exit;
}
ctx.xbridge = new_bridge;
}
VLOG_WARN_RL(&rl, "Recirculation context not found for ID %"PRIx32,
flow->recirc_id);
- return;
+ goto exit;
}
+ /* The bridge is now known so obtain its table version. */
+ ctx.tables_version = ofproto_dpif_get_tables_version(ctx.xbridge->ofproto);
if (!xin->ofpacts && !ctx.rule) {
- rule = rule_dpif_lookup_from_table(ctx.xbridge->ofproto, flow, wc,
- ctx.xin->xcache != NULL,
- ctx.xin->resubmit_stats,
- &ctx.table_id,
- flow->in_port.ofp_port, true, true);
+ ctx.rule = rule_dpif_lookup_from_table(
+ ctx.xbridge->ofproto, ctx.tables_version, flow, xin->wc,
+ ctx.xin->xcache != NULL, ctx.xin->resubmit_stats, &ctx.table_id,
+ flow->in_port.ofp_port, true, true);
if (ctx.xin->resubmit_stats) {
- rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+ rule_dpif_credit_stats(ctx.rule, ctx.xin->resubmit_stats);
}
if (ctx.xin->xcache) {
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
- entry->u.rule = rule;
+ entry->u.rule = ctx.rule;
}
- ctx.rule = rule;
if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
- ctx.xin->resubmit_hook(ctx.xin, rule, 0);
+ ctx.xin->resubmit_hook(ctx.xin, ctx.rule, 0);
}
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
- if (xin->ofpacts) {
- ofpacts = xin->ofpacts;
- ofpacts_len = xin->ofpacts_len;
- } else if (ctx.rule) {
- const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);
-
- ofpacts = actions->ofpacts;
- ofpacts_len = actions->ofpacts_len;
-
- ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
- } else {
- OVS_NOT_REACHED();
- }
-
if (mbridge_has_mirrors(xbridge->mbridge)) {
/* Do this conditionally because the copy is expensive enough that it
* shows up in profiles. */
/* Do not perform special processing on recirculated packets,
* as recirculated packets are not really received by the bridge. */
- if (!xin->recirc &&
- (special = process_special(&ctx, flow, in_port, ctx.xin->packet))) {
- ctx.xout->slow |= special;
- } else {
+ if (xin->recirc || !process_special(&ctx, in_port)) {
size_t sample_actions_len;
if (flow->in_port.ofp_port
if (!xin->recirc) {
add_sflow_action(&ctx);
add_ipfix_action(&ctx);
- sample_actions_len = ctx.xout->odp_actions->size;
+ sample_actions_len = ctx.odp_actions->size;
} else {
sample_actions_len = 0;
}
if (tnl_may_send && (!in_port || may_receive(in_port, &ctx))) {
+ const struct ofpact *ofpacts;
+ size_t ofpacts_len;
+
+ if (xin->ofpacts) {
+ ofpacts = xin->ofpacts;
+ ofpacts_len = xin->ofpacts_len;
+ } else if (ctx.rule) {
+ const struct rule_actions *actions
+ = rule_dpif_get_actions(ctx.rule);
+ ofpacts = actions->ofpacts;
+ ofpacts_len = actions->ofpacts_len;
+ ctx.rule_cookie = rule_dpif_get_flow_cookie(ctx.rule);
+ } else {
+ OVS_NOT_REACHED();
+ }
+
do_xlate_actions(ofpacts, ofpacts_len, &ctx);
/* We've let OFPP_NORMAL and the learning action look at the
if (in_port && (!xport_stp_forward_state(in_port) ||
!xport_rstp_forward_state(in_port))) {
/* Drop all actions added by do_xlate_actions() above. */
- ctx.xout->odp_actions->size = sample_actions_len;
+ ctx.odp_actions->size = sample_actions_len;
/* Undo changes that may have been done for recirculation. */
if (exit_recirculates(&ctx)) {
}
}
- if (nl_attr_oversized(ctx.xout->odp_actions->size)) {
+ if (nl_attr_oversized(ctx.odp_actions->size)) {
/* These datapath actions are too big for a Netlink attribute, so we
* can't hand them to the kernel directly. dpif_execute() can execute
* them one by one with help, so just mark the result as SLOW_ACTION to
* prevent the flow from being installed. */
COVERAGE_INC(xlate_actions_oversize);
ctx.xout->slow |= SLOW_ACTION;
- } else if (too_many_output_actions(ctx.xout->odp_actions)) {
+ } else if (too_many_output_actions(ctx.odp_actions)) {
COVERAGE_INC(xlate_actions_too_many_output);
ctx.xout->slow |= SLOW_ACTION;
}
}
}
- /* Do netflow only for packets really received by the bridge. */
- if (!xin->recirc && xbridge->netflow) {
- /* Only update netflow if we don't have controller flow. We don't
- * report NetFlow expiration messages for such facets because they
- * are just part of the control logic for the network, not real
- * traffic. */
- if (ofpacts_len == 0
- || ofpacts->type != OFPACT_CONTROLLER
- || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
- if (ctx.xin->resubmit_stats) {
- netflow_flow_update(xbridge->netflow, flow,
- xout->nf_output_iface,
- ctx.xin->resubmit_stats);
- }
- if (ctx.xin->xcache) {
- struct xc_entry *entry;
+ /* Do netflow only for packets really received by the bridge and not sent
+ * to the controller. We consider packets sent to the controller to be
+ * part of the control plane rather than the data plane. */
+ if (!xin->recirc && xbridge->netflow && !(xout->slow & SLOW_CONTROLLER)) {
+ if (ctx.xin->resubmit_stats) {
+ netflow_flow_update(xbridge->netflow, flow,
+ xout->nf_output_iface,
+ ctx.xin->resubmit_stats);
+ }
+ if (ctx.xin->xcache) {
+ struct xc_entry *entry;
- entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
- entry->u.nf.netflow = netflow_ref(xbridge->netflow);
- entry->u.nf.flow = xmemdup(flow, sizeof *flow);
- entry->u.nf.iface = xout->nf_output_iface;
- }
+ entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+ entry->u.nf.netflow = netflow_ref(xbridge->netflow);
+ entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+ entry->u.nf.iface = xout->nf_output_iface;
}
}
- ofpbuf_uninit(&ctx.stack);
- ofpbuf_uninit(&ctx.action_set);
-
- if (wc) {
+ if (xin->wc) {
/* Clear the metadata and register wildcard masks, because we won't
* use non-header fields as part of the cache. */
- flow_wildcards_clear_non_packet_fields(wc);
+ flow_wildcards_clear_non_packet_fields(ctx.wc);
/* ICMPv4 and ICMPv6 have 8-bit "type" and "code" fields. struct flow
 * uses the low 8 bits of the 16-bit tp_src and tp_dst members to
 * represent these fields, but the datapath stores only 8 bits each,
 * so a mask with any of the high 8 bits set could not be installed.
 * Avoid the problem here by making sure that only the low 8 bits of
 * either field can be unwildcarded for ICMP.
 */
- if (is_icmp) {
- wc->masks.tp_src &= htons(UINT8_MAX);
- wc->masks.tp_dst &= htons(UINT8_MAX);
+ if (is_icmpv4(flow) || is_icmpv6(flow)) {
+ ctx.wc->masks.tp_src &= htons(UINT8_MAX);
+ ctx.wc->masks.tp_dst &= htons(UINT8_MAX);
+ }
+ /* VLAN_TCI CFI bit must be matched if any of the TCI is matched. */
+ if (ctx.wc->masks.vlan_tci) {
+ ctx.wc->masks.vlan_tci |= htons(VLAN_CFI);
}
}
+
+exit:
+ ofpbuf_uninit(&ctx.stack);
+ ofpbuf_uninit(&ctx.action_set);
+ ofpbuf_uninit(&scratch_actions);
}
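
/* xlate_actions() above now releases its stub-backed buffers through a
 * single 'exit' label, so the early returns taken for recirculation errors
 * cannot leak a buffer that spilled to the heap. A self-contained sketch
 * of that shape, with a hypothetical 'struct buf' in place of ofpbuf: */
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    char *data;
    size_t size, allocated;
    char *stub;                     /* Nonnull while on stack storage. */
};

static void
buf_use_stub(struct buf *b, char *stub, size_t n)
{
    b->data = b->stub = stub;
    b->size = 0;
    b->allocated = n;
}

static void
buf_put(struct buf *b, const char *s, size_t n)
{
    if (b->size + n > b->allocated) {           /* Spill to the heap. */
        char *heap;

        b->allocated = (b->size + n) * 2;
        heap = malloc(b->allocated);
        memcpy(heap, b->data, b->size);
        b->data = heap;
        b->stub = NULL;
    }
    memcpy(b->data + b->size, s, n);
    b->size += n;
}

static void
buf_uninit(struct buf *b)
{
    if (!b->stub) {
        free(b->data);              /* Free only heap-backed data. */
    }
}

static int
translate(bool fail_early)
{
    char stub[8];
    struct buf actions;
    int error = 0;

    buf_use_stub(&actions, stub, sizeof stub);
    if (fail_early) {
        error = -1;
        goto exit;                  /* A bare 'return' would leak once
                                     * 'actions' had spilled. */
    }
    buf_put(&actions, "0123456789", 10);        /* Forces a spill. */

exit:
    buf_uninit(&actions);
    return error;
}

int
main(void)
{
    translate(false);
    translate(true);
    return 0;
}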
/* Sends 'packet' out 'ofport'.