-/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "ofproto/ofproto-dpif-xlate.h"
#include <errno.h>
+#include <arpa/inet.h>
+#include <net/if.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include "tnl-arp-cache.h"
#include "bfd.h"
#include "bitmap.h"
#include "bond.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "packet-dpif.h"
+#include "ovs-router.h"
+#include "tnl-ports.h"
#include "tunnel.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
COVERAGE_DEFINE(xlate_actions_too_many_output);
-COVERAGE_DEFINE(xlate_actions_mpls_overflow);
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
struct hmap_node hmap_node; /* Node in global 'xbridges' map. */
struct ofproto_dpif *ofproto; /* Key in global 'xbridges' map. */
- struct list xbundles; /* Owned xbundles. */
+ struct ovs_list xbundles; /* Owned xbundles. */
struct hmap xports; /* Indexed by ofp_port. */
char *name; /* Name used in log messages. */
struct stp *stp; /* STP or null if disabled. */
struct rstp *rstp; /* RSTP or null if disabled. */
- /* Special rules installed by ofproto-dpif. */
- struct rule_dpif *miss_rule;
- struct rule_dpif *no_packet_in_rule;
-
- enum ofp_config_flags frag; /* Fragmentation handling. */
bool has_in_band; /* Bridge has in band control? */
bool forward_bpdu; /* Bridge forwards STP BPDUs? */
struct hmap_node hmap_node; /* In global 'xbundles' map. */
struct ofbundle *ofbundle; /* Key in global 'xbundles' map. */
- struct list list_node; /* In parent 'xbridges' list. */
+ struct ovs_list list_node; /* In parent 'xbridges' list. */
struct xbridge *xbridge; /* Parent xbridge. */
- struct list xports; /* Contains "struct xport"s. */
+ struct ovs_list xports; /* Contains "struct xport"s. */
char *name; /* Name used in log messages. */
struct bond *bond; /* Nonnull iff more than one port. */
odp_port_t odp_port; /* Datapath port number or ODPP_NONE. */
- struct list bundle_node; /* In parent xbundle (if it exists). */
+ struct ovs_list bundle_node; /* In parent xbundle (if it exists). */
struct xbundle *xbundle; /* Parent xbundle or null. */
struct netdev *netdev; /* 'ofport''s netdev. */
int recurse; /* Current resubmit nesting depth. */
int resubmits; /* Total number of resubmits. */
bool in_group; /* Currently translating ofgroup, if true. */
+ bool in_action_set; /* Currently translating action_set, if true. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
* When translation is otherwise complete, ofpacts_execute_action_set()
* converts it to a set of "struct ofpact"s that can be translated into
* datapath actions. */
+ bool action_set_has_group; /* Action set contains OFPACT_GROUP? */
struct ofpbuf action_set; /* Action set. */
uint64_t action_set_stub[1024 / 8];
};
XC_NORMAL,
XC_FIN_TIMEOUT,
XC_GROUP,
+    XC_TNL_ARP,    /* Refreshes a tunnel neighbor (ARP) cache entry. */
};
/* xlate_cache entries hold enough information to perform the side effects of
struct group_dpif *group;
struct ofputil_bucket *bucket;
} group;
+        struct {
+            char br_name[IFNAMSIZ];  /* Name of bridge for ARP lookup. */
+            ovs_be32 d_ip;           /* Tunnel endpoint IP address. */
+        } tnl_arp_cache;
} u;
};
struct hmap xbundles;
struct hmap xports;
};
-static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_TYPE_INITIALIZER;
+static OVSRCU_TYPE(struct xlate_cfg *) xcfgp = OVSRCU_INITIALIZER(NULL);
static struct xlate_cfg *new_xcfg = NULL;
static bool may_receive(const struct xport *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
struct xlate_ctx *);
static void xlate_normal(struct xlate_ctx *);
-static void xlate_report(struct xlate_ctx *, const char *);
+static inline void xlate_report(struct xlate_ctx *, const char *);
static void xlate_table_action(struct xlate_ctx *, ofp_port_t in_port,
uint8_t table_id, bool may_packet_in,
bool honor_table_miss);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
static void xlate_xbridge_set(struct xbridge *, struct dpif *,
- struct rule_dpif *miss_rule,
- struct rule_dpif *no_packet_in_rule,
const struct mac_learning *, struct stp *,
struct rstp *, const struct mcast_snooping *,
const struct mbridge *,
const struct dpif_sflow *,
const struct dpif_ipfix *,
- const struct netflow *, enum ofp_config_flags,
+ const struct netflow *,
bool forward_bpdu, bool has_in_band,
bool enable_recirc,
bool variable_length_userdata,
struct xport *);
static void xlate_xcfg_free(struct xlate_cfg *);
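+/* Reports 's' via the translation hook, if the caller registered one.
+ * Inline because in the common case no hook is registered. */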
+static inline void
+xlate_report(struct xlate_ctx *ctx, const char *s)
+{
+ if (OVS_UNLIKELY(ctx->xin->report_hook)) {
+ ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
+ }
+}
static void
xlate_xbridge_init(struct xlate_cfg *xcfg, struct xbridge *xbridge)
static void
xlate_xbridge_set(struct xbridge *xbridge,
struct dpif *dpif,
- struct rule_dpif *miss_rule,
- struct rule_dpif *no_packet_in_rule,
const struct mac_learning *ml, struct stp *stp,
struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
- const struct netflow *netflow, enum ofp_config_flags frag,
+ const struct netflow *netflow,
bool forward_bpdu, bool has_in_band,
bool enable_recirc,
bool variable_length_userdata,
xbridge->dpif = dpif;
xbridge->forward_bpdu = forward_bpdu;
xbridge->has_in_band = has_in_band;
- xbridge->frag = frag;
- xbridge->miss_rule = miss_rule;
- xbridge->no_packet_in_rule = no_packet_in_rule;
xbridge->enable_recirc = enable_recirc;
xbridge->variable_length_userdata = variable_length_userdata;
xbridge->max_mpls_depth = max_mpls_depth;
xlate_xbridge_init(new_xcfg, new_xbridge);
xlate_xbridge_set(new_xbridge,
- xbridge->dpif, xbridge->miss_rule,
- xbridge->no_packet_in_rule, xbridge->ml, xbridge->stp,
+ xbridge->dpif, xbridge->ml, xbridge->stp,
xbridge->rstp, xbridge->ms, xbridge->mbridge,
xbridge->sflow, xbridge->ipfix, xbridge->netflow,
- xbridge->frag, xbridge->forward_bpdu,
+ xbridge->forward_bpdu,
xbridge->has_in_band, xbridge->enable_recirc,
xbridge->variable_length_userdata,
xbridge->max_mpls_depth, xbridge->masked_set_action);
struct xlate_cfg *xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
ovsrcu_set(&xcfgp, new_xcfg);
- ovsrcu_postpone(xlate_xcfg_free, xcfg);
-
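+    /* Wait for all readers to quiesce so that the old configuration can be
+     * freed synchronously instead of being deferred with ovsrcu_postpone(). */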
+ ovsrcu_synchronize();
+ xlate_xcfg_free(xcfg);
new_xcfg = NULL;
}
void
xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
- struct dpif *dpif, struct rule_dpif *miss_rule,
- struct rule_dpif *no_packet_in_rule,
+ struct dpif *dpif,
const struct mac_learning *ml, struct stp *stp,
struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
- const struct netflow *netflow, enum ofp_config_flags frag,
+ const struct netflow *netflow,
bool forward_bpdu, bool has_in_band, bool enable_recirc,
bool variable_length_userdata, size_t max_mpls_depth,
bool masked_set_action)
free(xbridge->name);
xbridge->name = xstrdup(name);
- xlate_xbridge_set(xbridge, dpif, miss_rule, no_packet_in_rule, ml, stp,
- rstp, ms, mbridge, sflow, ipfix, netflow, frag,
- forward_bpdu, has_in_band, enable_recirc,
+ xlate_xbridge_set(xbridge, dpif, ml, stp, rstp, ms, mbridge, sflow, ipfix,
+ netflow, forward_bpdu, has_in_band, enable_recirc,
variable_length_userdata, max_mpls_depth,
masked_set_action);
}
xlate_lookup_ofproto_(const struct dpif_backer *backer, const struct flow *flow,
ofp_port_t *ofp_in_port, const struct xport **xportp)
{
+ struct ofproto_dpif *recv_ofproto = NULL;
+ struct ofproto_dpif *recirc_ofproto = NULL;
const struct xport *xport;
+ ofp_port_t in_port = OFPP_NONE;
*xportp = xport = xlate_lookup_xport(backer, flow);
if (xport) {
- if (ofp_in_port) {
- *ofp_in_port = xport->ofp_port;
+ recv_ofproto = xport->xbridge->ofproto;
+ in_port = xport->ofp_port;
+ }
+
+    /* When recirc_id is set in 'flow', checks whether the ofproto_dpif that
+     * corresponds to the recirc_id is the same as the receiving bridge.  If
+     * they are the same, uses 'recv_ofproto' and keeps 'ofp_in_port' as
+     * assigned.  Otherwise, uses the 'recirc_ofproto' that owns the
+     * recirc_id and assigns OFPP_NONE to 'ofp_in_port'.  The reason is that
+     * a recirculated flow must be processed by the ofproto that originated
+     * the recirculation, and, since bridges can only see their own ports,
+     * the in_port of 'recv_ofproto' must not be passed on to
+     * 'recirc_ofproto'.
+     *
+     * Admittedly, setting 'ofp_in_port' to OFPP_NONE keeps 'recirc_ofproto'
+     * from meaningfully matching on the in_port of the recirculated flow;
+     * this should be fixed in the near future.
+     *
+     * TODO: Restore the original patch port.
+     */
+ if (recv_ofproto && flow->recirc_id) {
+ recirc_ofproto = ofproto_dpif_recirc_get_ofproto(backer,
+ flow->recirc_id);
+ if (recv_ofproto != recirc_ofproto) {
+ *xportp = xport = NULL;
+ in_port = OFPP_NONE;
}
- return xport->xbridge->ofproto;
}
- return NULL;
+ if (ofp_in_port) {
+ *ofp_in_port = in_port;
+ }
+
+ return xport ? recv_ofproto : recirc_ofproto;
}
/* Given a datapath and flow metadata ('backer', and 'flow' respectively)
* pointers until quiescing, for longer term use additional references must
* be taken.
*
- * '*ofp_in_port' is set to OFPP_NONE if 'flow''s in_port does not exist.
- *
- * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport.
+ * Returns 0 if successful, ENODEV if the parsed flow has no associated
+ * ofproto.
*/
int
xlate_lookup(const struct dpif_backer *backer, const struct flow *flow,
ofproto = xlate_lookup_ofproto_(backer, flow, ofp_in_port, &xport);
- if (ofp_in_port && !xport) {
- *ofp_in_port = OFPP_NONE;
- }
-
- if (!xport) {
+ if (!ofproto) {
return ENODEV;
}
}
if (ipfix) {
- *ipfix = xport->xbridge->ipfix;
+ *ipfix = xport ? xport->xbridge->ipfix : NULL;
}
if (sflow) {
- *sflow = xport->xbridge->sflow;
+ *sflow = xport ? xport->xbridge->sflow : NULL;
}
if (netflow) {
- *netflow = xport->xbridge->netflow;
+ *netflow = xport ? xport->xbridge->netflow : NULL;
}
+
return 0;
}
xport_stp_learn_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
- return stp_learn_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+ return sp
+ ? stp_learn_in_state(stp_port_get_state(sp))
+ : true;
}
static bool
xport_stp_forward_state(const struct xport *xport)
{
struct stp_port *sp = xport_get_stp_port(xport);
- return stp_forward_in_state(sp ? stp_port_get_state(sp) : STP_DISABLED);
+ return sp
+ ? stp_forward_in_state(stp_port_get_state(sp))
+ : true;
}
static bool
static bool
xport_rstp_learn_state(const struct xport *xport)
{
- return rstp_learn_in_state(xport_get_rstp_port_state(xport));
+ return xport->xbridge->rstp && xport->rstp_port
+ ? rstp_learn_in_state(xport_get_rstp_port_state(xport))
+ : true;
}
static bool
xport_rstp_forward_state(const struct xport *xport)
{
- return rstp_forward_in_state(xport_get_rstp_port_state(xport));
+ return xport->xbridge->rstp && xport->rstp_port
+ ? rstp_forward_in_state(xport_get_rstp_port_state(xport))
+ : true;
}
static bool
static bool
odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
{
- struct xport *xport;
- struct bfd *bfd;
-
- xport = get_ofp_port(ctx->xbridge, ofp_port);
- if (!xport || xport->config & OFPUTIL_PC_PORT_DOWN ||
- xport->state & OFPUTIL_PS_LINK_DOWN) {
- return false;
- }
-
- bfd = xport->bfd;
- if (bfd && !bfd_forwarding(bfd)) {
- return false;
- }
-
- return true;
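+    /* 'may_enable' already accounts for port and link state as well as
+     * liveness protocols such as BFD. */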
+ struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
+ return xport && xport->may_enable;
}
static struct ofputil_bucket *
const struct group_dpif *group, int depth)
{
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
int i = 0;
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
group_dpif_get_buckets(group, &buckets);
LIST_FOR_EACH (bucket, list_node, buckets) {
case BV_DROP_IF_MOVED:
ovs_rwlock_rdlock(&xbridge->ml->rwlock);
mac = mac_learning_lookup(xbridge->ml, flow->dl_src, vlan);
- if (mac && mac->port.p != in_xbundle->ofbundle &&
- (!is_gratuitous_arp(flow, &ctx->xout->wc)
- || mac_entry_is_grat_arp_locked(mac))) {
+ if (mac
+ && mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle
+ && (!is_gratuitous_arp(flow, &ctx->xout->wc)
+ || mac_entry_is_grat_arp_locked(mac))) {
ovs_rwlock_unlock(&xbridge->ml->rwlock);
xlate_report(ctx, "SLB bond thinks this packet looped back, "
"dropping");
}
}
- return mac->port.p != in_xbundle->ofbundle;
+ return mac_entry_get_port(ml, mac) != in_xbundle->ofbundle;
}
}
}
- if (mac->port.p != in_xbundle->ofbundle) {
+ if (mac_entry_get_port(xbridge->ml, mac) != in_xbundle->ofbundle) {
/* The log messages here could actually be useful in debugging,
* so keep the rate limit relatively high. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
xbridge->name, ETH_ADDR_ARGS(flow->dl_src),
in_xbundle->name, vlan);
- mac->port.p = in_xbundle->ofbundle;
- mac_learning_changed(xbridge->ml);
+ mac_entry_set_port(xbridge->ml, mac, in_xbundle->ofbundle);
}
}
struct mcast_snooping *ms = xbridge->ms;
struct xlate_cfg *xcfg;
struct xbundle *mcast_xbundle;
- struct mcast_fport_bundle *fport;
+ struct mcast_port_bundle *fport;
/* Don't learn the OFPP_NONE port. */
if (in_xbundle == &ofpp_none_bundle) {
mcast_xbundle = NULL;
ovs_rwlock_wrlock(&ms->rwlock);
xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
- LIST_FOR_EACH(fport, fport_node, &ms->fport_list) {
+ LIST_FOR_EACH(fport, node, &ms->fport_list) {
mcast_xbundle = xbundle_lookup(xcfg, fport->port);
if (mcast_xbundle == in_xbundle) {
break;
OVS_REQ_RDLOCK(ms->rwlock)
{
struct xlate_cfg *xcfg;
- struct mcast_fport_bundle *fport;
+ struct mcast_port_bundle *fport;
struct xbundle *mcast_xbundle;
xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
- LIST_FOR_EACH(fport, fport_node, &ms->fport_list) {
+ LIST_FOR_EACH(fport, node, &ms->fport_list) {
mcast_xbundle = xbundle_lookup(xcfg, fport->port);
if (mcast_xbundle && mcast_xbundle != in_xbundle) {
xlate_report(ctx, "forwarding to mcast flood port");
}
}
+/* Forwards the Reports to the ports configured to receive them. */
+static void
+xlate_normal_mcast_send_rports(struct xlate_ctx *ctx,
+ struct mcast_snooping *ms,
+ struct xbundle *in_xbundle, uint16_t vlan)
+ OVS_REQ_RDLOCK(ms->rwlock)
+{
+ struct xlate_cfg *xcfg;
+ struct mcast_port_bundle *rport;
+ struct xbundle *mcast_xbundle;
+
+ xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
+ LIST_FOR_EACH(rport, node, &ms->rport_list) {
+ mcast_xbundle = xbundle_lookup(xcfg, rport->port);
+ if (mcast_xbundle && mcast_xbundle != in_xbundle) {
+ xlate_report(ctx, "forwarding Report to mcast flagged port");
+ output_normal(ctx, mcast_xbundle, vlan);
+ } else if (!mcast_xbundle) {
+ xlate_report(ctx, "mcast port is unknown, dropping the Report");
+ } else {
+ xlate_report(ctx, "mcast port is input port, dropping the Report");
+ }
+ }
+}
+
static void
xlate_normal_flood(struct xlate_ctx *ctx, struct xbundle *in_xbundle,
uint16_t vlan)
if (mcast_snooping_is_membership(flow->tp_src)) {
ovs_rwlock_rdlock(&ms->rwlock);
xlate_normal_mcast_send_mrouters(ctx, ms, in_xbundle, vlan);
+ /* RFC4541: section 2.1.1, item 1: A snooping switch should
+ * forward IGMP Membership Reports only to those ports where
+ * multicast routers are attached. Alternatively stated: a
+ * snooping switch should not forward IGMP Membership Reports
+ * to ports on which only hosts are attached.
+ * An administrative control may be provided to override this
+ * restriction, allowing the report messages to be flooded to
+ * other ports. */
+ xlate_normal_mcast_send_rports(ctx, ms, in_xbundle, vlan);
ovs_rwlock_unlock(&ms->rwlock);
} else {
xlate_report(ctx, "multicast traffic, flooding");
} else {
ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
mac = mac_learning_lookup(ctx->xbridge->ml, flow->dl_dst, vlan);
- mac_port = mac ? mac->port.p : NULL;
+ mac_port = mac ? mac_entry_get_port(ctx->xbridge->ml, mac) : NULL;
ovs_rwlock_unlock(&ctx->xbridge->ml->rwlock);
if (mac_port) {
}
}
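+/* Looks up the route for the tunnel destination in 'oflow'.  On success,
+ * stores the next-hop IP address (the gateway if there is one, otherwise
+ * the tunnel destination itself) in '*ip', points '*out_port' at the xport
+ * of the output device, and returns 0.  Returns -ENOENT if no route or no
+ * matching port is found. */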
+static int
+tnl_route_lookup_flow(const struct flow *oflow,
+ ovs_be32 *ip, struct xport **out_port)
+{
+ char out_dev[IFNAMSIZ];
+ struct xbridge *xbridge;
+ struct xlate_cfg *xcfg;
+ ovs_be32 gw;
+
+ if (!ovs_router_lookup(oflow->tunnel.ip_dst, out_dev, &gw)) {
+ return -ENOENT;
+ }
+
+ if (gw) {
+ *ip = gw;
+ } else {
+ *ip = oflow->tunnel.ip_dst;
+ }
+
+ xcfg = ovsrcu_get(struct xlate_cfg *, &xcfgp);
+ ovs_assert(xcfg);
+
+ HMAP_FOR_EACH (xbridge, hmap_node, &xcfg->xbridges) {
+ if (!strncmp(xbridge->name, out_dev, IFNAMSIZ)) {
+ struct xport *port;
+
+ HMAP_FOR_EACH (port, ofp_node, &xbridge->xports) {
+ if (!strncmp(netdev_get_name(port->netdev), out_dev, IFNAMSIZ)) {
+ *out_port = port;
+ return 0;
+ }
+ }
+ }
+ }
+ return -ENOENT;
+}
+
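+/* Floods 'packet' on 'xbridge' by executing an OFPP_FLOOD output action,
+ * with OFPP_NONE as the in_port to avoid any input-port processing. */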
+static int
+xlate_flood_packet(struct xbridge *xbridge, struct ofpbuf *packet)
+{
+ struct ofpact_output output;
+ struct flow flow;
+
+ ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+ /* Use OFPP_NONE as the in_port to avoid special packet processing. */
+ flow_extract(packet, NULL, &flow);
+ flow.in_port.ofp_port = OFPP_NONE;
+ output.port = OFPP_FLOOD;
+ output.max_len = 0;
+
+ return ofproto_dpif_execute_actions(xbridge->ofproto, &flow, NULL,
+ &output.ofpact, sizeof output,
+ packet);
+}
+
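+/* Composes an ARP request for 'ip_dst' from 'eth_src' and 'ip_src' and
+ * floods it on the bridge that contains 'out_dev'; the snooped reply will
+ * populate the tunnel ARP cache. */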
+static void
+tnl_send_arp_request(const struct xport *out_dev,
+                     const uint8_t eth_src[ETH_ADDR_LEN],
+ ovs_be32 ip_src, ovs_be32 ip_dst)
+{
+ struct xbridge *xbridge = out_dev->xbridge;
+ struct ofpbuf packet;
+
+ ofpbuf_init(&packet, 0);
+ compose_arp(&packet, eth_src, ip_src, ip_dst);
+
+ xlate_flood_packet(xbridge, &packet);
+ ofpbuf_uninit(&packet);
+}
+
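+/* Appends a datapath tunnel-push action for sending 'flow' on the native
+ * tunnel port 'xport': resolves the route and next-hop MAC address, builds
+ * the tunnel header, and emits OVS_ACTION_ATTR_TUNNEL_PUSH.  If the next
+ * hop's MAC address is not yet known, sends an ARP request instead and
+ * returns an error without emitting any action. */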
+static int
+build_tunnel_send(const struct xlate_ctx *ctx, const struct xport *xport,
+ const struct flow *flow, odp_port_t tunnel_odp_port)
+{
+ struct ovs_action_push_tnl tnl_push_data;
+ struct xport *out_dev = NULL;
+ ovs_be32 s_ip, d_ip = 0;
+ uint8_t smac[ETH_ADDR_LEN];
+ uint8_t dmac[ETH_ADDR_LEN];
+ int err;
+
+ err = tnl_route_lookup_flow(flow, &d_ip, &out_dev);
+ if (err) {
+ return err;
+ }
+
+    /* Use the MAC address of the bridge port that faces the tunnel peer. */
+ err = netdev_get_etheraddr(out_dev->netdev, smac);
+ if (err) {
+ return err;
+ }
+
+ err = netdev_get_in4(out_dev->netdev, (struct in_addr *) &s_ip, NULL);
+ if (err) {
+ return err;
+ }
+
+ err = tnl_arp_lookup(out_dev->xbridge->name, d_ip, dmac);
+ if (err) {
+ tnl_send_arp_request(out_dev, smac, s_ip, d_ip);
+ return err;
+ }
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_TNL_ARP);
+        ovs_strlcpy(entry->u.tnl_arp_cache.br_name, out_dev->xbridge->name,
+                    IFNAMSIZ);
+ entry->u.tnl_arp_cache.d_ip = d_ip;
+ }
+ err = tnl_port_build_header(xport->ofport, flow,
+ dmac, smac, s_ip, &tnl_push_data);
+ if (err) {
+ return err;
+ }
+ tnl_push_data.tnl_port = odp_to_u32(tunnel_odp_port);
+ tnl_push_data.out_port = odp_to_u32(out_dev->odp_port);
+ odp_put_tnl_push_action(ctx->xout->odp_actions, &tnl_push_data);
+ return 0;
+}
+
static void
compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
bool check_stp)
const struct xport *xport = get_ofp_port(ctx->xbridge, ofp_port);
struct flow_wildcards *wc = &ctx->xout->wc;
struct flow *flow = &ctx->xin->flow;
+ struct flow_tnl flow_tnl;
ovs_be16 flow_vlan_tci;
uint32_t flow_pkt_mark;
uint8_t flow_nw_tos;
odp_port_t out_port, odp_port;
+    bool tnl_push_pop_send = false;  /* Use native tunnel push/pop? */
uint8_t dscp;
/* If 'struct flow' gets additional metadata, we'll need to zero it out
* before traversing a patch port. */
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 27);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 30);
+ memset(&flow_tnl, 0, sizeof flow_tnl);
if (!xport) {
xlate_report(ctx, "Nonexistent output port");
const struct xport *peer = xport->peer;
struct flow old_flow = ctx->xin->flow;
enum slow_path_reason special;
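+        /* Translation on the peer bridge must start from that bridge's own
+         * initial table, which is not necessarily table 0. */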
+        uint8_t table_id
+            = rule_dpif_lookup_get_init_table_id(&ctx->xin->flow);
ctx->xbridge = peer->xbridge;
flow->in_port.ofp_port = peer->ofp_port;
flow->metadata = htonll(0);
memset(&flow->tunnel, 0, sizeof flow->tunnel);
memset(flow->regs, 0, sizeof flow->regs);
+ flow->actset_output = OFPP_UNSET;
special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
- xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
+ true, true);
} else {
/* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
* the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ofpbuf_size(ctx->xout->odp_actions);
mirror_mask_t old_mirrors = ctx->xout->mirrors;
- xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
+ xlate_table_action(ctx, flow->in_port.ofp_port, table_id,
+ true, true);
ctx->xout->mirrors = old_mirrors;
ctx->base_flow = old_base_flow;
ofpbuf_set_size(ctx->xout->odp_actions, old_size);
* the Logical (tunnel) Port are not visible for any further
* matches, while explicit set actions on tunnel metadata are.
*/
- struct flow_tnl flow_tnl = flow->tunnel;
+ flow_tnl = flow->tunnel;
odp_port = tnl_port_send(xport->ofport, flow, &ctx->xout->wc);
if (odp_port == ODPP_NONE) {
xlate_report(ctx, "Tunneling decided against output");
entry->u.dev.tx = netdev_ref(xport->netdev);
}
out_port = odp_port;
- commit_odp_tunnel_action(flow, &ctx->base_flow,
- ctx->xout->odp_actions);
- flow->tunnel = flow_tnl; /* Restore tunnel metadata */
+ if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
+ tnl_push_pop_send = true;
+ } else {
+ commit_odp_tunnel_action(flow, &ctx->base_flow,
+ ctx->xout->odp_actions);
+ flow->tunnel = flow_tnl; /* Restore tunnel metadata */
+ }
} else {
odp_port = xport->odp_port;
out_port = odp_port;
if (out_port != ODPP_NONE) {
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc,
+ wc,
ctx->xbridge->masked_set_action);
if (ctx->use_recirc) {
nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
xr->recirc_id);
} else {
- add_ipfix_output_action(ctx, out_port);
- nl_msg_put_odp_port(ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
- out_port);
+
+ if (tnl_push_pop_send) {
+ build_tunnel_send(ctx, xport, flow, odp_port);
+ flow->tunnel = flow_tnl; /* Restore tunnel metadata */
+ } else {
+ odp_port_t odp_tnl_port = ODPP_NONE;
+
+                /* XXX: Write a better filter for tunnel ports.  We can use
+                 * in_port in the tunnel-port flow to avoid these checks
+                 * completely. */
+ if (ofp_port == OFPP_LOCAL &&
+ ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
+
+ odp_tnl_port = tnl_port_map_lookup(flow, wc);
+ }
+
+ if (odp_tnl_port != ODPP_NONE) {
+ nl_msg_put_odp_port(ctx->xout->odp_actions,
+ OVS_ACTION_ATTR_TUNNEL_POP,
+ odp_tnl_port);
+ } else {
+ /* Tunnel push-pop action is not compatible with
+ * IPFIX action. */
+ add_ipfix_output_action(ctx, out_port);
+ nl_msg_put_odp_port(ctx->xout->odp_actions,
+ OVS_ACTION_ATTR_OUTPUT,
+ out_port);
+ }
+ }
}
ctx->sflow_odp_port = odp_port;
bool may_packet_in, bool honor_table_miss)
{
if (xlate_resubmit_resource_check(ctx)) {
- ofp_port_t old_in_port = ctx->xin->flow.in_port.ofp_port;
- bool skip_wildcards = ctx->xin->skip_wildcards;
+ struct flow_wildcards *wc;
uint8_t old_table_id = ctx->table_id;
struct rule_dpif *rule;
- enum rule_dpif_lookup_verdict verdict;
- enum ofputil_port_config config = 0;
ctx->table_id = table_id;
+ wc = (ctx->xin->skip_wildcards) ? NULL : &ctx->xout->wc;
- /* Look up a flow with 'in_port' as the input port. Then restore the
- * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
- * have surprising behavior). */
- ctx->xin->flow.in_port.ofp_port = in_port;
- verdict = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
- &ctx->xin->flow,
- !skip_wildcards
- ? &ctx->xout->wc : NULL,
- honor_table_miss,
- &ctx->table_id, &rule,
- ctx->xin->xcache != NULL,
- ctx->xin->resubmit_stats);
- ctx->xin->flow.in_port.ofp_port = old_in_port;
-
- if (ctx->xin->resubmit_hook) {
- ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse);
- }
-
- switch (verdict) {
- case RULE_DPIF_LOOKUP_VERDICT_MATCH:
- goto match;
- case RULE_DPIF_LOOKUP_VERDICT_CONTROLLER:
- if (may_packet_in) {
- struct xport *xport;
+ rule = rule_dpif_lookup_from_table(ctx->xbridge->ofproto,
+ &ctx->xin->flow, wc,
+ ctx->xin->xcache != NULL,
+ ctx->xin->resubmit_stats,
+ &ctx->table_id, in_port,
+ may_packet_in, honor_table_miss);
- xport = get_ofp_port(ctx->xbridge,
- ctx->xin->flow.in_port.ofp_port);
- config = xport ? xport->config : 0;
- break;
- }
- /* Fall through to drop */
- case RULE_DPIF_LOOKUP_VERDICT_DROP:
- config = OFPUTIL_PC_NO_PACKET_IN;
- break;
- case RULE_DPIF_LOOKUP_VERDICT_DEFAULT:
- if (!ofproto_dpif_wants_packet_in_on_miss(ctx->xbridge->ofproto)) {
- config = OFPUTIL_PC_NO_PACKET_IN;
- }
- break;
- default:
- OVS_NOT_REACHED();
+ if (OVS_UNLIKELY(ctx->xin->resubmit_hook)) {
+ ctx->xin->resubmit_hook(ctx->xin, rule, ctx->recurse + 1);
}
- choose_miss_rule(config, ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule, &rule,
- ctx->xin->xcache != NULL);
-
-match:
if (rule) {
/* Fill in the cache entry here instead of xlate_recursively
* to make the reference counting more explicit. We take a
xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
{
struct ofputil_bucket *bucket;
- const struct list *buckets;
+ const struct ovs_list *buckets;
struct flow old_flow = ctx->xin->flow;
group_dpif_get_buckets(group, &buckets);
}
ctx->exit = true;
return;
- } else if (n >= ctx->xbridge->max_mpls_depth) {
- COVERAGE_INC(xlate_actions_mpls_overflow);
- ctx->xout->slow |= SLOW_ACTION;
}
flow_push_mpls(flow, n, mpls->ethertype, wc);
int n = flow_count_mpls_labels(flow, wc);
if (flow_pop_mpls(flow, n, eth_type, wc)) {
- if (ctx->xbridge->enable_recirc && !eth_type_mpls(eth_type)) {
+ if (ctx->xbridge->enable_recirc) {
ctx->was_mpls = true;
}
} else if (n >= FLOW_MAX_MPLS_LABELS) {
flood_packets(ctx, true);
break;
case OFPP_CONTROLLER:
- execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
+ execute_controller_action(ctx, max_len,
+ (ctx->in_group ? OFPR_GROUP
+ : ctx->in_action_set ? OFPR_ACTION_SET
+ : OFPR_ACTION),
+ 0);
break;
case OFPP_NONE:
break;
static void
xlate_write_actions(struct xlate_ctx *ctx, const struct ofpact *a)
{
- struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
- ofpbuf_put(&ctx->action_set, on->actions, ofpact_nest_get_action_len(on));
+ const struct ofpact_nest *on = ofpact_get_WRITE_ACTIONS(a);
+ size_t on_len = ofpact_nest_get_action_len(on);
+ const struct ofpact *inner;
+
+ /* Maintain actset_output depending on the contents of the action set:
+ *
+ * - OFPP_UNSET, if there is no "output" action.
+ *
+ * - The output port, if there is an "output" action and no "group"
+ * action.
+ *
+ * - OFPP_UNSET, if there is a "group" action.
+ */
+ if (!ctx->action_set_has_group) {
+ OFPACT_FOR_EACH (inner, on->actions, on_len) {
+ if (inner->type == OFPACT_OUTPUT) {
+ ctx->xin->flow.actset_output = ofpact_get_OUTPUT(inner)->port;
+ } else if (inner->type == OFPACT_GROUP) {
+ ctx->xin->flow.actset_output = OFPP_UNSET;
+ ctx->action_set_has_group = true;
+ }
+ }
+ }
+
+ ofpbuf_put(&ctx->action_set, on->actions, on_len);
ofpact_pad(&ctx->action_set);
}
uint64_t action_list_stub[1024 / 64];
struct ofpbuf action_list;
+ ctx->in_action_set = true;
ofpbuf_use_stub(&action_list, action_list_stub, sizeof action_list_stub);
ofpacts_execute_action_set(&action_list, &ctx->action_set);
do_xlate_actions(ofpbuf_data(&action_list), ofpbuf_size(&action_list), ctx);
+ ctx->in_action_set = false;
ofpbuf_uninit(&action_list);
}
static bool
-ofpact_needs_recirculation_after_mpls(const struct xlate_ctx *ctx,
- const struct ofpact *a)
+ofpact_needs_recirculation_after_mpls(const struct ofpact *a,
+                                      struct xlate_ctx *ctx)
{
struct flow_wildcards *wc = &ctx->xout->wc;
struct flow *flow = &ctx->xin->flow;
+ if (!ctx->was_mpls) {
+ return false;
+ }
+
switch (a->type) {
case OFPACT_OUTPUT:
case OFPACT_GROUP:
case OFPACT_SET_TUNNEL:
case OFPACT_SET_QUEUE:
case OFPACT_POP_QUEUE:
- case OFPACT_POP_MPLS:
- case OFPACT_DEC_MPLS_TTL:
- case OFPACT_SET_MPLS_TTL:
- case OFPACT_SET_MPLS_TC:
- case OFPACT_SET_MPLS_LABEL:
+ case OFPACT_CONJUNCTION:
case OFPACT_NOTE:
case OFPACT_OUTPUT_REG:
case OFPACT_EXIT:
case OFPACT_SAMPLE:
return false;
+ case OFPACT_POP_MPLS:
+ case OFPACT_DEC_MPLS_TTL:
+ case OFPACT_SET_MPLS_TTL:
+ case OFPACT_SET_MPLS_TC:
+ case OFPACT_SET_MPLS_LABEL:
case OFPACT_SET_IPV4_SRC:
case OFPACT_SET_IPV4_DST:
case OFPACT_SET_IP_DSCP:
struct flow *flow = &ctx->xin->flow;
const struct ofpact *a;
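+    /* With native tunneling enabled, snoop ARP packets here to keep the
+     * tunnel neighbor cache up to date. */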
+ if (ovs_native_tunneling_is_on(ctx->xbridge->ofproto)) {
+ tnl_arp_snoop(flow, wc, ctx->xbridge->name);
+ }
/* dl_type already in the mask, not set below. */
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
break;
}
- if (ctx->was_mpls && ofpact_needs_recirculation_after_mpls(ctx, a)) {
+ if (ofpact_needs_recirculation_after_mpls(a, ctx)) {
compose_recirculate_action(ctx, ofpacts, a, ofpacts_len);
return;
}
break;
case OFPACT_SET_L4_SRC_PORT:
- if (is_ip_any(flow)) {
+ if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
flow->tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
break;
case OFPACT_SET_L4_DST_PORT:
- if (is_ip_any(flow)) {
+ if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
flow->tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
&& !eth_type_mpls(flow->dl_type)) {
break;
}
-
+            /* A flow may wildcard nw_frag.  Do nothing when setting a
+             * transport header field on a packet that does not have one. */
mf_mask_field_and_prereqs(mf, &wc->masks);
- mf_set_flow_value_masked(mf, &set_field->value, &set_field->mask,
- flow);
+ if (mf_are_prereqs_ok(mf, flow)) {
+ mf_set_flow_value_masked(mf, &set_field->value,
+ &set_field->mask, flow);
+ }
break;
case OFPACT_STACK_PUSH:
xlate_learn_action(ctx, ofpact_get_LEARN(a));
break;
+ case OFPACT_CONJUNCTION: {
+ /* A flow with a "conjunction" action represents part of a special
+ * kind of "set membership match". Such a flow should not actually
+ * get executed, but it could via, say, a "packet-out", even though
+ * that wouldn't be useful. Log it to help debugging. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
+ VLOG_INFO_RL(&rl, "executing no-op conjunction action");
+ break;
+ }
+
case OFPACT_EXIT:
ctx->exit = true;
break;
case OFPACT_CLEAR_ACTIONS:
ofpbuf_clear(&ctx->action_set);
+ ctx->xin->flow.actset_output = OFPP_UNSET;
+ ctx->action_set_has_group = false;
break;
case OFPACT_WRITE_ACTIONS:
xin->ofproto = ofproto;
xin->flow = *flow;
xin->flow.in_port.ofp_port = in_port;
+ xin->flow.actset_output = OFPP_UNSET;
xin->packet = packet;
xin->may_learn = packet != NULL;
xin->rule = rule;
xlate_out_uninit(&xout);
}
-static void
-xlate_report(struct xlate_ctx *ctx, const char *s)
-{
- if (ctx->xin->report_hook) {
- ctx->xin->report_hook(ctx->xin, s, ctx->recurse);
- }
-}
-
void
xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src)
{
struct flow *flow = &xin->flow;
struct rule_dpif *rule = NULL;
- const struct rule_actions *actions = NULL;
enum slow_path_reason special;
const struct ofpact *ofpacts;
struct xport *in_port;
ctx.recurse = 0;
ctx.resubmits = 0;
ctx.in_group = false;
+ ctx.in_action_set = false;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
ctx.was_mpls = false;
if (!xin->ofpacts && !ctx.rule) {
- ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
- !xin->skip_wildcards ? wc : NULL,
- &rule, ctx.xin->xcache != NULL,
- ctx.xin->resubmit_stats);
+ rule = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
+ xin->skip_wildcards ? NULL : wc,
+ ctx.xin->xcache != NULL,
+ ctx.xin->resubmit_stats, &ctx.table_id);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
entry->u.rule = rule;
}
ctx.rule = rule;
+
+ if (OVS_UNLIKELY(ctx.xin->resubmit_hook)) {
+ ctx.xin->resubmit_hook(ctx.xin, rule, 0);
+ }
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
ofpacts = xin->ofpacts;
ofpacts_len = xin->ofpacts_len;
} else if (ctx.rule) {
- actions = rule_dpif_get_actions(ctx.rule);
+ const struct rule_actions *actions = rule_dpif_get_actions(ctx.rule);
+
ofpacts = actions->ofpacts;
ofpacts_len = actions->ofpacts_len;
} else {
}
ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+
+ ctx.action_set_has_group = false;
ofpbuf_use_stub(&ctx.action_set,
ctx.action_set_stub, sizeof ctx.action_set_stub);
orig_flow = *flow;
}
- if (flow->nw_frag & FLOW_NW_FRAG_ANY) {
- switch (ctx.xbridge->frag) {
- case OFPC_FRAG_NORMAL:
- /* We must pretend that transport ports are unavailable. */
- flow->tp_src = ctx.base_flow.tp_src = htons(0);
- flow->tp_dst = ctx.base_flow.tp_dst = htons(0);
- break;
-
- case OFPC_FRAG_DROP:
- return;
-
- case OFPC_FRAG_REASM:
- OVS_NOT_REACHED();
-
- case OFPC_FRAG_NX_MATCH:
- /* Nothing to do. */
- break;
-
- case OFPC_INVALID_TTL_TO_CONTROLLER:
- OVS_NOT_REACHED();
- }
- }
-
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
if (in_port && in_port->is_tunnel) {
if (ctx.xin->resubmit_stats) {
{
struct xc_entry *entry;
struct ofpbuf entries = xcache->entries;
+ uint8_t dmac[ETH_ADDR_LEN];
if (!stats->n_packets) {
return;
group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
stats);
break;
+        case XC_TNL_ARP:
+            /* Refresh the ARP cache entry so that it does not time out. */
+            tnl_arp_lookup(entry->u.tnl_arp_cache.br_name,
+                           entry->u.tnl_arp_cache.d_ip, dmac);
+            break;
default:
OVS_NOT_REACHED();
}
case XC_GROUP:
group_dpif_unref(entry->u.group.group);
break;
+ case XC_TNL_ARP:
+ break;
default:
OVS_NOT_REACHED();
}