#include <netinet/icmp6.h>
#include <stdlib.h>
#include <string.h>
+
#include "byte-order.h"
#include "coverage.h"
#include "dpif.h"
#include "packets.h"
#include "simap.h"
#include "timeval.h"
+#include "unaligned.h"
#include "util.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(odp_util);
switch ((enum ovs_action_attr) type) {
case OVS_ACTION_ATTR_OUTPUT: return sizeof(uint32_t);
+ case OVS_ACTION_ATTR_TUNNEL_PUSH: return -2;
+ case OVS_ACTION_ATTR_TUNNEL_POP: return sizeof(uint32_t);
case OVS_ACTION_ATTR_USERSPACE: return -2;
case OVS_ACTION_ATTR_PUSH_VLAN: return sizeof(struct ovs_action_push_vlan);
case OVS_ACTION_ATTR_POP_VLAN: return 0;
ds_put_format(ds, ")");
}
+static void
+format_odp_tnl_push_header(struct ds *ds, struct ovs_action_push_tnl *data)
+{
+ const struct eth_header *eth;
+ const struct ip_header *ip;
+ const void *l3;
+
+ eth = (const struct eth_header *)data->header;
+
+ l3 = eth + 1;
+ ip = (const struct ip_header *)l3;
+
+ /* Ethernet */
+    ds_put_format(ds, "header(size=%"PRIu32",type=%"PRIu32",eth(dst=",
+                  data->header_len, data->tnl_type);
+ ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_dst));
+ ds_put_format(ds, ",src=");
+ ds_put_format(ds, ETH_ADDR_FMT, ETH_ADDR_ARGS(eth->eth_src));
+ ds_put_format(ds, ",dl_type=0x%04"PRIx16"),", ntohs(eth->eth_type));
+
+ /* IPv4 */
+ ds_put_format(ds, "ipv4(src="IP_FMT",dst="IP_FMT",proto=%"PRIu8
+ ",tos=%#"PRIx8",ttl=%"PRIu8",frag=0x%"PRIx16"),",
+ IP_ARGS(get_16aligned_be32(&ip->ip_src)),
+ IP_ARGS(get_16aligned_be32(&ip->ip_dst)),
+ ip->ip_proto, ip->ip_tos,
+ ip->ip_ttl,
+ ip->ip_frag_off);
+
+ if (data->tnl_type == OVS_VPORT_TYPE_VXLAN) {
+ const struct vxlanhdr *vxh;
+ const struct udp_header *udp;
+
+ /* UDP */
+ udp = (const struct udp_header *) (ip + 1);
+ ds_put_format(ds, "udp(src=%"PRIu16",dst=%"PRIu16"),",
+ ntohs(udp->udp_src), ntohs(udp->udp_dst));
+
+ /* VxLan */
+ vxh = (const struct vxlanhdr *) (udp + 1);
+ ds_put_format(ds, "vxlan(flags=0x%"PRIx32",vni=0x%"PRIx32")",
+ ntohl(get_16aligned_be32(&vxh->vx_flags)),
+ ntohl(get_16aligned_be32(&vxh->vx_vni)));
+ } else if (data->tnl_type == OVS_VPORT_TYPE_GRE) {
+ const struct gre_base_hdr *greh;
+ ovs_16aligned_be32 *options;
+ void *l4;
+
+ l4 = ((uint8_t *)l3 + sizeof(struct ip_header));
+ greh = (const struct gre_base_hdr *) l4;
+
+ ds_put_format(ds, "gre((flags=0x%"PRIx16",proto=0x%"PRIx16")",
+ greh->flags, ntohs(greh->protocol));
+ options = (ovs_16aligned_be32 *)(greh + 1);
+ if (greh->flags & htons(GRE_CSUM)) {
+ ds_put_format(ds, ",csum=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
+ options++;
+ }
+ if (greh->flags & htons(GRE_KEY)) {
+ ds_put_format(ds, ",key=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
+ options++;
+ }
+ if (greh->flags & htons(GRE_SEQ)) {
+ ds_put_format(ds, ",seq=0x%"PRIx32, ntohl(get_16aligned_be32(options)));
+ options++;
+ }
+ ds_put_format(ds, ")");
+ }
+ ds_put_format(ds, ")");
+}
+
+static void
+format_odp_tnl_push_action(struct ds *ds, const struct nlattr *attr)
+{
+ struct ovs_action_push_tnl *data;
+
+ data = (struct ovs_action_push_tnl *) nl_attr_get(attr);
+
+ ds_put_format(ds, "tnl_push(tnl_port(%"PRIu32"),", data->tnl_port);
+ format_odp_tnl_push_header(ds, data);
+ ds_put_format(ds, ",out_port(%"PRIu32"))", data->out_port);
+}
+
static void
format_odp_action(struct ds *ds, const struct nlattr *a)
{
case OVS_ACTION_ATTR_OUTPUT:
ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
break;
+ case OVS_ACTION_ATTR_TUNNEL_POP:
+ ds_put_format(ds, "tnl_pop(%"PRIu32")", nl_attr_get_u32(a));
+ break;
+ case OVS_ACTION_ATTR_TUNNEL_PUSH:
+ format_odp_tnl_push_action(ds, a);
+ break;
case OVS_ACTION_ATTR_USERSPACE:
format_odp_userspace_action(ds, a);
break;
if (end[0] != ')') {
return -EINVAL;
}
- user_data = ofpbuf_data(&buf);
- user_data_size = ofpbuf_size(&buf);
+ user_data = buf.data;
+ user_data_size = buf.size;
n = (end + 1) - s;
}
}
return -EINVAL;
}
+static int
+ovs_parse_tnl_push(const char *s, struct ovs_action_push_tnl *data)
+{
+ struct eth_header *eth;
+ struct ip_header *ip;
+ struct udp_header *udp;
+ struct gre_base_hdr *greh;
+ uint16_t gre_proto, dl_type, udp_src, udp_dst;
+ ovs_be32 sip, dip;
+ uint32_t tnl_type = 0, header_len = 0;
+ void *l3, *l4;
+ int n = 0;
+
+ if (!ovs_scan_len(s, &n, "tnl_push(tnl_port(%"SCNi32"),", &data->tnl_port)) {
+ return -EINVAL;
+ }
+ eth = (struct eth_header *) data->header;
+ l3 = (data->header + sizeof *eth);
+ l4 = ((uint8_t *) l3 + sizeof (struct ip_header));
+ ip = (struct ip_header *) l3;
+ if (!ovs_scan_len(s, &n, "header(size=%"SCNi32",type=%"SCNi32","
+ "eth(dst="ETH_ADDR_SCAN_FMT",",
+ &data->header_len,
+ &data->tnl_type,
+ ETH_ADDR_SCAN_ARGS(eth->eth_dst))) {
+ return -EINVAL;
+ }
+
+ if (!ovs_scan_len(s, &n, "src="ETH_ADDR_SCAN_FMT",",
+ ETH_ADDR_SCAN_ARGS(eth->eth_src))) {
+ return -EINVAL;
+ }
+ if (!ovs_scan_len(s, &n, "dl_type=0x%"SCNx16"),", &dl_type)) {
+ return -EINVAL;
+ }
+ eth->eth_type = htons(dl_type);
+
+ /* IPv4 */
+ if (!ovs_scan_len(s, &n, "ipv4(src="IP_SCAN_FMT",dst="IP_SCAN_FMT",proto=%"SCNi8
+ ",tos=%"SCNi8",ttl=%"SCNi8",frag=0x%"SCNx16"),",
+ IP_SCAN_ARGS(&sip),
+ IP_SCAN_ARGS(&dip),
+ &ip->ip_proto, &ip->ip_tos,
+ &ip->ip_ttl, &ip->ip_frag_off)) {
+ return -EINVAL;
+ }
+ put_16aligned_be32(&ip->ip_src, sip);
+ put_16aligned_be32(&ip->ip_dst, dip);
+
+ /* Tunnel header */
+ udp = (struct udp_header *) l4;
+ greh = (struct gre_base_hdr *) l4;
+    if (ovs_scan_len(s, &n, "udp(src=%"SCNu16",dst=%"SCNu16"),",
+                     &udp_src, &udp_dst)) {
+ struct vxlanhdr *vxh;
+ uint32_t vx_flags, vx_vni;
+
+ udp->udp_src = htons(udp_src);
+ udp->udp_dst = htons(udp_dst);
+ udp->udp_len = 0;
+ udp->udp_csum = 0;
+
+ vxh = (struct vxlanhdr *) (udp + 1);
+ if (!ovs_scan_len(s, &n, "vxlan(flags=0x%"SCNx32",vni=0x%"SCNx32"))",
+ &vx_flags, &vx_vni)) {
+ return -EINVAL;
+ }
+ put_16aligned_be32(&vxh->vx_flags, htonl(vx_flags));
+ put_16aligned_be32(&vxh->vx_vni, htonl(vx_vni));
+ tnl_type = OVS_VPORT_TYPE_VXLAN;
+ header_len = sizeof *eth + sizeof *ip +
+ sizeof *udp + sizeof *vxh;
+ } else if (ovs_scan_len(s, &n, "gre((flags=0x%"SCNx16",proto=0x%"SCNx16")",
+ &greh->flags, &gre_proto)){
+
+ tnl_type = OVS_VPORT_TYPE_GRE;
+ greh->protocol = htons(gre_proto);
+ ovs_16aligned_be32 *options = (ovs_16aligned_be32 *) (greh + 1);
+
+ if (greh->flags & htons(GRE_CSUM)) {
+ uint32_t csum;
+
+ if (!ovs_scan_len(s, &n, ",csum=0x%"SCNx32, &csum)) {
+ return -EINVAL;
+ }
+ put_16aligned_be32(options, htonl(csum));
+ options++;
+ }
+ if (greh->flags & htons(GRE_KEY)) {
+ uint32_t key;
+
+ if (!ovs_scan_len(s, &n, ",key=0x%"SCNx32, &key)) {
+ return -EINVAL;
+ }
+
+ put_16aligned_be32(options, htonl(key));
+ options++;
+ }
+ if (greh->flags & htons(GRE_SEQ)) {
+ uint32_t seq;
+
+ if (!ovs_scan_len(s, &n, ",seq=0x%"SCNx32, &seq)) {
+ return -EINVAL;
+ }
+ put_16aligned_be32(options, htonl(seq));
+ options++;
+ }
+
+ if (!ovs_scan_len(s, &n, "))")) {
+ return -EINVAL;
+ }
+
+ header_len = sizeof *eth + sizeof *ip +
+ ((uint8_t *) options - (uint8_t *) greh);
+ } else {
+ return -EINVAL;
+ }
+
+ /* check tunnel meta data. */
+ if (data->tnl_type != tnl_type) {
+ return -EINVAL;
+ }
+ if (data->header_len != header_len) {
+ return -EINVAL;
+ }
+
+ /* Out port */
+ if (!ovs_scan_len(s, &n, ",out_port(%"SCNi32"))", &data->out_port)) {
+ return -EINVAL;
+ }
+
+ return n;
+}
+
static int
parse_odp_action(const char *s, const struct simap *port_names,
struct ofpbuf *actions)
}
}
+ {
+ uint32_t recirc_id;
+ int n = -1;
+
+ if (ovs_scan(s, "recirc(%"PRIu32")%n", &recirc_id, &n)) {
+ nl_msg_put_u32(actions, OVS_ACTION_ATTR_RECIRC, recirc_id);
+ return n;
+ }
+ }
+
if (!strncmp(s, "userspace(", 10)) {
return parse_odp_userspace_action(s, actions);
}
}
}
+ {
+ uint32_t port;
+ int n;
+
+ if (ovs_scan(s, "tnl_pop(%"SCNi32")%n", &port, &n)) {
+ nl_msg_put_u32(actions, OVS_ACTION_ATTR_TUNNEL_POP, port);
+ return n;
+ }
+ }
+
+ {
+ struct ovs_action_push_tnl data;
+ int n;
+
+ n = ovs_parse_tnl_push(s, &data);
+ if (n > 0) {
+ odp_put_tnl_push_action(actions, &data);
+ return n;
+ } else if (n < 0) {
+ return n;
+ }
+ }
return -EINVAL;
}
return 0;
}
- old_size = ofpbuf_size(actions);
+ old_size = actions->size;
for (;;) {
int retval;
retval = parse_odp_action(s, port_names, actions);
if (retval < 0 || !strchr(delimiters, s[retval])) {
- ofpbuf_set_size(actions, old_size);
+ actions->size = old_size;
return -retval;
}
s += retval;
case OVS_TUNNEL_KEY_ATTR_TP_DST: return 2;
case OVS_TUNNEL_KEY_ATTR_OAM: return 0;
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: return -2;
+ case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: return -2;
case __OVS_TUNNEL_KEY_ATTR_MAX:
return -1;
}
case OVS_TUNNEL_KEY_ATTR_OAM:
tun->flags |= FLOW_TNL_F_OAM;
break;
+ case OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS: {
+ static const struct nl_policy vxlan_opts_policy[] = {
+ [OVS_VXLAN_EXT_GBP] = { .type = NL_A_U32 },
+ };
+ struct nlattr *ext[ARRAY_SIZE(vxlan_opts_policy)];
+
+ if (!nl_parse_nested(a, vxlan_opts_policy, ext, ARRAY_SIZE(ext))) {
+ return ODP_FIT_ERROR;
+ }
+
+ if (ext[OVS_VXLAN_EXT_GBP]) {
+ uint32_t gbp = nl_attr_get_u32(ext[OVS_VXLAN_EXT_GBP]);
+
+ tun->gbp_id = htons(gbp & 0xFFFF);
+ tun->gbp_flags = (gbp >> 16) & 0xFF;
+ }
+
+ break;
+ }
case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: {
if (parse_geneve_opts(a)) {
return ODP_FIT_ERROR;
if (tun_key->flags & FLOW_TNL_F_OAM) {
nl_msg_put_flag(a, OVS_TUNNEL_KEY_ATTR_OAM);
}
+ if (tun_key->gbp_flags || tun_key->gbp_id) {
+ size_t vxlan_opts_ofs;
+
+ vxlan_opts_ofs = nl_msg_start_nested(a, OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS);
+ nl_msg_put_u32(a, OVS_VXLAN_EXT_GBP,
+ (tun_key->gbp_flags << 16) | ntohs(tun_key->gbp_id));
+ nl_msg_end_nested(a, vxlan_opts_ofs);
+ }
nl_msg_end_nested(a, tun_key_ofs);
}
}
static bool
-odp_mask_attr_is_exact(const struct nlattr *ma)
+odp_mask_is_exact(enum ovs_key_attr attr, const void *mask, size_t size)
{
- bool is_exact;
- enum ovs_key_attr attr = nl_attr_type(ma);
-
if (attr == OVS_KEY_ATTR_TCP_FLAGS) {
- is_exact = TCP_FLAGS(nl_attr_get_be16(ma)) == TCP_FLAGS(OVS_BE16_MAX);
- } else if (attr == OVS_KEY_ATTR_IPV6) {
- const struct ovs_key_ipv6 *mask = nl_attr_get(ma);
+ return TCP_FLAGS(*(ovs_be16 *)mask) == TCP_FLAGS(OVS_BE16_MAX);
+ }
+ if (attr == OVS_KEY_ATTR_IPV6) {
+ const struct ovs_key_ipv6 *ipv6_mask = mask;
- is_exact =
- ((mask->ipv6_label & htonl(IPV6_LABEL_MASK))
+ return
+ ((ipv6_mask->ipv6_label & htonl(IPV6_LABEL_MASK))
== htonl(IPV6_LABEL_MASK))
- && mask->ipv6_proto == UINT8_MAX
- && mask->ipv6_tclass == UINT8_MAX
- && mask->ipv6_hlimit == UINT8_MAX
- && mask->ipv6_frag == UINT8_MAX
- && ipv6_mask_is_exact((const struct in6_addr *)mask->ipv6_src)
- && ipv6_mask_is_exact((const struct in6_addr *)mask->ipv6_dst);
- } else if (attr == OVS_KEY_ATTR_TUNNEL) {
- struct flow_tnl tun_mask;
+ && ipv6_mask->ipv6_proto == UINT8_MAX
+ && ipv6_mask->ipv6_tclass == UINT8_MAX
+ && ipv6_mask->ipv6_hlimit == UINT8_MAX
+ && ipv6_mask->ipv6_frag == UINT8_MAX
+ && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_src)
+ && ipv6_mask_is_exact((const struct in6_addr *)ipv6_mask->ipv6_dst);
+ }
+ if (attr == OVS_KEY_ATTR_TUNNEL) {
+ const struct flow_tnl *tun_mask = mask;
+
+ return tun_mask->flags == FLOW_TNL_F_MASK
+ && tun_mask->tun_id == OVS_BE64_MAX
+ && tun_mask->ip_src == OVS_BE32_MAX
+ && tun_mask->ip_dst == OVS_BE32_MAX
+ && tun_mask->ip_tos == UINT8_MAX
+ && tun_mask->ip_ttl == UINT8_MAX
+ && tun_mask->tp_src == OVS_BE16_MAX
+ && tun_mask->tp_dst == OVS_BE16_MAX
+ && tun_mask->gbp_id == OVS_BE16_MAX
+ && tun_mask->gbp_flags == UINT8_MAX;
+ }
+
+ if (attr == OVS_KEY_ATTR_ARP) {
+ /* ARP key has padding, ignore it. */
+ BUILD_ASSERT_DECL(sizeof(struct ovs_key_arp) == 24);
+ BUILD_ASSERT_DECL(offsetof(struct ovs_key_arp, arp_tha) == 10 + 6);
+ size = offsetof(struct ovs_key_arp, arp_tha) + ETH_ADDR_LEN;
+ ovs_assert(((uint16_t *)mask)[size/2] == 0);
+ }
+
+ return is_all_ones(mask, size);
+}
+
+static bool
+odp_mask_attr_is_exact(const struct nlattr *ma)
+{
+ struct flow_tnl tun_mask;
+ enum ovs_key_attr attr = nl_attr_type(ma);
+ const void *mask;
+ size_t size;
+ if (attr == OVS_KEY_ATTR_TUNNEL) {
memset(&tun_mask, 0, sizeof tun_mask);
odp_tun_key_from_attr(ma, &tun_mask);
- is_exact = tun_mask.flags == FLOW_TNL_F_MASK
- && tun_mask.tun_id == OVS_BE64_MAX
- && tun_mask.ip_src == OVS_BE32_MAX
- && tun_mask.ip_dst == OVS_BE32_MAX
- && tun_mask.ip_tos == UINT8_MAX
- && tun_mask.ip_ttl == UINT8_MAX
- && tun_mask.tp_src == OVS_BE16_MAX
- && tun_mask.tp_dst == OVS_BE16_MAX;
+ mask = &tun_mask;
+ size = sizeof tun_mask;
} else {
- is_exact = is_all_ones(nl_attr_get(ma), nl_attr_get_size(ma));
+ mask = nl_attr_get(ma);
+ size = nl_attr_get_size(ma);
}
- return is_exact;
+ return odp_mask_is_exact(attr, mask, size);
}
void
format_u8u(ds, "ttl", key.ip_ttl, MASK(mask, ip_ttl), verbose);
format_be16(ds, "tp_src", key.tp_src, MASK(mask, tp_src), verbose);
format_be16(ds, "tp_dst", key.tp_dst, MASK(mask, tp_dst), verbose);
+ format_be16(ds, "gbp_id", key.gbp_id, MASK(mask, gbp_id), verbose);
+ format_u8x(ds, "gbp_flags", key.gbp_flags, MASK(mask, gbp_flags), verbose);
format_tun_flags(ds, "flags", key.flags, MASK(mask, flags), verbose);
ds_chomp(ds, ',');
break;
nl_msg_end_nested(ofp, nested_mask);
}
- return ofpbuf_base(ofp);
+ return ofp->base;
+}
+
+int
+odp_ufid_from_string(const char *s_, ovs_u128 *ufid)
+{
+ const char *s = s_;
+
+ if (ovs_scan(s, "ufid:")) {
+ size_t n;
+
+ s += 5;
+ if (ovs_scan(s, "0x")) {
+ s += 2;
+ }
+
+ n = strspn(s, "0123456789abcdefABCDEF");
+ if (n != 32) {
+ return -EINVAL;
+ }
+
+ if (!ovs_scan(s, "%16"SCNx64"%16"SCNx64, &ufid->u64.hi,
+ &ufid->u64.lo)) {
+ return -EINVAL;
+ }
+ s += n;
+ s += strspn(s, delimiters);
+
+ return s - s_;
+ }
+
+ return 0;
+}
+
+void
+odp_format_ufid(const ovs_u128 *ufid, struct ds *ds)
+{
+ ds_put_format(ds, "ufid:%016"PRIx64"%016"PRIx64, ufid->u64.hi,
+ ufid->u64.lo);
}
/* Appends to 'ds' a string representation of the 'key_len' bytes of
do { \
len = 0;
+/* Init as fully-masked as mask will not be scanned. */
+#define SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) \
+ SCAN_IF(NAME); \
+ TYPE skey, smask; \
+ memset(&skey, 0, sizeof skey); \
+ memset(&smask, 0xff, sizeof smask); \
+ do { \
+ len = 0;
+
/* VLAN needs special initialization. */
#define SCAN_BEGIN_INIT(NAME, TYPE, KEY_INIT, MASK_INIT) \
SCAN_IF(NAME); \
SCAN_TYPE(SCAN_AS, &skey, &smask); \
} SCAN_END_SINGLE(ATTR)
-#define SCAN_SINGLE_NO_MASK(NAME, TYPE, SCAN_AS, ATTR) \
- SCAN_BEGIN(NAME, TYPE) { \
- SCAN_TYPE(SCAN_AS, &skey, NULL); \
+#define SCAN_SINGLE_FULLY_MASKED(NAME, TYPE, SCAN_AS, ATTR) \
+ SCAN_BEGIN_FULLY_MASKED(NAME, TYPE) { \
+ SCAN_TYPE(SCAN_AS, &skey, NULL); \
} SCAN_END_SINGLE(ATTR)
/* scan_port needs one extra argument. */
{
SCAN_SINGLE("skb_priority(", uint32_t, u32, OVS_KEY_ATTR_PRIORITY);
SCAN_SINGLE("skb_mark(", uint32_t, u32, OVS_KEY_ATTR_SKB_MARK);
- SCAN_SINGLE_NO_MASK("recirc_id(", uint32_t, u32, OVS_KEY_ATTR_RECIRC_ID);
+ SCAN_SINGLE_FULLY_MASKED("recirc_id(", uint32_t, u32,
+ OVS_KEY_ATTR_RECIRC_ID);
SCAN_SINGLE("dp_hash(", uint32_t, u32, OVS_KEY_ATTR_DP_HASH);
SCAN_BEGIN("tunnel(", struct flow_tnl) {
SCAN_FIELD("ttl=", u8, ip_ttl);
SCAN_FIELD("tp_src=", be16, tp_src);
SCAN_FIELD("tp_dst=", be16, tp_dst);
+ SCAN_FIELD("gbp_id=", be16, gbp_id);
+ SCAN_FIELD("gbp_flags=", u8, gbp_flags);
SCAN_FIELD("flags(", tun_flags, flags);
} SCAN_END(OVS_KEY_ATTR_TUNNEL);
for (;;) {
int retval;
- s += strspn(s, ", \t\r\n");
+ s += strspn(s, delimiters);
if (!*s) {
return -EINVAL;
} else if (*s == ')') {
odp_flow_from_string(const char *s, const struct simap *port_names,
struct ofpbuf *key, struct ofpbuf *mask)
{
- const size_t old_size = ofpbuf_size(key);
+ const size_t old_size = key->size;
for (;;) {
int retval;
retval = parse_odp_key_mask_attr(s, port_names, key, mask);
if (retval < 0) {
- ofpbuf_set_size(key, old_size);
+ key->size = old_size;
return -retval;
}
s += retval;
}
static uint8_t
-ovs_to_odp_frag(uint8_t nw_frag)
+ovs_to_odp_frag(uint8_t nw_frag, bool is_mask)
{
- return (nw_frag == 0 ? OVS_FRAG_TYPE_NONE
- : nw_frag == FLOW_NW_FRAG_ANY ? OVS_FRAG_TYPE_FIRST
- : OVS_FRAG_TYPE_LATER);
+ if (is_mask) {
+ /* Netlink interface 'enum ovs_frag_type' is an 8-bit enumeration type,
+ * not a set of flags or bitfields. Hence, if the struct flow nw_frag
+ * mask, which is a set of bits, has the FLOW_NW_FRAG_ANY as zero, we
+ * must use a zero mask for the netlink frag field, and all ones mask
+ * otherwise. */
+ return (nw_frag & FLOW_NW_FRAG_ANY) ? UINT8_MAX : 0;
+ }
+ return !(nw_frag & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_NONE
+ : nw_frag & FLOW_NW_FRAG_LATER ? OVS_FRAG_TYPE_LATER
+ : OVS_FRAG_TYPE_FIRST;
}
-static uint8_t
-ovs_to_odp_frag_mask(uint8_t nw_frag_mask)
-{
- uint8_t frag_mask = ~(OVS_FRAG_TYPE_FIRST | OVS_FRAG_TYPE_LATER);
+static void get_ethernet_key(const struct flow *, struct ovs_key_ethernet *);
+static void put_ethernet_key(const struct ovs_key_ethernet *, struct flow *);
+static void get_ipv4_key(const struct flow *, struct ovs_key_ipv4 *,
+ bool is_mask);
+static void put_ipv4_key(const struct ovs_key_ipv4 *, struct flow *,
+ bool is_mask);
+static void get_ipv6_key(const struct flow *, struct ovs_key_ipv6 *,
+ bool is_mask);
+static void put_ipv6_key(const struct ovs_key_ipv6 *, struct flow *,
+ bool is_mask);
+static void get_arp_key(const struct flow *, struct ovs_key_arp *);
+static void put_arp_key(const struct ovs_key_arp *, struct flow *);
+static void get_nd_key(const struct flow *, struct ovs_key_nd *);
+static void put_nd_key(const struct ovs_key_nd *, struct flow *);
+
+/* These share the same layout. */
+union ovs_key_tp {
+ struct ovs_key_tcp tcp;
+ struct ovs_key_udp udp;
+ struct ovs_key_sctp sctp;
+};
- frag_mask |= (nw_frag_mask & FLOW_NW_FRAG_ANY) ? OVS_FRAG_TYPE_FIRST : 0;
- frag_mask |= (nw_frag_mask & FLOW_NW_FRAG_LATER) ? OVS_FRAG_TYPE_LATER : 0;
-
- return frag_mask;
-}
+static void get_tp_key(const struct flow *, union ovs_key_tp *);
+static void put_tp_key(const union ovs_key_tp *, struct flow *);
static void
odp_flow_key_from_flow__(struct ofpbuf *buf, const struct flow *flow,
eth_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ETHERNET,
sizeof *eth_key);
- memcpy(eth_key->eth_src, data->dl_src, ETH_ADDR_LEN);
- memcpy(eth_key->eth_dst, data->dl_dst, ETH_ADDR_LEN);
+ get_ethernet_key(data, eth_key);
if (flow->vlan_tci != htons(0) || flow->dl_type == htons(ETH_TYPE_VLAN)) {
if (export_mask) {
ipv4_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV4,
sizeof *ipv4_key);
- ipv4_key->ipv4_src = data->nw_src;
- ipv4_key->ipv4_dst = data->nw_dst;
- ipv4_key->ipv4_proto = data->nw_proto;
- ipv4_key->ipv4_tos = data->nw_tos;
- ipv4_key->ipv4_ttl = data->nw_ttl;
- ipv4_key->ipv4_frag = export_mask ? ovs_to_odp_frag_mask(data->nw_frag)
- : ovs_to_odp_frag(data->nw_frag);
+ get_ipv4_key(data, ipv4_key, export_mask);
} else if (flow->dl_type == htons(ETH_TYPE_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
ipv6_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_IPV6,
sizeof *ipv6_key);
- memcpy(ipv6_key->ipv6_src, &data->ipv6_src, sizeof ipv6_key->ipv6_src);
- memcpy(ipv6_key->ipv6_dst, &data->ipv6_dst, sizeof ipv6_key->ipv6_dst);
- ipv6_key->ipv6_label = data->ipv6_label;
- ipv6_key->ipv6_proto = data->nw_proto;
- ipv6_key->ipv6_tclass = data->nw_tos;
- ipv6_key->ipv6_hlimit = data->nw_ttl;
- ipv6_key->ipv6_frag = export_mask ? ovs_to_odp_frag_mask(data->nw_frag)
- : ovs_to_odp_frag(data->nw_frag);
+ get_ipv6_key(data, ipv6_key, export_mask);
} else if (flow->dl_type == htons(ETH_TYPE_ARP) ||
flow->dl_type == htons(ETH_TYPE_RARP)) {
struct ovs_key_arp *arp_key;
- arp_key = nl_msg_put_unspec_zero(buf, OVS_KEY_ATTR_ARP,
- sizeof *arp_key);
- arp_key->arp_sip = data->nw_src;
- arp_key->arp_tip = data->nw_dst;
- arp_key->arp_op = htons(data->nw_proto);
- memcpy(arp_key->arp_sha, data->arp_sha, ETH_ADDR_LEN);
- memcpy(arp_key->arp_tha, data->arp_tha, ETH_ADDR_LEN);
+ arp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_ARP,
+ sizeof *arp_key);
+ get_arp_key(data, arp_key);
} else if (eth_type_mpls(flow->dl_type)) {
struct ovs_key_mpls *mpls_key;
int i, n;
if (is_ip_any(flow) && !(flow->nw_frag & FLOW_NW_FRAG_LATER)) {
if (flow->nw_proto == IPPROTO_TCP) {
- struct ovs_key_tcp *tcp_key;
+ union ovs_key_tp *tcp_key;
tcp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_TCP,
sizeof *tcp_key);
- tcp_key->tcp_src = data->tp_src;
- tcp_key->tcp_dst = data->tp_dst;
-
+ get_tp_key(data, tcp_key);
if (data->tcp_flags) {
nl_msg_put_be16(buf, OVS_KEY_ATTR_TCP_FLAGS, data->tcp_flags);
}
} else if (flow->nw_proto == IPPROTO_UDP) {
- struct ovs_key_udp *udp_key;
+ union ovs_key_tp *udp_key;
udp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_UDP,
sizeof *udp_key);
- udp_key->udp_src = data->tp_src;
- udp_key->udp_dst = data->tp_dst;
+ get_tp_key(data, udp_key);
} else if (flow->nw_proto == IPPROTO_SCTP) {
- struct ovs_key_sctp *sctp_key;
+ union ovs_key_tp *sctp_key;
sctp_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_SCTP,
sizeof *sctp_key);
- sctp_key->sctp_src = data->tp_src;
- sctp_key->sctp_dst = data->tp_dst;
+ get_tp_key(data, sctp_key);
} else if (flow->dl_type == htons(ETH_TYPE_IP)
&& flow->nw_proto == IPPROTO_ICMP) {
struct ovs_key_icmp *icmp_key;
ds_destroy(&s);
}
-static bool
-odp_to_ovs_frag(uint8_t odp_frag, struct flow *flow)
+static uint8_t
+odp_to_ovs_frag(uint8_t odp_frag, bool is_mask)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ if (is_mask) {
+ return odp_frag ? FLOW_NW_FRAG_MASK : 0;
+ }
+
if (odp_frag > OVS_FRAG_TYPE_LATER) {
VLOG_ERR_RL(&rl, "invalid frag %"PRIu8" in flow key", odp_frag);
- return false;
+ return 0xff; /* Error. */
}
- if (odp_frag != OVS_FRAG_TYPE_NONE) {
- flow->nw_frag |= FLOW_NW_FRAG_ANY;
- if (odp_frag == OVS_FRAG_TYPE_LATER) {
- flow->nw_frag |= FLOW_NW_FRAG_LATER;
- }
- }
- return true;
+ return (odp_frag == OVS_FRAG_TYPE_NONE) ? 0
+ : (odp_frag == OVS_FRAG_TYPE_FIRST) ? FLOW_NW_FRAG_ANY
+ : FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER;
}
static bool
enum ovs_key_attr expected_bit = 0xff;
if (eth_type_mpls(src_flow->dl_type)) {
- size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
- const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
- int n = size / sizeof(ovs_be32);
- int i;
-
- if (!size || size % sizeof(ovs_be32)) {
- return ODP_FIT_ERROR;
- }
-
- if (!is_mask) {
+ if (!is_mask || present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
+ }
+ if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
+ size_t size = nl_attr_get_size(attrs[OVS_KEY_ATTR_MPLS]);
+ const ovs_be32 *mpls_lse = nl_attr_get(attrs[OVS_KEY_ATTR_MPLS]);
+ int n = size / sizeof(ovs_be32);
+ int i;
- if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS))) {
- return ODP_FIT_TOO_LITTLE;
+ if (!size || size % sizeof(ovs_be32)) {
+ return ODP_FIT_ERROR;
}
- } else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
if (flow->mpls_lse[0] && flow->dl_type != htons(0xffff)) {
return ODP_FIT_ERROR;
}
- expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
- }
- for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
- flow->mpls_lse[i] = mpls_lse[i];
- }
- if (n > FLOW_MAX_MPLS_LABELS) {
- return ODP_FIT_TOO_MUCH;
- }
+ for (i = 0; i < n && i < FLOW_MAX_MPLS_LABELS; i++) {
+ flow->mpls_lse[i] = mpls_lse[i];
+ }
+ if (n > FLOW_MAX_MPLS_LABELS) {
+ return ODP_FIT_TOO_MUCH;
+ }
- if (!is_mask) {
- /* BOS may be set only in the innermost label. */
- for (i = 0; i < n - 1; i++) {
- if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
- return ODP_FIT_ERROR;
+ if (!is_mask) {
+ /* BOS may be set only in the innermost label. */
+ for (i = 0; i < n - 1; i++) {
+ if (flow->mpls_lse[i] & htonl(MPLS_BOS_MASK)) {
+ return ODP_FIT_ERROR;
+ }
}
- }
- /* BOS must be set in the innermost label. */
- if (n < FLOW_MAX_MPLS_LABELS
- && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
- return ODP_FIT_TOO_LITTLE;
+ /* BOS must be set in the innermost label. */
+ if (n < FLOW_MAX_MPLS_LABELS
+ && !(flow->mpls_lse[n - 1] & htonl(MPLS_BOS_MASK))) {
+ return ODP_FIT_TOO_LITTLE;
+ }
}
}
const struct ovs_key_ipv4 *ipv4_key;
ipv4_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV4]);
- flow->nw_src = ipv4_key->ipv4_src;
- flow->nw_dst = ipv4_key->ipv4_dst;
- flow->nw_proto = ipv4_key->ipv4_proto;
- flow->nw_tos = ipv4_key->ipv4_tos;
- flow->nw_ttl = ipv4_key->ipv4_ttl;
+ put_ipv4_key(ipv4_key, flow, is_mask);
+ if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
+ return ODP_FIT_ERROR;
+ }
if (is_mask) {
- flow->nw_frag = ipv4_key->ipv4_frag;
check_start = ipv4_key;
check_len = sizeof *ipv4_key;
expected_bit = OVS_KEY_ATTR_IPV4;
- } else if (!odp_to_ovs_frag(ipv4_key->ipv4_frag, flow)) {
- return ODP_FIT_ERROR;
}
}
} else if (src_flow->dl_type == htons(ETH_TYPE_IPV6)) {
const struct ovs_key_ipv6 *ipv6_key;
ipv6_key = nl_attr_get(attrs[OVS_KEY_ATTR_IPV6]);
- memcpy(&flow->ipv6_src, ipv6_key->ipv6_src, sizeof flow->ipv6_src);
- memcpy(&flow->ipv6_dst, ipv6_key->ipv6_dst, sizeof flow->ipv6_dst);
- flow->ipv6_label = ipv6_key->ipv6_label;
- flow->nw_proto = ipv6_key->ipv6_proto;
- flow->nw_tos = ipv6_key->ipv6_tclass;
- flow->nw_ttl = ipv6_key->ipv6_hlimit;
+ put_ipv6_key(ipv6_key, flow, is_mask);
+ if (flow->nw_frag > FLOW_NW_FRAG_MASK) {
+ return ODP_FIT_ERROR;
+ }
if (is_mask) {
- flow->nw_frag = ipv6_key->ipv6_frag;
check_start = ipv6_key;
check_len = sizeof *ipv6_key;
expected_bit = OVS_KEY_ATTR_IPV6;
- } else if (!odp_to_ovs_frag(ipv6_key->ipv6_frag, flow)) {
- return ODP_FIT_ERROR;
}
}
} else if (src_flow->dl_type == htons(ETH_TYPE_ARP) ||
const struct ovs_key_arp *arp_key;
arp_key = nl_attr_get(attrs[OVS_KEY_ATTR_ARP]);
- flow->nw_src = arp_key->arp_sip;
- flow->nw_dst = arp_key->arp_tip;
if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
VLOG_ERR_RL(&rl, "unsupported ARP opcode %"PRIu16" in flow "
"key", ntohs(arp_key->arp_op));
return ODP_FIT_ERROR;
}
- flow->nw_proto = ntohs(arp_key->arp_op);
- memcpy(flow->arp_sha, arp_key->arp_sha, ETH_ADDR_LEN);
- memcpy(flow->arp_tha, arp_key->arp_tha, ETH_ADDR_LEN);
-
+ put_arp_key(arp_key, flow);
if (is_mask) {
check_start = arp_key;
check_len = sizeof *arp_key;
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_TCP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP)) {
- const struct ovs_key_tcp *tcp_key;
+ const union ovs_key_tp *tcp_key;
tcp_key = nl_attr_get(attrs[OVS_KEY_ATTR_TCP]);
- flow->tp_src = tcp_key->tcp_src;
- flow->tp_dst = tcp_key->tcp_dst;
+ put_tp_key(tcp_key, flow);
expected_bit = OVS_KEY_ATTR_TCP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_TCP_FLAGS)) {
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_UDP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_UDP)) {
- const struct ovs_key_udp *udp_key;
+ const union ovs_key_tp *udp_key;
udp_key = nl_attr_get(attrs[OVS_KEY_ATTR_UDP]);
- flow->tp_src = udp_key->udp_src;
- flow->tp_dst = udp_key->udp_dst;
+ put_tp_key(udp_key, flow);
expected_bit = OVS_KEY_ATTR_UDP;
}
} else if (src_flow->nw_proto == IPPROTO_SCTP
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_SCTP;
}
if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_SCTP)) {
- const struct ovs_key_sctp *sctp_key;
+ const union ovs_key_tp *sctp_key;
sctp_key = nl_attr_get(attrs[OVS_KEY_ATTR_SCTP]);
- flow->tp_src = sctp_key->sctp_src;
- flow->tp_dst = sctp_key->sctp_dst;
+ put_tp_key(sctp_key, flow);
expected_bit = OVS_KEY_ATTR_SCTP;
}
} else if (src_flow->nw_proto == IPPROTO_ICMP
const struct ovs_key_ethernet *eth_key;
eth_key = nl_attr_get(attrs[OVS_KEY_ATTR_ETHERNET]);
- memcpy(flow->dl_src, eth_key->eth_src, ETH_ADDR_LEN);
- memcpy(flow->dl_dst, eth_key->eth_dst, ETH_ADDR_LEN);
+ put_ethernet_key(eth_key, flow);
if (is_mask) {
expected_attrs |= UINT64_C(1) << OVS_KEY_ATTR_ETHERNET;
}
offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_USERSPACE);
nl_msg_put_u32(odp_actions, OVS_USERSPACE_ATTR_PID, pid);
if (userdata) {
- userdata_ofs = ofpbuf_size(odp_actions) + NLA_HDRLEN;
+ userdata_ofs = odp_actions->size + NLA_HDRLEN;
/* The OVS kernel module before OVS 1.11 and the upstream Linux kernel
* module before Linux 3.10 required the userdata to be exactly 8 bytes
tun_key_to_attr(odp_actions, tunnel);
nl_msg_end_nested(odp_actions, offset);
}
+
+void
+odp_put_tnl_push_action(struct ofpbuf *odp_actions,
+ struct ovs_action_push_tnl *data)
+{
+ int size = offsetof(struct ovs_action_push_tnl, header);
+
+ size += data->header_len;
+ nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_TUNNEL_PUSH, data, size);
+}
+
\f
/* The commit_odp_actions() function and its helpers. */
nl_msg_end_nested(odp_actions, offset);
}
-void
-odp_put_pkt_mark_action(const uint32_t pkt_mark,
- struct ofpbuf *odp_actions)
-{
- commit_set_action(odp_actions, OVS_KEY_ATTR_SKB_MARK, &pkt_mark,
- sizeof(pkt_mark));
-}
-
/* If any of the flow key data that ODP actions can modify are different in
* 'base->tunnel' and 'flow->tunnel', appends a set_tunnel ODP action to
* 'odp_actions' that change the flow tunneling information in key from
}
}
-static void
-commit_set_ether_addr_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions,
- struct flow_wildcards *wc)
+static bool
+commit(enum ovs_key_attr attr, bool use_masked_set,
+ const void *key, void *base, void *mask, size_t size,
+ struct ofpbuf *odp_actions)
{
- struct ovs_key_ethernet eth_key;
+ if (memcmp(key, base, size)) {
+ bool fully_masked = odp_mask_is_exact(attr, mask, size);
- if (eth_addr_equals(base->dl_src, flow->dl_src) &&
- eth_addr_equals(base->dl_dst, flow->dl_dst)) {
- return;
+ if (use_masked_set && !fully_masked) {
+ commit_masked_set_action(odp_actions, attr, key, mask, size);
+ } else {
+ if (!fully_masked) {
+ memset(mask, 0xff, size);
+ }
+ commit_set_action(odp_actions, attr, key, size);
+ }
+ memcpy(base, key, size);
+ return true;
+ } else {
+ /* Mask bits are set when we have either read or set the corresponding
+ * values. Masked bits will be exact-matched, no need to set them
+ * if the value did not actually change. */
+ return false;
}
+}
- memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
- memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+static void
+get_ethernet_key(const struct flow *flow, struct ovs_key_ethernet *eth)
+{
+ memcpy(eth->eth_src, flow->dl_src, ETH_ADDR_LEN);
+ memcpy(eth->eth_dst, flow->dl_dst, ETH_ADDR_LEN);
+}
+
+static void
+put_ethernet_key(const struct ovs_key_ethernet *eth, struct flow *flow)
+{
+ memcpy(flow->dl_src, eth->eth_src, ETH_ADDR_LEN);
+ memcpy(flow->dl_dst, eth->eth_dst, ETH_ADDR_LEN);
+}
- memcpy(base->dl_src, flow->dl_src, ETH_ADDR_LEN);
- memcpy(base->dl_dst, flow->dl_dst, ETH_ADDR_LEN);
+static void
+commit_set_ether_addr_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions,
+ struct flow_wildcards *wc,
+ bool use_masked)
+{
+ struct ovs_key_ethernet key, base, mask;
- memcpy(eth_key.eth_src, base->dl_src, ETH_ADDR_LEN);
- memcpy(eth_key.eth_dst, base->dl_dst, ETH_ADDR_LEN);
+ get_ethernet_key(flow, &key);
+ get_ethernet_key(base_flow, &base);
+ get_ethernet_key(&wc->masks, &mask);
- commit_set_action(odp_actions, OVS_KEY_ATTR_ETHERNET,
- ð_key, sizeof(eth_key));
+ if (commit(OVS_KEY_ATTR_ETHERNET, use_masked,
+ &key, &base, &mask, sizeof key, odp_actions)) {
+ put_ethernet_key(&base, base_flow);
+ put_ethernet_key(&mask, &wc->masks);
+ }
}
static void
base->vlan_tci = vlan_tci;
}
+/* Wildcarding already done at action translation time, so the MPLS label
+ * counting and push/pop helpers below are called with a NULL 'wc'. */
static void
commit_mpls_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+ struct ofpbuf *odp_actions)
{
- int base_n = flow_count_mpls_labels(base, wc);
- int flow_n = flow_count_mpls_labels(flow, wc);
+ int base_n = flow_count_mpls_labels(base, NULL);
+ int flow_n = flow_count_mpls_labels(flow, NULL);
int common_n = flow_count_common_mpls_labels(flow, flow_n, base, base_n,
- wc);
+ NULL);
while (base_n > common_n) {
if (base_n - 1 == common_n && flow_n > common_n) {
dl_type = flow->dl_type;
}
nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, dl_type);
- popped = flow_pop_mpls(base, base_n, flow->dl_type, wc);
+ popped = flow_pop_mpls(base, base_n, flow->dl_type, NULL);
ovs_assert(popped);
base_n--;
}
sizeof *mpls);
mpls->mpls_ethertype = flow->dl_type;
mpls->mpls_lse = flow->mpls_lse[flow_n - base_n - 1];
- flow_push_mpls(base, base_n, mpls->mpls_ethertype, wc);
+ flow_push_mpls(base, base_n, mpls->mpls_ethertype, NULL);
flow_set_mpls_lse(base, 0, mpls->mpls_lse);
base_n++;
}
}
+/* Copies the IPv4 header fields of 'flow' into ODP key 'ipv4'; 'is_mask'
+ * selects mask semantics in ovs_to_odp_frag(). */
static void
-commit_set_ipv4_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+get_ipv4_key(const struct flow *flow, struct ovs_key_ipv4 *ipv4, bool is_mask)
{
- struct ovs_key_ipv4 ipv4_key;
+ ipv4->ipv4_src = flow->nw_src;
+ ipv4->ipv4_dst = flow->nw_dst;
+ ipv4->ipv4_proto = flow->nw_proto;
+ ipv4->ipv4_tos = flow->nw_tos;
+ ipv4->ipv4_ttl = flow->nw_ttl;
+ ipv4->ipv4_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
+}
- if (base->nw_src == flow->nw_src &&
- base->nw_dst == flow->nw_dst &&
- base->nw_tos == flow->nw_tos &&
- base->nw_ttl == flow->nw_ttl &&
- base->nw_frag == flow->nw_frag) {
- return;
- }
+/* Inverse of get_ipv4_key(): copies ODP key 'ipv4' back into 'flow'. */
+static void
+put_ipv4_key(const struct ovs_key_ipv4 *ipv4, struct flow *flow, bool is_mask)
+{
+ flow->nw_src = ipv4->ipv4_src;
+ flow->nw_dst = ipv4->ipv4_dst;
+ flow->nw_proto = ipv4->ipv4_proto;
+ flow->nw_tos = ipv4->ipv4_tos;
+ flow->nw_ttl = ipv4->ipv4_ttl;
+ flow->nw_frag = odp_to_ovs_frag(ipv4->ipv4_frag, is_mask);
+}
+
+/* Commits any IPv4 header change (src/dst/tos/ttl) from 'flow' to
+ * 'odp_actions', updating 'base_flow' and 'wc->masks' on commit. */
+static void
+commit_set_ipv4_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ bool use_masked)
+{
+ struct ovs_key_ipv4 key, mask, base;
+
+ /* Check that nw_proto and nw_frag remain unchanged. */
+ ovs_assert(flow->nw_proto == base_flow->nw_proto &&
+ flow->nw_frag == base_flow->nw_frag);
+
+ get_ipv4_key(flow, &key, false);
+ get_ipv4_key(base_flow, &base, false);
+ get_ipv4_key(&wc->masks, &mask, true);
+ mask.ipv4_proto = 0; /* Not writable. */
+ mask.ipv4_frag = 0; /* Not writable. */
- memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
- memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
- memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
- memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
- memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);
+ if (commit(OVS_KEY_ATTR_IPV4, use_masked, &key, &base, &mask, sizeof key,
+ odp_actions)) {
+ put_ipv4_key(&base, base_flow, false);
+ if (mask.ipv4_proto != 0) { /* Mask was changed by commit(). */
+ put_ipv4_key(&mask, &wc->masks, true);
+ }
+ }
+}
- ipv4_key.ipv4_src = base->nw_src = flow->nw_src;
- ipv4_key.ipv4_dst = base->nw_dst = flow->nw_dst;
- ipv4_key.ipv4_tos = base->nw_tos = flow->nw_tos;
- ipv4_key.ipv4_ttl = base->nw_ttl = flow->nw_ttl;
- ipv4_key.ipv4_proto = base->nw_proto;
- ipv4_key.ipv4_frag = ovs_to_odp_frag(base->nw_frag);
+/* Copies the IPv6 header fields of 'flow' into ODP key 'ipv6'; 'is_mask'
+ * selects mask semantics in ovs_to_odp_frag(). */
+static void
+get_ipv6_key(const struct flow *flow, struct ovs_key_ipv6 *ipv6, bool is_mask)
+{
+ memcpy(ipv6->ipv6_src, &flow->ipv6_src, sizeof ipv6->ipv6_src);
+ memcpy(ipv6->ipv6_dst, &flow->ipv6_dst, sizeof ipv6->ipv6_dst);
+ ipv6->ipv6_label = flow->ipv6_label;
+ ipv6->ipv6_proto = flow->nw_proto;
+ ipv6->ipv6_tclass = flow->nw_tos;
+ ipv6->ipv6_hlimit = flow->nw_ttl;
+ ipv6->ipv6_frag = ovs_to_odp_frag(flow->nw_frag, is_mask);
+}
- commit_set_action(odp_actions, OVS_KEY_ATTR_IPV4,
- &ipv4_key, sizeof(ipv4_key));
+/* Inverse of get_ipv6_key(): copies ODP key 'ipv6' back into 'flow'. */
+static void
+put_ipv6_key(const struct ovs_key_ipv6 *ipv6, struct flow *flow, bool is_mask)
+{
+ memcpy(&flow->ipv6_src, ipv6->ipv6_src, sizeof flow->ipv6_src);
+ memcpy(&flow->ipv6_dst, ipv6->ipv6_dst, sizeof flow->ipv6_dst);
+ flow->ipv6_label = ipv6->ipv6_label;
+ flow->nw_proto = ipv6->ipv6_proto;
+ flow->nw_tos = ipv6->ipv6_tclass;
+ flow->nw_ttl = ipv6->ipv6_hlimit;
+ flow->nw_frag = odp_to_ovs_frag(ipv6->ipv6_frag, is_mask);
}
+/* Commits any IPv6 header change from 'flow' to 'odp_actions', updating
+ * 'base_flow' and 'wc->masks' on commit. */
static void
-commit_set_ipv6_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+commit_set_ipv6_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ bool use_masked)
{
- struct ovs_key_ipv6 ipv6_key;
+ struct ovs_key_ipv6 key, mask, base;
- if (ipv6_addr_equals(&base->ipv6_src, &flow->ipv6_src) &&
- ipv6_addr_equals(&base->ipv6_dst, &flow->ipv6_dst) &&
- base->ipv6_label == flow->ipv6_label &&
- base->nw_tos == flow->nw_tos &&
- base->nw_ttl == flow->nw_ttl &&
- base->nw_frag == flow->nw_frag) {
- return;
- }
+ /* Check that nw_proto and nw_frag remain unchanged. */
+ ovs_assert(flow->nw_proto == base_flow->nw_proto &&
+ flow->nw_frag == base_flow->nw_frag);
- memset(&wc->masks.ipv6_src, 0xff, sizeof wc->masks.ipv6_src);
- memset(&wc->masks.ipv6_dst, 0xff, sizeof wc->masks.ipv6_dst);
- memset(&wc->masks.ipv6_label, 0xff, sizeof wc->masks.ipv6_label);
- memset(&wc->masks.nw_tos, 0xff, sizeof wc->masks.nw_tos);
- memset(&wc->masks.nw_ttl, 0xff, sizeof wc->masks.nw_ttl);
- memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- memset(&wc->masks.nw_frag, 0xff, sizeof wc->masks.nw_frag);
+ get_ipv6_key(flow, &key, false);
+ get_ipv6_key(base_flow, &base, false);
+ get_ipv6_key(&wc->masks, &mask, true);
+ mask.ipv6_proto = 0; /* Not writable. */
+ mask.ipv6_frag = 0; /* Not writable. */
- base->ipv6_src = flow->ipv6_src;
- memcpy(&ipv6_key.ipv6_src, &base->ipv6_src, sizeof(ipv6_key.ipv6_src));
- base->ipv6_dst = flow->ipv6_dst;
- memcpy(&ipv6_key.ipv6_dst, &base->ipv6_dst, sizeof(ipv6_key.ipv6_dst));
+ if (commit(OVS_KEY_ATTR_IPV6, use_masked, &key, &base, &mask, sizeof key,
+ odp_actions)) {
+ put_ipv6_key(&base, base_flow, false);
+ if (mask.ipv6_proto != 0) { /* Mask was changed by commit(). */
+ put_ipv6_key(&mask, &wc->masks, true);
+ }
+ }
+}
- ipv6_key.ipv6_label = base->ipv6_label = flow->ipv6_label;
- ipv6_key.ipv6_tclass = base->nw_tos = flow->nw_tos;
- ipv6_key.ipv6_hlimit = base->nw_ttl = flow->nw_ttl;
- ipv6_key.ipv6_proto = base->nw_proto;
- ipv6_key.ipv6_frag = ovs_to_odp_frag(base->nw_frag);
+/* Copies the ARP fields of 'flow' into ODP key 'arp'. */
+static void
+get_arp_key(const struct flow *flow, struct ovs_key_arp *arp)
+{
+ /* ARP key has padding, clear it. */
+ memset(arp, 0, sizeof *arp);
+
+ arp->arp_sip = flow->nw_src;
+ arp->arp_tip = flow->nw_dst;
+ arp->arp_op = htons(flow->nw_proto);
+ memcpy(arp->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+ memcpy(arp->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+}
- commit_set_action(odp_actions, OVS_KEY_ATTR_IPV6,
- &ipv6_key, sizeof(ipv6_key));
+/* Inverse of get_arp_key(): copies ODP key 'arp' back into 'flow'. */
+static void
+put_arp_key(const struct ovs_key_arp *arp, struct flow *flow)
+{
+ flow->nw_src = arp->arp_sip;
+ flow->nw_dst = arp->arp_tip;
+ flow->nw_proto = ntohs(arp->arp_op);
+ memcpy(flow->arp_sha, arp->arp_sha, ETH_ADDR_LEN);
+ memcpy(flow->arp_tha, arp->arp_tha, ETH_ADDR_LEN);
}
+/* Commits any ARP field change; returns SLOW_ACTION when a set action was
+ * committed, otherwise 0. NOTE(review): use_masked is hard-coded to 'true'
+ * here, unlike the other commit_set_*_action() helpers — confirm intended. */
static enum slow_path_reason
-commit_set_arp_action(const struct flow *flow, struct flow *base,
+commit_set_arp_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
- struct ovs_key_arp arp_key;
+ struct ovs_key_arp key, mask, base;
- if (base->nw_src == flow->nw_src &&
- base->nw_dst == flow->nw_dst &&
- base->nw_proto == flow->nw_proto &&
- eth_addr_equals(base->arp_sha, flow->arp_sha) &&
- eth_addr_equals(base->arp_tha, flow->arp_tha)) {
- return 0;
+ get_arp_key(flow, &key);
+ get_arp_key(base_flow, &base);
+ get_arp_key(&wc->masks, &mask);
+
+ if (commit(OVS_KEY_ATTR_ARP, true, &key, &base, &mask, sizeof key,
+ odp_actions)) {
+ put_arp_key(&base, base_flow);
+ put_arp_key(&mask, &wc->masks);
+ return SLOW_ACTION;
}
+ return 0;
+}
- memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
- memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
- memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
- memset(&wc->masks.arp_sha, 0xff, sizeof wc->masks.arp_sha);
- memset(&wc->masks.arp_tha, 0xff, sizeof wc->masks.arp_tha);
+/* Copies the IPv6 neighbor discovery fields of 'flow' into ODP key 'nd'. */
+static void
+get_nd_key(const struct flow *flow, struct ovs_key_nd *nd)
+{
+ memcpy(nd->nd_target, &flow->nd_target, sizeof flow->nd_target);
+ /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
+ memcpy(nd->nd_sll, flow->arp_sha, ETH_ADDR_LEN);
+ memcpy(nd->nd_tll, flow->arp_tha, ETH_ADDR_LEN);
+}
- base->nw_src = flow->nw_src;
- base->nw_dst = flow->nw_dst;
- base->nw_proto = flow->nw_proto;
- memcpy(base->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
- memcpy(base->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+/* Inverse of get_nd_key(): copies the ND fields from ODP key 'nd' back into
+ * 'flow'. */
+static void
+put_nd_key(const struct ovs_key_nd *nd, struct flow *flow)
+{
+ /* Copy from 'nd', not from 'flow' itself: the previous self-memcpy left
+ * flow->nd_target unchanged. */
+ memcpy(&flow->nd_target, nd->nd_target, sizeof flow->nd_target);
+ /* nd_sll and nd_tll are stored in arp_sha and arp_tha, respectively */
+ memcpy(flow->arp_sha, nd->nd_sll, ETH_ADDR_LEN);
+ memcpy(flow->arp_tha, nd->nd_tll, ETH_ADDR_LEN);
+}
- arp_key.arp_sip = base->nw_src;
- arp_key.arp_tip = base->nw_dst;
- arp_key.arp_op = htons(base->nw_proto);
- memcpy(arp_key.arp_sha, flow->arp_sha, ETH_ADDR_LEN);
- memcpy(arp_key.arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+/* Commits any ND field change; returns SLOW_ACTION when a set action was
+ * committed, otherwise 0. */
+static enum slow_path_reason
+commit_set_nd_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions,
+ struct flow_wildcards *wc, bool use_masked)
+{
+ struct ovs_key_nd key, mask, base;
- commit_set_action(odp_actions, OVS_KEY_ATTR_ARP, &arp_key, sizeof arp_key);
+ get_nd_key(flow, &key);
+ get_nd_key(base_flow, &base);
+ get_nd_key(&wc->masks, &mask);
- return SLOW_ACTION;
+ if (commit(OVS_KEY_ATTR_ND, use_masked, &key, &base, &mask, sizeof key,
+ odp_actions)) {
+ put_nd_key(&base, base_flow);
+ put_nd_key(&mask, &wc->masks);
+ return SLOW_ACTION;
+ }
+
+ return 0;
}
static enum slow_path_reason
commit_set_nw_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ bool use_masked)
{
/* Check if 'flow' really has an L3 header. */
if (!flow->nw_proto) {
switch (ntohs(base->dl_type)) {
case ETH_TYPE_IP:
- commit_set_ipv4_action(flow, base, odp_actions, wc);
+ commit_set_ipv4_action(flow, base, odp_actions, wc, use_masked);
break;
case ETH_TYPE_IPV6:
- commit_set_ipv6_action(flow, base, odp_actions, wc);
- break;
+ commit_set_ipv6_action(flow, base, odp_actions, wc, use_masked);
+ /* Also commit any ND change, propagating its SLOW_ACTION result. */
+ return commit_set_nd_action(flow, base, odp_actions, wc, use_masked);
case ETH_TYPE_ARP:
return commit_set_arp_action(flow, base, odp_actions, wc);
return 0;
}
+/* TCP, UDP, and SCTP keys have the same layout. */
+BUILD_ASSERT_DECL(sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_udp) &&
+ sizeof(struct ovs_key_tcp) == sizeof(struct ovs_key_sctp));
+
+/* Copies the transport-layer ports of 'flow' into ODP key 'tp'.  The TCP
+ * member is used; the BUILD_ASSERT_DECL above guarantees all members share
+ * the same layout. */
+static void
+get_tp_key(const struct flow *flow, union ovs_key_tp *tp)
+{
+ tp->tcp.tcp_src = flow->tp_src;
+ tp->tcp.tcp_dst = flow->tp_dst;
+}
+
+/* Inverse of get_tp_key(): copies ODP key 'tp' back into 'flow'. */
static void
-commit_set_port_action(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+put_tp_key(const union ovs_key_tp *tp, struct flow *flow)
{
+ flow->tp_src = tp->tcp.tcp_src;
+ flow->tp_dst = tp->tcp.tcp_dst;
+}
+
+/* Commits any TCP/UDP/SCTP port change from 'flow' to 'odp_actions',
+ * updating 'base_flow' and 'wc->masks' on commit.  A no-op for non-IP flows
+ * and for protocols other than TCP, UDP, and SCTP. */
+static void
+commit_set_port_action(const struct flow *flow, struct flow *base_flow,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ bool use_masked)
+{
+ enum ovs_key_attr key_type;
+ union ovs_key_tp key, mask, base;
+
 /* Check if 'flow' really has an L3 header. */
 if (!flow->nw_proto) {
 return;
 }
- if (!is_ip_any(base) || (!base->tp_src && !base->tp_dst)) {
+ if (!is_ip_any(base_flow)) {
 return;
 }
- if (base->tp_src == flow->tp_src &&
- base->tp_dst == flow->tp_dst) {
- return;
- }
-
- memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
- memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
-
 if (flow->nw_proto == IPPROTO_TCP) {
- struct ovs_key_tcp port_key;
-
- port_key.tcp_src = base->tp_src = flow->tp_src;
- port_key.tcp_dst = base->tp_dst = flow->tp_dst;
-
- commit_set_action(odp_actions, OVS_KEY_ATTR_TCP,
- &port_key, sizeof(port_key));
-
+ key_type = OVS_KEY_ATTR_TCP;
 } else if (flow->nw_proto == IPPROTO_UDP) {
- struct ovs_key_udp port_key;
-
- port_key.udp_src = base->tp_src = flow->tp_src;
- port_key.udp_dst = base->tp_dst = flow->tp_dst;
-
- commit_set_action(odp_actions, OVS_KEY_ATTR_UDP,
- &port_key, sizeof(port_key));
 } else if (flow->nw_proto == IPPROTO_SCTP) {
- struct ovs_key_sctp port_key;
+ key_type = OVS_KEY_ATTR_SCTP;
+ } else {
+ return;
+ }
- port_key.sctp_src = base->tp_src = flow->tp_src;
- port_key.sctp_dst = base->tp_dst = flow->tp_dst;
+ get_tp_key(flow, &key);
+ get_tp_key(base_flow, &base);
+ get_tp_key(&wc->masks, &mask);
- commit_set_action(odp_actions, OVS_KEY_ATTR_SCTP,
- &port_key, sizeof(port_key));
+ if (commit(key_type, use_masked, &key, &base, &mask, sizeof key,
+ odp_actions)) {
+ put_tp_key(&base, base_flow);
+ put_tp_key(&mask, &wc->masks);
 }
 }
+/* Commits a skb_priority change from 'flow' to 'odp_actions', updating
+ * 'base_flow' and 'wc->masks' on commit. */
static void
-commit_set_priority_action(const struct flow *flow, struct flow *base,
+commit_set_priority_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions,
- struct flow_wildcards *wc)
+ struct flow_wildcards *wc,
+ bool use_masked)
{
- if (base->skb_priority == flow->skb_priority) {
- return;
- }
+ uint32_t key, mask, base;
- memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
- base->skb_priority = flow->skb_priority;
+ key = flow->skb_priority;
+ base = base_flow->skb_priority;
+ mask = wc->masks.skb_priority;
- commit_set_action(odp_actions, OVS_KEY_ATTR_PRIORITY,
- &base->skb_priority, sizeof(base->skb_priority));
+ if (commit(OVS_KEY_ATTR_PRIORITY, use_masked, &key, &base, &mask,
+ sizeof key, odp_actions)) {
+ base_flow->skb_priority = base;
+ wc->masks.skb_priority = mask;
+ }
}
+/* Commits a pkt_mark change from 'flow' to 'odp_actions', updating
+ * 'base_flow' and 'wc->masks' on commit. */
static void
-commit_set_pkt_mark_action(const struct flow *flow, struct flow *base,
+commit_set_pkt_mark_action(const struct flow *flow, struct flow *base_flow,
struct ofpbuf *odp_actions,
- struct flow_wildcards *wc)
+ struct flow_wildcards *wc,
+ bool use_masked)
{
- if (base->pkt_mark == flow->pkt_mark) {
- return;
- }
+ uint32_t key, mask, base;
- memset(&wc->masks.pkt_mark, 0xff, sizeof wc->masks.pkt_mark);
- base->pkt_mark = flow->pkt_mark;
+ key = flow->pkt_mark;
+ base = base_flow->pkt_mark;
+ mask = wc->masks.pkt_mark;
- odp_put_pkt_mark_action(base->pkt_mark, odp_actions);
+ if (commit(OVS_KEY_ATTR_SKB_MARK, use_masked, &key, &base, &mask,
+ sizeof key, odp_actions)) {
+ base_flow->pkt_mark = base;
+ wc->masks.pkt_mark = mask;
+ }
}
/* If any of the flow key data that ODP actions can modify are different in
* slow path, if there is one, otherwise 0. */
enum slow_path_reason
commit_odp_actions(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ bool use_masked)
{
enum slow_path_reason slow;
- commit_set_ether_addr_action(flow, base, odp_actions, wc);
- slow = commit_set_nw_action(flow, base, odp_actions, wc);
- commit_set_port_action(flow, base, odp_actions, wc);
- commit_mpls_action(flow, base, odp_actions, wc);
+ /* NOTE(review): 'use_masked' presumably enables masked set actions when
+ * the datapath supports them — confirm at call sites. */
+ commit_set_ether_addr_action(flow, base, odp_actions, wc, use_masked);
+ slow = commit_set_nw_action(flow, base, odp_actions, wc, use_masked);
+ commit_set_port_action(flow, base, odp_actions, wc, use_masked);
+ commit_mpls_action(flow, base, odp_actions);
commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
- commit_set_priority_action(flow, base, odp_actions, wc);
- commit_set_pkt_mark_action(flow, base, odp_actions, wc);
+ commit_set_priority_action(flow, base, odp_actions, wc, use_masked);
+ commit_set_pkt_mark_action(flow, base, odp_actions, wc, use_masked);
return slow;
}