/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "netdev.h"
#include "netdev-linux.h"
#include "netdev-vport.h"
+#include "netlink-conntrack.h"
#include "netlink-notifier.h"
#include "netlink-socket.h"
#include "netlink.h"
#include "timeval.h"
#include "unaligned.h"
#include "util.h"
-#include "vlog.h"
+#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(dpif_netlink);
+#ifdef _WIN32
+enum { WINDOWS = 1 };
+#else
+enum { WINDOWS = 0 };
+#endif
enum { MAX_PORTS = USHRT_MAX };
/* This ethtool flag was introduced in Linux 2.6.24, so it might be
size_t mask_len;
const struct nlattr *actions; /* OVS_FLOW_ATTR_ACTIONS. */
size_t actions_len;
+ ovs_u128 ufid; /* OVS_FLOW_ATTR_FLOW_ID. */
+ bool ufid_present; /* Is there a UFID? */
+ bool ufid_terse; /* Skip serializing key/mask/acts? */
const struct ovs_flow_stats *stats; /* OVS_FLOW_ATTR_STATS. */
const uint8_t *tcp_flags; /* OVS_FLOW_ATTR_TCP_FLAGS. */
const ovs_32aligned_u64 *used; /* OVS_FLOW_ATTR_USED. */
struct ofpbuf **bufp);
static void dpif_netlink_flow_get_stats(const struct dpif_netlink_flow *,
struct dpif_flow_stats *);
-static void dpif_netlink_flow_to_dpif_flow(struct dpif_flow *,
+static void dpif_netlink_flow_to_dpif_flow(struct dpif *, struct dpif_flow *,
const struct dpif_netlink_flow *);
/* One of the dpif channels between the kernel and userspace. */
long long int last_poll; /* Last time this channel was polled. */
};
+#ifdef _WIN32
+#define VPORT_SOCK_POOL_SIZE 1
+/* On Windows, there is no native support for epoll. There are equivalent
+ * interfaces though, that are not used currently. For simplicity, a pool of
+ * netlink sockets is used. Each socket is represented by 'struct
+ * dpif_windows_vport_sock'. Since it is a pool, multiple OVS ports may be
+ * sharing the same socket. In the future, we can add a reference count and
+ * such fields. */
+struct dpif_windows_vport_sock {
+ struct nl_sock *nl_sock; /* netlink socket. */
+};
+#endif
+
struct dpif_handler {
struct dpif_channel *channels;/* Array of channels for each handler. */
struct epoll_event *epoll_events;
int epoll_fd; /* epoll fd that includes channel socks. */
int n_events; /* Num events returned by epoll_wait(). */
int event_offset; /* Offset into 'epoll_events'. */
+
+#ifdef _WIN32
+ /* Pool of sockets. */
+ struct dpif_windows_vport_sock *vport_sock_pool;
+ size_t last_used_pool_idx; /* Index to aid in allocating a
+ socket in the pool to a port. */
+#endif
};
/* Datapath interface for the openvswitch Linux kernel module. */
static int open_dpif(const struct dpif_netlink_dp *, struct dpif **);
static uint32_t dpif_netlink_port_get_pid(const struct dpif *,
odp_port_t port_no, uint32_t hash);
+static void dpif_netlink_handler_uninit(struct dpif_handler *handler);
static int dpif_netlink_refresh_channels(struct dpif_netlink *,
uint32_t n_handlers);
static void dpif_netlink_vport_to_ofpbuf(const struct dpif_netlink_vport *,
/* Destroys the netlink sockets pointed by the elements in 'socksp'
* and frees the 'socksp'. */
static void
-vport_del_socksp(struct nl_sock **socksp, uint32_t n_socks)
+vport_del_socksp__(struct nl_sock **socksp, uint32_t n_socks)
{
size_t i;
/* Creates an array of netlink sockets. Returns an array of the
* corresponding pointers. Records the error in 'error'. */
static struct nl_sock **
-vport_create_socksp(uint32_t n_socks, int *error)
+vport_create_socksp__(uint32_t n_socks, int *error)
{
struct nl_sock **socksp = xzalloc(n_socks * sizeof *socksp);
size_t i;
return socksp;
error:
- vport_del_socksp(socksp, n_socks);
+ vport_del_socksp__(socksp, n_socks);
return NULL;
}
+#ifdef _WIN32
+/* Frees the per-handler pool of netlink sockets, if one was allocated.
+ *
+ * Each socket in the pool is first unsubscribed from packet reception and
+ * then destroyed; finally the pool array itself is freed and the pointer
+ * cleared, so calling this again is a no-op.
+ *
+ * NOTE(review): the OVS_REQ_WRLOCK annotation below names 'dpif', which is
+ * not in scope in this function -- confirm the macro expands to nothing on
+ * builds without Clang thread-safety analysis. */
+static void
+vport_delete_sock_pool(struct dpif_handler *handler)
+    OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+    if (handler->vport_sock_pool) {
+        uint32_t i;
+        struct dpif_windows_vport_sock *sock_pool =
+            handler->vport_sock_pool;
+
+        for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
+            if (sock_pool[i].nl_sock) {
+                nl_sock_unsubscribe_packets(sock_pool[i].nl_sock);
+                nl_sock_destroy(sock_pool[i].nl_sock);
+                sock_pool[i].nl_sock = NULL;
+            }
+        }
+
+        free(handler->vport_sock_pool);
+        handler->vport_sock_pool = NULL;
+    }
+}
+
+/* Allocates a pool of VPORT_SOCK_POOL_SIZE generic netlink sockets for
+ * 'handler' and subscribes each one to packet reception.
+ *
+ * Returns 0 on success, with 'handler->vport_sock_pool' set and the
+ * round-robin index reset.  On failure returns a positive errno value and
+ * leaves 'handler->vport_sock_pool' NULL, with all partially created
+ * sockets destroyed. */
+static int
+vport_create_sock_pool(struct dpif_handler *handler)
+    OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+    struct dpif_windows_vport_sock *sock_pool;
+    size_t i;
+    int error = 0;
+
+    sock_pool = xzalloc(VPORT_SOCK_POOL_SIZE * sizeof *sock_pool);
+    /* Publish the (zeroed) pool immediately so the error path below can tear
+     * down any sockets created so far: vport_delete_sock_pool() only frees
+     * the pool reachable from 'handler', so assigning it only on success
+     * would leak 'sock_pool' and its sockets on failure. */
+    handler->vport_sock_pool = sock_pool;
+    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
+        error = nl_sock_create(NETLINK_GENERIC, &sock_pool[i].nl_sock);
+        if (error) {
+            goto error;
+        }
+
+        /* Enable the netlink socket to receive packets. This is equivalent to
+         * calling nl_sock_join_mcgroup() to receive events. */
+        error = nl_sock_subscribe_packets(sock_pool[i].nl_sock);
+        if (error) {
+            goto error;
+        }
+    }
+
+    handler->last_used_pool_idx = 0;
+    return 0;
+
+error:
+    vport_delete_sock_pool(handler);
+    return error;
+}
+
+/* Returns an array of pointers to netlink sockets picked round-robin from
+ * each handler's pool.  The caller owns only the array (release it with
+ * vport_del_socksp_windows()); the sockets remain owned by the pools.
+ * On failure stores a positive errno value in '*error' and returns NULL. */
+static struct nl_sock **
+vport_create_socksp_windows(struct dpif_netlink *dpif, int *error)
+    OVS_REQ_WRLOCK(dpif->upcall_lock)
+{
+    uint32_t n_socks = dpif->n_handlers;
+    struct nl_sock **socksp;
+    size_t i;
+
+    ovs_assert(n_socks <= 1);
+    socksp = xzalloc(n_socks * sizeof *socksp);
+
+    /* Pick netlink sockets to use in a round-robin fashion from each
+     * handler's pool of sockets. */
+    for (i = 0; i < n_socks; i++) {
+        struct dpif_handler *handler = &dpif->handlers[i];
+        struct dpif_windows_vport_sock *sock_pool = handler->vport_sock_pool;
+        size_t index = handler->last_used_pool_idx;
+
+        /* A pool of sockets is allocated when the handler is initialized. */
+        if (sock_pool == NULL) {
+            free(socksp);
+            *error = EINVAL;
+            return NULL;
+        }
+
+        ovs_assert(index < VPORT_SOCK_POOL_SIZE);
+        socksp[i] = sock_pool[index].nl_sock;
+        ovs_assert(socksp[i]);
+        index = (index == VPORT_SOCK_POOL_SIZE - 1) ? 0 : index + 1;
+        handler->last_used_pool_idx = index;
+    }
+
+    return socksp;
+}
+
+/* Frees only the 'socksp' array returned by vport_create_socksp_windows().
+ * The sockets themselves belong to the per-handler pools and stay alive;
+ * 'dpif' is accepted for symmetry with the non-Windows variant. */
+static void
+vport_del_socksp_windows(struct dpif_netlink *dpif, struct nl_sock **socksp)
+{
+    free(socksp);
+}
+#endif /* _WIN32 */
+
+/* Creates the receive sockets for this datapath's handlers.  On Windows the
+ * sockets come from the per-handler pools; elsewhere a fresh socket is
+ * created per handler.  Returns NULL on failure with '*error' set to a
+ * positive errno value. */
+static struct nl_sock **
+vport_create_socksp(struct dpif_netlink *dpif, int *error)
+{
+#ifdef _WIN32
+    return vport_create_socksp_windows(dpif, error);
+#else
+    return vport_create_socksp__(dpif->n_handlers, error);
+#endif
+}
+
+/* Releases an array obtained from vport_create_socksp().  On Windows only
+ * the array is freed (the sockets belong to the handler pools); elsewhere
+ * the sockets are destroyed as well. */
+static void
+vport_del_socksp(struct dpif_netlink *dpif, struct nl_sock **socksp)
+{
+#ifdef _WIN32
+    vport_del_socksp_windows(dpif, socksp);
+#else
+    vport_del_socksp__(socksp, dpif->n_handlers);
+#endif
+}
+
/* Given the array of pointers to netlink sockets 'socksp', returns
* the array of corresponding pids. If the 'socksp' is NULL, returns
* a single-element array of value 0. */
if (!dpif->handlers[0].channels[port_idx].sock) {
return false;
}
+ ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
pids = xzalloc(dpif->n_handlers * sizeof *pids);
for (i = 0; i < dpif->n_handlers; i++) {
struct dpif_handler *handler = &dpif->handlers[i];
-#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
-#else
+#ifndef _WIN32
if (epoll_ctl(handler->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(socksp[i]),
&event) < 0) {
error = errno;
error:
for (j = 0; j < i; j++) {
-#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
-#else
+#ifndef _WIN32
epoll_ctl(dpif->handlers[j].epoll_fd, EPOLL_CTL_DEL,
nl_sock_fd(socksp[j]), NULL);
#endif
for (i = 0; i < dpif->n_handlers; i++) {
struct dpif_handler *handler = &dpif->handlers[i];
-
-#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
-#else
+#ifndef _WIN32
epoll_ctl(handler->epoll_fd, EPOLL_CTL_DEL,
nl_sock_fd(handler->channels[port_idx].sock), NULL);
-#endif
nl_sock_destroy(handler->channels[port_idx].sock);
+#endif
handler->channels[port_idx].sock = NULL;
handler->event_offset = handler->n_events = 0;
}
vport_request.cmd = OVS_VPORT_CMD_SET;
vport_request.dp_ifindex = dpif->dp_ifindex;
vport_request.port_no = u32_to_odp(i);
+ vport_request.n_upcall_pids = 1;
vport_request.upcall_pids = &upcall_pids;
dpif_netlink_vport_transact(&vport_request, NULL, NULL);
for (i = 0; i < dpif->n_handlers; i++) {
struct dpif_handler *handler = &dpif->handlers[i];
-#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
-#else
- close(handler->epoll_fd);
-#endif
+ dpif_netlink_handler_uninit(handler);
free(handler->epoll_events);
free(handler->channels);
}
return dpif_netlink_dp_transact(&dp, NULL, NULL);
}
-static void
+static bool
dpif_netlink_run(struct dpif *dpif_)
{
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
dpif_netlink_refresh_channels(dpif, dpif->n_handlers);
fat_rwlock_unlock(&dpif->upcall_lock);
}
+ return false;
}
static int
case OVS_VPORT_TYPE_GRE:
return "gre";
- case OVS_VPORT_TYPE_GRE64:
- return "gre64";
-
case OVS_VPORT_TYPE_VXLAN:
return "vxlan";
case OVS_VPORT_TYPE_LISP:
return "lisp";
+ case OVS_VPORT_TYPE_STT:
+ return "stt";
+
case OVS_VPORT_TYPE_UNSPEC:
case __OVS_VPORT_TYPE_MAX:
break;
return OVS_VPORT_TYPE_NETDEV;
} else if (!strcmp(type, "internal")) {
return OVS_VPORT_TYPE_INTERNAL;
+ } else if (strstr(type, "stt")) {
+ return OVS_VPORT_TYPE_STT;
} else if (!strcmp(type, "geneve")) {
return OVS_VPORT_TYPE_GENEVE;
- } else if (strstr(type, "gre64")) {
- return OVS_VPORT_TYPE_GRE64;
} else if (strstr(type, "gre")) {
return OVS_VPORT_TYPE_GRE;
} else if (!strcmp(type, "vxlan")) {
int error = 0;
if (dpif->handlers) {
- socksp = vport_create_socksp(dpif->n_handlers, &error);
+ socksp = vport_create_socksp(dpif, &error);
if (!socksp) {
return error;
}
VLOG_WARN_RL(&error_rl, "%s: cannot create port `%s' because it has "
"unsupported type `%s'",
dpif_name(&dpif->dpif), name, type);
- vport_del_socksp(socksp, dpif->n_handlers);
+ vport_del_socksp(dpif, socksp);
return EINVAL;
}
request.name = name;
if (request.type == OVS_VPORT_TYPE_NETDEV) {
#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
+        /* XXX : Map appropriate Windows handle */
#else
netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false);
#endif
}
tnl_cfg = netdev_get_tunnel_config(netdev);
- if (tnl_cfg && tnl_cfg->dst_port != 0) {
+ if (tnl_cfg && (tnl_cfg->dst_port != 0 || tnl_cfg->exts)) {
ofpbuf_use_stack(&options, options_stub, sizeof options_stub);
- nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
- ntohs(tnl_cfg->dst_port));
- request.options = ofpbuf_data(&options);
- request.options_len = ofpbuf_size(&options);
+ if (tnl_cfg->dst_port) {
+ nl_msg_put_u16(&options, OVS_TUNNEL_ATTR_DST_PORT,
+ ntohs(tnl_cfg->dst_port));
+ }
+ if (tnl_cfg->exts) {
+ size_t ext_ofs;
+ int i;
+
+ ext_ofs = nl_msg_start_nested(&options, OVS_TUNNEL_ATTR_EXTENSION);
+ for (i = 0; i < 32; i++) {
+ if (tnl_cfg->exts & (1 << i)) {
+ nl_msg_put_flag(&options, i);
+ }
+ }
+ nl_msg_end_nested(&options, ext_ofs);
+ }
+ request.options = options.data;
+ request.options_len = options.size;
}
request.port_no = *port_nop;
dpif_name(&dpif->dpif), *port_nop);
}
- vport_del_socksp(socksp, dpif->n_handlers);
+ vport_del_socksp(dpif, socksp);
goto exit;
}
request.dp_ifindex = dpif->dp_ifindex;
request.port_no = *port_nop;
dpif_netlink_vport_transact(&request, NULL, NULL);
- vport_del_socksp(socksp, dpif->n_handlers);
+ vport_del_socksp(dpif, socksp);
goto exit;
}
}
}
static void
-dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
- const struct nlattr *key, size_t key_len,
- struct dpif_netlink_flow *request)
+dpif_netlink_flow_init_ufid(struct dpif_netlink_flow *request,
+                            const ovs_u128 *ufid, bool terse)
+{
+    /* Record the unique flow identifier, when the caller supplied one. */
+    if (ufid) {
+        request->ufid = *ufid;
+        request->ufid_present = true;
+    } else {
+        request->ufid_present = false;
+    }
+    /* 'terse' asks that key/mask/actions be skipped when serializing. */
+    request->ufid_terse = terse;
+}
+
+static void
+dpif_netlink_init_flow_get__(const struct dpif_netlink *dpif,
+ const struct nlattr *key, size_t key_len,
+ const ovs_u128 *ufid, bool terse,
+ struct dpif_netlink_flow *request)
{
dpif_netlink_flow_init(request);
request->cmd = OVS_FLOW_CMD_GET;
request->dp_ifindex = dpif->dp_ifindex;
request->key = key;
request->key_len = key_len;
+ dpif_netlink_flow_init_ufid(request, ufid, terse);
+}
+
+static void
+dpif_netlink_init_flow_get(const struct dpif_netlink *dpif,
+ const struct dpif_flow_get *get,
+ struct dpif_netlink_flow *request)
+{
+ dpif_netlink_init_flow_get__(dpif, get->key, get->key_len, get->ufid,
+ false, request);
}
static int
-dpif_netlink_flow_get(const struct dpif_netlink *dpif,
- const struct nlattr *key, size_t key_len,
- struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
+dpif_netlink_flow_get__(const struct dpif_netlink *dpif,
+ const struct nlattr *key, size_t key_len,
+ const ovs_u128 *ufid, bool terse,
+ struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
{
struct dpif_netlink_flow request;
- dpif_netlink_init_flow_get(dpif, key, key_len, &request);
+ dpif_netlink_init_flow_get__(dpif, key, key_len, ufid, terse, &request);
return dpif_netlink_flow_transact(&request, reply, bufp);
}
+/* Looks up in the kernel the flow described by 'flow', using its UFID when
+ * one is present and otherwise its flow key.  'reply' and '*bufp' receive
+ * the kernel's answer, as in dpif_netlink_flow_get__(). */
+static int
+dpif_netlink_flow_get(const struct dpif_netlink *dpif,
+                      const struct dpif_netlink_flow *flow,
+                      struct dpif_netlink_flow *reply, struct ofpbuf **bufp)
+{
+    return dpif_netlink_flow_get__(dpif, flow->key, flow->key_len,
+                                   flow->ufid_present ? &flow->ufid : NULL,
+                                   false, reply, bufp);
+}
+
static void
dpif_netlink_init_flow_put(struct dpif_netlink *dpif,
const struct dpif_flow_put *put,
request->key_len = put->key_len;
request->mask = put->mask;
request->mask_len = put->mask_len;
+ dpif_netlink_flow_init_ufid(request, put->ufid, false);
+
/* Ensure that OVS_FLOW_ATTR_ACTIONS will always be included. */
request->actions = (put->actions
? put->actions
}
static void
-dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
- const struct dpif_flow_del *del,
- struct dpif_netlink_flow *request)
+dpif_netlink_init_flow_del__(struct dpif_netlink *dpif,
+ const struct nlattr *key, size_t key_len,
+ const ovs_u128 *ufid, bool terse,
+ struct dpif_netlink_flow *request)
{
dpif_netlink_flow_init(request);
request->cmd = OVS_FLOW_CMD_DEL;
request->dp_ifindex = dpif->dp_ifindex;
- request->key = del->key;
- request->key_len = del->key_len;
+ request->key = key;
+ request->key_len = key_len;
+ dpif_netlink_flow_init_ufid(request, ufid, terse);
+}
+
+static void
+dpif_netlink_init_flow_del(struct dpif_netlink *dpif,
+ const struct dpif_flow_del *del,
+ struct dpif_netlink_flow *request)
+{
+ dpif_netlink_init_flow_del__(dpif, del->key, del->key_len,
+ del->ufid, del->terse, request);
}
struct dpif_netlink_flow_dump {
}
static struct dpif_flow_dump *
-dpif_netlink_flow_dump_create(const struct dpif *dpif_)
+dpif_netlink_flow_dump_create(const struct dpif *dpif_, bool terse)
{
const struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
struct dpif_netlink_flow_dump *dump;
dpif_netlink_flow_init(&request);
request.cmd = OVS_FLOW_CMD_GET;
request.dp_ifindex = dpif->dp_ifindex;
+ request.ufid_present = false;
+ request.ufid_terse = terse;
buf = ofpbuf_new(1024);
dpif_netlink_flow_to_ofpbuf(&request, buf);
nl_dump_start(&dump->nl_dump, NETLINK_GENERIC, buf);
ofpbuf_delete(buf);
atomic_init(&dump->status, 0);
+ dump->up.terse = terse;
return &dump->up;
}
}
static void
-dpif_netlink_flow_to_dpif_flow(struct dpif_flow *dpif_flow,
+dpif_netlink_flow_to_dpif_flow(struct dpif *dpif, struct dpif_flow *dpif_flow,
const struct dpif_netlink_flow *datapath_flow)
{
dpif_flow->key = datapath_flow->key;
dpif_flow->mask_len = datapath_flow->mask_len;
dpif_flow->actions = datapath_flow->actions;
dpif_flow->actions_len = datapath_flow->actions_len;
+ dpif_flow->ufid_present = datapath_flow->ufid_present;
+ dpif_flow->pmd_id = PMD_ID_NULL;
+ if (datapath_flow->ufid_present) {
+ dpif_flow->ufid = datapath_flow->ufid;
+ } else {
+ ovs_assert(datapath_flow->key && datapath_flow->key_len);
+ dpif_flow_hash(dpif, datapath_flow->key, datapath_flow->key_len,
+ &dpif_flow->ufid);
+ }
dpif_netlink_flow_get_stats(datapath_flow, &dpif_flow->stats);
}
n_flows = 0;
while (!n_flows
- || (n_flows < max_flows && ofpbuf_size(&thread->nl_flows))) {
+ || (n_flows < max_flows && thread->nl_flows.size)) {
struct dpif_netlink_flow datapath_flow;
struct ofpbuf nl_flow;
int error;
break;
}
- if (datapath_flow.actions) {
- /* Common case: the flow includes actions. */
- dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
+ if (dump->up.terse || datapath_flow.actions) {
+ /* Common case: we don't want actions, or the flow includes
+ * actions. */
+ dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
+ &datapath_flow);
} else {
/* Rare case: the flow does not include actions. Retrieve this
* individual flow again to get the actions. */
- error = dpif_netlink_flow_get(dpif, datapath_flow.key,
- datapath_flow.key_len,
+ error = dpif_netlink_flow_get(dpif, &datapath_flow,
&datapath_flow, &thread->nl_actions);
if (error == ENOENT) {
VLOG_DBG("dumped flow disappeared on get");
/* Save this flow. Then exit, because we only have one buffer to
* handle this case. */
- dpif_netlink_flow_to_dpif_flow(&flows[n_flows++], &datapath_flow);
+ dpif_netlink_flow_to_dpif_flow(&dpif->dpif, &flows[n_flows++],
+ &datapath_flow);
break;
}
}
size_t key_ofs;
ofpbuf_prealloc_tailroom(buf, (64
- + ofpbuf_size(d_exec->packet)
+ + dp_packet_size(d_exec->packet)
+ ODP_KEY_METADATA_SIZE
+ d_exec->actions_len));
k_exec->dp_ifindex = dp_ifindex;
nl_msg_put_unspec(buf, OVS_PACKET_ATTR_PACKET,
- ofpbuf_data(d_exec->packet),
- ofpbuf_size(d_exec->packet));
+ dp_packet_data(d_exec->packet),
+ dp_packet_size(d_exec->packet));
key_ofs = nl_msg_start_nested(buf, OVS_PACKET_ATTR_KEY);
- odp_key_from_pkt_metadata(buf, &d_exec->md);
+ odp_key_from_pkt_metadata(buf, &d_exec->packet->md);
nl_msg_end_nested(buf, key_ofs);
nl_msg_put_unspec(buf, OVS_PACKET_ATTR_ACTIONS,
d_exec->actions, d_exec->actions_len);
if (d_exec->probe) {
- nl_msg_put_flag(buf, OVS_FLOW_ATTR_PROBE);
+ nl_msg_put_flag(buf, OVS_PACKET_ATTR_PROBE);
+ }
+ if (d_exec->mtu) {
+ nl_msg_put_u16(buf, OVS_PACKET_ATTR_MRU, d_exec->mtu);
}
}
-#define MAX_OPS 50
-
-static void
+/* Executes, against 'dpif', up to the first 'n_ops' operations in 'ops'.
+ * Returns the number actually executed (at least 1, if 'n_ops' is
+ * positive). */
+static size_t
dpif_netlink_operate__(struct dpif_netlink *dpif,
struct dpif_op **ops, size_t n_ops)
{
+ enum { MAX_OPS = 50 };
+
struct op_auxdata {
struct nl_transaction txn;
struct nl_transaction *txnsp[MAX_OPS];
size_t i;
- ovs_assert(n_ops <= MAX_OPS);
+ n_ops = MIN(n_ops, MAX_OPS);
for (i = 0; i < n_ops; i++) {
struct op_auxdata *aux = &auxes[i];
struct dpif_op *op = ops[i];
struct dpif_flow_put *put;
struct dpif_flow_del *del;
- struct dpif_execute *execute;
struct dpif_flow_get *get;
struct dpif_netlink_flow flow;
break;
case DPIF_OP_EXECUTE:
- execute = &op->u.execute;
- dpif_netlink_encode_execute(dpif->dp_ifindex, execute,
- &aux->request);
+ /* Can't execute a packet that won't fit in a Netlink attribute. */
+ if (OVS_UNLIKELY(nl_attr_oversized(
+ dp_packet_size(op->u.execute.packet)))) {
+ /* Report an error immediately if this is the first operation.
+ * Otherwise the easiest thing to do is to postpone to the next
+ * call (when this will be the first operation). */
+ if (i == 0) {
+ VLOG_ERR_RL(&error_rl,
+ "dropping oversized %"PRIu32"-byte packet",
+ dp_packet_size(op->u.execute.packet));
+ op->error = ENOBUFS;
+ return 1;
+ }
+ n_ops = i;
+ } else {
+ dpif_netlink_encode_execute(dpif->dp_ifindex, &op->u.execute,
+ &aux->request);
+ }
break;
case DPIF_OP_FLOW_GET:
get = &op->u.flow_get;
- dpif_netlink_init_flow_get(dpif, get->key, get->key_len, &flow);
+ dpif_netlink_init_flow_get(dpif, get, &flow);
aux->txn.reply = get->buffer;
dpif_netlink_flow_to_ofpbuf(&flow, &aux->request);
break;
op->error = dpif_netlink_flow_from_ofpbuf(&reply, txn->reply);
if (!op->error) {
- dpif_netlink_flow_to_dpif_flow(get->flow, &reply);
+ dpif_netlink_flow_to_dpif_flow(&dpif->dpif, get->flow,
+ &reply);
}
}
break;
ofpbuf_uninit(&aux->request);
ofpbuf_uninit(&aux->reply);
}
+
+ return n_ops;
}
static void
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
while (n_ops > 0) {
- size_t chunk = MIN(n_ops, MAX_OPS);
- dpif_netlink_operate__(dpif, ops, chunk);
+ size_t chunk = dpif_netlink_operate__(dpif, ops, n_ops);
ops += chunk;
n_ops -= chunk;
}
}
+#ifdef _WIN32
+/* Per-handler setup/teardown.  On Windows each handler owns a pool of
+ * netlink sockets; on other platforms each handler owns an epoll fd.
+ * (Was "#if _WIN32": every other guard in this file uses #ifdef, and
+ * "#if" on an undefined macro trips -Wundef on non-Windows builds.) */
+static void
+dpif_netlink_handler_uninit(struct dpif_handler *handler)
+{
+    vport_delete_sock_pool(handler);
+}
+
+static int
+dpif_netlink_handler_init(struct dpif_handler *handler)
+{
+    return vport_create_sock_pool(handler);
+}
+#else
+
+/* Returns 0 on success, or errno if epoll_create() fails. */
+static int
+dpif_netlink_handler_init(struct dpif_handler *handler)
+{
+    handler->epoll_fd = epoll_create(10);
+    return handler->epoll_fd < 0 ? errno : 0;
+}
+
+static void
+dpif_netlink_handler_uninit(struct dpif_handler *handler)
+{
+    close(handler->epoll_fd);
+}
+#endif
+
/* Synchronizes 'channels' in 'dpif->handlers' with the set of vports
* currently in 'dpif' in the kernel, by adding a new set of channels for
* any kernel vport that lacks one and deleting any channels that have no
int retval = 0;
size_t i;
+ ovs_assert(!WINDOWS || n_handlers <= 1);
+ ovs_assert(!WINDOWS || dpif->n_handlers <= 1);
+
if (dpif->n_handlers != n_handlers) {
destroy_all_channels(dpif);
dpif->handlers = xzalloc(n_handlers * sizeof *dpif->handlers);
for (i = 0; i < n_handlers; i++) {
+ int error;
struct dpif_handler *handler = &dpif->handlers[i];
-#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
-#else
- handler->epoll_fd = epoll_create(10);
- if (handler->epoll_fd < 0) {
+ error = dpif_netlink_handler_init(handler);
+ if (error) {
size_t j;
+
                for (j = 0; j < i; j++) {
-                    close(dpif->handlers[j].epoll_fd);
+                    dpif_netlink_handler_uninit(&dpif->handlers[j]);
}
free(dpif->handlers);
dpif->handlers = NULL;
- return errno;
+ return error;
}
-#endif
}
dpif->n_handlers = n_handlers;
}
if (port_no >= dpif->uc_array_size
|| !vport_get_pids(dpif, port_no, &upcall_pids)) {
- struct nl_sock **socksp = vport_create_socksp(dpif->n_handlers,
- &error);
+ struct nl_sock **socksp = vport_create_socksp(dpif, &error);
if (!socksp) {
goto error;
if (error) {
VLOG_INFO("%s: could not add channels for port %s",
dpif_name(&dpif->dpif), vport.name);
- vport_del_socksp(socksp, dpif->n_handlers);
+ vport_del_socksp(dpif, socksp);
retval = error;
goto error;
}
struct dpif_netlink *dpif = dpif_netlink_cast(dpif_);
int error = 0;
+#ifdef _WIN32
+ /* Multiple upcall handlers will be supported once kernel datapath supports
+ * it. */
+ if (n_handlers > 1) {
+ return error;
+ }
+#endif
+
fat_rwlock_wrlock(&dpif->upcall_lock);
if (dpif->handlers) {
error = dpif_netlink_refresh_channels(dpif, n_handlers);
}
static int
-parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall,
- int *dp_ifindex)
+parse_odp_packet(const struct dpif_netlink *dpif, struct ofpbuf *buf,
+ struct dpif_upcall *upcall, int *dp_ifindex)
{
static const struct nl_policy ovs_packet_policy[] = {
/* Always present. */
/* OVS_PACKET_CMD_ACTION only. */
[OVS_PACKET_ATTR_USERDATA] = { .type = NL_A_UNSPEC, .optional = true },
[OVS_PACKET_ATTR_EGRESS_TUN_KEY] = { .type = NL_A_NESTED, .optional = true },
+ [OVS_PACKET_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
+ [OVS_PACKET_ATTR_MRU] = { .type = NL_A_U16, .optional = true }
};
- struct ovs_header *ovs_header;
- struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
- struct nlmsghdr *nlmsg;
- struct genlmsghdr *genl;
- struct ofpbuf b;
- int type;
-
- ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
+ struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
- nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
- genl = ofpbuf_try_pull(&b, sizeof *genl);
- ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+ struct nlattr *a[ARRAY_SIZE(ovs_packet_policy)];
if (!nlmsg || !genl || !ovs_header
|| nlmsg->nlmsg_type != ovs_packet_family
|| !nl_policy_parse(&b, 0, ovs_packet_policy, a,
return EINVAL;
}
- type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
- : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
- : -1);
+ int type = (genl->cmd == OVS_PACKET_CMD_MISS ? DPIF_UC_MISS
+ : genl->cmd == OVS_PACKET_CMD_ACTION ? DPIF_UC_ACTION
+ : -1);
if (type < 0) {
return EINVAL;
}
upcall->key = CONST_CAST(struct nlattr *,
nl_attr_get(a[OVS_PACKET_ATTR_KEY]));
upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]);
+ dpif_flow_hash(&dpif->dpif, upcall->key, upcall->key_len, &upcall->ufid);
upcall->userdata = a[OVS_PACKET_ATTR_USERDATA];
upcall->out_tun_key = a[OVS_PACKET_ATTR_EGRESS_TUN_KEY];
+ upcall->actions = a[OVS_PACKET_ATTR_ACTIONS];
+ upcall->mru = a[OVS_PACKET_ATTR_MRU];
/* Allow overwriting the netlink attribute header without reallocating. */
- ofpbuf_use_stub(&upcall->packet,
+ dp_packet_use_stub(&upcall->packet,
CONST_CAST(struct nlattr *,
nl_attr_get(a[OVS_PACKET_ATTR_PACKET])) - 1,
nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]) +
sizeof(struct nlattr));
- ofpbuf_set_data(&upcall->packet,
- (char *)ofpbuf_data(&upcall->packet) + sizeof(struct nlattr));
- ofpbuf_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
+ dp_packet_set_data(&upcall->packet,
+ (char *)dp_packet_data(&upcall->packet) + sizeof(struct nlattr));
+ dp_packet_set_size(&upcall->packet, nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]));
*dp_ifindex = ovs_header->dp_ifindex;
return 0;
}
+#ifdef _WIN32
+#define PACKET_RECV_BATCH_SIZE 50
+/* Windows counterpart of dpif_netlink_recv__(): polls each socket in the
+ * single supported handler's pool for an upcall.  Returns 0 and fills in
+ * 'upcall'/'buf' on success, EAGAIN when nothing is pending (or the handler
+ * is unusable), or another positive errno value on a hard error. */
+static int
+dpif_netlink_recv_windows(struct dpif_netlink *dpif, uint32_t handler_id,
+                          struct dpif_upcall *upcall, struct ofpbuf *buf)
+    OVS_REQ_RDLOCK(dpif->upcall_lock)
+{
+    struct dpif_handler *handler;
+    int read_tries = 0;
+    struct dpif_windows_vport_sock *sock_pool;
+    uint32_t i;
+
+    if (!dpif->handlers) {
+        return EAGAIN;
+    }
+
+    /* Only one handler is supported currently. */
+    if (handler_id >= 1) {
+        return EAGAIN;
+    }
+
+    if (handler_id >= dpif->n_handlers) {
+        return EAGAIN;
+    }
+
+    handler = &dpif->handlers[handler_id];
+    sock_pool = handler->vport_sock_pool;
+
+    for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
+        for (;;) {
+            int dp_ifindex;
+            int error;
+
+            /* Bound the reads per call so one busy socket cannot starve
+             * the caller. */
+            if (++read_tries > PACKET_RECV_BATCH_SIZE) {
+                return EAGAIN;
+            }
+
+            error = nl_sock_recv(sock_pool[i].nl_sock, buf, false);
+            if (error == ENOBUFS) {
+                /* ENOBUFS typically means that we've received so many
+                 * packets that the buffer overflowed.  Try again
+                 * immediately because there's almost certainly a packet
+                 * waiting for us. */
+                /* XXX: report_loss(dpif, ch, idx, handler_id); */
+                continue;
+            }
+
+            /* XXX: ch->last_poll = time_msec(); */
+            if (error) {
+                if (error == EAGAIN) {
+                    /* This socket is drained; try the next one. */
+                    break;
+                }
+                return error;
+            }
+
+            /* Accept only packets for this datapath; a packet for another
+             * dp_ifindex is skipped and polling continues. */
+            error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
+            if (!error && dp_ifindex == dpif->dp_ifindex) {
+                return 0;
+            } else if (error) {
+                return error;
+            }
+        }
+    }
+
+    return EAGAIN;
+}
+#else
static int
dpif_netlink_recv__(struct dpif_netlink *dpif, uint32_t handler_id,
struct dpif_upcall *upcall, struct ofpbuf *buf)
handler->event_offset = handler->n_events = 0;
-#ifdef _WIN32
- retval = dpif->uc_array_size;
- handler->event_offset = 0;
-#else
do {
retval = epoll_wait(handler->epoll_fd, handler->epoll_events,
dpif->uc_array_size, 0);
} while (retval < 0 && errno == EINTR);
-#endif
+
if (retval < 0) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
VLOG_WARN_RL(&rl, "epoll_wait failed (%s)", ovs_strerror(errno));
return error;
}
- error = parse_odp_packet(buf, upcall, &dp_ifindex);
+ error = parse_odp_packet(dpif, buf, upcall, &dp_ifindex);
if (!error && dp_ifindex == dpif->dp_ifindex) {
return 0;
} else if (error) {
return EAGAIN;
}
+#endif
static int
dpif_netlink_recv(struct dpif *dpif_, uint32_t handler_id,
int error;
fat_rwlock_rdlock(&dpif->upcall_lock);
+#ifdef _WIN32
+ error = dpif_netlink_recv_windows(dpif, handler_id, upcall, buf);
+#else
error = dpif_netlink_recv__(dpif, handler_id, upcall, buf);
+#endif
fat_rwlock_unlock(&dpif->upcall_lock);
return error;
OVS_REQ_RDLOCK(dpif->upcall_lock)
{
#ifdef _WIN32
- /*
- * XXX : Map appropiate Windows handle
- */
+ uint32_t i;
+ struct dpif_windows_vport_sock *sock_pool =
+ dpif->handlers[handler_id].vport_sock_pool;
+
+ /* Only one handler is supported currently. */
+ if (handler_id >= 1) {
+ return;
+ }
+
+ for (i = 0; i < VPORT_SOCK_POOL_SIZE; i++) {
+ nl_sock_wait(sock_pool[i].nl_sock, POLLIN);
+ }
#else
if (dpif->handlers && handler_id < dpif->n_handlers) {
struct dpif_handler *handler = &dpif->handlers[handler_id];
fat_rwlock_unlock(&dpif->upcall_lock);
}
+/* Returns the version of the kernel datapath module as a heap-allocated
+ * string that the caller must free, or NULL when it cannot be determined
+ * (non-Linux build, module not loaded, or sysfs file unreadable). */
+static char *
+dpif_netlink_get_datapath_version(void)
+{
+    char *version_str = NULL;
+
+#ifdef __linux__
+
+#define MAX_VERSION_STR_SIZE 80
+#define LINUX_DATAPATH_VERSION_FILE "/sys/module/openvswitch/version"
+    FILE *f;
+
+    f = fopen(LINUX_DATAPATH_VERSION_FILE, "r");
+    if (f) {
+        char *newline;
+        char version[MAX_VERSION_STR_SIZE];
+
+        if (fgets(version, MAX_VERSION_STR_SIZE, f)) {
+            /* Strip the trailing newline that fgets() retains. */
+            newline = strchr(version, '\n');
+            if (newline) {
+                *newline = '\0';
+            }
+            version_str = xstrdup(version);
+        }
+        fclose(f);
+    }
+#endif
+
+    return version_str;
+}
+
+#ifdef __linux__
+struct dpif_netlink_ct_dump_state {
+ struct ct_dpif_dump_state up;
+ struct nl_ct_dump_state *nl_ct_dump;
+};
+
+/* Begins a conntrack dump, optionally restricted to '*zone' when 'zone' is
+ * nonnull.  On success stores the new dump state in '*dump_' and returns 0;
+ * on failure returns a positive errno value and allocates nothing. */
+static int
+dpif_netlink_ct_dump_start(struct dpif *dpif OVS_UNUSED,
+                           struct ct_dpif_dump_state **dump_,
+                           const uint16_t *zone)
+{
+    struct dpif_netlink_ct_dump_state *dump;
+    int err;
+
+    dump = xzalloc(sizeof *dump);
+    err = nl_ct_dump_start(&dump->nl_ct_dump, zone);
+    if (err) {
+        free(dump);
+        return err;
+    }
+
+    *dump_ = &dump->up;
+
+    return 0;
+}
+
+/* Retrieves the next conntrack entry of an in-progress dump into 'entry'.
+ * The return value is passed through from nl_ct_dump_next(). */
+static int
+dpif_netlink_ct_dump_next(struct dpif *dpif OVS_UNUSED,
+                          struct ct_dpif_dump_state *dump_,
+                          struct ct_dpif_entry *entry)
+{
+    struct dpif_netlink_ct_dump_state *dump;
+
+    INIT_CONTAINER(dump, dump_, up);
+
+    return nl_ct_dump_next(dump->nl_ct_dump, entry);
+}
+
+/* Ends a conntrack dump and frees the state allocated by
+ * dpif_netlink_ct_dump_start().  Returns nl_ct_dump_done()'s result. */
+static int
+dpif_netlink_ct_dump_done(struct dpif *dpif OVS_UNUSED,
+                          struct ct_dpif_dump_state *dump_)
+{
+    struct dpif_netlink_ct_dump_state *dump;
+    int err;
+
+    INIT_CONTAINER(dump, dump_, up);
+
+    err = nl_ct_dump_done(dump->nl_ct_dump);
+    free(dump);
+    return err;
+}
+
+/* Flushes conntrack entries: all of them when 'zone' is NULL, otherwise only
+ * the entries in '*zone'. */
+static int
+dpif_netlink_ct_flush(struct dpif *dpif OVS_UNUSED, const uint16_t *zone)
+{
+    if (zone) {
+        return nl_ct_flush_zone(*zone);
+    } else {
+        return nl_ct_flush();
+    }
+}
+#endif
+
const struct dpif_class dpif_netlink_class = {
"system",
+ NULL, /* init */
dpif_netlink_enumerate,
NULL,
dpif_netlink_open,
dpif_netlink_recv,
dpif_netlink_recv_wait,
dpif_netlink_recv_purge,
+ NULL, /* register_dp_purge_cb */
NULL, /* register_upcall_cb */
NULL, /* enable_upcall */
NULL, /* disable_upcall */
+ dpif_netlink_get_datapath_version, /* get_datapath_version */
+#ifdef __linux__
+ dpif_netlink_ct_dump_start,
+ dpif_netlink_ct_dump_next,
+ dpif_netlink_ct_dump_done,
+ dpif_netlink_ct_flush,
+#else
+ NULL, /* ct_dump_start */
+ NULL, /* ct_dump_next */
+ NULL, /* ct_dump_done */
+ NULL, /* ct_flush */
+#endif
};
static int
[OVS_VPORT_ATTR_OPTIONS] = { .type = NL_A_NESTED, .optional = true },
};
- struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
- struct ovs_header *ovs_header;
- struct nlmsghdr *nlmsg;
- struct genlmsghdr *genl;
- struct ofpbuf b;
-
dpif_netlink_vport_init(vport);
- ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
- nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
- genl = ofpbuf_try_pull(&b, sizeof *genl);
- ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+ struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+
+ struct nlattr *a[ARRAY_SIZE(ovs_vport_policy)];
if (!nlmsg || !genl || !ovs_header
|| nlmsg->nlmsg_type != ovs_vport_family
|| !nl_policy_parse(&b, 0, ovs_vport_policy, a,
.optional = true },
};
- struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
- struct ovs_header *ovs_header;
- struct nlmsghdr *nlmsg;
- struct genlmsghdr *genl;
- struct ofpbuf b;
-
dpif_netlink_dp_init(dp);
- ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
- nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
- genl = ofpbuf_try_pull(&b, sizeof *genl);
- ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+ struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+
+ struct nlattr *a[ARRAY_SIZE(ovs_datapath_policy)];
if (!nlmsg || !genl || !ovs_header
|| nlmsg->nlmsg_type != ovs_datapath_family
|| !nl_policy_parse(&b, 0, ovs_datapath_policy, a,
dpif_netlink_flow_from_ofpbuf(struct dpif_netlink_flow *flow,
const struct ofpbuf *buf)
{
- static const struct nl_policy ovs_flow_policy[] = {
- [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED },
+ static const struct nl_policy ovs_flow_policy[__OVS_FLOW_ATTR_MAX] = {
+ [OVS_FLOW_ATTR_KEY] = { .type = NL_A_NESTED, .optional = true },
[OVS_FLOW_ATTR_MASK] = { .type = NL_A_NESTED, .optional = true },
[OVS_FLOW_ATTR_ACTIONS] = { .type = NL_A_NESTED, .optional = true },
[OVS_FLOW_ATTR_STATS] = { NL_POLICY_FOR(struct ovs_flow_stats),
.optional = true },
[OVS_FLOW_ATTR_TCP_FLAGS] = { .type = NL_A_U8, .optional = true },
[OVS_FLOW_ATTR_USED] = { .type = NL_A_U64, .optional = true },
+ [OVS_FLOW_ATTR_UFID] = { .type = NL_A_UNSPEC, .optional = true,
+ .min_len = sizeof(ovs_u128) },
/* The kernel never uses OVS_FLOW_ATTR_CLEAR. */
/* The kernel never uses OVS_FLOW_ATTR_PROBE. */
+ /* The kernel never uses OVS_FLOW_ATTR_UFID_FLAGS. */
};
- struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
- struct ovs_header *ovs_header;
- struct nlmsghdr *nlmsg;
- struct genlmsghdr *genl;
- struct ofpbuf b;
-
dpif_netlink_flow_init(flow);
- ofpbuf_use_const(&b, ofpbuf_data(buf), ofpbuf_size(buf));
- nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
- genl = ofpbuf_try_pull(&b, sizeof *genl);
- ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+ struct ofpbuf b = ofpbuf_const_initializer(buf->data, buf->size);
+ struct nlmsghdr *nlmsg = ofpbuf_try_pull(&b, sizeof *nlmsg);
+ struct genlmsghdr *genl = ofpbuf_try_pull(&b, sizeof *genl);
+ struct ovs_header *ovs_header = ofpbuf_try_pull(&b, sizeof *ovs_header);
+
+ struct nlattr *a[ARRAY_SIZE(ovs_flow_policy)];
if (!nlmsg || !genl || !ovs_header
|| nlmsg->nlmsg_type != ovs_flow_family
|| !nl_policy_parse(&b, 0, ovs_flow_policy, a,
ARRAY_SIZE(ovs_flow_policy))) {
return EINVAL;
}
+ if (!a[OVS_FLOW_ATTR_KEY] && !a[OVS_FLOW_ATTR_UFID]) {
+ return EINVAL;
+ }
flow->nlmsg_flags = nlmsg->nlmsg_flags;
flow->dp_ifindex = ovs_header->dp_ifindex;
- flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
- flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
+ if (a[OVS_FLOW_ATTR_KEY]) {
+ flow->key = nl_attr_get(a[OVS_FLOW_ATTR_KEY]);
+ flow->key_len = nl_attr_get_size(a[OVS_FLOW_ATTR_KEY]);
+ }
+ if (a[OVS_FLOW_ATTR_UFID]) {
+ const ovs_u128 *ufid;
+
+ ufid = nl_attr_get_unspec(a[OVS_FLOW_ATTR_UFID],
+ nl_attr_get_size(a[OVS_FLOW_ATTR_UFID]));
+ flow->ufid = *ufid;
+ flow->ufid_present = true;
+ }
if (a[OVS_FLOW_ATTR_MASK]) {
flow->mask = nl_attr_get(a[OVS_FLOW_ATTR_MASK]);
flow->mask_len = nl_attr_get_size(a[OVS_FLOW_ATTR_MASK]);
ovs_header = ofpbuf_put_uninit(buf, sizeof *ovs_header);
ovs_header->dp_ifindex = flow->dp_ifindex;
- if (flow->key_len) {
- nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY, flow->key, flow->key_len);
+ if (flow->ufid_present) {
+ nl_msg_put_unspec(buf, OVS_FLOW_ATTR_UFID, &flow->ufid,
+ sizeof flow->ufid);
}
-
- if (flow->mask_len) {
- nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK, flow->mask, flow->mask_len);
+ if (flow->ufid_terse) {
+ nl_msg_put_u32(buf, OVS_FLOW_ATTR_UFID_FLAGS,
+ OVS_UFID_F_OMIT_KEY | OVS_UFID_F_OMIT_MASK
+ | OVS_UFID_F_OMIT_ACTIONS);
}
+ if (!flow->ufid_terse || !flow->ufid_present) {
+ if (flow->key_len) {
+ nl_msg_put_unspec(buf, OVS_FLOW_ATTR_KEY,
+ flow->key, flow->key_len);
+ }
- if (flow->actions || flow->actions_len) {
- nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
- flow->actions, flow->actions_len);
+ if (flow->mask_len) {
+ nl_msg_put_unspec(buf, OVS_FLOW_ATTR_MASK,
+ flow->mask, flow->mask_len);
+ }
+ if (flow->actions || flow->actions_len) {
+ nl_msg_put_unspec(buf, OVS_FLOW_ATTR_ACTIONS,
+ flow->actions, flow->actions_len);
+ }
}
/* We never need to send these to the kernel. */