/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*/
#include <config.h>
-
-#include "ofproto/ofproto-dpif.h"
-#include "ofproto/ofproto-provider.h"
-
#include <errno.h>
#include "bfd.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
-#include "ovs-lldp.h"
#include "dpif.h"
-#include "dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "hmapx.h"
#include "learn.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
-#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "nx-match.h"
#include "odp-util.h"
#include "odp-execute.h"
-#include "ofp-util.h"
-#include "ofpbuf.h"
-#include "ofp-actions.h"
-#include "ofp-parse.h"
-#include "ofp-print.h"
+#include "ofproto/ofproto-dpif.h"
+#include "ofproto/ofproto-provider.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-mirror.h"
#include "ofproto-dpif-monitor.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
-#include "poll-loop.h"
+#include "openvswitch/ofp-actions.h"
+#include "openvswitch/dynamic-string.h"
+#include "openvswitch/meta-flow.h"
+#include "openvswitch/ofp-parse.h"
+#include "openvswitch/ofp-print.h"
+#include "openvswitch/ofp-util.h"
+#include "openvswitch/ofpbuf.h"
+#include "openvswitch/vlog.h"
+#include "ovs-lldp.h"
#include "ovs-rcu.h"
#include "ovs-router.h"
+#include "poll-loop.h"
#include "seq.h"
#include "simap.h"
#include "smap.h"
#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
+#include "util.h"
#include "vlan-bitmap.h"
-#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
-static void rule_expire(struct rule_dpif *);
+static void rule_expire(struct rule_dpif *, long long now);
struct group_dpif {
struct ofgroup up;
/* Queue to DSCP mapping. */
struct ofproto_port_queue *qdscp;
size_t n_qdscp;
-
- /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
- *
- * This is deprecated. It is only for compatibility with broken device
- * drivers in old versions of Linux that do not properly support VLANs when
- * VLAN devices are not used. When broken device drivers are no longer in
- * widespread use, we will delete these interfaces. */
- ofp_port_t realdev_ofp_port;
- int vlandev_vid;
-};
-
-/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
- *
- * This is deprecated. It is only for compatibility with broken device drivers
- * in old versions of Linux that do not properly support VLANs when VLAN
- * devices are not used. When broken device drivers are no longer in
- * widespread use, we will delete these interfaces. */
-struct vlan_splinter {
- struct hmap_node realdev_vid_node;
- struct hmap_node vlandev_node;
- ofp_port_t realdev_ofp_port;
- ofp_port_t vlandev_ofp_port;
- int vid;
};
-static void vsp_remove(struct ofport_dpif *);
-static void vsp_add(struct ofport_dpif *, ofp_port_t realdev_ofp_port, int vid);
-
static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
ofp_port_t);
struct ofproto up;
struct dpif_backer *backer;
+ /* Unique identifier for this instantiation of this bridge in this running
+ * process. */
+ struct uuid uuid;
+
ATOMIC(cls_version_t) tables_version; /* For classifier lookups. */
uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
- struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
+ struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
/* Bridging. */
struct netflow *netflow;
struct rstp *rstp;
long long int rstp_last_tick;
- /* VLAN splinters. */
- struct ovs_mutex vsp_mutex;
- struct hmap realdev_vid_map OVS_GUARDED; /* (realdev,vid) -> vlandev. */
- struct hmap vlandev_map OVS_GUARDED; /* vlandev -> (realdev,vid). */
-
/* Ports. */
struct sset ports; /* Set of standard port names. */
struct sset ghost_ports; /* Ports with no datapath port. */
uint64_t change_seq; /* Connectivity status changes. */
/* Work queues. */
- struct guarded_list pins; /* Contains "struct ofputil_packet_in"s. */
- struct seq *pins_seq; /* For notifying 'pins' reception. */
- uint64_t pins_seqno;
+ struct guarded_list ams; /* Contains "struct ofproto_async_msgs"s. */
+ struct seq *ams_seq; /* For notifying 'ams' reception. */
+ uint64_t ams_seqno;
};
/* All existing ofproto_dpif instances, indexed by ->up.name. */
ofproto_flow_mod(&ofproto->up, &ofm);
}
-/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
- * Takes ownership of 'pin' and pin->packet. */
+/* Appends 'am' to the queue of asynchronous messages to be sent to the
+ * controller. Takes ownership of 'am' and any data it points to. */
void
-ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
- struct ofproto_packet_in *pin)
+ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
+ struct ofproto_async_msg *am)
{
- if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
+ if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
COVERAGE_INC(packet_in_overflow);
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
+ ofproto_async_msg_free(am);
}
/* Wakes up main thread for packet-in I/O. */
- seq_change(ofproto->pins_seq);
+ seq_change(ofproto->ams_seq);
}
/* The default "table-miss" behaviour for OpenFlow1.3+ is to drop the
shash_add(&init_ofp_ports, node->name, new_hint);
}
+
+ ofproto_unixctl_init();
+ udpif_init();
}
static void
return NULL;
}
+/* Returns true if reception of datapath packets (upcalls) is enabled on
+ * 'backer'.  Grounded only in the 'recv_set_enable' flag read below; the
+ * flag's full semantics are defined where it is set (not visible here). */
+bool
+ofproto_dpif_backer_enabled(struct dpif_backer* backer)
+{
+    return backer->recv_set_enable;
+}
+
static int
type_run(const char *type)
{
return 0;
}
+ /* This must be called before dpif_run() */
+ dpif_poll_threads_set(backer->dpif, pmd_cpu_mask);
if (dpif_run(backer->dpif)) {
backer->need_revalidate = REV_RECONFIGURE;
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
}
- dpif_poll_threads_set(backer->dpif, n_dpdk_rxqs, pmd_cpu_mask);
-
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
const char *name;
int error;
- recirc_init();
-
backer = shash_find_data(&all_dpif_backers, type);
if (backer) {
backer->refcount++;
/* Loop through the ports already on the datapath and remove any
* that we don't need anymore. */
- list_init(&garbage_list);
+ ovs_list_init(&garbage_list);
dpif_port_dump_start(&port_dump, backer->dpif);
while (dpif_port_dump_next(&port_dump, &port)) {
node = shash_find(&init_ofp_ports, port.name);
if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
garbage = xmalloc(sizeof *garbage);
garbage->odp_port = port.port_no;
- list_push_front(&garbage_list, &garbage->list_node);
+ ovs_list_push_front(&garbage_list, &garbage->list_node);
}
}
dpif_port_dump_done(&port_dump);
struct ofpbuf actions;
struct dpif_execute execute;
struct dp_packet packet;
+ struct flow flow;
size_t start;
int error;
eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
eth->eth_type = htons(0x1234);
+ flow_extract(&packet, &flow);
+
/* Execute the actions. On older datapaths this fails with ERANGE, on
* newer datapaths it succeeds. */
execute.actions = actions.data;
execute.actions_len = actions.size;
execute.packet = &packet;
+ execute.flow = &flow;
execute.needs_help = false;
execute.probe = true;
+ execute.mtu = 0;
error = dpif_execute(backer->dpif, &execute);
struct ofpbuf actions;
struct dpif_execute execute;
struct dp_packet packet;
+ struct flow flow;
int error;
struct ovs_key_ethernet key, mask;
eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
eth->eth_type = htons(0x1234);
+ flow_extract(&packet, &flow);
+
/* Execute the actions. On older datapaths this fails with EINVAL, on
* newer datapaths it succeeds. */
execute.actions = actions.data;
execute.actions_len = actions.size;
execute.packet = &packet;
+ execute.flow = &flow;
execute.needs_help = false;
execute.probe = true;
+ execute.mtu = 0;
error = dpif_execute(backer->dpif, &execute);
return !error;
}
+/* Tests whether 'backer''s datapath supports truncation of a packet in
+ * OVS_ACTION_ATTR_TRUNC. We need to disable some features on older
+ * datapaths that don't support this feature. */
+static bool
+check_trunc_action(struct dpif_backer *backer)
+{
+    struct eth_header *eth;
+    struct ofpbuf actions;
+    struct dpif_execute execute;
+    struct dp_packet packet;
+    struct ovs_action_trunc *trunc;
+    struct flow flow;
+    int error;
+
+    /* Compose an action with output(port:1,
+     * max_len:OVS_ACTION_OUTPUT_MIN + 1).
+     * This translates to one truncate action and one output action. */
+    ofpbuf_init(&actions, 64);
+    trunc = nl_msg_put_unspec_uninit(&actions,
+                OVS_ACTION_ATTR_TRUNC, sizeof *trunc);
+
+    /* NOTE(review): the comment above says OVS_ACTION_OUTPUT_MIN + 1 but the
+     * code uses ETH_HEADER_LEN + 1; presumably the two constants are equal --
+     * TODO confirm against the datapath headers. */
+    trunc->max_len = ETH_HEADER_LEN + 1;
+    nl_msg_put_odp_port(&actions, OVS_ACTION_ATTR_OUTPUT, u32_to_odp(1));
+
+    /* Compose a dummy Ethernet packet. */
+    dp_packet_init(&packet, ETH_HEADER_LEN);
+    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
+    eth->eth_type = htons(0x1234);
+
+    flow_extract(&packet, &flow);
+
+    /* Execute the actions. On older datapaths this fails with EINVAL, on
+     * newer datapaths it succeeds. */
+    execute.actions = actions.data;
+    execute.actions_len = actions.size;
+    execute.packet = &packet;
+    execute.flow = &flow;
+    execute.needs_help = false;
+    /* 'probe' marks this execution as a feature probe, so a failure here is
+     * expected on older datapaths -- presumably this suppresses error
+     * reporting in the dpif layer; confirm in dpif.h. */
+    execute.probe = true;
+    execute.mtu = 0;
+
+    error = dpif_execute(backer->dpif, &execute);
+
+    /* Probe packet and action buffer are no longer needed, pass or fail. */
+    dp_packet_uninit(&packet);
+    ofpbuf_uninit(&actions);
+
+    if (error) {
+        VLOG_INFO("%s: Datapath does not support truncate action",
+                  dpif_name(backer->dpif));
+    } else {
+        VLOG_INFO("%s: Datapath supports truncate action",
+                  dpif_name(backer->dpif));
+    }
+
+    return !error;
+}
+
+/* Generates a feature-probe function check_<NAME>() that tests whether
+ * 'backer''s datapath understands flow key field FIELD.  The probe builds a
+ * zeroed flow with FIELD set to VALUE, serializes it to an ODP flow key with
+ * odp_parms.support.SUPPORT enabled, and asks dpif_probe_feature() whether
+ * the datapath accepts that key.  Each generated function logs and returns
+ * the result.  (Comments are kept outside the macro body so the backslash
+ * continuations below stay untouched.) */
+#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE) \
+static bool \
+check_##NAME(struct dpif_backer *backer) \
+{ \
+    struct flow flow; \
+    struct odputil_keybuf keybuf; \
+    struct ofpbuf key; \
+    bool enable; \
+    struct odp_flow_key_parms odp_parms = { \
+        .flow = &flow, \
+        .support = { \
+            .SUPPORT = true, \
+        }, \
+    }; \
+ \
+    memset(&flow, 0, sizeof flow); \
+    flow.FIELD = VALUE; \
+ \
+    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); \
+    odp_flow_key_from_flow(&odp_parms, &key); \
+    enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL); \
+ \
+    if (enable) { \
+        VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif)); \
+    } else { \
+        VLOG_INFO("%s: Datapath does not support "#NAME, \
+                  dpif_name(backer->dpif)); \
+    } \
+ \
+    return enable; \
+}
+/* Shorthand for the common case where the probe name, the support flag, and
+ * the flow field are all spelled the same and the probe value is 1. */
+#define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD, FIELD, 1)
+
+/* Instantiate check_ct_state(), check_ct_zone(), check_ct_mark(),
+ * check_ct_label(), and check_ct_state_nat() for the conntrack fields.
+ * ct_label probes via its low 64 bits; ct_state_nat probes the NAT-related
+ * ct_state bits (CS_TRACKED|CS_SRC_NAT) under the ct_state support flag. */
+CHECK_FEATURE(ct_state)
+CHECK_FEATURE(ct_zone)
+CHECK_FEATURE(ct_mark)
+CHECK_FEATURE__(ct_label, ct_label, ct_label.u64.lo, 1)
+CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, CS_TRACKED|CS_SRC_NAT)
+
+#undef CHECK_FEATURE
+#undef CHECK_FEATURE__
+
static void
check_support(struct dpif_backer *backer)
{
backer->support.odp.recirc = check_recirc(backer);
backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
backer->support.masked_set_action = check_masked_set_action(backer);
+ backer->support.trunc = check_trunc_action(backer);
backer->support.ufid = check_ufid(backer);
backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);
+
+ backer->support.odp.ct_state = check_ct_state(backer);
+ backer->support.odp.ct_zone = check_ct_zone(backer);
+ backer->support.odp.ct_mark = check_ct_mark(backer);
+ backer->support.odp.ct_label = check_ct_label(backer);
+
+ backer->support.odp.ct_state_nat = check_ct_state_nat(backer);
}
static int
return error;
}
+ uuid_generate(&ofproto->uuid);
atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
ovs_mutex_init_adaptive(&ofproto->stats_mutex);
- ovs_mutex_init(&ofproto->vsp_mutex);
-
- guarded_list_init(&ofproto->pins);
-
- ofproto_unixctl_init();
- hmap_init(&ofproto->vlandev_map);
- hmap_init(&ofproto->realdev_vid_map);
+ guarded_list_init(&ofproto->ams);
sset_init(&ofproto->ports);
sset_init(&ofproto->ghost_ports);
sset_init(&ofproto->port_poll_set);
ofproto->port_poll_errno = 0;
ofproto->change_seq = 0;
- ofproto->pins_seq = seq_create();
- ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+ ofproto->ams_seq = seq_create();
+ ofproto->ams_seqno = seq_read(ofproto->ams_seq);
SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
controller = ofpact_put_CONTROLLER(&ofpacts);
controller->max_len = UINT16_MAX;
controller->controller_id = 0;
- controller->reason = OFPR_NO_MATCH;
- ofpact_pad(&ofpacts);
+ controller->reason = OFPR_IMPLICIT_MISS;
+ ofpact_finish_CONTROLLER(&ofpacts, &controller);
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
&ofproto->miss_rule);
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofproto_packet_in *pin;
+ struct ofproto_async_msg *am;
struct rule_dpif *rule;
struct oftable *table;
- struct ovs_list pins;
+ struct ovs_list ams;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
xlate_txn_start();
}
ofproto_group_delete_all(&ofproto->up);
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_POP (pin, list_node, &pins) {
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
+ guarded_list_pop_all(&ofproto->ams, &ams);
+ LIST_FOR_EACH_POP (am, list_node, &ams) {
+ ofproto_async_msg_free(am);
}
- guarded_list_destroy(&ofproto->pins);
+ guarded_list_destroy(&ofproto->ams);
recirc_free_ofproto(ofproto, ofproto->up.name);
mac_learning_unref(ofproto->ml);
mcast_snooping_unref(ofproto->ms);
- hmap_destroy(&ofproto->vlandev_map);
- hmap_destroy(&ofproto->realdev_vid_map);
-
sset_destroy(&ofproto->ports);
sset_destroy(&ofproto->ghost_ports);
sset_destroy(&ofproto->port_poll_set);
ovs_mutex_destroy(&ofproto->stats_mutex);
- ovs_mutex_destroy(&ofproto->vsp_mutex);
- seq_destroy(ofproto->pins_seq);
+ seq_destroy(ofproto->ams_seq);
close_dpif_backer(ofproto->backer);
}
mcast_snooping_mdb_flush(ofproto->ms);
}
- /* Always updates the ofproto->pins_seqno to avoid frequent wakeup during
+ /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
* flow restore. Even though nothing is processed during flow restore,
- * all queued 'pins' will be handled immediately when flow restore
+ * all queued 'ams' will be handled immediately when flow restore
* completes. */
- ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+ ofproto->ams_seqno = seq_read(ofproto->ams_seq);
/* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
- struct ofproto_packet_in *pin;
- struct ovs_list pins;
+ struct ofproto_async_msg *am;
+ struct ovs_list ams;
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_POP (pin, list_node, &pins) {
- connmgr_send_packet_in(ofproto->up.connmgr, pin);
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
+ guarded_list_pop_all(&ofproto->ams, &ams);
+ LIST_FOR_EACH_POP (am, list_node, &ams) {
+ connmgr_send_async_msg(ofproto->up.connmgr, am);
+ ofproto_async_msg_free(am);
}
}
new_dump_seq = seq_read(udpif_dump_seq(ofproto->backer->udpif));
if (ofproto->dump_seq != new_dump_seq) {
struct rule *rule, *next_rule;
+ long long now = time_msec();
/* We know stats are relatively fresh, so now is a good time to do some
* periodic work. */
ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
&ofproto->up.expirable) {
- rule_expire(rule_dpif_cast(rule));
+ rule_expire(rule_dpif_cast(rule), now);
}
ovs_mutex_unlock(&ofproto_mutex);
}
static void
-wait(struct ofproto *ofproto_)
+ofproto_dpif_wait(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
mcast_snooping_wait(ofproto->ms);
stp_wait(ofproto);
if (ofproto->backer->need_revalidate) {
- /* Shouldn't happen, but if it does just go around again. */
- VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
poll_immediate_wake();
}
seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
- seq_wait(ofproto->pins_seq, ofproto->pins_seqno);
+ seq_wait(ofproto->ams_seq, ofproto->ams_seqno);
}
static void
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
atomic_store_relaxed(&ofproto->tables_version, version);
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
-
static struct ofport *
port_alloc(void)
{
port->peer = NULL;
port->qdscp = NULL;
port->n_qdscp = 0;
- port->realdev_ofp_port = 0;
- port->vlandev_vid = 0;
port->carrier_seq = netdev_get_carrier_resets(netdev);
port->is_layer3 = netdev_vport_is_layer3(netdev);
}
static void
-port_destruct(struct ofport *port_)
+port_destruct(struct ofport *port_, bool del)
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
sizeof namebuf);
- if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ if (del && dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
/* The underlying device is still there, so delete it. This
* happens when the ofproto is being destroyed, since the caller
* assumes that removal of attached ports will happen as part of
}
ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
- port->lldp, port->up.pp.hw_addr);
+ port->lldp, &port->up.pp.hw_addr);
dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
return 0;
}
+/* Retrieves IPFIX statistics for 'ofproto_' into 'replies'.
+ *
+ * 'bridge_ipfix' is passed through to dpif_ipfix_get_stats(); presumably it
+ * selects bridge-wide vs. flow-sampling exporters -- confirm against that
+ * function's definition.
+ *
+ * Returns OFPERR_NXST_NOT_CONFIGURED if IPFIX is not configured on this
+ * bridge, otherwise the result of dpif_ipfix_get_stats(). */
+static int
+get_ipfix_stats(const struct ofproto *ofproto_,
+                bool bridge_ipfix,
+                struct ovs_list *replies)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+    struct dpif_ipfix *di = ofproto->ipfix;
+
+    if (!di) {
+        return OFPERR_NXST_NOT_CONFIGURED;
+    }
+
+    return dpif_ipfix_get_stats(di, bridge_ipfix, replies);
+}
+
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
- ofport->lldp, ofport->up.pp.hw_addr);
+ ofport->lldp, &ofport->up.pp.hw_addr);
return error;
}
ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
- ofport->lldp, ofport->up.pp.hw_addr);
+ ofport->lldp, &ofport->up.pp.hw_addr);
return 0;
}
ofport->bfd,
ofport->cfm,
ofport->lldp,
- ofport->up.pp.hw_addr);
+ &ofport->up.pp.hw_addr);
return error;
}
struct ofport_dpif *ofport = ofport_;
struct eth_header *eth = dp_packet_l2(pkt);
- netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+ netdev_get_etheraddr(ofport->up.netdev, ð->eth_src);
if (eth_addr_is_zero(eth->eth_src)) {
VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
"does not have a configured source MAC address.",
ofproto->up.name, ofp_to_u16(ofport->up.ofp_port));
} else {
- ofproto_dpif_send_packet(ofport, pkt);
+ ofproto_dpif_send_packet(ofport, false, pkt);
}
dp_packet_delete(pkt);
}
} else {
struct eth_header *eth = dp_packet_l2(pkt);
- netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
+ netdev_get_etheraddr(ofport->up.netdev, ð->eth_src);
if (eth_addr_is_zero(eth->eth_src)) {
VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
"with unknown MAC", ofproto->up.name, port_num);
} else {
- ofproto_dpif_send_packet(ofport, pkt);
+ ofproto_dpif_send_packet(ofport, false, pkt);
}
}
dp_packet_delete(pkt);
}
if (rstp_shift_root_learned_address(ofproto->rstp)) {
- bundle_move(((struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp))->bundle,
- ((struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp))->bundle);
- rstp_reset_root_changed(ofproto->rstp);
+ struct ofport_dpif *old_root_aux =
+ (struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp);
+ struct ofport_dpif *new_root_aux =
+ (struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp);
+ if (old_root_aux != NULL && new_root_aux != NULL) {
+ bundle_move(old_root_aux->bundle, new_root_aux->bundle);
+ rstp_reset_root_changed(ofproto->rstp);
+ }
}
}
}
if (!s || !s->enable) {
if (rp) {
- rstp_port_unref(rp);
+ rstp_port_set_aux(rp, NULL);
+ rstp_port_set_state(rp, RSTP_DISABLED);
+ rstp_port_set_mac_operational(rp, false);
ofport->rstp_port = NULL;
+ rstp_port_unref(rp);
update_rstp_port_state(ofport);
}
return;
bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
- list_remove(&port->bundle_node);
+ ovs_list_remove(&port->bundle_node);
port->bundle = NULL;
if (bundle->lacp) {
}
port->bundle = bundle;
- list_push_back(&bundle->ports, &port->bundle_node);
+ ovs_list_push_back(&bundle->ports, &port->bundle_node);
if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
|| port->is_layer3
|| (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
bundle->aux = aux;
bundle->name = NULL;
- list_init(&bundle->ports);
+ ovs_list_init(&bundle->ports);
bundle->vlan_mode = PORT_VLAN_TRUNK;
bundle->vlan = -1;
bundle->trunks = NULL;
ok = false;
}
}
- if (!ok || list_size(&bundle->ports) != s->n_slaves) {
+ if (!ok || ovs_list_size(&bundle->ports) != s->n_slaves) {
struct ofport_dpif *next_port;
LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
found: ;
}
}
- ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
+ ovs_assert(ovs_list_size(&bundle->ports) <= s->n_slaves);
- if (list_is_empty(&bundle->ports)) {
+ if (ovs_list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
return EINVAL;
}
}
/* Bonding. */
- if (!list_is_short(&bundle->ports)) {
+ if (!ovs_list_is_short(&bundle->ports)) {
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
if (bundle) {
bundle_del_port(port);
- if (list_is_empty(&bundle->ports)) {
+ if (ovs_list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
- } else if (list_is_short(&bundle->ports)) {
+ } else if (ovs_list_is_short(&bundle->ports)) {
bond_unref(bundle->bond);
bundle->bond = NULL;
}
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
struct ofport_dpif *port = port_;
- uint8_t ea[ETH_ADDR_LEN];
+ struct eth_addr ea;
int error;
- error = netdev_get_etheraddr(port->up.netdev, ea);
+ error = netdev_get_etheraddr(port->up.netdev, &ea);
if (!error) {
struct dp_packet packet;
void *packet_pdu;
pdu_size);
memcpy(packet_pdu, pdu, pdu_size);
- ofproto_dpif_send_packet(port, &packet);
+ ofproto_dpif_send_packet(port, false, &packet);
dp_packet_uninit(&packet);
} else {
VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
} *pkt_node;
struct ovs_list packets;
- list_init(&packets);
+ ovs_list_init(&packets);
ovs_rwlock_rdlock(&ofproto->ml->rwlock);
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
if (mac_entry_get_port(ofproto->ml, e) != bundle) {
pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
e->mac, e->vlan,
(void **)&pkt_node->port);
- list_push_back(&packets, &pkt_node->list_node);
+ ovs_list_push_back(&packets, &pkt_node->list_node);
}
}
ovs_rwlock_unlock(&ofproto->ml->rwlock);
LIST_FOR_EACH_POP (pkt_node, list_node, &packets) {
int ret;
- ret = ofproto_dpif_send_packet(pkt_node->port, pkt_node->pkt);
+ ret = ofproto_dpif_send_packet(pkt_node->port, false, pkt_node->pkt);
dp_packet_delete(pkt_node->pkt);
free(pkt_node);
if (ret) {
error = mirror_set(ofproto->mbridge, aux, s->name, srcs, s->n_srcs, dsts,
s->n_dsts, s->src_vlans,
- bundle_lookup(ofproto, s->out_bundle), s->out_vlan);
+ bundle_lookup(ofproto, s->out_bundle),
+ s->snaplen, s->out_vlan);
free(srcs);
free(dsts);
return error;
}
struct port_dump_state {
- uint32_t bucket;
- uint32_t offset;
+ struct sset_position pos;
bool ghost;
struct ofproto_port port;
state->has_port = false;
}
sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
- while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
+ while ((node = sset_at_position(sset, &state->pos))) {
int error;
error = port_query_by_name(ofproto_, node->name, &state->port);
if (!state->ghost) {
state->ghost = true;
- state->bucket = 0;
- state->offset = 0;
+ memset(&state->pos, 0, sizeof state->pos);
return port_dump_next(ofproto_, state_, port);
}
/* If 'rule' is an OpenFlow rule, that has expired according to OpenFlow rules,
* then delete it entirely. */
static void
-rule_expire(struct rule_dpif *rule)
+rule_expire(struct rule_dpif *rule, long long now)
OVS_REQUIRES(ofproto_mutex)
{
uint16_t hard_timeout, idle_timeout;
- long long int now = time_msec();
int reason = -1;
hard_timeout = rule->up.hard_timeout;
}
}
-/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
- * 'flow' must reflect the data in 'packet'. */
+/* Sets 'packet''s metadata input port to the datapath (ODP) port number that
+ * corresponds to OpenFlow port 'in_port' in 'ofproto'.  OFPP_NONE is mapped
+ * to OFPP_LOCAL before translation, so packets with no input port are
+ * attributed to the bridge's local port. */
+static void
+ofproto_dpif_set_packet_odp_port(const struct ofproto_dpif *ofproto,
+                                 ofp_port_t in_port, struct dp_packet *packet)
+{
+    if (in_port == OFPP_NONE) {
+        in_port = OFPP_LOCAL;
+    }
+    packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
+}
+
int
-ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
- const struct flow *flow,
- struct rule_dpif *rule,
- const struct ofpact *ofpacts, size_t ofpacts_len,
- struct dp_packet *packet)
+ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ struct rule_dpif *rule,
+ const struct ofpact *ofpacts, size_t ofpacts_len,
+ int indentation, int depth, int resubmits,
+ struct dp_packet *packet)
{
struct dpif_flow_stats stats;
struct xlate_out xout;
struct xlate_in xin;
- ofp_port_t in_port;
struct dpif_execute execute;
int error;
rule_dpif_credit_stats(rule, &stats);
}
+ uint64_t odp_actions_stub[1024 / 8];
+ struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
- stats.tcp_flags, packet);
+ stats.tcp_flags, packet, NULL, &odp_actions);
xin.ofpacts = ofpacts;
xin.ofpacts_len = ofpacts_len;
xin.resubmit_stats = &stats;
- xlate_actions(&xin, &xout);
+ xin.indentation = indentation;
+ xin.depth = depth;
+ xin.resubmits = resubmits;
+ if (xlate_actions(&xin, &xout) != XLATE_OK) {
+ error = EINVAL;
+ goto out;
+ }
- execute.actions = xout.odp_actions->data;
- execute.actions_len = xout.odp_actions->size;
+ execute.actions = odp_actions.data;
+ execute.actions_len = odp_actions.size;
pkt_metadata_from_flow(&packet->md, flow);
execute.packet = packet;
+ execute.flow = flow;
execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
execute.probe = false;
+ execute.mtu = 0;
/* Fix up in_port. */
- in_port = flow->in_port.ofp_port;
- if (in_port == OFPP_NONE) {
- in_port = OFPP_LOCAL;
- }
- execute.packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
+ ofproto_dpif_set_packet_odp_port(ofproto, flow->in_port.ofp_port, packet);
error = dpif_execute(ofproto->backer->dpif, &execute);
-
+out:
xlate_out_uninit(&xout);
+ ofpbuf_uninit(&odp_actions);
return error;
}
+/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
+ * 'flow' must reflect the data in 'packet'.
+ *
+ * Thin wrapper around ofproto_dpif_execute_actions__() that starts with zero
+ * translation indentation, depth, and resubmit count (the three 0 arguments
+ * below).  Returns whatever the callee returns: 0 on success, EINVAL if
+ * translation fails, or the dpif_execute() result otherwise. */
+int
+ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
+                             const struct flow *flow,
+                             struct rule_dpif *rule,
+                             const struct ofpact *ofpacts, size_t ofpacts_len,
+                             struct dp_packet *packet)
+{
+    return ofproto_dpif_execute_actions__(ofproto, flow, rule, ofpacts,
+                                          ofpacts_len, 0, 0, 0, packet);
+}
+
void
rule_dpif_credit_stats(struct rule_dpif *rule,
const struct dpif_flow_stats *stats)
}
/* The returned rule (if any) is valid at least until the next RCU quiescent
- * period. If the rule needs to stay around longer, a non-zero 'take_ref'
- * must be passed in to cause a reference to be taken on it.
+ * period. If the rule needs to stay around longer, the caller should take
+ * a reference.
*
* 'flow' is non-const to allow for temporary modifications during the lookup.
* Any changes are restored before returning. */
static struct rule_dpif *
rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, cls_version_t version,
uint8_t table_id, struct flow *flow,
- struct flow_wildcards *wc, bool take_ref)
+ struct flow_wildcards *wc)
{
struct classifier *cls = &ofproto->up.tables[table_id].cls;
- const struct cls_rule *cls_rule;
- struct rule_dpif *rule;
-
- do {
- cls_rule = classifier_lookup(cls, version, flow, wc);
-
- rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
-
- /* Try again if the rule was released before we get the reference. */
- } while (rule && take_ref && !rule_dpif_try_ref(rule));
-
- return rule;
+ return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls, version,
+ flow, wc)));
}
/* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
* '*table_id'.
*
* The rule is returned in '*rule', which is valid at least until the next
- * RCU quiescent period. If the '*rule' needs to stay around longer,
- * a non-zero 'take_ref' must be passed in to cause a reference to be taken
- * on it before this returns.
+ * RCU quiescent period. If the '*rule' needs to stay around longer, the
+ * caller must take a reference.
*
* 'in_port' allows the lookup to take place as if the in port had the value
* 'in_port'. This is needed for resubmit action support.
struct rule_dpif *
rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto,
cls_version_t version, struct flow *flow,
- struct flow_wildcards *wc, bool take_ref,
+ struct flow_wildcards *wc,
const struct dpif_flow_stats *stats,
uint8_t *table_id, ofp_port_t in_port,
bool may_packet_in, bool honor_table_miss)
/* We always unwildcard nw_frag (for IP), so they
* need not be unwildcarded here. */
if (flow->nw_frag & FLOW_NW_FRAG_ANY
- && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
- if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ && ofproto->up.frag_handling != OFPUTIL_FRAG_NX_MATCH) {
+ if (ofproto->up.frag_handling == OFPUTIL_FRAG_NORMAL) {
/* We must pretend that transport ports are unavailable. */
flow->tp_src = htons(0);
flow->tp_dst = htons(0);
} else {
- /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+ /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
* Use the drop_frags_rule (which cannot disappear). */
rule = ofproto->drop_frags_rule;
- if (take_ref) {
- rule_dpif_ref(rule);
- }
if (stats) {
struct oftable *tbl = &ofproto->up.tables[*table_id];
unsigned long orig;
next_id++, next_id += (next_id == TBL_INTERNAL))
{
*table_id = next_id;
- rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc,
- take_ref);
+ rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc);
if (stats) {
struct oftable *tbl = &ofproto->up.tables[next_id];
unsigned long orig;
rule = ofproto->miss_rule;
}
}
- if (take_ref) {
- rule_dpif_ref(rule);
- }
out:
/* Restore port numbers, as they may have been modified above. */
flow->tp_src = old_tp_src;
return rule;
}
-static void
-complete_operation(struct rule_dpif *rule)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- ofproto->backer->need_revalidate = REV_FLOW_TABLE;
-}
-
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
{
return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
free(rule);
}
+/* Checks whether the conntrack match fields whose masks are given in 'flow'
+ * are all supported by the datapath backing 'ofproto'.  Returns 0 if they
+ * are, otherwise OFPERR_OFPBMC_BAD_MASK. */
+static enum ofperr
+check_mask(struct ofproto_dpif *ofproto, const struct miniflow *flow)
+{
+    const struct odp_support *support;
+    uint16_t ct_state, ct_zone;
+    ovs_u128 ct_label;
+    uint32_t ct_mark;
+
+    support = &ofproto_dpif_get_support(ofproto)->odp;
+    ct_state = MINIFLOW_GET_U16(flow, ct_state);
+    /* Fast path: when the datapath supports every conntrack field, only the
+     * individual ct_state bits can still be unsupported. */
+    if (support->ct_state && support->ct_zone && support->ct_mark
+        && support->ct_label && support->ct_state_nat) {
+        return ct_state & CS_UNSUPPORTED_MASK ? OFPERR_OFPBMC_BAD_MASK : 0;
+    }
+
+    ct_zone = MINIFLOW_GET_U16(flow, ct_zone);
+    ct_mark = MINIFLOW_GET_U32(flow, ct_mark);
+    ct_label = MINIFLOW_GET_U128(flow, ct_label);
+
+    /* Reject a match on any conntrack field the datapath lacks, and any
+     * ct_state bits beyond what OVS itself understands. */
+    if ((ct_state && !support->ct_state)
+        || (ct_state & CS_UNSUPPORTED_MASK)
+        || ((ct_state & (CS_SRC_NAT | CS_DST_NAT)) && !support->ct_state_nat)
+        || (ct_zone && !support->ct_zone)
+        || (ct_mark && !support->ct_mark)
+        || (!ovs_u128_is_zero(ct_label) && !support->ct_label)) {
+        return OFPERR_OFPBMC_BAD_MASK;
+    }
+
+    return 0;
+}
+
+/* Checks whether every OFPACT_CT action in 'actions', including the actions
+ * nested inside each one, is supported by the datapath backing 'ofproto'.
+ * Returns 0 if all are supported, otherwise an OFPERR_OFPBAC_* error. */
+static enum ofperr
+check_actions(const struct ofproto_dpif *ofproto,
+              const struct rule_actions *const actions)
+{
+    const struct ofpact *ofpact;
+
+    OFPACT_FOR_EACH (ofpact, actions->ofpacts, actions->ofpacts_len) {
+        const struct odp_support *support;
+        const struct ofpact_conntrack *ct;
+        const struct ofpact *a;
+
+        /* Only conntrack actions need datapath capability checks here. */
+        if (ofpact->type != OFPACT_CT) {
+            continue;
+        }
+
+        ct = CONTAINER_OF(ofpact, struct ofpact_conntrack, ofpact);
+        support = &ofproto_dpif_get_support(ofproto)->odp;
+
+        if (!support->ct_state) {
+            return OFPERR_OFPBAC_BAD_TYPE;
+        }
+        if ((ct->zone_imm || ct->zone_src.field) && !support->ct_zone) {
+            return OFPERR_OFPBAC_BAD_ARGUMENT;
+        }
+
+        /* Check the actions nested inside the conntrack action. */
+        OFPACT_FOR_EACH(a, ct->actions, ofpact_ct_get_action_len(ct)) {
+            const struct mf_field *dst = ofpact_get_mf_dst(a);
+
+            if (a->type == OFPACT_NAT && !support->ct_state_nat) {
+                /* The backer doesn't seem to support the NAT bits in
+                 * 'ct_state': assume that it doesn't support the NAT
+                 * action. */
+                return OFPERR_OFPBAC_BAD_TYPE;
+            }
+            if (dst && ((dst->id == MFF_CT_MARK && !support->ct_mark)
+                        || (dst->id == MFF_CT_LABEL && !support->ct_label))) {
+                return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
+            }
+        }
+    }
+
+    return 0;
+}
+
+/* Verifies that 'rule''s match mask and actions are supported by the
+ * datapath backing its ofproto.  Returns 0 if so, otherwise an OpenFlow
+ * error suitable for rejecting the flow_mod. */
+static enum ofperr
+rule_check(struct rule *rule)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->ofproto);
+    enum ofperr err;
+
+    err = check_mask(ofproto, &rule->cr.match.mask->masks);
+    if (err) {
+        return err;
+    }
+    return check_actions(ofproto, rule->actions);
+}
+
static enum ofperr
rule_construct(struct rule *rule_)
OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
+ int error;
+
+ error = rule_check(rule_);
+ if (error) {
+ return error;
+ }
+
ovs_mutex_init_adaptive(&rule->stats_mutex);
rule->stats.n_packets = 0;
rule->stats.n_bytes = 0;
ovs_mutex_unlock(&rule->stats_mutex);
ovs_mutex_unlock(&old_rule->stats_mutex);
}
-
- complete_operation(rule);
-}
-
-static void
-rule_delete(struct rule *rule_)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
- complete_operation(rule);
}
static void
return group->up.props.selection_method;
}
\f
-/* Sends 'packet' out 'ofport'.
+/* Sends 'packet' out 'ofport'. If 'port' is a tunnel and that tunnel type
+ * supports a notion of an OAM flag, sets it if 'oam' is true.
* May modify 'packet'.
* Returns 0 if successful, otherwise a positive errno value. */
int
-ofproto_dpif_send_packet(const struct ofport_dpif *ofport, struct dp_packet *packet)
+ofproto_dpif_send_packet(const struct ofport_dpif *ofport, bool oam,
+ struct dp_packet *packet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
int error;
- error = xlate_send_packet(ofport, packet);
+ error = xlate_send_packet(ofport, oam, packet);
ovs_mutex_lock(&ofproto->stats_mutex);
ofproto->stats.tx_packets++;
static bool
set_frag_handling(struct ofproto *ofproto_,
- enum ofp_config_flags frag_handling)
+ enum ofputil_frag_handling frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- if (frag_handling != OFPC_FRAG_REASM) {
+ if (frag_handling != OFPUTIL_FRAG_REASM) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
ofpacts_len, packet);
return 0;
}
+
+/* Handles an NXT_RESUME request: resumes processing of the packet-in
+ * continuation 'pin' by translating its saved state into datapath actions
+ * and executing those actions on the packet it carries.  Returns 0 on
+ * success, otherwise the OpenFlow error reported by translation. */
+static enum ofperr
+nxt_resume(struct ofproto *ofproto_,
+           const struct ofputil_packet_in_private *pin)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+    /* Translate pin into datapath actions. */
+    uint64_t odp_actions_stub[1024 / 8];
+    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
+    enum slow_path_reason slow;
+    enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);
+
+    /* Steal 'pin->packet' and put it into a dp_packet. */
+    struct dp_packet packet;
+    dp_packet_init(&packet, pin->public.packet_len);
+    dp_packet_put(&packet, pin->public.packet, pin->public.packet_len);
+
+    pkt_metadata_from_flow(&packet.md, &pin->public.flow_metadata.flow);
+
+    /* Fix up in_port. */
+    ofproto_dpif_set_packet_odp_port(ofproto,
+                                     pin->public.flow_metadata.flow.in_port.ofp_port,
+                                     &packet);
+
+    struct flow headers;
+    flow_extract(&packet, &headers);
+
+    /* Execute the datapath actions on the packet.
+     *
+     * NOTE(review): this runs even when 'error' is nonzero; presumably
+     * xlate_resume() leaves 'odp_actions' in a safe (e.g. empty or drop)
+     * state on error -- confirm against its contract. */
+    struct dpif_execute execute = {
+        .actions = odp_actions.data,
+        .actions_len = odp_actions.size,
+        .needs_help = (slow & SLOW_ACTION) != 0,
+        .packet = &packet,
+        .flow = &headers,
+    };
+    dpif_execute(ofproto->backer->dpif, &execute);
+
+    /* Clean up. */
+    ofpbuf_uninit(&odp_actions);
+    dp_packet_uninit(&packet);
+
+    return error;
+}
\f
/* NetFlow. */
+/* Returns an arbitrary member port of 'bundle' (the first one in its port
+ * list). */
static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
-    return CONTAINER_OF(list_front(&bundle->ports), struct ofport_dpif,
+    return CONTAINER_OF(ovs_list_front(&bundle->ports), struct ofport_dpif,
                        bundle_node);
}
ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
name, sizeof name);
ds_put_format(&ds, "%5s %4d ", name, grp->vlan);
- print_ipv6_mapped(&ds, &grp->addr);
+ ipv6_format_mapped(&grp->addr, &ds);
ds_put_format(&ds, " %3d\n",
mcast_bundle_age(ofproto->ms, b));
}
struct xlate_in xin;
const struct flow *key;
struct flow flow;
- struct flow_wildcards wc;
struct ds *result;
+ struct flow_wildcards wc;
+ struct ofpbuf odp_actions;
};
static void
trace_format_odp(struct ds *result, int level, const char *title,
struct trace_ctx *trace)
{
- struct ofpbuf *odp_actions = trace->xout.odp_actions;
+ struct ofpbuf *odp_actions = &trace->odp_actions;
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
- flow_wildcards_or(&trace->wc, &trace->xout.wc, &trace->wc);
match_init(&match, trace->key, &trace->wc);
match_format(&match, result, OFP_DEFAULT_PRIORITY);
ds_put_char(result, '\n');
}
-static void trace_report(struct xlate_in *, int recurse,
+static void trace_report(struct xlate_in *, int indentation,
const char *format, ...)
OVS_PRINTF_FORMAT(3, 4);
-static void trace_report_valist(struct xlate_in *, int recurse,
+static void trace_report_valist(struct xlate_in *, int indentation,
const char *format, va_list args)
OVS_PRINTF_FORMAT(3, 0);
static void
-trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
+trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int indentation)
{
struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
struct ds *result = trace->result;
- if (!recurse) {
+ if (!indentation) {
if (rule == xin->ofproto->miss_rule) {
- trace_report(xin, recurse,
+ trace_report(xin, indentation,
"No match, flow generates \"packet in\"s.");
} else if (rule == xin->ofproto->no_packet_in_rule) {
- trace_report(xin, recurse, "No match, packets dropped because "
+ trace_report(xin, indentation, "No match, packets dropped because "
"OFPPC_NO_PACKET_IN is set on in_port.");
} else if (rule == xin->ofproto->drop_frags_rule) {
- trace_report(xin, recurse, "Packets dropped because they are IP "
- "fragments and the fragment handling mode is "
- "\"drop\".");
+ trace_report(xin, indentation,
+ "Packets dropped because they are IP fragments and "
+ "the fragment handling mode is \"drop\".");
}
}
ds_put_char(result, '\n');
- if (recurse) {
- trace_format_flow(result, recurse, "Resubmitted flow", trace);
- trace_format_regs(result, recurse, "Resubmitted regs", trace);
- trace_format_odp(result, recurse, "Resubmitted odp", trace);
- trace_format_megaflow(result, recurse, "Resubmitted megaflow", trace);
+ if (indentation) {
+ trace_format_flow(result, indentation, "Resubmitted flow", trace);
+ trace_format_regs(result, indentation, "Resubmitted regs", trace);
+ trace_format_odp(result, indentation, "Resubmitted odp", trace);
+ trace_format_megaflow(result, indentation, "Resubmitted megaflow",
+ trace);
}
- trace_format_rule(result, recurse, rule);
+ trace_format_rule(result, indentation, rule);
}
static void
-trace_report_valist(struct xlate_in *xin, int recurse,
+trace_report_valist(struct xlate_in *xin, int indentation,
const char *format, va_list args)
{
struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
struct ds *result = trace->result;
- ds_put_char_multiple(result, '\t', recurse);
+ ds_put_char_multiple(result, '\t', indentation);
ds_put_format_valist(result, format, args);
ds_put_char(result, '\n');
}
static void
-trace_report(struct xlate_in *xin, int recurse, const char *format, ...)
+trace_report(struct xlate_in *xin, int indentation, const char *format, ...)
{
va_list args;
va_start(args, format);
- trace_report_valist(xin, recurse, format, args);
+ trace_report_valist(xin, indentation, format, args);
va_end(args);
}
error = "Invalid datapath flow";
goto exit;
}
-
- vsp_adjust_flow(*ofprotop, flow, NULL);
-
} else {
char *err = parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL);
goto exit;
}
if (enforce_consistency) {
- retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size,
- &flow, u16_to_ofp(ofproto->up.max_ports),
- 0, 0, usable_protocols);
+ retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
+ u16_to_ofp(ofproto->up.max_ports),
+ 0, ofproto->up.n_tables,
+ usable_protocols);
} else {
retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
- u16_to_ofp(ofproto->up.max_ports), 0, 0,
- &usable_protocols);
+ u16_to_ofp(ofproto->up.max_ports), 0,
+ ofproto->up.n_tables, &usable_protocols);
+ }
+ if (!retval) {
+ retval = ofproto_check_ofpacts(&ofproto->up, ofpacts.data,
+ ofpacts.size);
}
if (retval) {
struct ds *ds)
{
struct trace_ctx trace;
+ enum xlate_error error;
ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
ds_put_cstr(ds, "Flow: ");
flow_format(ds, flow);
ds_put_char(ds, '\n');
- flow_wildcards_init_catchall(&trace.wc);
+ ofpbuf_init(&trace.odp_actions, 0);
trace.result = ds;
trace.key = flow; /* Original flow key, used for megaflow. */
trace.flow = *flow; /* May be modified by actions. */
xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
- ntohs(flow->tcp_flags), packet);
+ ntohs(flow->tcp_flags), packet, &trace.wc,
+ &trace.odp_actions);
trace.xin.ofpacts = ofpacts;
trace.xin.ofpacts_len = ofpacts_len;
trace.xin.resubmit_hook = trace_resubmit;
trace.xin.report_hook = trace_report_valist;
- xlate_actions(&trace.xin, &trace.xout);
-
+ error = xlate_actions(&trace.xin, &trace.xout);
ds_put_char(ds, '\n');
+ trace.xin.flow.actset_output = 0;
trace_format_flow(ds, 0, "Final flow", &trace);
trace_format_megaflow(ds, 0, "Megaflow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, trace.xout.odp_actions->data,
- trace.xout.odp_actions->size);
+ format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size);
- if (trace.xout.slow) {
+ if (error != XLATE_OK) {
+ ds_put_format(ds, "\nTranslation failed (%s), packet is dropped.\n",
+ xlate_strerror(error));
+ } else if (trace.xout.slow) {
enum slow_path_reason slow;
ds_put_cstr(ds, "\nThis flow is handled by the userspace "
}
xlate_out_uninit(&trace.xout);
+ ofpbuf_uninit(&trace.odp_actions);
}
/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
ofproto_use_tnl_push_pop = true;
unixctl_command_reply(conn, "Tunnel push-pop on");
ofproto_revalidate_all_backers();
+ } else {
+ unixctl_command_reply_error(conn, "Invalid argument");
+ }
+}
+
+/* Unixctl handler for "dpif/disable-truncate": clears the truncate-action
+ * capability flag on every datapath backer, forcing translation to avoid
+ * the truncate action (intended for testing). */
+static void
+disable_datapath_truncate(struct unixctl_conn *conn OVS_UNUSED,
+                          int argc OVS_UNUSED,
+                          const char *argv[] OVS_UNUSED,
+                          void *aux OVS_UNUSED)
+{
+    const struct shash_node **backers;
+    int i;
+
+    backers = shash_sort(&all_dpif_backers);
+    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
+        struct dpif_backer *backer = backers[i]->data;
+        backer->support.trunc = false;
    }
+    free(backers);
+    unixctl_command_reply(conn, "Datapath truncate action disabled");
}
static void
unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
disable_tnl_push_pop, NULL);
+
+ unixctl_command_register("dpif/disable-truncate", "", 0, 0,
+ disable_datapath_truncate, NULL);
}
/* Returns true if 'table' is the table used for internal rules,
return table_id == TBL_INTERNAL;
}
\f
-/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
- *
- * This is deprecated. It is only for compatibility with broken device drivers
- * in old versions of Linux that do not properly support VLANs when VLAN
- * devices are not used. When broken device drivers are no longer in
- * widespread use, we will delete these interfaces. */
-
-static int
-set_realdev(struct ofport *ofport_, ofp_port_t realdev_ofp_port, int vid)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- if (realdev_ofp_port == ofport->realdev_ofp_port
- && vid == ofport->vlandev_vid) {
- return 0;
- }
-
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
-
- if (ofport->realdev_ofp_port) {
- vsp_remove(ofport);
- }
- if (realdev_ofp_port && ofport->bundle) {
- /* vlandevs are enslaved to their realdevs, so they are not allowed to
- * themselves be part of a bundle. */
- bundle_set(ofport_->ofproto, ofport->bundle, NULL);
- }
-
- ofport->realdev_ofp_port = realdev_ofp_port;
- ofport->vlandev_vid = vid;
-
- if (realdev_ofp_port) {
- vsp_add(ofport, realdev_ofp_port, vid);
- }
-
- return 0;
-}
-
-static uint32_t
-hash_realdev_vid(ofp_port_t realdev_ofp_port, int vid)
-{
- return hash_2words(ofp_to_u16(realdev_ofp_port), vid);
-}
-
-bool
-ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
- OVS_EXCLUDED(ofproto->vsp_mutex)
-{
- /* hmap_is_empty is thread safe. */
- return !hmap_is_empty(&ofproto->realdev_vid_map);
-}
-
-
-static ofp_port_t
-vsp_realdev_to_vlandev__(const struct ofproto_dpif *ofproto,
- ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
- OVS_REQUIRES(ofproto->vsp_mutex)
-{
- if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
- int vid = vlan_tci_to_vid(vlan_tci);
- const struct vlan_splinter *vsp;
-
- HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
- hash_realdev_vid(realdev_ofp_port, vid),
- &ofproto->realdev_vid_map) {
- if (vsp->realdev_ofp_port == realdev_ofp_port
- && vsp->vid == vid) {
- return vsp->vlandev_ofp_port;
- }
- }
- }
- return realdev_ofp_port;
-}
-
-/* Returns the OFP port number of the Linux VLAN device that corresponds to
- * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
- * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and
- * 'vlan_tci' 9, it would return the port number of eth0.9.
- *
- * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
- * function just returns its 'realdev_ofp_port' argument. */
-ofp_port_t
-vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
- ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
- OVS_EXCLUDED(ofproto->vsp_mutex)
-{
- ofp_port_t ret;
-
- /* hmap_is_empty is thread safe, see if we can return immediately. */
- if (hmap_is_empty(&ofproto->realdev_vid_map)) {
- return realdev_ofp_port;
- }
- ovs_mutex_lock(&ofproto->vsp_mutex);
- ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
- ovs_mutex_unlock(&ofproto->vsp_mutex);
- return ret;
-}
-
-static struct vlan_splinter *
-vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port)
-{
- struct vlan_splinter *vsp;
-
- HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node,
- hash_ofp_port(vlandev_ofp_port),
- &ofproto->vlandev_map) {
- if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
- return vsp;
- }
- }
-
- return NULL;
-}
-
-/* Returns the OpenFlow port number of the "real" device underlying the Linux
- * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
- * VLAN VID of the Linux VLAN device in '*vid'. For example, given
- * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
- * eth0 and store 9 in '*vid'.
- *
- * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
- * VLAN device. Unless VLAN splinters are enabled, this is what this function
- * always does.*/
-static ofp_port_t
-vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
- ofp_port_t vlandev_ofp_port, int *vid)
- OVS_REQUIRES(ofproto->vsp_mutex)
-{
- if (!hmap_is_empty(&ofproto->vlandev_map)) {
- const struct vlan_splinter *vsp;
-
- vsp = vlandev_find(ofproto, vlandev_ofp_port);
- if (vsp) {
- if (vid) {
- *vid = vsp->vid;
- }
- return vsp->realdev_ofp_port;
- }
- }
- return 0;
-}
-
-/* Given 'flow', a flow representing a packet received on 'ofproto', checks
- * whether 'flow->in_port' represents a Linux VLAN device. If so, changes
- * 'flow->in_port' to the "real" device backing the VLAN device, sets
- * 'flow->vlan_tci' to the VLAN VID, and returns true. Optionally pushes the
- * appropriate VLAN on 'packet' if provided. Otherwise (which is always the
- * case unless VLAN splinters are enabled), returns false without making any
- * changes. */
-bool
-vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow,
- struct dp_packet *packet)
- OVS_EXCLUDED(ofproto->vsp_mutex)
-{
- ofp_port_t realdev;
- int vid;
-
- /* hmap_is_empty is thread safe. */
- if (hmap_is_empty(&ofproto->vlandev_map)) {
- return false;
- }
-
- ovs_mutex_lock(&ofproto->vsp_mutex);
- realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
- ovs_mutex_unlock(&ofproto->vsp_mutex);
- if (!realdev) {
- return false;
- }
-
- /* Cause the flow to be processed as if it came in on the real device with
- * the VLAN device's VLAN ID. */
- flow->in_port.ofp_port = realdev;
- flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
-
- if (packet) {
- /* Make the packet resemble the flow, so that it gets sent to an
- * OpenFlow controller properly, so that it looks correct for sFlow,
- * and so that flow_extract() will get the correct vlan_tci if it is
- * called on 'packet'. */
- eth_push_vlan(packet, htons(ETH_TYPE_VLAN), flow->vlan_tci);
- }
-
- return true;
-}
-
-static void
-vsp_remove(struct ofport_dpif *port)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
- struct vlan_splinter *vsp;
-
- ovs_mutex_lock(&ofproto->vsp_mutex);
- vsp = vlandev_find(ofproto, port->up.ofp_port);
- if (vsp) {
- hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
- hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
- free(vsp);
-
- port->realdev_ofp_port = 0;
- } else {
- VLOG_ERR("missing vlan device record");
- }
- ovs_mutex_unlock(&ofproto->vsp_mutex);
-}
-
-static void
-vsp_add(struct ofport_dpif *port, ofp_port_t realdev_ofp_port, int vid)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
-
- ovs_mutex_lock(&ofproto->vsp_mutex);
- if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
- && (vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, htons(vid))
- == realdev_ofp_port)) {
- struct vlan_splinter *vsp;
-
- vsp = xmalloc(sizeof *vsp);
- vsp->realdev_ofp_port = realdev_ofp_port;
- vsp->vlandev_ofp_port = port->up.ofp_port;
- vsp->vid = vid;
-
- port->realdev_ofp_port = realdev_ofp_port;
-
- hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
- hash_ofp_port(port->up.ofp_port));
- hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
- hash_realdev_vid(realdev_ofp_port, vid));
- } else {
- VLOG_ERR("duplicate vlan device record");
- }
- ovs_mutex_unlock(&ofproto->vsp_mutex);
-}
static odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
struct rule_dpif *rule;
int error;
- ofm.fm.match = *match;
- ofm.fm.priority = priority;
- ofm.fm.new_cookie = htonll(0);
- ofm.fm.cookie = htonll(0);
- ofm.fm.cookie_mask = htonll(0);
- ofm.fm.modify_cookie = false;
- ofm.fm.table_id = TBL_INTERNAL;
- ofm.fm.command = OFPFC_ADD;
- ofm.fm.idle_timeout = idle_timeout;
- ofm.fm.hard_timeout = 0;
- ofm.fm.importance = 0;
- ofm.fm.buffer_id = 0;
- ofm.fm.out_port = 0;
- ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
- ofm.fm.ofpacts = ofpacts->data;
- ofm.fm.ofpacts_len = ofpacts->size;
+ ofm.fm = (struct ofputil_flow_mod) {
+ .match = *match,
+ .priority = priority,
+ .table_id = TBL_INTERNAL,
+ .command = OFPFC_ADD,
+ .idle_timeout = idle_timeout,
+ .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+ .ofpacts = ofpacts->data,
+ .ofpacts_len = ofpacts->size,
+ .delete_reason = OVS_OFPRR_NONE,
+ };
error = ofproto_flow_mod(&ofproto->up, &ofm);
if (error) {
rule = rule_dpif_lookup_in_table(ofproto,
ofproto_dpif_get_tables_version(ofproto),
TBL_INTERNAL, &ofm.fm.match.flow,
- &ofm.fm.match.wc, false);
+ &ofm.fm.match.wc);
if (rule) {
*rulep = &rule->up;
} else {
struct ofproto_flow_mod ofm;
int error;
- ofm.fm.match = *match;
- ofm.fm.priority = priority;
- ofm.fm.new_cookie = htonll(0);
- ofm.fm.cookie = htonll(0);
- ofm.fm.cookie_mask = htonll(0);
- ofm.fm.modify_cookie = false;
- ofm.fm.table_id = TBL_INTERNAL;
- ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
- ofm.fm.command = OFPFC_DELETE_STRICT;
+ ofm.fm = (struct ofputil_flow_mod) {
+ .match = *match,
+ .priority = priority,
+ .table_id = TBL_INTERNAL,
+ .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+ .command = OFPFC_DELETE_STRICT,
+ };
error = ofproto_flow_mod(&ofproto->up, &ofm);
if (error) {
return 0;
}
+/* Returns 'ofproto''s universally unique identifier (UUID). */
+const struct uuid *
+ofproto_dpif_get_uuid(const struct ofproto_dpif *ofproto)
+{
+    return &ofproto->uuid;
+}
+
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,
destruct,
dealloc,
run,
- wait,
+ ofproto_dpif_wait,
NULL, /* get_memory_usage. */
type_get_memory_usage,
flush,
rule_alloc,
rule_construct,
rule_insert,
- rule_delete,
+ NULL, /* rule_delete */
rule_destruct,
rule_dealloc,
rule_get_stats,
rule_execute,
set_frag_handling,
packet_out,
+ nxt_resume,
set_netflow,
get_netflow_ids,
set_sflow,
set_ipfix,
+ get_ipfix_stats,
set_cfm,
cfm_status_changed,
get_cfm_status,
set_mac_table_config,
set_mcast_snooping,
set_mcast_snooping_port,
- set_realdev,
NULL, /* meter_get_features */
NULL, /* meter_set */
NULL, /* meter_get */