COVERAGE_DEFINE(xlate_actions);
COVERAGE_DEFINE(xlate_actions_oversize);
+COVERAGE_DEFINE(xlate_actions_too_many_output);
COVERAGE_DEFINE(xlate_actions_mpls_overflow);
VLOG_DEFINE_THIS_MODULE(ofproto_dpif_xlate);
struct dpif_ipfix *ipfix; /* Ipfix handle, or null. */
struct netflow *netflow; /* Netflow handle, or null. */
struct stp *stp; /* STP or null if disabled. */
+ struct rstp *rstp; /* RSTP or null if disabled. */
/* Special rules installed by ofproto-dpif. */
struct rule_dpif *miss_rule;
/* Number of MPLS label stack entries that the datapath supports
* in matches. */
size_t max_mpls_depth;
+
+ /* True if the datapath supports masked data in OVS_ACTION_ATTR_SET
+ * actions. */
+ bool masked_set_action;
};
struct xbundle {
enum ofputil_port_config config; /* OpenFlow port configuration. */
enum ofputil_port_state state; /* OpenFlow port state. */
int stp_port_no; /* STP port number or -1 if not in use. */
+ struct rstp_port *rstp_port; /* RSTP port or null. */
struct hmap skb_priorities; /* Map of 'skb_priority_to_dscp's. */
static struct skb_priority_to_dscp *get_skb_priority(const struct xport *,
uint32_t skb_priority);
static void clear_skb_priorities(struct xport *);
+static size_t count_skb_priorities(const struct xport *);
static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
uint8_t *dscp);
static void xlate_xbridge_init(struct xlate_cfg *, struct xbridge *);
static void xlate_xbundle_init(struct xlate_cfg *, struct xbundle *);
static void xlate_xport_init(struct xlate_cfg *, struct xport *);
-static void xlate_xbridge_set(struct xbridge *xbridge,
- struct dpif *dpif,
+static void xlate_xbridge_set(struct xbridge *, struct dpif *,
struct rule_dpif *miss_rule,
struct rule_dpif *no_packet_in_rule,
- const struct mac_learning *ml, struct stp *stp,
- const struct mcast_snooping *ms,
- const struct mbridge *mbridge,
- const struct dpif_sflow *sflow,
- const struct dpif_ipfix *ipfix,
- const struct netflow *netflow,
- enum ofp_config_flags frag,
+ const struct mac_learning *, struct stp *,
+ struct rstp *, const struct mcast_snooping *,
+ const struct mbridge *,
+ const struct dpif_sflow *,
+ const struct dpif_ipfix *,
+ const struct netflow *, enum ofp_config_flags,
bool forward_bpdu, bool has_in_band,
bool enable_recirc,
bool variable_length_userdata,
- size_t max_mpls_depth);
+ size_t max_mpls_depth,
+ bool masked_set_action);
static void xlate_xbundle_set(struct xbundle *xbundle,
enum port_vlan_mode vlan_mode, int vlan,
unsigned long *trunks, bool use_priority_tags,
static void xlate_xport_set(struct xport *xport, odp_port_t odp_port,
const struct netdev *netdev, const struct cfm *cfm,
const struct bfd *bfd, int stp_port_no,
+ const struct rstp_port *rstp_port,
enum ofputil_port_config config,
enum ofputil_port_state state, bool is_tunnel,
bool may_enable);
struct rule_dpif *miss_rule,
struct rule_dpif *no_packet_in_rule,
const struct mac_learning *ml, struct stp *stp,
- const struct mcast_snooping *ms,
+ struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
bool forward_bpdu, bool has_in_band,
bool enable_recirc,
bool variable_length_userdata,
- size_t max_mpls_depth)
+ size_t max_mpls_depth,
+ bool masked_set_action)
{
if (xbridge->ml != ml) {
mac_learning_unref(xbridge->ml);
xbridge->stp = stp_ref(stp);
}
+ if (xbridge->rstp != rstp) {
+ rstp_unref(xbridge->rstp);
+ xbridge->rstp = rstp_ref(rstp);
+ }
+
if (xbridge->netflow != netflow) {
netflow_unref(xbridge->netflow);
xbridge->netflow = netflow_ref(netflow);
xbridge->enable_recirc = enable_recirc;
xbridge->variable_length_userdata = variable_length_userdata;
xbridge->max_mpls_depth = max_mpls_depth;
+ xbridge->masked_set_action = masked_set_action;
}
static void
xlate_xport_set(struct xport *xport, odp_port_t odp_port,
const struct netdev *netdev, const struct cfm *cfm,
const struct bfd *bfd, int stp_port_no,
+ const struct rstp_port* rstp_port,
enum ofputil_port_config config, enum ofputil_port_state state,
bool is_tunnel, bool may_enable)
{
xport->may_enable = may_enable;
xport->odp_port = odp_port;
+ if (xport->rstp_port != rstp_port) {
+ rstp_port_unref(xport->rstp_port);
+ xport->rstp_port = rstp_port_ref(rstp_port);
+ }
+
if (xport->cfm != cfm) {
cfm_unref(xport->cfm);
xport->cfm = cfm_ref(cfm);
xlate_xbridge_set(new_xbridge,
xbridge->dpif, xbridge->miss_rule,
xbridge->no_packet_in_rule, xbridge->ml, xbridge->stp,
- xbridge->ms, xbridge->mbridge, xbridge->sflow,
- xbridge->ipfix, xbridge->netflow, xbridge->frag,
- xbridge->forward_bpdu, xbridge->has_in_band,
- xbridge->enable_recirc, xbridge->variable_length_userdata,
- xbridge->max_mpls_depth);
+ xbridge->rstp, xbridge->ms, xbridge->mbridge,
+ xbridge->sflow, xbridge->ipfix, xbridge->netflow,
+ xbridge->frag, xbridge->forward_bpdu,
+ xbridge->has_in_band, xbridge->enable_recirc,
+ xbridge->variable_length_userdata,
+ xbridge->max_mpls_depth, xbridge->masked_set_action);
LIST_FOR_EACH (xbundle, list_node, &xbridge->xbundles) {
xlate_xbundle_copy(new_xbridge, xbundle);
}
xlate_xport_init(new_xcfg, new_xport);
xlate_xport_set(new_xport, xport->odp_port, xport->netdev, xport->cfm,
- xport->bfd, xport->stp_port_no, xport->config, xport->state,
- xport->is_tunnel, xport->may_enable);
+ xport->bfd, xport->stp_port_no, xport->rstp_port,
+ xport->config, xport->state, xport->is_tunnel,
+ xport->may_enable);
if (xport->peer) {
struct xport *peer = xport_lookup(new_xcfg, xport->peer->ofport);
struct dpif *dpif, struct rule_dpif *miss_rule,
struct rule_dpif *no_packet_in_rule,
const struct mac_learning *ml, struct stp *stp,
- const struct mcast_snooping *ms,
+ struct rstp *rstp, const struct mcast_snooping *ms,
const struct mbridge *mbridge,
const struct dpif_sflow *sflow,
const struct dpif_ipfix *ipfix,
const struct netflow *netflow, enum ofp_config_flags frag,
- bool forward_bpdu, bool has_in_band,
- bool enable_recirc,
- bool variable_length_userdata,
- size_t max_mpls_depth)
+ bool forward_bpdu, bool has_in_band, bool enable_recirc,
+ bool variable_length_userdata, size_t max_mpls_depth,
+ bool masked_set_action)
{
struct xbridge *xbridge;
xbridge->name = xstrdup(name);
xlate_xbridge_set(xbridge, dpif, miss_rule, no_packet_in_rule, ml, stp,
- ms, mbridge, sflow, ipfix, netflow, frag, forward_bpdu,
- has_in_band, enable_recirc, variable_length_userdata,
- max_mpls_depth);
+ rstp, ms, mbridge, sflow, ipfix, netflow, frag,
+ forward_bpdu, has_in_band, enable_recirc,
+ variable_length_userdata, max_mpls_depth,
+ masked_set_action);
}
static void
dpif_sflow_unref(xbridge->sflow);
dpif_ipfix_unref(xbridge->ipfix);
stp_unref(xbridge->stp);
+ rstp_unref(xbridge->rstp);
hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
odp_port_t odp_port, const struct netdev *netdev,
const struct cfm *cfm, const struct bfd *bfd,
struct ofport_dpif *peer, int stp_port_no,
+ const struct rstp_port *rstp_port,
const struct ofproto_port_queue *qdscp_list, size_t n_qdscp,
enum ofputil_port_config config,
enum ofputil_port_state state, bool is_tunnel,
ovs_assert(xport->ofp_port == ofp_port);
- xlate_xport_set(xport, odp_port, netdev, cfm, bfd, stp_port_no, config,
- state, is_tunnel, may_enable);
+ xlate_xport_set(xport, odp_port, netdev, cfm, bfd, stp_port_no,
+ rstp_port, config, state, is_tunnel, may_enable);
if (xport->peer) {
xport->peer->peer = NULL;
hmap_remove(&xport->xbridge->xports, &xport->ofp_node);
netdev_close(xport->netdev);
+ rstp_port_unref(xport->rstp_port);
cfm_unref(xport->cfm);
bfd_unref(xport->bfd);
free(xport);
}
}
+/* Returns the RSTP state of 'xport', or RSTP_DISABLED if the port has no
+ * RSTP port attached. */
+static enum rstp_state
+xport_get_rstp_port_state(const struct xport *xport)
+{
+ return xport->rstp_port
+ ? rstp_port_get_state(xport->rstp_port)
+ : RSTP_DISABLED;
+}
+
+/* Returns true if 'xport''s RSTP state permits MAC learning.  Ports with no
+ * RSTP port report RSTP_DISABLED, so the result for those depends on how
+ * rstp_learn_in_state() treats the disabled state. */
+static bool
+xport_rstp_learn_state(const struct xport *xport)
+{
+ return rstp_learn_in_state(xport_get_rstp_port_state(xport));
+}
+
+/* Returns true if 'xport''s RSTP state permits forwarding of data traffic. */
+static bool
+xport_rstp_forward_state(const struct xport *xport)
+{
+ return rstp_forward_in_state(xport_get_rstp_port_state(xport));
+}
+
+/* Returns true if, in 'xport''s current RSTP state, BPDUs should be handed
+ * to the RSTP implementation rather than forwarded as ordinary traffic. */
+static bool
+xport_rstp_should_manage_bpdu(const struct xport *xport)
+{
+ return rstp_should_manage_bpdu(xport_get_rstp_port_state(xport));
+}
+
+/* Delivers the BPDU in 'packet' to 'xport''s RSTP port for protocol
+ * processing.  The Ethernet and LLC headers are stripped before the payload
+ * is handed to the RSTP layer; packets on ports without RSTP are silently
+ * sunk.  'packet' itself is not modified (a shallow copy is trimmed). */
+static void
+rstp_process_packet(const struct xport *xport, const struct ofpbuf *packet)
+{
+ struct ofpbuf payload = *packet;
+ struct eth_header *eth = ofpbuf_data(&payload);
+
+ /* Sink packets on ports that have no RSTP. */
+ if (!xport->rstp_port) {
+ return;
+ }
+
+ /* Trim off padding on payload.  For LLC frames eth_type holds the 802.3
+  * length, so anything beyond header+length is pad bytes.
+  * NOTE(review): eth->eth_type is read before the buffer size is checked;
+  * presumably callers only pass frames at least ETH_HEADER_LEN long --
+  * confirm against the classification path. */
+ if (ofpbuf_size(&payload) > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
+ ofpbuf_set_size(&payload, ntohs(eth->eth_type) + ETH_HEADER_LEN);
+ }
+
+ /* Only deliver the BPDU if both headers are actually present. */
+ if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
+ rstp_port_received_bpdu(xport->rstp_port, ofpbuf_data(&payload),
+ ofpbuf_size(&payload));
+ }
+}
+
static struct xport *
get_ofp_port(const struct xbridge *xbridge, ofp_port_t ofp_port)
{
lacp_process_packet(xport->xbundle->lacp, xport->ofport, packet);
}
return SLOW_LACP;
- } else if (xbridge->stp && stp_should_process_flow(flow, wc)) {
+ } else if ((xbridge->stp || xbridge->rstp) &&
+ stp_should_process_flow(flow, wc)) {
if (packet) {
- stp_process_packet(xport, packet);
+ xbridge->stp
+ ? stp_process_packet(xport, packet)
+ : rstp_process_packet(xport, packet);
}
return SLOW_STP;
} else {
return;
} else if (check_stp) {
if (is_stp(&ctx->base_flow)) {
- if (!xport_stp_should_forward_bpdu(xport)) {
- xlate_report(ctx, "STP not in listening state, "
- "skipping bpdu output");
+ if (!xport_stp_should_forward_bpdu(xport) &&
+ !xport_rstp_should_manage_bpdu(xport)) {
+ if (ctx->xbridge->stp != NULL) {
+ xlate_report(ctx, "STP not in listening state, "
+ "skipping bpdu output");
+ } else if (ctx->xbridge->rstp != NULL) {
+ xlate_report(ctx, "RSTP not managing BPDU in this state, "
+ "skipping bpdu output");
+ }
return;
}
- } else if (!xport_stp_forward_state(xport)) {
- xlate_report(ctx, "STP not in forwarding state, "
- "skipping output");
+ } else if (!xport_stp_forward_state(xport) ||
+ !xport_rstp_forward_state(xport)) {
+ if (ctx->xbridge->stp != NULL) {
+ xlate_report(ctx, "STP not in forwarding state, "
+ "skipping output");
+ } else if (ctx->xbridge->rstp != NULL) {
+ xlate_report(ctx, "RSTP not in forwarding state, "
+ "skipping output");
+ }
return;
}
}
if (special) {
ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
- if (xport_stp_forward_state(peer)) {
+ if (xport_stp_forward_state(peer) && xport_rstp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true, true);
} else {
- /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
- * learning action look at the packet, then drop it. */
+ /* Forwarding is disabled by STP and RSTP. Let OFPP_NORMAL and
+ * the learning action look at the packet, then drop it. */
struct flow old_base_flow = ctx->base_flow;
size_t old_size = ofpbuf_size(ctx->xout->odp_actions);
mirror_mask_t old_mirrors = ctx->xout->mirrors;
flow_pkt_mark = flow->pkt_mark;
flow_nw_tos = flow->nw_tos;
- if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
- wc->masks.nw_tos |= IP_DSCP_MASK;
- flow->nw_tos &= ~IP_DSCP_MASK;
- flow->nw_tos |= dscp;
+ if (count_skb_priorities(xport)) {
+ memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
+ if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
+ wc->masks.nw_tos |= IP_DSCP_MASK;
+ flow->nw_tos &= ~IP_DSCP_MASK;
+ flow->nw_tos |= dscp;
+ }
}
if (xport->is_tunnel) {
if (out_port != ODPP_NONE) {
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc);
+ &ctx->xout->wc,
+ ctx->xbridge->masked_set_action);
if (ctx->use_recirc) {
struct ovs_action_hash *act_hash;
struct ofputil_bucket *bucket;
uint32_t basis;
- basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
+ basis = flow_hash_symmetric_l4(&ctx->xin->flow, 0);
bucket = group_best_live_bucket(ctx, group, basis);
if (bucket) {
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+ memset(&wc->masks.dl_src, 0xff, sizeof wc->masks.dl_src);
+ memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+ memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+ memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+ memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+ memset(&wc->masks.tp_dst, 0xff, sizeof wc->masks.tp_dst);
+ memset(&wc->masks.tp_src, 0xff, sizeof wc->masks.tp_src);
+ memset(&wc->masks.vlan_tci, 0xff, sizeof wc->masks.vlan_tci);
+
xlate_group_bucket(ctx, bucket);
xlate_group_stats(ctx, group, bucket);
}
{
struct ofproto_packet_in *pin;
struct dpif_packet *packet;
- struct pkt_metadata md = PKT_METADATA_INITIALIZER(0);
ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc);
+ &ctx->xout->wc,
+ ctx->xbridge->masked_set_action);
- odp_execute_actions(NULL, &packet, 1, false, &md,
+ odp_execute_actions(NULL, &packet, 1, false,
ofpbuf_data(ctx->xout->odp_actions),
ofpbuf_size(ctx->xout->odp_actions), NULL);
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc);
+ &ctx->xout->wc,
+ ctx->xbridge->masked_set_action);
nl_msg_put_u32(ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC, id);
}
if (!n) {
ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc);
+ &ctx->xout->wc,
+ ctx->xbridge->masked_set_action);
} else if (n >= FLOW_MAX_MPLS_LABELS) {
if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
compose_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
struct flow *flow = &ctx->xin->flow;
- uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
struct flow_wildcards *wc = &ctx->xout->wc;
- memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
if (eth_type_mpls(flow->dl_type)) {
+ uint8_t ttl = mpls_lse_to_ttl(flow->mpls_lse[0]);
+
+ wc->masks.mpls_lse[0] |= htonl(MPLS_TTL_MASK);
if (ttl > 1) {
ttl--;
set_mpls_lse_ttl(&flow->mpls_lse[0], ttl);
return false;
} else {
execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
-
- /* Stop processing for current table. */
- return true;
}
- } else {
- return true;
}
+
+ /* Stop processing for current table. */
+ return true;
}
static void
ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
ctx->xout->odp_actions,
- &ctx->xout->wc);
+ &ctx->xout->wc,
+ ctx->xbridge->masked_set_action);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
* disabled. If just learning is enabled, we need to have
* OFPP_NORMAL and the learning action have a look at the packet
* before we can drop it. */
- if (!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) {
+ if ((!xport_stp_forward_state(xport) && !xport_stp_learn_state(xport)) ||
+ (!xport_rstp_forward_state(xport) && !xport_rstp_learn_state(xport))) {
return false;
}
return (mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->dst.field) ||
mf_is_l3_or_higher(ofpact_get_REG_MOVE(a)->src.field));
- case OFPACT_REG_LOAD:
- return mf_is_l3_or_higher(ofpact_get_REG_LOAD(a)->dst.field);
-
case OFPACT_SET_FIELD:
return mf_is_l3_or_higher(ofpact_get_SET_FIELD(a)->field);
break;
case OFPACT_ENQUEUE:
+ memset(&wc->masks.skb_priority, 0xff,
+ sizeof wc->masks.skb_priority);
xlate_enqueue_action(ctx, ofpact_get_ENQUEUE(a));
break;
break;
case OFPACT_SET_QUEUE:
+ memset(&wc->masks.skb_priority, 0xff,
+ sizeof wc->masks.skb_priority);
xlate_set_queue_action(ctx, ofpact_get_SET_QUEUE(a)->queue_id);
break;
case OFPACT_POP_QUEUE:
+ memset(&wc->masks.skb_priority, 0xff,
+ sizeof wc->masks.skb_priority);
flow->skb_priority = ctx->orig_skb_priority;
break;
nxm_execute_reg_move(ofpact_get_REG_MOVE(a), flow, wc);
break;
- case OFPACT_REG_LOAD:
- nxm_execute_reg_load(ofpact_get_REG_LOAD(a), flow, wc);
- break;
-
case OFPACT_SET_FIELD:
set_field = ofpact_get_SET_FIELD(a);
mf = set_field->field;
}
mf_mask_field_and_prereqs(mf, &wc->masks);
- mf_set_flow_value(mf, &set_field->value, flow);
+ mf_set_flow_value_masked(mf, &set_field->value, &set_field->mask,
+ flow);
break;
case OFPACT_STACK_PUSH:
return pdscp != NULL;
}
+/* Returns the number of skb_priority-to-DSCP mappings configured on 'xport'.
+ * Callers use a nonzero count to decide whether skb_priority must be
+ * unwildcarded before DSCP rewriting. */
+static size_t
+count_skb_priorities(const struct xport *xport)
+{
+ return hmap_count(&xport->skb_priorities);
+}
+
static void
clear_skb_priorities(struct xport *xport)
{
return false;
}
+#if defined(__linux__)
+/* Returns the maximum number of packets that the Linux kernel is willing to
+ * queue up internally to certain kinds of software-implemented ports, or the
+ * default (and rarely modified) value if it cannot be determined. */
+static int
+netdev_max_backlog(void)
+{
+ static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
+ static int max_backlog = 1000; /* The normal default value. */
+
+ /* The sysctl is read only on the first call; the cached value is reused
+  * for the lifetime of the process. */
+ if (ovsthread_once_start(&once)) {
+ static const char filename[] = "/proc/sys/net/core/netdev_max_backlog";
+ FILE *stream;
+ int n;
+
+ stream = fopen(filename, "r");
+ if (!stream) {
+ VLOG_WARN("%s: open failed (%s)", filename, ovs_strerror(errno));
+ } else {
+ if (fscanf(stream, "%d", &n) != 1) {
+ VLOG_WARN("%s: read error", filename);
+ } else if (n <= 100) {
+ /* Reject implausibly small values and keep the default, since
+  * this value gates SLOW_ACTION classification. */
+ VLOG_WARN("%s: unexpectedly small value %d", filename, n);
+ } else {
+ max_backlog = n;
+ }
+ fclose(stream);
+ }
+ ovsthread_once_done(&once);
+
+ VLOG_DBG("%s: using %d max_backlog", filename, max_backlog);
+ }
+
+ return max_backlog;
+}
+
+/* Counts and returns the number of OVS_ACTION_ATTR_OUTPUT actions in
+ * 'odp_actions'. */
+static int
+count_output_actions(const struct ofpbuf *odp_actions)
+{
+ const struct nlattr *a;
+ size_t left;
+ int n = 0;
+
+ /* Walk the flat netlink attribute list; only top-level OUTPUT actions
+  * are counted. */
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, ofpbuf_data(odp_actions),
+ ofpbuf_size(odp_actions)) {
+ if (a->nla_type == OVS_ACTION_ATTR_OUTPUT) {
+ n++;
+ }
+ }
+ return n;
+}
+#endif /* defined(__linux__) */
+
+/* Returns true if 'odp_actions' contains more output actions than the datapath
+ * can reliably handle in one go. On Linux, this is the value of the
+ * net.core.netdev_max_backlog sysctl, which limits the maximum number of
+ * packets that the kernel is willing to queue up for processing while the
+ * datapath is processing a set of actions. */
+static bool
+too_many_output_actions(const struct ofpbuf *odp_actions OVS_UNUSED)
+{
+#ifdef __linux__
+ /* size / NL_A_U32_SIZE is a cheap upper bound on the number of actions
+  * (each action occupies at least one u32-sized attribute), so the exact
+  * count_output_actions() walk only runs when the bound is exceeded. */
+ return (ofpbuf_size(odp_actions) / NL_A_U32_SIZE > netdev_max_backlog()
+ && count_output_actions(odp_actions) > netdev_max_backlog());
+#else
+ /* OSes other than Linux might have similar limits, but we don't know how
+ * to determine them. */
+ return false;
+#endif
+}
+
/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
* into datapath actions in 'odp_actions', using 'ctx'.
*
flow_wildcards_init_catchall(wc);
memset(&wc->masks.in_port, 0xff, sizeof wc->masks.in_port);
- memset(&wc->masks.skb_priority, 0xff, sizeof wc->masks.skb_priority);
memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
if (is_ip_any(flow)) {
wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
/* We've let OFPP_NORMAL and the learning action look at the
* packet, so drop it now if forwarding is disabled. */
- if (in_port && !xport_stp_forward_state(in_port)) {
+ if (in_port && (!xport_stp_forward_state(in_port) ||
+ !xport_rstp_forward_state(in_port))) {
ofpbuf_set_size(ctx.xout->odp_actions, sample_actions_len);
}
}
* prevent the flow from being installed. */
COVERAGE_INC(xlate_actions_oversize);
ctx.xout->slow |= SLOW_ACTION;
+ } else if (too_many_output_actions(ctx.xout->odp_actions)) {
+ COVERAGE_INC(xlate_actions_too_many_output);
+ ctx.xout->slow |= SLOW_ACTION;
}
if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
/* Push stats and perform side effects of flow translation. */
void
-xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+xlate_push_stats(struct xlate_cache *xcache,
const struct dpif_flow_stats *stats)
{
struct xc_entry *entry;
struct ofpbuf entries = xcache->entries;
+ if (!stats->n_packets) {
+ return;
+ }
+
XC_ENTRY_FOR_EACH (entry, entries, xcache) {
switch (entry->type) {
case XC_RULE:
stats->n_packets, stats->n_bytes);
break;
case XC_LEARN:
- if (may_learn) {
- ofproto_dpif_flow_mod(entry->u.learn.ofproto,
- entry->u.learn.fm);
- }
+ ofproto_dpif_flow_mod(entry->u.learn.ofproto, entry->u.learn.fm);
break;
case XC_NORMAL:
- if (may_learn) {
- xlate_cache_normal(entry->u.normal.ofproto,
- entry->u.normal.flow, entry->u.normal.vlan);
- }
+ xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+ entry->u.normal.vlan);
break;
case XC_FIN_TIMEOUT:
xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,