uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
+ bool use_recirc; /* Should recirculation actions be generated? */
+ struct xlate_recirc recirc; /* Information used for generating
+ * recirculation actions. */
+
/* OpenFlow 1.1+ action set.
*
* 'action_set' accumulates "struct ofpact"s added by OFPACT_WRITE_ACTIONS.
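/* A minimal sketch of 'struct xlate_recirc', inferred from the members
 * this patch reads and writes (recirc_id, hash_alg, hash_basis); the
 * real definition may differ in layout or carry additional fields. */
struct xlate_recirc {
    uint32_t recirc_id;   /* Recirculation ID for the recirc action. */
    uint8_t hash_alg;     /* Datapath hash algorithm, e.g. OVS_HASH_ALG_L4. */
    uint32_t hash_basis;  /* Basis for the datapath hash action. */
};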
} bond;
struct {
struct ofproto_dpif *ofproto;
- struct rule_dpif *rule;
+ struct ofputil_flow_mod *fm;
+ struct ofpbuf *ofpacts;
} learn;
struct {
struct ofproto_dpif *ofproto;
static bool
stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
- return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+ return is_stp(flow);
}
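/* For reference, a sketch of is_stp(); the FLOW_DL_TYPE_NONE test is an
 * assumption, but the dl_dst comparison is exactly what this function
 * open-coded before the change. Because is_stp() also tests dl_type, the
 * comment above matters: dl_type needs no extra unwildcarding since it
 * is always exact-matched in 'wc'. */
static inline bool
is_stp(const struct flow *flow)
{
    return (flow->dl_type == htons(FLOW_DL_TYPE_NONE)
            && eth_addr_equals(flow->dl_dst, eth_addr_stp));
}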
static void
return xport->xbundle;
}
- /* Special-case OFPP_NONE, which a controller may use as the ingress
- * port for traffic that it is sourcing. */
- if (in_port == OFPP_NONE) {
+ /* Special-case OFPP_NONE (OF1.0) and OFPP_CONTROLLER (OF1.1+),
+ * which a controller may use as the ingress port for traffic that
+ * it is sourcing. */
+ if (in_port == OFPP_CONTROLLER || in_port == OFPP_NONE) {
return &ofpp_none_bundle;
}
/* Partially configured bundle with no slaves. Drop the packet. */
return;
} else if (!out_xbundle->bond) {
- ctx->xout->use_recirc = false;
+ ctx->use_recirc = false;
xport = CONTAINER_OF(list_front(&out_xbundle->xports), struct xport,
bundle_node);
} else {
struct ofport_dpif *ofport;
- struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct xlate_recirc *xr = &ctx->recirc;
struct flow_wildcards *wc = &ctx->xout->wc;
if (ctx->xbridge->enable_recirc) {
- ctx->xout->use_recirc = bond_may_recirc(
+ ctx->use_recirc = bond_may_recirc(
out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
- if (ctx->xout->use_recirc) {
+ if (ctx->use_recirc) {
/* Only TCP mode uses recirculation. */
xr->hash_alg = OVS_HASH_ALG_L4;
bond_update_post_recirc_rules(out_xbundle->bond, false);
- /* If ctx->xout->use_recirc is set, the main thread will handle stats
+ /* If ctx->use_recirc is set, the main thread will handle stats
 * accounting for this bond. */
- if (!ctx->xout->use_recirc) {
+ if (!ctx->use_recirc) {
if (ctx->xin->resubmit_stats) {
bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
ctx->xin->resubmit_stats->n_bytes);
/* Save enough info to update mac learning table later. */
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
- entry->u.normal.ofproto = ctx->xin->ofproto;
+ entry->u.normal.ofproto = ctx->xbridge->ofproto;
entry->u.normal.flow = xmemdup(flow, sizeof *flow);
entry->u.normal.vlan = vlan;
}
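/* Using ctx->xbridge->ofproto rather than ctx->xin->ofproto here records
 * the bridge the packet is currently being translated in; the two can
 * differ once translation has crossed a patch port, and the MAC entry
 * must be updated in the bridge that actually learned it. */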
bfd_process_packet(xport->bfd, flow, packet);
- /* If POLL received, immediately sends FINAL back. */
+ /* If a POLL was received, immediately send FINAL back. */
if (bfd_should_send_packet(xport->bfd)) {
- if (xport->peer) {
- ofproto_dpif_monitor_port_send_soon(xport->ofport);
- } else {
- ofproto_dpif_monitor_port_send_soon_safe(xport->ofport);
- }
+ ofproto_dpif_monitor_port_send_soon(xport->ofport);
}
}
return SLOW_BFD;
xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
return;
} else if (check_stp) {
- if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+ if (is_stp(&ctx->base_flow)) {
if (!xport_stp_listen_state(xport)) {
xlate_report(ctx, "STP not in listening state, "
"skipping bpdu output");
&ctx->xout->odp_actions,
&ctx->xout->wc);
- if (ctx->xout->use_recirc) {
+ if (ctx->use_recirc) {
struct ovs_action_hash *act_hash;
- struct xlate_recirc *xr = &ctx->xout->recirc;
+ struct xlate_recirc *xr = &ctx->recirc;
/* Hash action. */
act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
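/* A sketch of the full recirculation emit path that the hunk above
 * begins (an assumption about the elided lines, based on the fields of
 * 'struct xlate_recirc'): a hash action computes a per-flow hash, then a
 * recirc action re-injects the packet carrying xr->recirc_id. */
act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
                                    OVS_ACTION_ATTR_HASH,
                                    sizeof *act_hash);
act_hash->hash_alg = xr->hash_alg;
act_hash->hash_basis = xr->hash_basis;
nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
               xr->recirc_id);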
xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
struct rule_dpif *old_rule = ctx->rule;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
if (ctx->xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
}
- if (ctx->xin->xcache) {
- struct xc_entry *entry;
-
- entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
- entry->u.rule = rule;
- rule_dpif_ref(rule);
- }
ctx->resubmits++;
ctx->recurse++;
!skip_wildcards
? &ctx->xout->wc : NULL,
honor_table_miss,
- &ctx->table_id, &rule);
+ &ctx->table_id, &rule,
+ ctx->xin->xcache != NULL);
ctx->xin->flow.in_port.ofp_port = old_in_port;
if (ctx->xin->resubmit_hook) {
}
choose_miss_rule(config, ctx->xbridge->miss_rule,
- ctx->xbridge->no_packet_in_rule, &rule);
+ ctx->xbridge->no_packet_in_rule, &rule,
+ ctx->xin->xcache != NULL);
match:
if (rule) {
+ /* Fill in the cache entry here instead of in xlate_recursively()
+ * to make the reference counting more explicit. We take a
+ * reference in the lookups above if we are going to cache the
+ * rule. */
+ if (ctx->xin->xcache) {
+ struct xc_entry *entry;
+
+ entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+ entry->u.rule = rule;
+ }
xlate_recursively(ctx, rule);
- rule_dpif_unref(rule);
}
ctx->table_id = old_table_id;
}
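/* A standalone sketch of the 'take_ref' idiom that this patch threads
 * through rule_dpif_lookup(), rule_dpif_lookup_from_table() and
 * choose_miss_rule(); 'find_rule' is a hypothetical stand-in for the
 * real classifier lookup. The callee takes the reference, so the caller
 * owns one only when it asked for it, i.e. when it will cache the rule
 * in an XC_RULE entry that outlives this translation. */
static struct rule_dpif *
lookup_rule(const struct flow *flow, bool take_ref)
{
    struct rule_dpif *rule = find_rule(flow); /* hypothetical lookup */
    if (rule && take_ref) {
        rule_dpif_ref(rule);
    }
    return rule;
}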
static void
-xlate_learn_action(struct xlate_ctx *ctx,
- const struct ofpact_learn *learn)
+xlate_learn_action__(struct xlate_ctx *ctx, const struct ofpact_learn *learn,
+ struct ofputil_flow_mod *fm, struct ofpbuf *ofpacts)
{
- uint64_t ofpacts_stub[1024 / 8];
- struct ofputil_flow_mod fm;
- struct ofpbuf ofpacts;
+ learn_execute(learn, &ctx->xin->flow, fm, ofpacts);
+ if (ctx->xin->may_learn) {
+ ofproto_dpif_flow_mod(ctx->xbridge->ofproto, fm);
+ }
+}
+static void
+xlate_learn_action(struct xlate_ctx *ctx, const struct ofpact_learn *learn)
+{
ctx->xout->has_learn = true;
-
learn_mask(learn, &ctx->xout->wc);
- if (!ctx->xin->may_learn) {
- return;
- }
-
- ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
- learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
- ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
- ofpbuf_uninit(&ofpacts);
-
if (ctx->xin->xcache) {
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
- entry->u.learn.ofproto = ctx->xin->ofproto;
- rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
- &entry->u.learn.rule);
+ entry->u.learn.ofproto = ctx->xbridge->ofproto;
+ entry->u.learn.fm = xmalloc(sizeof *entry->u.learn.fm);
+ entry->u.learn.ofpacts = ofpbuf_new(64);
+ xlate_learn_action__(ctx, learn, entry->u.learn.fm,
+ entry->u.learn.ofpacts);
+ } else if (ctx->xin->may_learn) {
+ uint64_t ofpacts_stub[1024 / 8];
+ struct ofputil_flow_mod fm;
+ struct ofpbuf ofpacts;
+
+ ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
+ xlate_learn_action__(ctx, learn, &fm, &ofpacts);
+ ofpbuf_uninit(&ofpacts);
}
}
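/* Note on ownership in the caching branch above: learn_execute() points
 * fm->ofpacts into the 'ofpacts' buffer (an assumption consistent with
 * how the two are paired throughout this patch), so the cache entry must
 * keep the flow_mod and its ofpbuf alive together; xlate_cache_clear()
 * frees them as a pair. */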
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+ /* XC_RULE already holds a reference to the rule, so none is
+ * taken here. */
entry->u.fin.rule = ctx->rule;
entry->u.fin.idle = oft->fin_idle_timeout;
entry->u.fin.hard = oft->fin_hard_timeout;
- rule_dpif_ref(ctx->rule);
}
}
}
static bool
may_receive(const struct xport *xport, struct xlate_ctx *ctx)
{
- if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+ if (xport->config & (is_stp(&ctx->xin->flow)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
struct flow *flow = &xin->flow;
struct rule_dpif *rule = NULL;
- struct rule_actions *actions = NULL;
+ const struct rule_actions *actions = NULL;
enum slow_path_reason special;
const struct ofpact *ofpacts;
struct xport *in_port;
ctx.xbridge = xbridge_lookup(xin->ofproto);
if (!ctx.xbridge) {
- goto out;
+ return;
}
ctx.rule = xin->rule;
ctx.orig_skb_priority = flow->skb_priority;
ctx.table_id = 0;
ctx.exit = false;
+ ctx.use_recirc = false;
if (!xin->ofpacts && !ctx.rule) {
ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
!xin->skip_wildcards ? wc : NULL,
- &rule);
+ &rule, ctx.xin->xcache != NULL);
if (ctx.xin->resubmit_stats) {
rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
}
struct xc_entry *entry;
entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
- rule_dpif_ref(rule);
entry->u.rule = rule;
}
ctx.rule = rule;
}
xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
- xout->use_recirc = false;
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
break;
case OFPC_FRAG_DROP:
- goto out;
+ return;
case OFPC_FRAG_REASM:
OVS_NOT_REACHED();
}
if (ctx.xbridge->netflow) {
- const struct ofpact *ofpacts = actions->ofpacts;
- size_t ofpacts_len = actions->ofpacts_len;
-
/* Only update netflow if we don't have controller flow. We don't
* report NetFlow expiration messages for such facets because they
* are just part of the control logic for the network, not real
wc->masks.tp_src &= htons(UINT8_MAX);
wc->masks.tp_dst &= htons(UINT8_MAX);
}
-
-out:
- rule_dpif_unref(rule);
}
/* Sends 'packet' out 'ofport'.
break;
case XC_LEARN:
if (may_learn) {
- struct rule_dpif *rule = entry->u.learn.rule;
-
- /* Reset the modified time for a rule that is equivalent to
- * the currently cached rule. If the rule is not the exact
- * rule wehave cached, update the reference that we have. */
- entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+ ofproto_dpif_flow_mod(entry->u.learn.ofproto,
+ entry->u.learn.fm);
}
break;
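/* Re-issuing the cached flow mod refreshes the learned flow (resetting
 * its 'modified' time) on every stats push, which replaces the
 * refresh-by-rule-pointer scheme deleted above. */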
case XC_NORMAL:
static void
xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
{
- netflow_expire(netflow, flow);
netflow_flow_clear(netflow, flow);
netflow_unref(netflow);
free(flow);
mbridge_unref(entry->u.mirror.mbridge);
break;
case XC_LEARN:
- rule_dpif_unref(entry->u.learn.rule);
+ free(entry->u.learn.fm);
+ ofpbuf_delete(entry->u.learn.ofpacts);
break;
case XC_NORMAL:
free(entry->u.normal.flow);
break;
case XC_FIN_TIMEOUT:
- rule_dpif_unref(entry->u.fin.rule);
+ /* 'u.fin.rule' is always also held as an XC_RULE, which has
+ * already released its reference above. */
break;
default:
OVS_NOT_REACHED();