COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(packet_in_overflow);
-/* Number of implemented OpenFlow tables. */
-enum { N_TABLES = 255 };
-enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
-BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);
-
/* No bfd/cfm status change. */
#define NO_STATUS_CHANGE -1
struct dpif_flow_stats stats OVS_GUARDED;
};
+/* RULE_CAST() depends on 'up' being the first member of struct
+ * rule_dpif, i.e. at offset 0, as asserted here. */
+BUILD_ASSERT_DECL(offsetof(struct rule_dpif, up) == 0);
+
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
ofproto_flow_mod(&ofproto->up, fm);
}
-/* Resets the modified time for 'rule' or an equivalent rule. If 'rule' is not
- * in the classifier, but an equivalent rule is, unref 'rule' and ref the new
- * rule. Otherwise if 'rule' is no longer installed in the classifier,
- * reinstall it.
- *
- * Returns the rule whose modified time has been reset. */
-struct rule_dpif *
-ofproto_dpif_refresh_rule(struct rule_dpif *rule)
-{
- return rule_dpif_cast(ofproto_refresh_rule(&rule->up));
-}
-
/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
* Takes ownership of 'pin' and pin->packet. */
void
return error;
}
-/* Tests whether 'backer''s datapath supports recirculation Only newer datapath
- * supports OVS_KEY_ATTR in OVS_ACTION_ATTR_USERSPACE actions. We need to
- * disable some features on older datapaths that don't support this feature.
+/* Tests whether 'backer''s datapath supports recirculation. Only newer
+ * datapaths support OVS_KEY_ATTR_RECIRC_ID in keys. We need to disable some
+ * features on older datapaths that don't support this feature.
*
* Returns false if 'backer' definitely does not support recirculation, true if
* it seems to support recirculation or if at least the error we get is
ofpbuf_clear(&ofpacts);
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
- &ofproto->no_packet_in_rule);
+ &ofproto->no_packet_in_rule);
if (error) {
return error;
}
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
- &ofproto->drop_frags_rule);
+ &ofproto->drop_frags_rule);
if (error) {
return error;
}
match_init_catchall(&match);
match_set_recirc_id(&match, 0);
- error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, &ofpacts,
+ error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, &ofpacts,
&unused_rulep);
if (error) {
return error;
*/
ofpbuf_clear(&ofpacts);
match_init_catchall(&match);
- error = ofproto_dpif_add_internal_flow(ofproto, &match, 1, &ofpacts,
+ error = ofproto_dpif_add_internal_flow(ofproto, &match, 1, &ofpacts,
&unused_rulep);
return error;
ovs_rwlock_unlock(&ofproto->ml->rwlock);
}
+ /* Always update ofproto->pins_seqno to avoid frequent wakeups during
+ * flow restore. Even though nothing is processed during flow restore,
+ * all queued 'pins' will be handled immediately when flow restore
+ * completes. */
+ ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+
/* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
}
}
- /* Always updates the ofproto->pins_seqno to avoid frequent wakeup during
- * flow restore. Even though nothing is processed during flow restore,
- * all queued 'pins' will be handled immediately when flow restore
- * completes. */
- ofproto->pins_seqno = seq_read(ofproto->pins_seq);
-
if (ofproto->netflow) {
netflow_run(ofproto->netflow);
}
}
static int
-get_cfm_status(const struct ofport *ofport_,
+get_cfm_status(const struct ofport *ofport_, bool force,
struct ofproto_cfm_status *status)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
int ret = 0;
if (ofport->cfm) {
- if (cfm_check_status_change(ofport->cfm)) {
+ if (cfm_check_status_change(ofport->cfm) || force) {
status->faults = cfm_get_fault(ofport->cfm);
status->flap_count = cfm_get_flap_count(ofport->cfm);
status->remote_opstate = cfm_get_opup(ofport->cfm);
}
static int
-get_bfd_status(struct ofport *ofport_, struct smap *smap)
+get_bfd_status(struct ofport *ofport_, bool force, struct smap *smap)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
int ret = 0;
if (ofport->bfd) {
- if (bfd_check_status_change(ofport->bfd)) {
+ if (bfd_check_status_change(ofport->bfd) || force) {
bfd_get_status(ofport->bfd, smap);
} else {
ret = NO_STATUS_CHANGE;
ovs_mutex_unlock(&rule->stats_mutex);
}
-bool
-rule_dpif_is_fail_open(const struct rule_dpif *rule)
-{
- return is_fail_open_rule(&rule->up);
-}
-
-bool
-rule_dpif_is_table_miss(const struct rule_dpif *rule)
-{
- return rule_is_table_miss(&rule->up);
-}
-
-bool
-rule_dpif_is_internal(const struct rule_dpif *rule)
-{
- return rule_is_internal(&rule->up);
-}
-
ovs_be64
rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
OVS_REQUIRES(rule->up.mutex)
}
}
-void
-rule_dpif_ref(struct rule_dpif *rule)
-{
- if (rule) {
- ofproto_rule_ref(&rule->up);
- }
-}
-
-void
-rule_dpif_unref(struct rule_dpif *rule)
-{
- if (rule) {
- ofproto_rule_unref(&rule->up);
- }
-}
-
static void
complete_operation(struct rule_dpif *rule)
OVS_REQUIRES(ofproto_mutex)
ofproto_unixctl_dpif_dump_flows, NULL);
}
-
-/* Returns true if 'rule' is an internal rule, false otherwise. */
+/* Returns true if 'table_id' is the table used for internal rules,
+ * false otherwise. */
bool
-rule_is_internal(const struct rule *rule)
+table_is_internal(uint8_t table_id)
{
- return rule->table_id == TBL_INTERNAL;
+ return table_id == TBL_INTERNAL;
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
- struct match *match, int priority,
+ const struct match *match, int priority,
const struct ofpbuf *ofpacts,
struct rule **rulep)
{
return error;
}
- rule = rule_dpif_lookup_in_table(ofproto, TBL_INTERNAL, &match->flow,
- &match->wc, false);
+ rule = rule_dpif_lookup_in_table(ofproto, TBL_INTERNAL, &fm.match.flow,
+ &fm.match.wc, false);
if (rule) {
*rulep = &rule->up;
} else {