struct ovs_mutex stats_mutex;
struct dpif_flow_stats stats OVS_GUARDED;
+ /* If non-NULL, will point to a new rule (for which a reference is held) to
+ * which all the stats updates should be forwarded. This exists only
+ * transitionally when flows are replaced.
+ *
+ * Protected by stats_mutex. If both 'rule->stats_mutex' and
+ * 'rule->new_rule->stats_mutex' must be held together, acquire them in that
+ * order. */
+ struct rule_dpif *new_rule OVS_GUARDED;
+
/* If non-zero then the recirculation id that has
* been allocated for use with this rule.
* The recirculation id and associated internal flow should
struct ofproto up;
struct dpif_backer *backer;
+ atomic_llong tables_version; /* Version # to use in classifier lookups. */
+
uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
/* Special OpenFlow rules. */
return error;
}
+ atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ofproto->ipfix = NULL;
}
}
+/* ofproto class hook (installed as 'set_tables_version' in the ofproto class
+ * table below): publishes 'version' as the classifier version to use for
+ * subsequent flow table lookups.  Uses a relaxed atomic store; paired with
+ * the relaxed read in ofproto_dpif_get_tables_version(). */
+static void
+set_tables_version(struct ofproto *ofproto_, long long version)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+ atomic_store_relaxed(&ofproto->tables_version, version);
+}
+
+
static struct ofport *
port_alloc(void)
{
const struct dpif_flow_stats *stats)
{
ovs_mutex_lock(&rule->stats_mutex);
- rule->stats.n_packets += stats->n_packets;
- rule->stats.n_bytes += stats->n_bytes;
- rule->stats.used = MAX(rule->stats.used, stats->used);
+ if (OVS_UNLIKELY(rule->new_rule)) {
+ rule_dpif_credit_stats(rule->new_rule, stats);
+ } else {
+ rule->stats.n_packets += stats->n_packets;
+ rule->stats.n_bytes += stats->n_bytes;
+ rule->stats.used = MAX(rule->stats.used, stats->used);
+ }
ovs_mutex_unlock(&rule->stats_mutex);
}
ovs_mutex_unlock(&rule->up.mutex);
}
+/* Returns the flow table classifier version that 'ofproto' currently
+ * publishes (the value most recently stored by set_tables_version()).
+ * Relaxed atomic read; pairs with the relaxed store on the writer side. */
+long long
+ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto)
+{
+ long long version;
+
+ atomic_read_relaxed(&ofproto->tables_version, &version);
+
+ return version;
+}
+
/* The returned rule (if any) is valid at least until the next RCU quiescent
* period. If the rule needs to stay around longer, a non-zero 'take_ref'
* must be passed in to cause a reference to be taken on it.
* 'flow' is non-const to allow for temporary modifications during the lookup.
* Any changes are restored before returning. */
static struct rule_dpif *
-rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, uint8_t table_id,
- struct flow *flow, struct flow_wildcards *wc,
- bool take_ref)
+rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, long long version,
+ uint8_t table_id, struct flow *flow,
+ struct flow_wildcards *wc, bool take_ref)
{
struct classifier *cls = &ofproto->up.tables[table_id].cls;
const struct cls_rule *cls_rule;
struct rule_dpif *rule;
do {
- cls_rule = classifier_lookup(cls, CLS_MAX_VERSION, flow, wc);
+ cls_rule = classifier_lookup(cls, version, flow, wc);
rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
return rule;
}
-/* Look up 'flow' in 'ofproto''s classifier starting from table '*table_id'.
- * Returns the rule that was found, which may be one of the special rules
- * according to packet miss hadling. If 'may_packet_in' is false, returning of
- * the miss_rule (which issues packet ins for the controller) is avoided.
- * Updates 'wc', if nonnull, to reflect the fields that were used during the
- * lookup.
+/* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
+ * table '*table_id'. Returns the rule that was found, which may be one of the
+ * special rules according to packet miss handling. If 'may_packet_in' is
+ * false, returning of the miss_rule (which issues packet ins for the
+ * controller) is avoided. Updates 'wc', if nonnull, to reflect the fields
+ * that were used during the lookup.
*
* If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
* if none is found then the table miss configuration for that table is
* 'flow' is non-const to allow for temporary modifications during the lookup.
* Any changes are restored before returning. */
struct rule_dpif *
-rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, struct flow *flow,
- struct flow_wildcards *wc, bool take_ref,
- const struct dpif_flow_stats *stats,
+rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto, long long version,
+ struct flow *flow, struct flow_wildcards *wc,
+ bool take_ref, const struct dpif_flow_stats *stats,
uint8_t *table_id, ofp_port_t in_port,
bool may_packet_in, bool honor_table_miss)
{
next_id++, next_id += (next_id == TBL_INTERNAL))
{
*table_id = next_id;
- rule = rule_dpif_lookup_in_table(ofproto, next_id, flow, wc, take_ref);
+ rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc,
+ take_ref);
if (stats) {
struct oftable *tbl = &ofproto->up.tables[next_id];
unsigned long orig;
rule->stats.n_bytes = 0;
rule->stats.used = rule->up.modified;
rule->recirc_id = 0;
+ rule->new_rule = NULL;
return 0;
}
-static enum ofperr
-rule_insert(struct rule *rule_)
+/* Rule lifecycle callback: called when 'rule_' has been inserted, possibly
+ * replacing 'old_rule_'.  If there is an old rule and 'forward_stats' is
+ * true, the old rule's accumulated stats are transferred to the new rule and
+ * all future stats updates on the old rule are forwarded to the new rule via
+ * its 'new_rule' pointer; a reference is taken to keep the new rule alive
+ * until the old rule is destructed. */
+static void
+rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_stats)
OVS_REQUIRES(ofproto_mutex)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
+
+ if (old_rule_ && forward_stats) {
+ struct rule_dpif *old_rule = rule_dpif_cast(old_rule_);
+
+ /* Stats forwarding chains of length > 1 are not supported. */
+ ovs_assert(!old_rule->new_rule);
+
+ /* Take a reference to the new rule, and refer all stats updates from
+ * the old rule to the new rule. */
+ rule_dpif_ref(rule);
+
+ /* Lock order: the old rule's stats_mutex before the new rule's,
+ * matching the documented "'rule->stats_mutex' before
+ * 'rule->new_rule->stats_mutex'" ordering. */
+ ovs_mutex_lock(&old_rule->stats_mutex);
+ ovs_mutex_lock(&rule->stats_mutex);
+ old_rule->new_rule = rule; /* Forward future stats. */
+ rule->stats = old_rule->stats; /* Transfer stats to the new rule. */
+ ovs_mutex_unlock(&rule->stats_mutex);
+ ovs_mutex_unlock(&old_rule->stats_mutex);
+ }
+
complete_operation(rule);
- return 0;
}
static void
static void
rule_destruct(struct rule *rule_)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_destroy(&rule->stats_mutex);
+ /* Release reference to the new rule, if any. */
+ if (rule->new_rule) {
+ rule_dpif_unref(rule->new_rule);
+ }
if (rule->recirc_id) {
recirc_free_id(rule->recirc_id);
}
struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_lock(&rule->stats_mutex);
- *packets = rule->stats.n_packets;
- *bytes = rule->stats.n_bytes;
- *used = rule->stats.used;
+ if (OVS_UNLIKELY(rule->new_rule)) {
+ rule_get_stats(&rule->new_rule->up, packets, bytes, used);
+ } else {
+ *packets = rule->stats.n_packets;
+ *bytes = rule->stats.n_bytes;
+ *used = rule->stats.used;
+ }
ovs_mutex_unlock(&rule->stats_mutex);
}
return 0;
}
-static void
-rule_modify_actions(struct rule *rule_, bool reset_counters)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
-
- if (reset_counters) {
- ovs_mutex_lock(&rule->stats_mutex);
- rule->stats.n_packets = 0;
- rule->stats.n_bytes = 0;
- ovs_mutex_unlock(&rule->stats_mutex);
- }
-
- complete_operation(rule);
-}
-
static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
{
return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
return error;
}
- rule = rule_dpif_lookup_in_table(ofproto, TBL_INTERNAL, &fm.match.flow,
+ rule = rule_dpif_lookup_in_table(ofproto,
+ ofproto_dpif_get_tables_version(ofproto),
+ TBL_INTERNAL, &fm.match.flow,
&fm.match.wc, false);
if (rule) {
*rulep = &rule->up;
type_get_memory_usage,
flush,
query_tables,
+ set_tables_version,
port_alloc,
port_construct,
port_destruct,
rule_dealloc,
rule_get_stats,
rule_execute,
- NULL, /* rule_premodify_actions */
- rule_modify_actions,
set_frag_handling,
packet_out,
set_netflow,