/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
* Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
#include "bitmap.h"
+#include "bundles.h"
#include "byte-order.h"
#include "classifier.h"
+#include "connectivity.h"
#include "connmgr.h"
#include "coverage.h"
#include "dynamic-string.h"
#include "ofproto-provider.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
+#include "ovs-rcu.h"
#include "packets.h"
#include "pinsched.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "random.h"
+#include "seq.h"
#include "shash.h"
#include "simap.h"
#include "smap.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(ofproto);
COVERAGE_DEFINE(ofproto_reinit_ports);
COVERAGE_DEFINE(ofproto_update_port);
-enum ofproto_state {
- S_OPENFLOW, /* Processing OpenFlow commands. */
- S_EVICT, /* Evicting flows from over-limit tables. */
- S_FLUSH, /* Deleting all flow table rules. */
-};
-
-enum ofoperation_type {
- OFOPERATION_ADD,
- OFOPERATION_DELETE,
- OFOPERATION_MODIFY,
- OFOPERATION_REPLACE
-};
-
-/* A single OpenFlow request can execute any number of operations. The
- * ofopgroup maintain OpenFlow state common to all of the operations, e.g. the
- * ofconn to which an error reply should be sent if necessary.
- *
- * ofproto initiates some operations internally. These operations are still
- * assigned to groups but will not have an associated ofconn. */
-struct ofopgroup {
- struct ofproto *ofproto; /* Owning ofproto. */
- struct list ofproto_node; /* In ofproto's "pending" list. */
- struct list ops; /* List of "struct ofoperation"s. */
- int n_running; /* Number of ops still pending. */
-
- /* Data needed to send OpenFlow reply on failure or to send a buffered
- * packet on success.
- *
- * If list_is_empty(ofconn_node) then this ofopgroup never had an
- * associated ofconn or its ofconn's connection dropped after it initiated
- * the operation. In the latter case 'ofconn' is a wild pointer that
- * refers to freed memory, so the 'ofconn' member must be used only if
- * !list_is_empty(ofconn_node).
- */
- struct list ofconn_node; /* In ofconn's list of pending opgroups. */
- struct ofconn *ofconn; /* ofconn for reply (but see note above). */
- struct ofp_header *request; /* Original request (truncated at 64 bytes). */
- uint32_t buffer_id; /* Buffer id from original request. */
-};
-
-static struct ofopgroup *ofopgroup_create_unattached(struct ofproto *);
-static struct ofopgroup *ofopgroup_create(struct ofproto *, struct ofconn *,
- const struct ofp_header *,
- uint32_t buffer_id);
-static void ofopgroup_submit(struct ofopgroup *);
-static void ofopgroup_complete(struct ofopgroup *);
-
-/* A single flow table operation. */
-struct ofoperation {
- struct ofopgroup *group; /* Owning group. */
- struct list group_node; /* In ofopgroup's "ops" list. */
- struct hmap_node hmap_node; /* In ofproto's "deletions" hmap. */
- struct rule *rule; /* Rule being operated upon. */
- enum ofoperation_type type; /* Type of operation. */
-
- /* OFOPERATION_MODIFY, OFOPERATION_REPLACE: The old actions, if the actions
- * are changing. */
- struct rule_actions *actions;
-
- /* OFOPERATION_DELETE. */
- enum ofp_flow_removed_reason reason; /* Reason flow was removed. */
-
- ovs_be64 flow_cookie; /* Rule's old flow cookie. */
- uint16_t idle_timeout; /* Rule's old idle timeout. */
- uint16_t hard_timeout; /* Rule's old hard timeout. */
- enum ofputil_flow_mod_flags flags; /* Rule's old flags. */
- enum ofperr error; /* 0 if no error. */
-};
-
-static struct ofoperation *ofoperation_create(struct ofopgroup *,
- struct rule *,
- enum ofoperation_type,
- enum ofp_flow_removed_reason);
-static void ofoperation_destroy(struct ofoperation *);
+/* Default fields to use for prefix tries in each flow table, unless something
+ * else is configured. */
+const enum mf_field_id default_prefix_fields[2] =
+ { MFF_IPV4_DST, MFF_IPV4_SRC };
/* oftable. */
static void oftable_init(struct oftable *);
static void oftable_set_name(struct oftable *, const char *name);
+static enum ofperr evict_rules_from_table(struct oftable *,
+ unsigned int extra_space)
+ OVS_REQUIRES(ofproto_mutex);
static void oftable_disable_eviction(struct oftable *);
static void oftable_enable_eviction(struct oftable *,
const struct mf_subfield *fields,
static void oftable_remove_rule(struct rule *rule) OVS_REQUIRES(ofproto_mutex);
static void oftable_remove_rule__(struct ofproto *, struct rule *)
OVS_REQUIRES(ofproto_mutex);
-static void oftable_insert_rule(struct rule *);
/* A set of rules within a single OpenFlow table (oftable) that have the same
* values for the oftable's eviction_fields. A rule to be evicted, when one is
struct heap rules; /* Contains "struct rule"s. */
};
-static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep);
-static void ofproto_evict(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
-static uint32_t rule_eviction_priority(struct rule *);
-static void eviction_group_add_rule(struct rule *);
-static void eviction_group_remove_rule(struct rule *);
+static bool choose_rule_to_evict(struct oftable *table, struct rule **rulep)
+    OVS_REQUIRES(ofproto_mutex);
+static uint32_t rule_eviction_priority(struct ofproto *ofproto, struct rule *)
+    OVS_REQUIRES(ofproto_mutex);
+static void eviction_group_add_rule(struct rule *)
+    OVS_REQUIRES(ofproto_mutex);
+static void eviction_group_remove_rule(struct rule *)
+    OVS_REQUIRES(ofproto_mutex);
/* Criteria that flow_mod and other operations use for selecting rules on
* which to operate. */
* If out_group != OFPG_ALL, select only rules that output to out_group. */
ofp_port_t out_port;
uint32_t out_group;
+
+    /* Whether to also collect rules in hidden tables and read-only rules. */
+ bool include_hidden;
+ bool include_readonly;
};
static void rule_criteria_init(struct rule_criteria *, uint8_t table_id,
unsigned int priority,
ovs_be64 cookie, ovs_be64 cookie_mask,
ofp_port_t out_port, uint32_t out_group);
+static void rule_criteria_require_rw(struct rule_criteria *,
+ bool can_write_readonly);
static void rule_criteria_destroy(struct rule_criteria *);
+static enum ofperr collect_rules_loose(struct ofproto *,
+ const struct rule_criteria *,
+ struct rule_collection *);
+
/* A packet that needs to be passed to rule_execute().
*
* (We can't do this immediately from ofopgroup_complete() because that holds
static void run_rule_executes(struct ofproto *) OVS_EXCLUDED(ofproto_mutex);
static void destroy_rule_executes(struct ofproto *);
+struct learned_cookie {
+ union {
+ /* In struct ofproto's 'learned_cookies' hmap. */
+ struct hmap_node hmap_node OVS_GUARDED_BY(ofproto_mutex);
+
+ /* In 'dead_cookies' list when removed from hmap. */
+ struct list list_node;
+ } u;
+
+ /* Key. */
+ ovs_be64 cookie OVS_GUARDED_BY(ofproto_mutex);
+ uint8_t table_id OVS_GUARDED_BY(ofproto_mutex);
+
+ /* Number of references from "learn" actions.
+ *
+ * When this drops to 0, all of the flows in 'table_id' with the specified
+ * 'cookie' are deleted. */
+ int n OVS_GUARDED_BY(ofproto_mutex);
+};
+
+static const struct ofpact_learn *next_learn_with_delete(
+ const struct rule_actions *, const struct ofpact_learn *start);
+
+static void learned_cookies_inc(struct ofproto *, const struct rule_actions *)
+ OVS_REQUIRES(ofproto_mutex);
+static void learned_cookies_dec(struct ofproto *, const struct rule_actions *,
+ struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex);
+static void learned_cookies_flush(struct ofproto *, struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex);
+
/* ofport. */
static void ofport_destroy__(struct ofport *) OVS_EXCLUDED(ofproto_mutex);
static void ofport_destroy(struct ofport *);
};
/* rule. */
-static void ofproto_rule_destroy__(struct rule *);
static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
-static bool rule_is_modifiable(const struct rule *);
+static bool rule_is_readonly(const struct rule *);
+
+/* The source of a flow_mod request, in the code that processes flow_mods.
+ *
+ * A flow table modification request can be generated externally, via OpenFlow,
+ * or internally through a function call. This structure indicates the source
+ * of an OpenFlow-generated flow_mod. For an internal flow_mod, it isn't
+ * meaningful and thus supplied as NULL. */
+struct flow_mod_requester {
+ struct ofconn *ofconn; /* Connection on which flow_mod arrived. */
+ ovs_be32 xid; /* OpenFlow xid of flow_mod request. */
+};
/* OpenFlow. */
-static enum ofperr add_flow(struct ofproto *, struct ofconn *,
- struct ofputil_flow_mod *,
- const struct ofp_header *);
-static enum ofperr modify_flows__(struct ofproto *, struct ofconn *,
- struct ofputil_flow_mod *,
- const struct ofp_header *,
- const struct rule_collection *);
-static void delete_flow__(struct rule *rule, struct ofopgroup *,
- enum ofp_flow_removed_reason)
+static enum ofperr add_flow(struct ofproto *, struct ofputil_flow_mod *,
+ const struct flow_mod_requester *);
+
+static enum ofperr modify_flows__(struct ofproto *, struct ofputil_flow_mod *,
+ const struct rule_collection *,
+ const struct flow_mod_requester *);
+static void delete_flows__(const struct rule_collection *,
+ enum ofp_flow_removed_reason,
+ const struct flow_mod_requester *)
+ OVS_REQUIRES(ofproto_mutex);
+
+static enum ofperr send_buffered_packet(struct ofconn *, uint32_t buffer_id,
+ struct rule *)
OVS_REQUIRES(ofproto_mutex);
+
static bool ofproto_group_exists__(const struct ofproto *ofproto,
uint32_t group_id)
OVS_REQ_RDLOCK(ofproto->groups_rwlock);
uint32_t group_id)
OVS_EXCLUDED(ofproto->groups_rwlock);
static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
-static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
-static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
+static void handle_openflow(struct ofconn *, const struct ofpbuf *);
+static enum ofperr handle_flow_mod__(struct ofproto *,
struct ofputil_flow_mod *,
- const struct ofp_header *)
+ const struct flow_mod_requester *)
OVS_EXCLUDED(ofproto_mutex);
static void calc_duration(long long int start, long long int now,
uint32_t *sec, uint32_t *nsec);
static void ofproto_destroy__(struct ofproto *);
static void update_mtu(struct ofproto *, struct ofport *);
static void meter_delete(struct ofproto *, uint32_t first, uint32_t last);
+static void meter_insert_rule(struct rule *);
/* unixctl. */
static void ofproto_unixctl_init(void);
/* Global lock that protects all flow table operations. */
struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
-unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
-unsigned n_handler_threads;
-enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
+unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
+unsigned ofproto_max_idle = OFPROTO_MAX_IDLE_DEFAULT;
+
+size_t n_handlers, n_revalidators;
/* Map from datapath name to struct ofproto, for use by unixctl commands. */
static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
{
size_t i;
+ sset_clear(types);
for (i = 0; i < n_ofproto_classes; i++) {
ofproto_classes[i]->enumerate_types(types);
}
ofproto->tables = NULL;
ofproto->n_tables = 0;
hindex_init(&ofproto->cookies);
+ hmap_init(&ofproto->learned_cookies);
list_init(&ofproto->expirable);
ofproto->connmgr = connmgr_create(ofproto, datapath_name, datapath_name);
- ofproto->state = S_OPENFLOW;
- list_init(&ofproto->pending);
- ofproto->n_pending = 0;
- hmap_init(&ofproto->deletions);
guarded_list_init(&ofproto->rule_executes);
- ofproto->n_add = ofproto->n_delete = ofproto->n_modify = 0;
- ofproto->first_op = ofproto->last_op = LLONG_MIN;
- ofproto->next_op_report = LLONG_MAX;
- ofproto->op_backoff = LLONG_MIN;
ofproto->vlan_bitmap = NULL;
ofproto->vlans_changed = false;
ofproto->min_mtu = INT_MAX;
/* Sets the number of flows at which eviction from the kernel flow table
* will occur. */
void
-ofproto_set_flow_eviction_threshold(unsigned threshold)
+ofproto_set_flow_limit(unsigned limit)
{
- flow_eviction_threshold = MAX(OFPROTO_FLOW_EVICTION_THRESHOLD_MIN,
- threshold);
+ ofproto_flow_limit = limit;
}
-/* Sets the path for handling flow misses. */
+/* Sets the maximum idle time for flows in the datapath before they are
+ * expired. */
void
-ofproto_set_flow_miss_model(unsigned model)
+ofproto_set_max_idle(unsigned max_idle)
{
- flow_miss_model = model;
+ ofproto_max_idle = max_idle;
}
/* If forward_bpdu is true, the NORMAL action will forward frames with
}
}
-/* Sets number of upcall handler threads. The default is
- * (number of online cores - 2). */
void
-ofproto_set_n_handler_threads(unsigned limit)
+ofproto_set_threads(int n_handlers_, int n_revalidators_)
{
- if (limit) {
- n_handler_threads = limit;
- } else {
- int n_proc = count_cpu_cores();
- n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
+ int threads = MAX(count_cpu_cores(), 2);
+
+ n_revalidators = MAX(n_revalidators_, 0);
+ n_handlers = MAX(n_handlers_, 0);
+
+ if (!n_revalidators) {
+ n_revalidators = n_handlers
+ ? MAX(threads - (int) n_handlers, 1)
+ : threads / 4 + 1;
+ }
+
+ if (!n_handlers) {
+ n_handlers = MAX(threads - (int) n_revalidators, 1);
}
}
}
}
-/* Populates 'status' with key value pairs indicating the status of the BFD
- * session on 'ofp_port'. This information is intended to be populated in the
- * OVS database. Has no effect if 'ofp_port' is not na OpenFlow port in
- * 'ofproto'. */
+/* Populates 'status' with the status of BFD on 'ofport'. Returns 0 on
+ * success. Returns a negative number if there is no status change since
+ * last update. Returns a positive errno otherwise. Has no effect if
+ * 'ofp_port' is not an OpenFlow port in 'ofproto'.
+ *
+ * The caller must provide and own '*status'. */
int
ofproto_port_get_bfd_status(struct ofproto *ofproto, ofp_port_t ofp_port,
struct smap *status)
return ofproto->n_tables;
}
+/* Returns the number of controller-visible OpenFlow tables
+ * in 'ofproto'.  This number excludes hidden tables.
+ * This function's return value is less than or equal to that of
+ * ofproto_get_n_tables(). */
+uint8_t
+ofproto_get_n_visible_tables(const struct ofproto *ofproto)
+{
+    uint8_t n = ofproto->n_tables;
+
+    /* Count only non-hidden tables in the number of tables.  (Hidden tables,
+     * if present, are always at the end.) */
+    while (n && (ofproto->tables[n - 1].flags & OFTABLE_HIDDEN)) {
+        n--;
+    }
+
+    return n;
+}
+
/* Configures the OpenFlow table in 'ofproto' with id 'table_id' with the
* settings from 's'. 'table_id' must be in the range 0 through the number of
* OpenFlow tables in 'ofproto' minus 1, inclusive.
}
table->max_flows = s->max_flows;
- ovs_rwlock_rdlock(&table->cls.rwlock);
- if (classifier_count(&table->cls) > table->max_flows
- && table->eviction_fields) {
- /* 'table' contains more flows than allowed. We might not be able to
- * evict them right away because of the asynchronous nature of flow
- * table changes. Schedule eviction for later. */
- switch (ofproto->state) {
- case S_OPENFLOW:
- ofproto->state = S_EVICT;
- break;
- case S_EVICT:
- case S_FLUSH:
- /* We're already deleting flows, nothing more to do. */
- break;
- }
- }
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_wrlock(&table->cls.rwlock);
+ classifier_set_prefix_fields(&table->cls,
+ s->prefix_fields, s->n_prefix_fields);
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ ovs_mutex_lock(&ofproto_mutex);
+ evict_rules_from_table(table, 0);
+ ovs_mutex_unlock(&ofproto_mutex);
}
\f
bool
}
static void
-ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule,
- uint8_t reason)
+ofproto_rule_delete__(struct rule *rule, uint8_t reason)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofopgroup *group;
-
- ovs_assert(!rule->pending);
+ struct rule_collection rules;
- group = ofopgroup_create_unattached(ofproto);
- delete_flow__(rule, group, reason);
- ofopgroup_submit(group);
+ rules.rules = rules.stub;
+ rules.n = 1;
+ rules.stub[0] = rule;
+ delete_flows__(&rules, reason, NULL);
}
-/* Deletes 'rule' from 'cls' within 'ofproto'.
+/* Deletes 'rule' from 'ofproto'.
*
* Within an ofproto implementation, this function allows an ofproto
* implementation to destroy any rules that remain when its ->destruct()
ofproto_rule_delete(struct ofproto *ofproto, struct rule *rule)
OVS_EXCLUDED(ofproto_mutex)
{
- struct ofopgroup *group;
-
+ /* This skips the ofmonitor and flow-removed notifications because the
+ * switch is being deleted and any OpenFlow channels have been or soon will
+ * be killed. */
ovs_mutex_lock(&ofproto_mutex);
- ovs_assert(!rule->pending);
-
- group = ofopgroup_create_unattached(ofproto);
- ofoperation_create(group, rule, OFOPERATION_DELETE, OFPRR_DELETE);
oftable_remove_rule__(ofproto, rule);
ofproto->ofproto_class->rule_delete(rule);
- ofopgroup_submit(group);
-
ovs_mutex_unlock(&ofproto_mutex);
}
continue;
}
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
- if (!rule->pending) {
- ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
- }
+ ofproto_rule_delete__(rule, OFPRR_DELETE);
}
}
ovs_mutex_unlock(&ofproto_mutex);
{
struct oftable *table;
- ovs_assert(list_is_empty(&ofproto->pending));
-
destroy_rule_executes(ofproto);
- guarded_list_destroy(&ofproto->rule_executes);
-
delete_group(ofproto, OFPG_ALL);
+
+ guarded_list_destroy(&ofproto->rule_executes);
ovs_rwlock_destroy(&ofproto->groups_rwlock);
hmap_destroy(&ofproto->groups);
}
free(ofproto->tables);
- hmap_destroy(&ofproto->deletions);
+ ovs_assert(hindex_is_empty(&ofproto->cookies));
+ hindex_destroy(&ofproto->cookies);
+
+ ovs_assert(hmap_is_empty(&ofproto->learned_cookies));
+ hmap_destroy(&ofproto->learned_cookies);
free(ofproto->vlan_bitmap);
}
p->ofproto_class->destruct(p);
- ofproto_destroy__(p);
+ /* Destroying rules is deferred, must have 'ofproto' around for them. */
+ ovsrcu_postpone(ofproto_destroy__, p);
}
/* Destroys the datapath with the respective 'name' and 'type'. With the Linux
return error;
}
-int
-ofproto_type_run_fast(const char *datapath_type)
-{
- const struct ofproto_class *class;
- int error;
-
- datapath_type = ofproto_normalize_type(datapath_type);
- class = ofproto_class_find__(datapath_type);
-
- error = class->type_run_fast ? class->type_run_fast(datapath_type) : 0;
- if (error && error != EAGAIN) {
- VLOG_ERR_RL(&rl, "%s: type_run_fast failed (%s)",
- datapath_type, ovs_strerror(error));
- }
- return error;
-}
-
void
ofproto_type_wait(const char *datapath_type)
{
}
}
-static bool
-any_pending_ops(const struct ofproto *p)
- OVS_EXCLUDED(ofproto_mutex)
-{
- bool b;
-
- ovs_mutex_lock(&ofproto_mutex);
- b = !list_is_empty(&p->pending);
- ovs_mutex_unlock(&ofproto_mutex);
-
- return b;
-}
-
int
ofproto_run(struct ofproto *p)
{
- struct sset changed_netdevs;
- const char *changed_netdev;
- struct ofport *ofport;
int error;
+ uint64_t new_seq;
error = p->ofproto_class->run(p);
if (error && error != EAGAIN) {
}
ovs_mutex_lock(&ofproto_mutex);
- HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
- heap_rebuild(&evg->rules);
- }
-
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
- if (!rule->eviction_group
- && (rule->idle_timeout || rule->hard_timeout)) {
- eviction_group_add_rule(rule);
+ if (rule->idle_timeout || rule->hard_timeout) {
+ if (!rule->eviction_group) {
+ eviction_group_add_rule(rule);
+ } else {
+ heap_raw_change(&rule->evg_node,
+ rule_eviction_priority(p, rule));
+ }
}
}
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ HEAP_FOR_EACH (evg, size_node, &table->eviction_groups_by_size) {
+ heap_rebuild(&evg->rules);
+ }
ovs_mutex_unlock(&ofproto_mutex);
}
}
}
}
- /* Update OpenFlow port status for any port whose netdev has changed.
- *
- * Refreshing a given 'ofport' can cause an arbitrary ofport to be
- * destroyed, so it's not safe to update ports directly from the
- * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
- * need this two-phase approach. */
- sset_init(&changed_netdevs);
- HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
- unsigned int change_seq = netdev_change_seq(ofport->netdev);
- if (ofport->change_seq != change_seq) {
- ofport->change_seq = change_seq;
- sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
- }
- }
- SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
- update_port(p, changed_netdev);
- }
- sset_destroy(&changed_netdevs);
-
- switch (p->state) {
- case S_OPENFLOW:
- connmgr_run(p->connmgr, handle_openflow);
- break;
-
- case S_EVICT:
- connmgr_run(p->connmgr, NULL);
- ofproto_evict(p);
- if (!any_pending_ops(p)) {
- p->state = S_OPENFLOW;
- }
- break;
-
- case S_FLUSH:
- connmgr_run(p->connmgr, NULL);
- ofproto_flush__(p);
- if (!any_pending_ops(p)) {
- connmgr_flushed(p->connmgr);
- p->state = S_OPENFLOW;
- }
- break;
-
- default:
- NOT_REACHED();
- }
-
- if (time_msec() >= p->next_op_report) {
- long long int ago = (time_msec() - p->first_op) / 1000;
- long long int interval = (p->last_op - p->first_op) / 1000;
- struct ds s;
-
- ds_init(&s);
- ds_put_format(&s, "%d flow_mods ",
- p->n_add + p->n_delete + p->n_modify);
- if (interval == ago) {
- ds_put_format(&s, "in the last %lld s", ago);
- } else if (interval) {
- ds_put_format(&s, "in the %lld s starting %lld s ago",
- interval, ago);
- } else {
- ds_put_format(&s, "%lld s ago", ago);
- }
+ new_seq = seq_read(connectivity_seq_get());
+ if (new_seq != p->change_seq) {
+ struct sset devnames;
+ const char *devname;
+ struct ofport *ofport;
- ds_put_cstr(&s, " (");
- if (p->n_add) {
- ds_put_format(&s, "%d adds, ", p->n_add);
- }
- if (p->n_delete) {
- ds_put_format(&s, "%d deletes, ", p->n_delete);
+ /* Update OpenFlow port status for any port whose netdev has changed.
+ *
+ * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+ * destroyed, so it's not safe to update ports directly from the
+ * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+ * need this two-phase approach. */
+ sset_init(&devnames);
+ HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
+ uint64_t port_change_seq;
+
+ port_change_seq = netdev_get_change_seq(ofport->netdev);
+ if (ofport->change_seq != port_change_seq) {
+ ofport->change_seq = port_change_seq;
+ sset_add(&devnames, netdev_get_name(ofport->netdev));
+ }
}
- if (p->n_modify) {
- ds_put_format(&s, "%d modifications, ", p->n_modify);
+ SSET_FOR_EACH (devname, &devnames) {
+ update_port(p, devname);
}
- s.length -= 2;
- ds_put_char(&s, ')');
+ sset_destroy(&devnames);
- VLOG_INFO("%s: %s", p->name, ds_cstr(&s));
- ds_destroy(&s);
-
- p->n_add = p->n_delete = p->n_modify = 0;
- p->next_op_report = LLONG_MAX;
+ p->change_seq = new_seq;
}
- return error;
-}
-
-/* Performs periodic activity required by 'ofproto' that needs to be done
- * with the least possible latency.
- *
- * It makes sense to call this function a couple of times per poll loop, to
- * provide a significant performance boost on some benchmarks with the
- * ofproto-dpif implementation. */
-int
-ofproto_run_fast(struct ofproto *p)
-{
- int error;
+ connmgr_run(p->connmgr, handle_openflow);
- error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0;
- if (error && error != EAGAIN) {
- VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)",
- p->name, ovs_strerror(error));
- }
return error;
}
void
ofproto_wait(struct ofproto *p)
{
- struct ofport *ofport;
-
p->ofproto_class->wait(p);
if (p->ofproto_class->port_poll_wait) {
p->ofproto_class->port_poll_wait(p);
}
-
- HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
- if (ofport->change_seq != netdev_change_seq(ofport->netdev)) {
- poll_immediate_wake();
- }
- }
-
- switch (p->state) {
- case S_OPENFLOW:
- connmgr_wait(p->connmgr, true);
- break;
-
- case S_EVICT:
- case S_FLUSH:
- connmgr_wait(p->connmgr, false);
- if (!any_pending_ops(p)) {
- poll_immediate_wake();
- }
- break;
- }
+ seq_wait(connectivity_seq_get(), p->change_seq);
+ connmgr_wait(p->connmgr);
}
bool
simap_increase(usage, "ports", hmap_count(&ofproto->ports));
- ovs_mutex_lock(&ofproto_mutex);
- simap_increase(usage, "ops",
- ofproto->n_pending + hmap_count(&ofproto->deletions));
- ovs_mutex_unlock(&ofproto_mutex);
-
n_rules = 0;
OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
n_rules += classifier_count(&table->cls);
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
}
simap_increase(usage, "rules", n_rules);
connmgr_get_memory_usage(ofproto->connmgr, usage);
}
+void
+ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage)
+{
+ const struct ofproto_class *class;
+
+ datapath_type = ofproto_normalize_type(datapath_type);
+ class = ofproto_class_find__(datapath_type);
+
+ if (class && class->type_get_memory_usage) {
+ class->type_get_memory_usage(datapath_type, usage);
+ }
+}
+
void
ofproto_get_ofproto_controller_info(const struct ofproto *ofproto,
struct shash *info)
fm->flags = 0;
fm->ofpacts = CONST_CAST(struct ofpact *, ofpacts);
fm->ofpacts_len = ofpacts_len;
+ fm->delete_reason = OFPRR_DELETE;
}
static int
flow_mod_init(&fm, match, priority, ofpacts, ofpacts_len, command);
- return handle_flow_mod__(ofproto, NULL, &fm, NULL);
+ return handle_flow_mod__(ofproto, &fm, NULL);
}
/* Adds a flow to OpenFlow flow table 0 in 'p' that matches 'cls_rule' and
/* First do a cheap check whether the rule we're looking for already exists
* with the actions that we want. If it does, then we're done. */
- ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
+ fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(
&ofproto->tables[0].cls, match, priority));
if (rule) {
- ovs_mutex_lock(&rule->mutex);
- must_add = !ofpacts_equal(rule->actions->ofpacts,
- rule->actions->ofpacts_len,
+ const struct rule_actions *actions = rule_get_actions(rule);
+ must_add = !ofpacts_equal(actions->ofpacts, actions->ofpacts_len,
ofpacts, ofpacts_len);
- ovs_mutex_unlock(&rule->mutex);
} else {
must_add = true;
}
- ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
+ fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
/* If there's no such rule or the rule doesn't have the actions we want,
* fall back to a executing a full flow mod. We can't optimize this at
ofproto_flow_mod(struct ofproto *ofproto, struct ofputil_flow_mod *fm)
OVS_EXCLUDED(ofproto_mutex)
{
- return handle_flow_mod__(ofproto, NULL, fm, NULL);
+ /* Optimize for the most common case of a repeated learn action.
+ * If an identical flow already exists we only need to update its
+ * 'modified' time. */
+ if (fm->command == OFPFC_MODIFY_STRICT && fm->table_id != OFPTT_ALL
+ && !(fm->flags & OFPUTIL_FF_RESET_COUNTS)) {
+ struct oftable *table = &ofproto->tables[fm->table_id];
+ struct rule *rule;
+ bool done = false;
+
+ fat_rwlock_rdlock(&table->cls.rwlock);
+ rule = rule_from_cls_rule(classifier_find_match_exactly(&table->cls,
+ &fm->match,
+ fm->priority));
+ if (rule) {
+ /* Reading many of the rule fields and writing on 'modified'
+ * requires the rule->mutex. Also, rule->actions may change
+ * if rule->mutex is not held. */
+ const struct rule_actions *actions;
+
+ ovs_mutex_lock(&rule->mutex);
+ actions = rule_get_actions(rule);
+ if (rule->idle_timeout == fm->idle_timeout
+ && rule->hard_timeout == fm->hard_timeout
+ && rule->flags == (fm->flags & OFPUTIL_FF_STATE)
+ && (!fm->modify_cookie || (fm->new_cookie == rule->flow_cookie))
+ && ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
+ actions->ofpacts, actions->ofpacts_len)) {
+ /* Rule already exists and need not change, only update the
+ modified timestamp. */
+ rule->modified = time_msec();
+ done = true;
+ }
+ ovs_mutex_unlock(&rule->mutex);
+ }
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ if (done) {
+ return 0;
+ }
+ }
+
+ return handle_flow_mod__(ofproto, fm, NULL);
}
/* Searches for a rule with matching criteria exactly equal to 'target' in
* ofproto's table 0 and, if it finds one, deletes it.
*
* This is a helper function for in-band control and fail-open. */
-bool
+void
ofproto_delete_flow(struct ofproto *ofproto,
const struct match *target, unsigned int priority)
OVS_EXCLUDED(ofproto_mutex)
/* First do a cheap check whether the rule we're looking for has already
* been deleted. If so, then we're done. */
- ovs_rwlock_rdlock(&cls->rwlock);
+ fat_rwlock_rdlock(&cls->rwlock);
rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
priority));
- ovs_rwlock_unlock(&cls->rwlock);
+ fat_rwlock_unlock(&cls->rwlock);
if (!rule) {
- return true;
+ return;
}
- /* Fall back to a executing a full flow mod. We can't optimize this at all
- * because we didn't take enough locks above to ensure that the flow table
- * didn't already change beneath us. */
- return simple_flow_mod(ofproto, target, priority, NULL, 0,
- OFPFC_DELETE_STRICT) != OFPROTO_POSTPONE;
+ /* Execute a flow mod. We can't optimize this at all because we didn't
+ * take enough locks above to ensure that the flow table didn't already
+ * change beneath us. */
+ simple_flow_mod(ofproto, target, priority, NULL, 0, OFPFC_DELETE_STRICT);
}
-/* Starts the process of deleting all of the flows from all of ofproto's flow
- * tables and then reintroducing the flows required by in-band control and
- * fail-open. The process will complete in a later call to ofproto_run(). */
+/* Delete all of the flows from all of ofproto's flow tables, then reintroduce
+ * the flows required by in-band control and fail-open. */
void
ofproto_flush_flows(struct ofproto *ofproto)
{
COVERAGE_INC(ofproto_flush);
- ofproto->state = S_FLUSH;
+ ofproto_flush__(ofproto);
+ connmgr_flushed(ofproto->connmgr);
}
\f
static void
/* Opens and returns a netdev for 'ofproto_port' in 'ofproto', or a null
* pointer if the netdev cannot be opened. On success, also fills in
- * 'opp'. */
+ * '*pp'. */
static struct netdev *
ofport_open(struct ofproto *ofproto,
struct ofproto_port *ofproto_port,
}
/* Returns true if most fields of 'a' and 'b' are equal. Differences in name,
- * port number, and 'config' bits other than OFPUTIL_PS_LINK_DOWN are
+ * port number, and 'config' bits other than OFPUTIL_PC_PORT_DOWN are
* disregarded. */
static bool
ofport_equal(const struct ofputil_phy_port *a,
}
ofport->ofproto = p;
ofport->netdev = netdev;
- ofport->change_seq = netdev_change_seq(netdev);
+ ofport->change_seq = netdev_get_change_seq(netdev);
ofport->pp = *pp;
ofport->ofp_port = pp->port_no;
ofport->created = time_msec();
if (error) {
goto error;
}
- connmgr_send_port_status(p->connmgr, pp, OFPPR_ADD);
+ connmgr_send_port_status(p->connmgr, NULL, pp, OFPPR_ADD);
return;
error:
static void
ofport_remove(struct ofport *ofport)
{
- connmgr_send_port_status(ofport->ofproto->connmgr, &ofport->pp,
+ connmgr_send_port_status(ofport->ofproto->connmgr, NULL, &ofport->pp,
OFPPR_DELETE);
ofport_destroy(ofport);
}
memcpy(port->pp.hw_addr, pp->hw_addr, ETH_ADDR_LEN);
port->pp.config = ((port->pp.config & ~OFPUTIL_PC_PORT_DOWN)
| (pp->config & OFPUTIL_PC_PORT_DOWN));
- port->pp.state = pp->state;
+ port->pp.state = ((port->pp.state & ~OFPUTIL_PS_LINK_DOWN)
+ | (pp->state & OFPUTIL_PS_LINK_DOWN));
port->pp.curr = pp->curr;
port->pp.advertised = pp->advertised;
port->pp.supported = pp->supported;
port->pp.curr_speed = pp->curr_speed;
port->pp.max_speed = pp->max_speed;
- connmgr_send_port_status(port->ofproto->connmgr, &port->pp, OFPPR_MODIFY);
+ connmgr_send_port_status(port->ofproto->connmgr, NULL,
+ &port->pp, OFPPR_MODIFY);
}
/* Update OpenFlow 'state' in 'port' and notify controller. */
{
if (port->pp.state != state) {
port->pp.state = state;
- connmgr_send_port_status(port->ofproto->connmgr, &port->pp,
- OFPPR_MODIFY);
+ connmgr_send_port_status(port->ofproto->connmgr, NULL,
+ &port->pp, OFPPR_MODIFY);
}
}
* Don't close the old netdev yet in case port_modified has to
* remove a retained reference to it.*/
port->netdev = netdev;
- port->change_seq = netdev_change_seq(netdev);
+ port->change_seq = netdev_get_change_seq(netdev);
if (port->ofproto->ofproto_class->port_modified) {
port->ofproto->ofproto_class->port_modified(port);
}
}
\f
-void
-ofproto_rule_ref(struct rule *rule)
+static void
+ofproto_rule_destroy__(struct rule *rule)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
- if (rule) {
- unsigned int orig;
+ cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
+ rule_actions_destroy(rule_get_actions(rule));
+ ovs_mutex_destroy(&rule->mutex);
+ rule->ofproto->ofproto_class->rule_dealloc(rule);
+}
- atomic_add(&rule->ref_count, 1, &orig);
- ovs_assert(orig != 0);
- }
+static void
+rule_destroy_cb(struct rule *rule)
+{
+ rule->ofproto->ofproto_class->rule_destruct(rule);
+ ofproto_rule_destroy__(rule);
}
void
-ofproto_rule_unref(struct rule *rule)
+ofproto_rule_ref(struct rule *rule)
{
if (rule) {
- unsigned int orig;
-
- atomic_sub(&rule->ref_count, 1, &orig);
- if (orig == 1) {
- rule->ofproto->ofproto_class->rule_destruct(rule);
- ofproto_rule_destroy__(rule);
- } else {
- ovs_assert(orig != 0);
- }
+ ovs_refcount_ref(&rule->ref_count);
}
}
-struct rule_actions *
-rule_get_actions(const struct rule *rule)
- OVS_EXCLUDED(rule->mutex)
+/* Decrements 'rule''s ref_count and schedules 'rule' to be destroyed if the
+ * ref_count reaches 0.
+ *
+ * Use of RCU allows short term use (between RCU quiescent periods) without
+ * keeping a reference. A reference must be taken if the rule needs to
+ * stay around across the RCU quiescent periods. */
+void
+ofproto_rule_unref(struct rule *rule)
{
- struct rule_actions *actions;
-
- ovs_mutex_lock(&rule->mutex);
- actions = rule_get_actions__(rule);
- ovs_mutex_unlock(&rule->mutex);
-
- return actions;
+ if (rule && ovs_refcount_unref(&rule->ref_count) == 1) {
+ ovsrcu_postpone(rule_destroy_cb, rule);
+ }
}
-struct rule_actions *
-rule_get_actions__(const struct rule *rule)
- OVS_REQUIRES(rule->mutex)
+void
+ofproto_group_ref(struct ofgroup *group)
{
- rule_actions_ref(rule->actions);
- return rule->actions;
+ if (group) {
+ ovs_refcount_ref(&group->ref_count);
+ }
}
-static void
-ofproto_rule_destroy__(struct rule *rule)
- OVS_NO_THREAD_SAFETY_ANALYSIS
+void
+ofproto_group_unref(struct ofgroup *group)
{
- cls_rule_destroy(CONST_CAST(struct cls_rule *, &rule->cr));
- rule_actions_unref(rule->actions);
- ovs_mutex_destroy(&rule->mutex);
- rule->ofproto->ofproto_class->rule_dealloc(rule);
+ if (group && ovs_refcount_unref(&group->ref_count) == 1) {
+ group->ofproto->ofproto_class->group_destruct(group);
+ ofputil_bucket_list_destroy(&group->buckets);
+ group->ofproto->ofproto_class->group_dealloc(group);
+ }
}
static uint32_t get_provider_meter_id(const struct ofproto *,
uint32_t of_meter_id);
-/* Creates and returns a new 'struct rule_actions', with a ref_count of 1,
- * whose actions are a copy of from the 'ofpacts_len' bytes of 'ofpacts'. */
-struct rule_actions *
-rule_actions_create(const struct ofproto *ofproto,
- const struct ofpact *ofpacts, size_t ofpacts_len)
+/* Creates and returns a new 'struct rule_actions', whose actions are a copy
+ * of the 'ofpacts_len' bytes of 'ofpacts'. */
+const struct rule_actions *
+rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct rule_actions *actions;
- actions = xmalloc(sizeof *actions);
- atomic_init(&actions->ref_count, 1);
- actions->ofpacts = xmemdup(ofpacts, ofpacts_len);
+ actions = xmalloc(sizeof *actions + ofpacts_len);
actions->ofpacts_len = ofpacts_len;
- actions->provider_meter_id
- = get_provider_meter_id(ofproto,
- ofpacts_get_meter(ofpacts, ofpacts_len));
-
- return actions;
-}
+ actions->has_meter = ofpacts_get_meter(ofpacts, ofpacts_len) != 0;
+ memcpy(actions->ofpacts, ofpacts, ofpacts_len);
-/* Increments 'actions''s ref_count. */
-void
-rule_actions_ref(struct rule_actions *actions)
-{
- if (actions) {
- unsigned int orig;
+ actions->has_learn_with_delete = (next_learn_with_delete(actions, NULL)
+ != NULL);
- atomic_add(&actions->ref_count, 1, &orig);
- ovs_assert(orig != 0);
- }
+ return actions;
}
-/* Decrements 'actions''s ref_count and frees 'actions' if the ref_count
- * reaches 0. */
+/* Free the actions after the RCU quiescent period is reached. */
void
-rule_actions_unref(struct rule_actions *actions)
+rule_actions_destroy(const struct rule_actions *actions)
{
if (actions) {
- unsigned int orig;
-
- atomic_sub(&actions->ref_count, 1, &orig);
- if (orig == 1) {
- free(actions->ofpacts);
- free(actions);
- } else {
- ovs_assert(orig != 0);
- }
+ ovsrcu_postpone(free, CONST_CAST(struct rule_actions *, actions));
}
}
ofproto_rule_has_out_port(const struct rule *rule, ofp_port_t port)
OVS_REQUIRES(ofproto_mutex)
{
- return (port == OFPP_ANY
- || ofpacts_output_to_port(rule->actions->ofpacts,
- rule->actions->ofpacts_len, port));
+ if (port == OFPP_ANY) {
+ return true;
+ } else {
+ const struct rule_actions *actions = rule_get_actions(rule);
+ return ofpacts_output_to_port(actions->ofpacts,
+ actions->ofpacts_len, port);
+ }
}
/* Returns true if 'rule' has group and equals group_id. */
ofproto_rule_has_out_group(const struct rule *rule, uint32_t group_id)
OVS_REQUIRES(ofproto_mutex)
{
- return (group_id == OFPG11_ANY
- || ofpacts_output_to_group(rule->actions->ofpacts,
- rule->actions->ofpacts_len, group_id));
-}
-
-/* Returns true if a rule related to 'op' has an OpenFlow OFPAT_OUTPUT or
- * OFPAT_ENQUEUE action that outputs to 'out_port'. */
-bool
-ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (ofproto_rule_has_out_port(op->rule, out_port)) {
+ if (group_id == OFPG_ANY) {
return true;
+ } else {
+ const struct rule_actions *actions = rule_get_actions(rule);
+ return ofpacts_output_to_group(actions->ofpacts,
+ actions->ofpacts_len, group_id);
}
-
- switch (op->type) {
- case OFOPERATION_ADD:
- case OFOPERATION_DELETE:
- return false;
-
- case OFOPERATION_MODIFY:
- case OFOPERATION_REPLACE:
- return ofpacts_output_to_port(op->actions->ofpacts,
- op->actions->ofpacts_len, out_port);
- }
-
- NOT_REACHED();
}
static void
guarded_list_pop_all(&ofproto->rule_executes, &executes);
LIST_FOR_EACH_SAFE (e, next, list_node, &executes) {
- union flow_in_port in_port_;
struct flow flow;
- in_port_.ofp_port = e->in_port;
- flow_extract(e->packet, 0, 0, NULL, &in_port_, &flow);
+ flow_extract(e->packet, NULL, &flow);
+ flow.in_port.ofp_port = e->in_port;
ofproto->ofproto_class->rule_execute(e->rule, &flow, e->packet);
rule_execute_destroy(e);
}
}
-/* Returns true if 'rule' should be hidden from the controller.
- *
- * Rules with priority higher than UINT16_MAX are set up by ofproto itself
- * (e.g. by in-band control) and are intentionally hidden from the
- * controller. */
-static bool
-ofproto_rule_is_hidden(const struct rule *rule)
-{
- return rule->cr.priority > UINT16_MAX;
-}
-
-static enum oftable_flags
-rule_get_flags(const struct rule *rule)
-{
- return rule->ofproto->tables[rule->table_id].flags;
-}
-
static bool
-rule_is_modifiable(const struct rule *rule)
+rule_is_readonly(const struct rule *rule)
{
- return !(rule_get_flags(rule) & OFTABLE_READONLY);
+ const struct oftable *table = &rule->ofproto->tables[rule->table_id];
+ return (table->flags & OFTABLE_READONLY) != 0;
}
\f
-static enum ofperr
-handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
+static uint32_t
+hash_learned_cookie(ovs_be64 cookie_, uint8_t table_id)
{
- ofconn_send_reply(ofconn, make_echo_reply(oh));
- return 0;
+ uint64_t cookie = (OVS_FORCE uint64_t) cookie_;
+ return hash_3words(cookie, cookie >> 32, table_id);
}
-static enum ofperr
+static void
+learned_cookies_update_one__(struct ofproto *ofproto,
+ const struct ofpact_learn *learn,
+ int delta, struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ uint32_t hash = hash_learned_cookie(learn->cookie, learn->table_id);
+ struct learned_cookie *c;
+
+ HMAP_FOR_EACH_WITH_HASH (c, u.hmap_node, hash, &ofproto->learned_cookies) {
+ if (c->cookie == learn->cookie && c->table_id == learn->table_id) {
+ c->n += delta;
+ ovs_assert(c->n >= 0);
+
+ if (!c->n) {
+ hmap_remove(&ofproto->learned_cookies, &c->u.hmap_node);
+ list_push_back(dead_cookies, &c->u.list_node);
+ }
+
+ return;
+ }
+ }
+
+ ovs_assert(delta > 0);
+ c = xmalloc(sizeof *c);
+ hmap_insert(&ofproto->learned_cookies, &c->u.hmap_node, hash);
+ c->cookie = learn->cookie;
+ c->table_id = learn->table_id;
+ c->n = delta;
+}
+
+static const struct ofpact_learn *
+next_learn_with_delete(const struct rule_actions *actions,
+ const struct ofpact_learn *start)
+{
+ const struct ofpact *pos;
+
+ for (pos = start ? ofpact_next(&start->ofpact) : actions->ofpacts;
+ pos < ofpact_end(actions->ofpacts, actions->ofpacts_len);
+ pos = ofpact_next(pos)) {
+ if (pos->type == OFPACT_LEARN) {
+ const struct ofpact_learn *learn = ofpact_get_LEARN(pos);
+ if (learn->flags & NX_LEARN_F_DELETE_LEARNED) {
+ return learn;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static void
+learned_cookies_update__(struct ofproto *ofproto,
+ const struct rule_actions *actions,
+ int delta, struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ if (actions->has_learn_with_delete) {
+ const struct ofpact_learn *learn;
+
+ for (learn = next_learn_with_delete(actions, NULL); learn;
+ learn = next_learn_with_delete(actions, learn)) {
+ learned_cookies_update_one__(ofproto, learn, delta, dead_cookies);
+ }
+ }
+}
+
+static void
+learned_cookies_inc(struct ofproto *ofproto,
+ const struct rule_actions *actions)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ learned_cookies_update__(ofproto, actions, +1, NULL);
+}
+
+static void
+learned_cookies_dec(struct ofproto *ofproto,
+ const struct rule_actions *actions,
+ struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ learned_cookies_update__(ofproto, actions, -1, dead_cookies);
+}
+
+static void
+learned_cookies_flush(struct ofproto *ofproto, struct list *dead_cookies)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ struct learned_cookie *c, *next;
+
+ LIST_FOR_EACH_SAFE (c, next, u.list_node, dead_cookies) {
+ struct rule_criteria criteria;
+ struct rule_collection rules;
+ struct match match;
+
+ match_init_catchall(&match);
+ rule_criteria_init(&criteria, c->table_id, &match, 0,
+ c->cookie, OVS_BE64_MAX, OFPP_ANY, OFPG_ANY);
+ rule_criteria_require_rw(&criteria, false);
+ collect_rules_loose(ofproto, &criteria, &rules);
+ delete_flows__(&rules, OFPRR_DELETE, NULL);
+ rule_criteria_destroy(&criteria);
+ rule_collection_destroy(&rules);
+
+ list_remove(&c->u.list_node);
+ free(c);
+ }
+}
+\f
+static enum ofperr
+handle_echo_request(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+ ofconn_send_reply(ofconn, make_echo_reply(oh));
+ return 0;
+}
+
+static enum ofperr
handle_features_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofport *port;
bool arp_match_ip;
struct ofpbuf *b;
- int n_tables;
- int i;
ofproto->ofproto_class->get_features(ofproto, &arp_match_ip,
&features.actions);
ovs_assert(features.actions & OFPUTIL_A_OUTPUT); /* sanity check */
- /* Count only non-hidden tables in the number of tables. (Hidden tables,
- * if present, are always at the end.) */
- n_tables = ofproto->n_tables;
- for (i = 0; i < ofproto->n_tables; i++) {
- if (ofproto->tables[i].flags & OFTABLE_HIDDEN) {
- n_tables = i;
- break;
- }
- }
-
features.datapath_id = ofproto->datapath_id;
features.n_buffers = pktbuf_capacity();
- features.n_tables = n_tables;
+ features.n_tables = ofproto_get_n_visible_tables(ofproto);
features.capabilities = (OFPUTIL_C_FLOW_STATS | OFPUTIL_C_TABLE_STATS |
OFPUTIL_C_PORT_STATS | OFPUTIL_C_QUEUE_STATS);
if (arp_match_ip) {
uint64_t ofpacts_stub[1024 / 8];
struct ofpbuf ofpacts;
struct flow flow;
- union flow_in_port in_port_;
enum ofperr error;
COVERAGE_INC(ofproto_packet_out);
}
/* Verify actions against packet, then send packet if successful. */
- in_port_.ofp_port = po.in_port;
- flow_extract(payload, 0, 0, NULL, &in_port_, &flow);
+ flow_extract(payload, NULL, &flow);
+ flow.in_port.ofp_port = po.in_port;
error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len);
if (!error) {
error = p->ofproto_class->packet_out(p, payload, &flow,
}
static void
-update_port_config(struct ofport *port,
+update_port_config(struct ofconn *ofconn, struct ofport *port,
enum ofputil_port_config config,
enum ofputil_port_config mask)
{
- enum ofputil_port_config old_config = port->pp.config;
- enum ofputil_port_config toggle;
+ enum ofputil_port_config toggle = (config ^ port->pp.config) & mask;
- toggle = (config ^ port->pp.config) & mask;
- if (toggle & OFPUTIL_PC_PORT_DOWN) {
- if (config & OFPUTIL_PC_PORT_DOWN) {
- netdev_turn_flags_off(port->netdev, NETDEV_UP, NULL);
- } else {
- netdev_turn_flags_on(port->netdev, NETDEV_UP, NULL);
- }
+ if (toggle & OFPUTIL_PC_PORT_DOWN
+ && (config & OFPUTIL_PC_PORT_DOWN
+ ? netdev_turn_flags_off(port->netdev, NETDEV_UP, NULL)
+ : netdev_turn_flags_on(port->netdev, NETDEV_UP, NULL))) {
+ /* We tried to bring the port up or down, but it failed, so don't
+ * update the "down" bit. */
toggle &= ~OFPUTIL_PC_PORT_DOWN;
}
- port->pp.config ^= toggle;
- if (port->pp.config != old_config) {
+ if (toggle) {
+ enum ofputil_port_config old_config = port->pp.config;
+ port->pp.config ^= toggle;
port->ofproto->ofproto_class->port_reconfigured(port, old_config);
+ connmgr_send_port_status(port->ofproto->connmgr, ofconn, &port->pp,
+ OFPPR_MODIFY);
}
}
return error;
}
- error = ofputil_decode_port_mod(oh, &pm);
+ error = ofputil_decode_port_mod(oh, &pm, false);
if (error) {
return error;
}
} else if (!eth_addr_equals(port->pp.hw_addr, pm.hw_addr)) {
return OFPERR_OFPPMFC_BAD_HW_ADDR;
} else {
- update_port_config(port, pm.config, pm.mask);
+ update_port_config(ofconn, port, pm.config, pm.mask);
if (pm.advertise) {
netdev_set_advertisements(port->netdev, pm.advertise);
}
ots[i].instructions = htonl(OFPIT11_ALL);
ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
- ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
+ fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
- ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
+ fat_rwlock_unlock(&p->tables[i].cls.rwlock);
}
p->ofproto_class->get_tables(p, ots);
ofputil_append_port_stat(replies, &ops);
}
-static enum ofperr
-handle_port_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_port_request(struct ofconn *ofconn,
+ const struct ofp_header *request, ofp_port_t port_no,
+ void (*cb)(struct ofport *, struct list *replies))
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofport *port;
struct list replies;
- ofp_port_t port_no;
- enum ofperr error;
-
- error = ofputil_decode_port_stats_request(request, &port_no);
- if (error) {
- return error;
- }
ofpmp_init(&replies, request);
if (port_no != OFPP_ANY) {
- port = ofproto_get_port(p, port_no);
+ port = ofproto_get_port(ofproto, port_no);
if (port) {
- append_port_stat(port, &replies);
+ cb(port, &replies);
}
} else {
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- append_port_stat(port, &replies);
+ HMAP_FOR_EACH (port, hmap_node, &ofproto->ports) {
+ cb(port, &replies);
}
}
ofconn_send_replies(ofconn, &replies);
- return 0;
+}
+
+static enum ofperr
+handle_port_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
+{
+ ofp_port_t port_no;
+ enum ofperr error;
+
+ error = ofputil_decode_port_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_stat);
+ }
+ return error;
+}
+
+static void
+append_port_desc(struct ofport *port, struct list *replies)
+{
+ ofputil_append_port_desc_stats_reply(&port->pp, replies);
}
static enum ofperr
handle_port_desc_stats_request(struct ofconn *ofconn,
const struct ofp_header *request)
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
- enum ofp_version version;
- struct ofport *port;
- struct list replies;
-
- ofpmp_init(&replies, request);
+ ofp_port_t port_no;
+ enum ofperr error;
- version = ofputil_protocol_to_ofp_version(ofconn_get_protocol(ofconn));
- HMAP_FOR_EACH (port, hmap_node, &p->ports) {
- ofputil_append_port_desc_stats_reply(version, &port->pp, &replies);
+ error = ofputil_decode_port_desc_stats_request(request, &port_no);
+ if (!error) {
+ handle_port_request(ofconn, request, port_no, append_port_desc);
}
-
- ofconn_send_replies(ofconn, &replies);
- return 0;
+ return error;
}
static uint32_t
hash_cookie(ovs_be64 cookie)
{
- return hash_2words((OVS_FORCE uint64_t)cookie >> 32,
- (OVS_FORCE uint64_t)cookie);
+ return hash_uint64((OVS_FORCE uint64_t)cookie);
}
static void
hindex_remove(&ofproto->cookies, &rule->cookie_node);
}
-static void
-ofproto_rule_change_cookie(struct ofproto *ofproto, struct rule *rule,
- ovs_be64 new_cookie)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (new_cookie != rule->flow_cookie) {
- cookies_remove(ofproto, rule);
-
- ovs_mutex_lock(&rule->mutex);
- rule->flow_cookie = new_cookie;
- ovs_mutex_unlock(&rule->mutex);
-
- cookies_insert(ofproto, rule);
- }
-}
-
static void
calc_duration(long long int start, long long int now,
uint32_t *sec, uint32_t *nsec)
}
/* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'. Returns
- * 0 if 'table_id' is OK, otherwise an OpenFlow error code. */
-static enum ofperr
+ * true if 'table_id' is OK, false otherwise. */
+static bool
check_table_id(const struct ofproto *ofproto, uint8_t table_id)
{
- return (table_id == 0xff || table_id < ofproto->n_tables
- ? 0
- : OFPERR_OFPBRC_BAD_TABLE_ID);
-
+ return table_id == OFPTT_ALL || table_id < ofproto->n_tables;
}
static struct oftable *
/* Initializes 'criteria' in a straightforward way based on the other
* parameters.
*
+ * By default, the criteria include flows that are read-only, on the assumption
+ * that the collected flows won't be modified. Call rule_criteria_require_rw()
+ * if flows will be modified.
+ *
* For "loose" matching, the 'priority' parameter is unimportant and may be
* supplied as 0. */
static void
criteria->cookie_mask = cookie_mask;
criteria->out_port = out_port;
criteria->out_group = out_group;
+
+ /* We ordinarily want to skip hidden rules, but there has to be a way for
+ * code internal to OVS to modify and delete them, so if the criteria
+ * specify a priority that can only be for a hidden flow, then allow hidden
+ * rules to be selected. (This doesn't allow OpenFlow clients to meddle
+ * with hidden flows because OpenFlow uses only a 16-bit field to specify
+ * priority.) */
+ criteria->include_hidden = priority > UINT16_MAX;
+
+ /* We assume that the criteria are being used to collect flows for reading
+ * but not modification. Thus, we should collect read-only flows. */
+ criteria->include_readonly = true;
+}
+
+/* By default, criteria initialized by rule_criteria_init() will match flows
+ * that are read-only, on the assumption that the collected flows won't be
+ * modified. Call this function to match only flows that are modifiable.
+ *
+ * Specify 'can_write_readonly' as false in ordinary circumstances, true if the
+ * caller has special privileges that allow it to modify even "read-only"
+ * flows. */
+static void
+rule_criteria_require_rw(struct rule_criteria *criteria,
+ bool can_write_readonly)
+{
+ criteria->include_readonly = can_write_readonly;
}
static void
}
}
-static enum ofperr
+/* Checks whether 'rule' matches 'c' and, if so, adds it to 'rules'. This
+ * function verifies most of the criteria in 'c' itself, but the caller must
+ * check 'c->cr' itself.
+ *
+ * Increments '*n_readonly' if 'rule' wasn't added because it's read-only (and
+ * 'c' only includes modifiable rules). */
+static void
collect_rule(struct rule *rule, const struct rule_criteria *c,
- struct rule_collection *rules)
+ struct rule_collection *rules, size_t *n_readonly)
OVS_REQUIRES(ofproto_mutex)
{
- /* We ordinarily want to skip hidden rules, but there has to be a way for
- * code internal to OVS to modify and delete them, so if the criteria
- * specify a priority that can only be for a hidden flow, then allow hidden
- * rules to be selected. (This doesn't allow OpenFlow clients to meddle
- * with hidden flows because OpenFlow uses only a 16-bit field to specify
- * priority.) */
- if (ofproto_rule_is_hidden(rule) && c->cr.priority <= UINT16_MAX) {
- return 0;
- } else if (rule->pending) {
- return OFPROTO_POSTPONE;
- } else {
- if ((c->table_id == rule->table_id || c->table_id == 0xff)
- && ofproto_rule_has_out_port(rule, c->out_port)
- && ofproto_rule_has_out_group(rule, c->out_group)
- && !((rule->flow_cookie ^ c->cookie) & c->cookie_mask)) {
+ if ((c->table_id == rule->table_id || c->table_id == 0xff)
+ && ofproto_rule_has_out_port(rule, c->out_port)
+ && ofproto_rule_has_out_group(rule, c->out_group)
+ && !((rule->flow_cookie ^ c->cookie) & c->cookie_mask)
+ && (!rule_is_hidden(rule) || c->include_hidden)) {
+ /* Rule matches all the criteria... */
+ if (!rule_is_readonly(rule) || c->include_readonly) {
+ /* ...add it. */
rule_collection_add(rules, rule);
+ } else {
+ /* ...except it's read-only. */
+ ++*n_readonly;
}
- return 0;
}
}
* OFPFC_MODIFY and OFPFC_DELETE requests. Puts the selected rules on list
* 'rules'.
*
- * Hidden rules are always omitted.
- *
* Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
collect_rules_loose(struct ofproto *ofproto,
OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
- enum ofperr error;
+ enum ofperr error = 0;
+ size_t n_readonly = 0;
rule_collection_init(rules);
- error = check_table_id(ofproto, criteria->table_id);
- if (error) {
+ if (!check_table_id(ofproto, criteria->table_id)) {
+ error = OFPERR_OFPBRC_BAD_TABLE_ID;
goto exit;
}
hash_cookie(criteria->cookie),
&ofproto->cookies) {
if (cls_rule_is_loose_match(&rule->cr, &criteria->cr.match)) {
- error = collect_rule(rule, criteria, rules);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
} else {
struct cls_cursor cursor;
struct rule *rule;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &criteria->cr);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
- error = collect_rule(rule, criteria, rules);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
}
}
exit:
+ if (!error && !rules->n && n_readonly) {
+ /* We didn't find any rules to modify. We did find some read-only
+ * rules that we're not allowed to modify, so report that. */
+ error = OFPERR_OFPBRC_EPERM;
+ }
if (error) {
rule_collection_destroy(rules);
}
* OFPFC_MODIFY_STRICT and OFPFC_DELETE_STRICT requests. Puts the selected
* rules on list 'rules'.
*
- * Hidden rules are always omitted.
- *
* Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
collect_rules_strict(struct ofproto *ofproto,
OVS_REQUIRES(ofproto_mutex)
{
struct oftable *table;
- int error;
+ size_t n_readonly = 0;
+ int error = 0;
rule_collection_init(rules);
- error = check_table_id(ofproto, criteria->table_id);
- if (error) {
+ if (!check_table_id(ofproto, criteria->table_id)) {
+ error = OFPERR_OFPBRC_BAD_TABLE_ID;
goto exit;
}
hash_cookie(criteria->cookie),
&ofproto->cookies) {
if (cls_rule_equal(&rule->cr, &criteria->cr)) {
- error = collect_rule(rule, criteria, rules);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
} else {
FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
struct rule *rule;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(
&table->cls, &criteria->cr));
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
if (rule) {
- error = collect_rule(rule, criteria, rules);
- if (error) {
- break;
- }
+ collect_rule(rule, criteria, rules, &n_readonly);
}
}
}
exit:
+ if (!error && !rules->n && n_readonly) {
+ /* We didn't find any rules to modify. We did find some read-only
+ * rules that we're not allowed to modify, so report that. */
+ error = OFPERR_OFPBRC_EPERM;
+ }
if (error) {
rule_collection_destroy(rules);
}
long long int now = time_msec();
struct ofputil_flow_stats fs;
long long int created, used, modified;
- struct rule_actions *actions;
+ const struct rule_actions *actions;
enum ofputil_flow_mod_flags flags;
ovs_mutex_lock(&rule->mutex);
fs.idle_timeout = rule->idle_timeout;
fs.hard_timeout = rule->hard_timeout;
created = rule->created;
- used = rule->used;
modified = rule->modified;
- actions = rule_get_actions__(rule);
+ actions = rule_get_actions(rule);
flags = rule->flags;
ovs_mutex_unlock(&rule->mutex);
+ ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
+ &fs.byte_count, &used);
+
minimatch_expand(&rule->cr.match, &fs.match);
fs.table_id = rule->table_id;
calc_duration(created, now, &fs.duration_sec, &fs.duration_nsec);
fs.priority = rule->cr.priority;
fs.idle_age = age_secs(now - used);
fs.hard_age = age_secs(now - modified);
- ofproto->ofproto_class->rule_get_stats(rule, &fs.packet_count,
- &fs.byte_count);
fs.ofpacts = actions->ofpacts;
fs.ofpacts_len = actions->ofpacts_len;
fs.flags = flags;
ofputil_append_flow_stats_reply(&fs, &replies);
-
- rule_actions_unref(actions);
}
rule_collection_unref(&rules);
flow_stats_ds(struct rule *rule, struct ds *results)
{
uint64_t packet_count, byte_count;
- struct rule_actions *actions;
- long long int created;
+ const struct rule_actions *actions;
+ long long int created, used;
- rule->ofproto->ofproto_class->rule_get_stats(rule,
- &packet_count, &byte_count);
+ rule->ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
+ &byte_count, &used);
ovs_mutex_lock(&rule->mutex);
- actions = rule_get_actions__(rule);
+ actions = rule_get_actions(rule);
created = rule->created;
ovs_mutex_unlock(&rule->mutex);
ofpacts_format(actions->ofpacts, actions->ofpacts_len, results);
ds_put_cstr(results, "\n");
-
- rule_actions_unref(actions);
}
/* Adds a pretty-printed description of all flows to 'results', including
struct cls_cursor cursor;
struct rule *rule;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
flow_stats_ds(rule, results);
}
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
}
}
ofproto->ofproto_class->get_netflow_ids(ofproto, engine_type, engine_id);
}
-/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'. Returns
- * true if the port's CFM status was successfully stored into '*status'.
- * Returns false if the port did not have CFM configured, in which case
- * '*status' is indeterminate.
+/* Checks the status of CFM configured on 'ofp_port' within 'ofproto'.
+ * Returns 0 if the port's CFM status was successfully stored into
+ * '*status'. Returns positive errno if the port did not have CFM
+ * configured. Returns negative number if there is no status change
+ * since last update.
*
- * The caller must provide and owns '*status', and must free 'status->rmps'. */
-bool
+ * The caller must provide and own '*status', and must free 'status->rmps'.
+ * '*status' is indeterminate if the return value is non-zero. */
+int
ofproto_port_get_cfm_status(const struct ofproto *ofproto, ofp_port_t ofp_port,
struct ofproto_cfm_status *status)
{
struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
- return (ofport
- && ofproto->ofproto_class->get_cfm_status
- && ofproto->ofproto_class->get_cfm_status(ofport, status));
+ return (ofport && ofproto->ofproto_class->get_cfm_status
+ ? ofproto->ofproto_class->get_cfm_status(ofport, status)
+ : EOPNOTSUPP);
}
static enum ofperr
struct rule *rule = rules.rules[i];
uint64_t packet_count;
uint64_t byte_count;
+ long long int used;
ofproto->ofproto_class->rule_get_stats(rule, &packet_count,
- &byte_count);
+ &byte_count, &used);
if (packet_count == UINT64_MAX) {
unknown_packets = true;
return error;
}
-static bool
-is_flow_deletion_pending(const struct ofproto *ofproto,
- const struct cls_rule *cls_rule,
- uint8_t table_id)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (!hmap_is_empty(&ofproto->deletions)) {
- struct ofoperation *op;
-
- HMAP_FOR_EACH_WITH_HASH (op, hmap_node,
- cls_rule_hash(cls_rule, table_id),
- &ofproto->deletions) {
- if (cls_rule_equal(cls_rule, &op->rule->cr)) {
- return true;
- }
- }
- }
-
- return false;
-}
-
static bool
should_evict_a_rule(struct oftable *table, unsigned int extra_space)
OVS_REQUIRES(ofproto_mutex)
}
static enum ofperr
-evict_rules_from_table(struct ofproto *ofproto, struct oftable *table,
- unsigned int extra_space)
+evict_rules_from_table(struct oftable *table, unsigned int extra_space)
OVS_REQUIRES(ofproto_mutex)
{
while (should_evict_a_rule(table, extra_space)) {
if (!choose_rule_to_evict(table, &rule)) {
return OFPERR_OFPFMFC_TABLE_FULL;
- } else if (rule->pending) {
- return OFPROTO_POSTPONE;
} else {
- struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
- delete_flow__(rule, group, OFPRR_EVICTION);
- ofopgroup_submit(group);
+ ofproto_rule_delete__(rule, OFPRR_EVICTION);
}
}
* 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
* if any. */
static enum ofperr
-add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm, const struct ofp_header *request)
+add_flow(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
+ const struct rule_actions *actions;
struct oftable *table;
- struct ofopgroup *group;
struct cls_rule cr;
struct rule *rule;
uint8_t table_id;
- int error;
+ int error = 0;
- error = check_table_id(ofproto, fm->table_id);
- if (error) {
+ if (!check_table_id(ofproto, fm->table_id)) {
+ error = OFPERR_OFPBRC_BAD_TABLE_ID;
return error;
}
}
table = &ofproto->tables[table_id];
-
- if (table->flags & OFTABLE_READONLY) {
+ if (table->flags & OFTABLE_READONLY
+ && !(fm->flags & OFPUTIL_FF_NO_READONLY)) {
return OFPERR_OFPBRC_EPERM;
}
+ if (!(fm->flags & OFPUTIL_FF_HIDDEN_FIELDS)) {
+ if (!match_has_default_hidden_fields(&fm->match)) {
+ VLOG_WARN_RL(&rl, "%s: (add_flow) only internal flows can set "
+ "non-default values to hidden fields", ofproto->name);
+ return OFPERR_OFPBRC_EPERM;
+ }
+ }
+
cls_rule_init(&cr, &fm->match, fm->priority);
/* Transform "add" into "modify" if there's an existing identical flow. */
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
if (rule) {
- cls_rule_destroy(&cr);
- if (!rule_is_modifiable(rule)) {
- return OFPERR_OFPBRC_EPERM;
- } else if (rule->pending) {
- return OFPROTO_POSTPONE;
- } else {
- struct rule_collection rules;
+ struct rule_collection rules;
- rule_collection_init(&rules);
- rule_collection_add(&rules, rule);
- fm->modify_cookie = true;
- error = modify_flows__(ofproto, ofconn, fm, request, &rules);
- rule_collection_destroy(&rules);
+ cls_rule_destroy(&cr);
- return error;
- }
- }
+ rule_collection_init(&rules);
+ rule_collection_add(&rules, rule);
+ fm->modify_cookie = true;
+ error = modify_flows__(ofproto, fm, &rules, req);
+ rule_collection_destroy(&rules);
- /* Serialize against pending deletion. */
- if (is_flow_deletion_pending(ofproto, &cr, table_id)) {
- cls_rule_destroy(&cr);
- return OFPROTO_POSTPONE;
+ return error;
}
/* Check for overlap, if requested. */
if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
bool overlaps;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
overlaps = classifier_rule_overlaps(&table->cls, &cr);
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
if (overlaps) {
cls_rule_destroy(&cr);
}
/* If necessary, evict an existing rule to clear out space. */
- error = evict_rules_from_table(ofproto, table, 1);
+ error = evict_rules_from_table(table, 1);
if (error) {
cls_rule_destroy(&cr);
return error;
/* Initialize base state. */
*CONST_CAST(struct ofproto **, &rule->ofproto) = ofproto;
cls_rule_move(CONST_CAST(struct cls_rule *, &rule->cr), &cr);
- atomic_init(&rule->ref_count, 1);
- rule->pending = NULL;
+ ovs_refcount_init(&rule->ref_count);
rule->flow_cookie = fm->new_cookie;
- rule->created = rule->modified = rule->used = time_msec();
+ rule->created = rule->modified = time_msec();
ovs_mutex_init(&rule->mutex);
ovs_mutex_lock(&rule->mutex);
*CONST_CAST(uint8_t *, &rule->table_id) = table - ofproto->tables;
rule->flags = fm->flags & OFPUTIL_FF_STATE;
- rule->actions = rule_actions_create(ofproto, fm->ofpacts, fm->ofpacts_len);
+ actions = rule_actions_create(fm->ofpacts, fm->ofpacts_len);
+ ovsrcu_set(&rule->actions, actions);
list_init(&rule->meter_list_node);
rule->eviction_group = NULL;
list_init(&rule->expirable);
return error;
}
- /* Insert rule. */
- oftable_insert_rule(rule);
+ if (fm->hard_timeout || fm->idle_timeout) {
+ list_insert(&ofproto->expirable, &rule->expirable);
+ }
+ cookies_insert(ofproto, rule);
+ eviction_group_add_rule(rule);
+ if (actions->has_meter) {
+ meter_insert_rule(rule);
+ }
- group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
- ofoperation_create(group, rule, OFOPERATION_ADD, 0);
- ofproto->ofproto_class->rule_insert(rule);
- ofopgroup_submit(group);
+ fat_rwlock_wrlock(&table->cls.rwlock);
+ classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
+ fat_rwlock_unlock(&table->cls.rwlock);
- return error;
+ error = ofproto->ofproto_class->rule_insert(rule);
+ if (error) {
+ oftable_remove_rule(rule);
+ ofproto_rule_unref(rule);
+ return error;
+ }
+ learned_cookies_inc(ofproto, actions);
+
+ if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
+ if (ofproto->vlan_bitmap) {
+ uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
+ if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
+ bitmap_set1(ofproto->vlan_bitmap, vid);
+ ofproto->vlans_changed = true;
+ }
+ } else {
+ ofproto->vlans_changed = true;
+ }
+ }
+
+ ofmonitor_report(ofproto->connmgr, rule, NXFME_ADDED, 0,
+ req ? req->ofconn : NULL, req ? req->xid : 0);
+
+ return req ? send_buffered_packet(req->ofconn, fm->buffer_id, rule) : 0;
}
\f
/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */
*
* Returns 0 on success, otherwise an OpenFlow error code. */
static enum ofperr
-modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm, const struct ofp_header *request,
- const struct rule_collection *rules)
+modify_flows__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct rule_collection *rules,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
- enum ofoperation_type type;
- struct ofopgroup *group;
- enum ofperr error;
+ struct list dead_cookies = LIST_INITIALIZER(&dead_cookies);
+ enum nx_flow_update_event event;
size_t i;
- type = fm->command == OFPFC_ADD ? OFOPERATION_REPLACE : OFOPERATION_MODIFY;
- group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
- error = OFPERR_OFPBRC_EPERM;
+ if (ofproto->ofproto_class->rule_premodify_actions) {
+ for (i = 0; i < rules->n; i++) {
+ struct rule *rule = rules->rules[i];
+ enum ofperr error;
+
+ error = ofproto->ofproto_class->rule_premodify_actions(
+ rule, fm->ofpacts, fm->ofpacts_len);
+ if (error) {
+ return error;
+ }
+ }
+ }
+
+ event = fm->command == OFPFC_ADD ? NXFME_ADDED : NXFME_MODIFIED;
for (i = 0; i < rules->n; i++) {
struct rule *rule = rules->rules[i];
- struct ofoperation *op;
- bool actions_changed;
- bool reset_counters;
- /* FIXME: Implement OFPFUTIL_FF_RESET_COUNTS */
+        /* 'fm' asks for a cookie change only when it both enables cookie
+         * modification and supplies a new cookie different from the rule's. */
+ bool change_cookie = (fm->modify_cookie
+ && fm->new_cookie != OVS_BE64_MAX
+ && fm->new_cookie != rule->flow_cookie);
- if (rule_is_modifiable(rule)) {
- /* At least one rule is modifiable, don't report EPERM error. */
- error = 0;
- } else {
- continue;
- }
+ const struct rule_actions *actions = rule_get_actions(rule);
+ bool change_actions = !ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
+ actions->ofpacts,
+ actions->ofpacts_len);
- actions_changed = !ofpacts_equal(fm->ofpacts, fm->ofpacts_len,
- rule->actions->ofpacts,
- rule->actions->ofpacts_len);
+ bool reset_counters = (fm->flags & OFPUTIL_FF_RESET_COUNTS) != 0;
+
+ long long int now = time_msec();
- op = ofoperation_create(group, rule, type, 0);
+ /* FIXME: Implement OFPFUTIL_FF_RESET_COUNTS */
- if (fm->modify_cookie && fm->new_cookie != OVS_BE64_MAX) {
- ofproto_rule_change_cookie(ofproto, rule, fm->new_cookie);
+ if (change_cookie) {
+ cookies_remove(ofproto, rule);
}
- if (type == OFOPERATION_REPLACE) {
- ovs_mutex_lock(&rule->mutex);
+
+ ovs_mutex_lock(&rule->mutex);
+ if (fm->command == OFPFC_ADD) {
rule->idle_timeout = fm->idle_timeout;
rule->hard_timeout = fm->hard_timeout;
- ovs_mutex_unlock(&rule->mutex);
-
rule->flags = fm->flags & OFPUTIL_FF_STATE;
+ rule->created = now;
+ }
+ if (change_cookie) {
+ rule->flow_cookie = fm->new_cookie;
+ }
+ rule->modified = now;
+ ovs_mutex_unlock(&rule->mutex);
+
+ if (change_cookie) {
+ cookies_insert(ofproto, rule);
+ }
+ if (fm->command == OFPFC_ADD) {
if (fm->idle_timeout || fm->hard_timeout) {
if (!rule->eviction_group) {
eviction_group_add_rule(rule);
}
}
- reset_counters = (fm->flags & OFPUTIL_FF_RESET_COUNTS) != 0;
- if (actions_changed || reset_counters) {
- struct rule_actions *new_actions;
+ if (change_actions) {
+ ovsrcu_set(&rule->actions, rule_actions_create(fm->ofpacts,
+ fm->ofpacts_len));
+ rule_actions_destroy(actions);
+ }
- op->actions = rule->actions;
- new_actions = rule_actions_create(ofproto,
- fm->ofpacts, fm->ofpacts_len);
+ if (change_actions || reset_counters) {
+ ofproto->ofproto_class->rule_modify_actions(rule, reset_counters);
+ }
- ovs_mutex_lock(&rule->mutex);
- rule->actions = new_actions;
- ovs_mutex_unlock(&rule->mutex);
+ if (event != NXFME_MODIFIED || change_actions || change_cookie) {
+ ofmonitor_report(ofproto->connmgr, rule, event, 0,
+ req ? req->ofconn : NULL, req ? req->xid : 0);
+ }
- rule->ofproto->ofproto_class->rule_modify_actions(rule,
- reset_counters);
- } else {
- ofoperation_complete(op, 0);
+ if (change_actions) {
+ learned_cookies_inc(ofproto, rule_get_actions(rule));
+ learned_cookies_dec(ofproto, actions, &dead_cookies);
}
}
- ofopgroup_submit(group);
+ learned_cookies_flush(ofproto, &dead_cookies);
- return error;
+ if (fm->buffer_id != UINT32_MAX && req) {
+ return send_buffered_packet(req->ofconn, fm->buffer_id,
+ rules->rules[0]);
+ }
+
+ return 0;
}
static enum ofperr
-modify_flows_add(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm, const struct ofp_header *request)
+modify_flows_add(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
if (fm->cookie_mask != htonll(0) || fm->new_cookie == OVS_BE64_MAX) {
return 0;
}
- return add_flow(ofproto, ofconn, fm, request);
+ return add_flow(ofproto, fm, req);
}
/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on
* 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
* if any. */
static enum ofperr
-modify_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm,
- const struct ofp_header *request)
+modify_flows_loose(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
rule_criteria_init(&criteria, fm->table_id, &fm->match, 0,
fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG11_ANY);
+ rule_criteria_require_rw(&criteria,
+ (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
error = collect_rules_loose(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
if (!error) {
error = (rules.n > 0
- ? modify_flows__(ofproto, ofconn, fm, request, &rules)
- : modify_flows_add(ofproto, ofconn, fm, request));
+ ? modify_flows__(ofproto, fm, &rules, req)
+ : modify_flows_add(ofproto, fm, req));
}
rule_collection_destroy(&rules);
}
/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error
- * code on failure.
- *
- * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
- * if any. */
+ * code on failure. */
static enum ofperr
-modify_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm,
- const struct ofp_header *request)
+modify_flow_strict(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
rule_criteria_init(&criteria, fm->table_id, &fm->match, fm->priority,
fm->cookie, fm->cookie_mask, OFPP_ANY, OFPG11_ANY);
+ rule_criteria_require_rw(&criteria,
+ (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
error = collect_rules_strict(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
if (!error) {
if (rules.n == 0) {
- error = modify_flows_add(ofproto, ofconn, fm, request);
+ error = modify_flows_add(ofproto, fm, req);
} else if (rules.n == 1) {
- error = modify_flows__(ofproto, ofconn, fm, request, &rules);
+ error = modify_flows__(ofproto, fm, &rules, req);
}
}
\f
/* OFPFC_DELETE implementation. */
+/* Deletes the rules listed in 'rules'. */
static void
-delete_flow__(struct rule *rule, struct ofopgroup *group,
- enum ofp_flow_removed_reason reason)
+delete_flows__(const struct rule_collection *rules,
+ enum ofp_flow_removed_reason reason,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofproto *ofproto = rule->ofproto;
+ if (rules->n) {
+ struct list dead_cookies = LIST_INITIALIZER(&dead_cookies);
+ struct ofproto *ofproto = rules->rules[0]->ofproto;
+ size_t i;
- ofproto_rule_send_removed(rule, reason);
+ for (i = 0; i < rules->n; i++) {
+ struct rule *rule = rules->rules[i];
+ const struct rule_actions *actions = rule_get_actions(rule);
- ofoperation_create(group, rule, OFOPERATION_DELETE, reason);
- oftable_remove_rule(rule);
- ofproto->ofproto_class->rule_delete(rule);
-}
+ ofproto_rule_send_removed(rule, reason);
-/* Deletes the rules listed in 'rules'.
- *
- * Returns 0 on success, otherwise an OpenFlow error code. */
-static enum ofperr
-delete_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
- const struct ofp_header *request,
- const struct rule_collection *rules,
- enum ofp_flow_removed_reason reason)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofopgroup *group;
- size_t i;
+ ofmonitor_report(ofproto->connmgr, rule, NXFME_DELETED, reason,
+ req ? req->ofconn : NULL, req ? req->xid : 0);
+ oftable_remove_rule(rule);
+ ofproto->ofproto_class->rule_delete(rule);
- group = ofopgroup_create(ofproto, ofconn, request, UINT32_MAX);
- for (i = 0; i < rules->n; i++) {
- delete_flow__(rules->rules[i], group, reason);
+ learned_cookies_dec(ofproto, actions, &dead_cookies);
+ }
+ learned_cookies_flush(ofproto, &dead_cookies);
+ ofmonitor_flush(ofproto->connmgr);
}
- ofopgroup_submit(group);
-
- return 0;
}
/* Implements OFPFC_DELETE. */
static enum ofperr
-delete_flows_loose(struct ofproto *ofproto, struct ofconn *ofconn,
+delete_flows_loose(struct ofproto *ofproto,
const struct ofputil_flow_mod *fm,
- const struct ofp_header *request)
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
rule_criteria_init(&criteria, fm->table_id, &fm->match, 0,
fm->cookie, fm->cookie_mask,
fm->out_port, fm->out_group);
+ rule_criteria_require_rw(&criteria,
+ (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
error = collect_rules_loose(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
if (!error && rules.n > 0) {
- error = delete_flows__(ofproto, ofconn, request, &rules, OFPRR_DELETE);
+ delete_flows__(&rules, fm->delete_reason, req);
}
rule_collection_destroy(&rules);
/* Implements OFPFC_DELETE_STRICT. */
static enum ofperr
-delete_flow_strict(struct ofproto *ofproto, struct ofconn *ofconn,
- const struct ofputil_flow_mod *fm,
- const struct ofp_header *request)
+delete_flow_strict(struct ofproto *ofproto, const struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_REQUIRES(ofproto_mutex)
{
struct rule_criteria criteria;
rule_criteria_init(&criteria, fm->table_id, &fm->match, fm->priority,
fm->cookie, fm->cookie_mask,
fm->out_port, fm->out_group);
+ rule_criteria_require_rw(&criteria,
+ (fm->flags & OFPUTIL_FF_NO_READONLY) != 0);
error = collect_rules_strict(ofproto, &criteria, &rules);
rule_criteria_destroy(&criteria);
if (!error && rules.n > 0) {
- error = delete_flows__(ofproto, ofconn, request, &rules, OFPRR_DELETE);
+ delete_flows__(&rules, fm->delete_reason, req);
}
rule_collection_destroy(&rules);
OVS_REQUIRES(ofproto_mutex)
{
struct ofputil_flow_removed fr;
+ long long int used;
- if (ofproto_rule_is_hidden(rule) ||
+ if (rule_is_hidden(rule) ||
!(rule->flags & OFPUTIL_FF_SEND_FLOW_REM)) {
return;
}
fr.hard_timeout = rule->hard_timeout;
ovs_mutex_unlock(&rule->mutex);
rule->ofproto->ofproto_class->rule_get_stats(rule, &fr.packet_count,
- &fr.byte_count);
+ &fr.byte_count, &used);
connmgr_send_flow_removed(rule->ofproto->connmgr, &fr);
}
* OFPRR_HARD_TIMEOUT or OFPRR_IDLE_TIMEOUT), and then removes 'rule' from its
* ofproto.
*
- * 'rule' must not have a pending operation (that is, 'rule->pending' must be
- * NULL).
- *
* ofproto implementation ->run() functions should use this function to expire
* OpenFlow flows. */
void
ofproto_rule_expire(struct rule *rule, uint8_t reason)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofproto *ofproto = rule->ofproto;
-
- ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT
- || reason == OFPRR_DELETE || reason == OFPRR_GROUP_DELETE);
-
- ofproto_rule_delete__(ofproto, rule, reason);
+ ofproto_rule_delete__(rule, reason);
}
/* Reduces '*timeout' to no more than 'max'. A value of zero in either case
uint64_t ofpacts_stub[1024 / 8];
struct ofpbuf ofpacts;
enum ofperr error;
- long long int now;
error = reject_slave_controller(ofconn);
if (error) {
error = ofproto_check_ofpacts(ofproto, fm.ofpacts, fm.ofpacts_len);
}
if (!error) {
- error = handle_flow_mod__(ofproto, ofconn, &fm, oh);
+ struct flow_mod_requester req;
+
+ req.ofconn = ofconn;
+ req.xid = oh->xid;
+ error = handle_flow_mod__(ofproto, &fm, &req);
}
if (error) {
goto exit_free_ofpacts;
}
- /* Record the operation for logging a summary report. */
- switch (fm.command) {
- case OFPFC_ADD:
- ofproto->n_add++;
- break;
-
- case OFPFC_MODIFY:
- case OFPFC_MODIFY_STRICT:
- ofproto->n_modify++;
- break;
-
- case OFPFC_DELETE:
- case OFPFC_DELETE_STRICT:
- ofproto->n_delete++;
- break;
- }
-
- now = time_msec();
- if (ofproto->next_op_report == LLONG_MAX) {
- ofproto->first_op = now;
- ofproto->next_op_report = MAX(now + 10 * 1000,
- ofproto->op_backoff);
- ofproto->op_backoff = ofproto->next_op_report + 60 * 1000;
- }
- ofproto->last_op = now;
+ ofconn_report_flow_mod(ofconn, fm.command);
exit_free_ofpacts:
ofpbuf_uninit(&ofpacts);
}
static enum ofperr
-handle_flow_mod__(struct ofproto *ofproto, struct ofconn *ofconn,
- struct ofputil_flow_mod *fm, const struct ofp_header *oh)
+handle_flow_mod__(struct ofproto *ofproto, struct ofputil_flow_mod *fm,
+ const struct flow_mod_requester *req)
OVS_EXCLUDED(ofproto_mutex)
{
enum ofperr error;
ovs_mutex_lock(&ofproto_mutex);
- if (ofproto->n_pending < 50) {
- switch (fm->command) {
- case OFPFC_ADD:
- error = add_flow(ofproto, ofconn, fm, oh);
- break;
+ switch (fm->command) {
+ case OFPFC_ADD:
+ error = add_flow(ofproto, fm, req);
+ break;
- case OFPFC_MODIFY:
- error = modify_flows_loose(ofproto, ofconn, fm, oh);
- break;
+ case OFPFC_MODIFY:
+ error = modify_flows_loose(ofproto, fm, req);
+ break;
- case OFPFC_MODIFY_STRICT:
- error = modify_flow_strict(ofproto, ofconn, fm, oh);
- break;
+ case OFPFC_MODIFY_STRICT:
+ error = modify_flow_strict(ofproto, fm, req);
+ break;
- case OFPFC_DELETE:
- error = delete_flows_loose(ofproto, ofconn, fm, oh);
- break;
+ case OFPFC_DELETE:
+ error = delete_flows_loose(ofproto, fm, req);
+ break;
- case OFPFC_DELETE_STRICT:
- error = delete_flow_strict(ofproto, ofconn, fm, oh);
- break;
+ case OFPFC_DELETE_STRICT:
+ error = delete_flow_strict(ofproto, fm, req);
+ break;
- default:
- if (fm->command > 0xff) {
- VLOG_WARN_RL(&rl, "%s: flow_mod has explicit table_id but "
- "flow_mod_table_id extension is not enabled",
- ofproto->name);
- }
- error = OFPERR_OFPFMFC_BAD_COMMAND;
- break;
+ default:
+ if (fm->command > 0xff) {
+ VLOG_WARN_RL(&rl, "%s: flow_mod has explicit table_id but "
+ "flow_mod_table_id extension is not enabled",
+ ofproto->name);
}
- } else {
- ovs_assert(!list_is_empty(&ofproto->pending));
- error = OFPROTO_POSTPONE;
+ error = OFPERR_OFPFMFC_BAD_COMMAND;
+ break;
}
+ ofmonitor_flush(ofproto->connmgr);
ovs_mutex_unlock(&ofproto_mutex);
run_rule_executes(ofproto);
}
if (request.role != OFPCR12_ROLE_NOCHANGE) {
- if (ofconn_get_role(ofconn) != request.role
- && ofconn_has_pending_opgroups(ofconn)) {
- return OFPROTO_POSTPONE;
- }
-
if (request.have_generation_id
&& !ofconn_set_master_election_id(ofconn, request.generation_id)) {
return OFPERR_OFPRRFC_STALE;
cur = ofconn_get_protocol(ofconn);
next = ofputil_protocol_set_base(cur, next_base);
- if (cur != next && ofconn_has_pending_opgroups(ofconn)) {
- /* Avoid sending async messages in surprising protocol. */
- return OFPROTO_POSTPONE;
- }
-
ofconn_set_protocol(ofconn, next);
+
return 0;
}
return OFPERR_OFPBRC_EPERM;
}
- if (format != ofconn_get_packet_in_format(ofconn)
- && ofconn_has_pending_opgroups(ofconn)) {
- /* Avoid sending async message in surprsing packet in format. */
- return OFPROTO_POSTPONE;
- }
-
ofconn_set_packet_in_format(ofconn, format);
return 0;
}
{
struct ofpbuf *buf;
- if (ofconn_has_pending_opgroups(ofconn)) {
- return OFPROTO_POSTPONE;
- }
-
buf = ofpraw_alloc_reply((oh->version == OFP10_VERSION
? OFPRAW_OFPT10_BARRIER_REPLY
: OFPRAW_OFPT11_BARRIER_REPLY), oh, 0);
struct list *msgs)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofoperation *op = rule->pending;
const struct rule_actions *actions;
struct ofputil_flow_update fu;
struct match match;
- if (op && op->type == OFOPERATION_ADD) {
- /* We'll report the final flow when the operation completes. Reporting
- * it now would cause a duplicate report later. */
- return;
- }
-
fu.event = (flags & (NXFMF_INITIAL | NXFMF_ADD)
? NXFME_ADDED : NXFME_MODIFIED);
fu.reason = 0;
fu.match = &match;
fu.priority = rule->cr.priority;
- if (!(flags & NXFMF_ACTIONS)) {
- actions = NULL;
- } else if (!op) {
- actions = rule->actions;
- } else {
- /* An operation is in progress. Use the previous version of the flow's
- * actions, so that when the operation commits we report the change. */
- switch (op->type) {
- case OFOPERATION_ADD:
- NOT_REACHED();
-
- case OFOPERATION_MODIFY:
- case OFOPERATION_REPLACE:
- actions = op->actions ? op->actions : rule->actions;
- break;
-
- case OFOPERATION_DELETE:
- actions = rule->actions;
- break;
-
- default:
- NOT_REACHED();
- }
- }
+ actions = flags & NXFMF_ACTIONS ? rule_get_actions(rule) : NULL;
fu.ofpacts = actions ? actions->ofpacts : NULL;
fu.ofpacts_len = actions ? actions->ofpacts_len : 0;
{
enum nx_flow_monitor_flags update;
- if (ofproto_rule_is_hidden(rule)) {
+ if (rule_is_hidden(rule)) {
return;
}
- if (!(rule->pending
- ? ofoperation_has_out_port(rule->pending, m->out_port)
- : ofproto_rule_has_out_port(rule, m->out_port))) {
+ if (!ofproto_rule_has_out_port(rule, m->out_port)) {
return;
}
OVS_REQUIRES(ofproto_mutex)
{
const struct ofproto *ofproto = ofconn_get_ofproto(m->ofconn);
- const struct ofoperation *op;
const struct oftable *table;
struct cls_rule target;
struct cls_cursor cursor;
struct rule *rule;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, &target);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
- ovs_assert(!rule->pending); /* XXX */
- ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
- }
- ovs_rwlock_unlock(&table->cls.rwlock);
- }
-
- HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
- struct rule *rule = op->rule;
-
- if (((m->table_id == 0xff
- ? !(ofproto->tables[rule->table_id].flags & OFTABLE_HIDDEN)
- : m->table_id == rule->table_id))
- && cls_rule_is_loose_match(&rule->cr, &target.match)) {
ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
}
+ fat_rwlock_unlock(&table->cls.rwlock);
}
cls_rule_destroy(&target);
}
ofmonitor_destroy(m);
error = 0;
} else {
- error = OFPERR_NXBRC_FM_BAD_ID;
+ error = OFPERR_OFPMOFC_UNKNOWN_MONITOR;
}
ovs_mutex_unlock(&ofproto_mutex);
/* Meters implementation.
*
* Meter table entry, indexed by the OpenFlow meter_id.
- * These are always dynamically allocated to allocate enough space for
- * the bands.
* 'created' is used to compute the duration for meter stats.
* 'list rules' is needed so that we can delete the dependent rules when the
* meter table entry is deleted.
return UINT32_MAX;
}
+/* Finds the meter invoked by 'rule''s actions and adds 'rule' to the meter's
+ * list of rules.
+ *
+ * NOTE(review): assumes the rule's actions really do contain a meter action
+ * (the caller in add_flow() checks 'actions->has_meter' first); if they did
+ * not, ofpacts_get_meter() would presumably return 0 and the meters[] lookup
+ * below would be meaningless -- confirm against ofpacts_get_meter(). */
+static void
+meter_insert_rule(struct rule *rule)
+{
+    const struct rule_actions *a = rule_get_actions(rule);
+    uint32_t meter_id = ofpacts_get_meter(a->ofpacts, a->ofpacts_len);
+    struct meter *meter = rule->ofproto->meters[meter_id];
+
+    list_insert(&meter->rules, &rule->meter_list_node);
+}
+
static void
meter_update(struct meter *meter, const struct ofputil_meter_config *config)
{
}
static enum ofperr
-handle_delete_meter(struct ofconn *ofconn, const struct ofp_header *oh,
- struct ofputil_meter_mod *mm)
+handle_delete_meter(struct ofconn *ofconn, struct ofputil_meter_mod *mm)
OVS_EXCLUDED(ofproto_mutex)
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct rule *rule;
LIST_FOR_EACH (rule, meter_list_node, &meter->rules) {
- if (rule->pending) {
- error = OFPROTO_POSTPONE;
- goto exit;
- }
rule_collection_add(&rules, rule);
}
}
}
if (rules.n > 0) {
- delete_flows__(ofproto, ofconn, oh, &rules, OFPRR_METER_DELETE);
+ delete_flows__(&rules, OFPRR_METER_DELETE, NULL);
}
/* Delete the meters. */
meter_delete(ofproto, first, last);
-exit:
ovs_mutex_unlock(&ofproto_mutex);
rule_collection_destroy(&rules);
break;
case OFPMC13_DELETE:
- error = handle_delete_meter(ofconn, oh, &mm);
+ error = handle_delete_meter(ofconn, &mm);
break;
default:
return 0;
}
-bool
-ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
- struct ofgroup **group)
- OVS_TRY_RDLOCK(true, (*group)->rwlock)
+static bool
+ofproto_group_lookup__(const struct ofproto *ofproto, uint32_t group_id,
+ struct ofgroup **group)
+ OVS_REQ_RDLOCK(ofproto->groups_rwlock)
{
- ovs_rwlock_rdlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
hash_int(group_id, 0), &ofproto->groups) {
if ((*group)->group_id == group_id) {
- ovs_rwlock_rdlock(&(*group)->rwlock);
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
return true;
}
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
return false;
}
-void
-ofproto_group_release(struct ofgroup *group)
- OVS_RELEASES(group->rwlock)
+/* If the group exists, this function increments the groups's reference count.
+ *
+ * Make sure to call ofproto_group_unref() after no longer needing to maintain
+ * a reference to the group. */
+bool
+ofproto_group_lookup(const struct ofproto *ofproto, uint32_t group_id,
+ struct ofgroup **group)
{
- ovs_rwlock_unlock(&group->rwlock);
-}
+ bool found;
-static bool
-ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id,
- struct ofgroup **group)
- OVS_TRY_WRLOCK(true, ofproto->groups_rwlock)
- OVS_TRY_WRLOCK(true, (*group)->rwlock)
-{
- ovs_rwlock_wrlock(&ofproto->groups_rwlock);
- HMAP_FOR_EACH_IN_BUCKET (*group, hmap_node,
- hash_int(group_id, 0), &ofproto->groups) {
- if ((*group)->group_id == group_id) {
- ovs_rwlock_wrlock(&(*group)->rwlock);
- return true;
- }
+ ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+ found = ofproto_group_lookup__(ofproto, group_id, group);
+ if (found) {
+ ofproto_group_ref(*group);
}
ovs_rwlock_unlock(&ofproto->groups_rwlock);
- return false;
+ return found;
}
static bool
group_get_ref_count(struct ofgroup *group)
OVS_EXCLUDED(ofproto_mutex)
{
- struct ofproto *ofproto = group->ofproto;
+ struct ofproto *ofproto = CONST_CAST(struct ofproto *, group->ofproto);
struct rule_criteria criteria;
struct rule_collection rules;
struct match match;
static void
append_group_stats(struct ofgroup *group, struct list *replies)
- OVS_REQ_RDLOCK(group->rwlock)
{
struct ofputil_group_stats ogs;
- struct ofproto *ofproto = group->ofproto;
+ const struct ofproto *ofproto = group->ofproto;
long long int now = time_msec();
int error;
free(ogs.bucket_stats);
}
-static enum ofperr
-handle_group_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+static void
+handle_group_request(struct ofconn *ofconn,
+ const struct ofp_header *request, uint32_t group_id,
+ void (*cb)(struct ofgroup *, struct list *replies))
{
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- enum ofperr error;
struct ofgroup *group;
- uint32_t group_id;
-
- error = ofputil_decode_group_stats_request(request, &group_id);
- if (error) {
- return error;
- }
+ struct list replies;
ofpmp_init(&replies, request);
-
if (group_id == OFPG_ALL) {
ovs_rwlock_rdlock(&ofproto->groups_rwlock);
HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- ovs_rwlock_rdlock(&group->rwlock);
- append_group_stats(group, &replies);
- ovs_rwlock_unlock(&group->rwlock);
+ cb(group, &replies);
}
ovs_rwlock_unlock(&ofproto->groups_rwlock);
} else {
if (ofproto_group_lookup(ofproto, group_id, &group)) {
- append_group_stats(group, &replies);
- ofproto_group_release(group);
+ cb(group, &replies);
+ ofproto_group_unref(group);
}
}
-
ofconn_send_replies(ofconn, &replies);
-
- return 0;
}
static enum ofperr
-handle_group_desc_stats_request(struct ofconn *ofconn,
- const struct ofp_header *request)
+handle_group_stats_request(struct ofconn *ofconn,
+ const struct ofp_header *request)
{
- struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct list replies;
- struct ofputil_group_desc gds;
- struct ofgroup *group;
-
- ofpmp_init(&replies, request);
+ uint32_t group_id;
+ enum ofperr error;
- ovs_rwlock_rdlock(&ofproto->groups_rwlock);
- HMAP_FOR_EACH (group, hmap_node, &ofproto->groups) {
- gds.group_id = group->group_id;
- gds.type = group->type;
- ofputil_append_group_desc_reply(&gds, &group->buckets, &replies);
+ error = ofputil_decode_group_stats_request(request, &group_id);
+ if (error) {
+ return error;
}
- ovs_rwlock_unlock(&ofproto->groups_rwlock);
- ofconn_send_replies(ofconn, &replies);
+ handle_group_request(ofconn, request, group_id, append_group_stats);
+ return 0;
+}
+
+/* Appends a description of 'group' (its id, type, and buckets) to the
+ * multipart reply being accumulated in 'replies'.  Matches the callback
+ * signature taken by handle_group_request(), which invokes it per group. */
+static void
+append_group_desc(struct ofgroup *group, struct list *replies)
+{
+    struct ofputil_group_desc gds;
+
+    gds.group_id = group->group_id;
+    gds.type = group->type;
+    ofputil_append_group_desc_reply(&gds, &group->buckets, replies);
+}
+/* Handles an OpenFlow group-description stats request: sends back a
+ * description of every group when the request names OFPG_ALL, or of the
+ * single requested group otherwise, via handle_group_request().  Always
+ * succeeds (the decode helper yields a group id, not an error). */
+static enum ofperr
+handle_group_desc_stats_request(struct ofconn *ofconn,
+                                const struct ofp_header *request)
+{
+    handle_group_request(ofconn, request,
+                         ofputil_decode_group_desc_request(request),
+                         append_group_desc);
    return 0;
}
return 0;
}
-/* Implements OFPGC11_ADD
- * in which no matching flow already exists in the flow table.
- *
- * Adds the flow specified by 'ofm', which is followed by 'n_actions'
- * ofp_actions, to the ofproto's flow table. Returns 0 on success, an OpenFlow
- * error code on failure, or OFPROTO_POSTPONE if the operation cannot be
- * initiated now but may be retried later.
- *
- * Upon successful return, takes ownership of 'fm->ofpacts'. On failure,
- * ownership remains with the caller.
- *
- * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id,
- * if any. */
static enum ofperr
-add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
+init_group(struct ofproto *ofproto, struct ofputil_group_mod *gm,
+ struct ofgroup **ofgroup)
{
- struct ofgroup *ofgroup;
enum ofperr error;
+ const long long int now = time_msec();
if (gm->group_id > OFPG_MAX) {
return OFPERR_OFPGMFC_INVALID_GROUP;
return OFPERR_OFPGMFC_BAD_TYPE;
}
- /* Allocate new group and initialize it. */
- ofgroup = ofproto->ofproto_class->group_alloc();
- if (!ofgroup) {
- VLOG_WARN_RL(&rl, "%s: failed to create group", ofproto->name);
+ *ofgroup = ofproto->ofproto_class->group_alloc();
+ if (!*ofgroup) {
+ VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
return OFPERR_OFPGMFC_OUT_OF_GROUPS;
}
- ovs_rwlock_init(&ofgroup->rwlock);
- ofgroup->ofproto = ofproto;
- ofgroup->group_id = gm->group_id;
- ofgroup->type = gm->type;
- ofgroup->created = ofgroup->modified = time_msec();
+ (*ofgroup)->ofproto = ofproto;
+ *CONST_CAST(uint32_t *, &((*ofgroup)->group_id)) = gm->group_id;
+ *CONST_CAST(enum ofp11_group_type *, &(*ofgroup)->type) = gm->type;
+ *CONST_CAST(long long int *, &((*ofgroup)->created)) = now;
+ *CONST_CAST(long long int *, &((*ofgroup)->modified)) = now;
+ ovs_refcount_init(&(*ofgroup)->ref_count);
- list_move(&ofgroup->buckets, &gm->buckets);
- ofgroup->n_buckets = list_size(&ofgroup->buckets);
+ list_move(&(*ofgroup)->buckets, &gm->buckets);
+ *CONST_CAST(uint32_t *, &(*ofgroup)->n_buckets) =
+ list_size(&(*ofgroup)->buckets);
/* Construct called BEFORE any locks are held. */
- error = ofproto->ofproto_class->group_construct(ofgroup);
+ error = ofproto->ofproto_class->group_construct(*ofgroup);
+ if (error) {
+ ofputil_bucket_list_destroy(&(*ofgroup)->buckets);
+ ofproto->ofproto_class->group_dealloc(*ofgroup);
+ }
+ return error;
+}
+
+/* Implements the OFPGC11_ADD operation specified by 'gm', adding a group to
+ * 'ofproto''s group table. Returns 0 on success or an OpenFlow error code on
+ * failure. */
+static enum ofperr
+add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
+{
+ struct ofgroup *ofgroup;
+ enum ofperr error;
+
+ /* Allocate new group and initialize it. */
+ error = init_group(ofproto, gm, &ofgroup);
if (error) {
- goto free_out;
+ return error;
}
/* We wrlock as late as possible to minimize the time we jam any other
unlock_out:
ovs_rwlock_unlock(&ofproto->groups_rwlock);
ofproto->ofproto_class->group_destruct(ofgroup);
- free_out:
ofputil_bucket_list_destroy(&ofgroup->buckets);
ofproto->ofproto_class->group_dealloc(ofgroup);
return error;
}
-/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code on
- * failure.
+/* Implements OFPGC11_MODIFY. Returns 0 on success or an OpenFlow error code
+ * on failure.
*
- * 'ofconn' is used to retrieve the packet buffer specified in fm->buffer_id,
- * if any. */
+ * Note that the group is re-created and then replaces the old group in
+ * ofproto's ofgroup hash map. Thus, the group is never altered while users of
+ * the xlate module hold a pointer to the group. */
static enum ofperr
modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
{
- struct ofgroup *ofgroup;
- struct ofgroup *victim;
+ struct ofgroup *ofgroup, *new_ofgroup, *retiring;
enum ofperr error;
- if (gm->group_id > OFPG_MAX) {
- return OFPERR_OFPGMFC_INVALID_GROUP;
- }
-
- if (gm->type > OFPGT11_FF) {
- return OFPERR_OFPGMFC_BAD_TYPE;
+ error = init_group(ofproto, gm, &new_ofgroup);
+ if (error) {
+ return error;
}
- victim = ofproto->ofproto_class->group_alloc();
- if (!victim) {
- VLOG_WARN_RL(&rl, "%s: failed to allocate group", ofproto->name);
- return OFPERR_OFPGMFC_OUT_OF_GROUPS;
- }
+ retiring = new_ofgroup;
- if (!ofproto_group_write_lookup(ofproto, gm->group_id, &ofgroup)) {
+ ovs_rwlock_wrlock(&ofproto->groups_rwlock);
+ if (!ofproto_group_lookup__(ofproto, gm->group_id, &ofgroup)) {
error = OFPERR_OFPGMFC_UNKNOWN_GROUP;
- goto free_out;
+ goto out;
}
- /* Both group's and its container's write locks held now.
- * Also, n_groups[] is protected by ofproto->groups_rwlock. */
+
+ /* Ofproto's group write lock is held now. */
if (ofgroup->type != gm->type
&& ofproto->n_groups[gm->type] >= ofproto->ogf.max_groups[gm->type]) {
error = OFPERR_OFPGMFC_OUT_OF_GROUPS;
- goto unlock_out;
+ goto out;
}
- *victim = *ofgroup;
- list_move(&victim->buckets, &ofgroup->buckets);
+ /* The group creation time does not change during modification. */
+ *CONST_CAST(long long int *, &(new_ofgroup->created)) = ofgroup->created;
+ *CONST_CAST(long long int *, &(new_ofgroup->modified)) = time_msec();
- ofgroup->type = gm->type;
- list_move(&ofgroup->buckets, &gm->buckets);
- ofgroup->n_buckets = list_size(&ofgroup->buckets);
-
- error = ofproto->ofproto_class->group_modify(ofgroup, victim);
- if (!error) {
- ofputil_bucket_list_destroy(&victim->buckets);
- ofproto->n_groups[victim->type]--;
- ofproto->n_groups[ofgroup->type]++;
- ofgroup->modified = time_msec();
- } else {
- ofputil_bucket_list_destroy(&ofgroup->buckets);
+ error = ofproto->ofproto_class->group_modify(new_ofgroup);
+ if (error) {
+ goto out;
+ }
- *ofgroup = *victim;
- list_move(&ofgroup->buckets, &victim->buckets);
+ retiring = ofgroup;
+ /* Replace ofgroup in ofproto's groups hash map with new_ofgroup. */
+ hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
+ hmap_insert(&ofproto->groups, &new_ofgroup->hmap_node,
+ hash_int(new_ofgroup->group_id, 0));
+ if (ofgroup->type != new_ofgroup->type) {
+ ofproto->n_groups[ofgroup->type]--;
+ ofproto->n_groups[new_ofgroup->type]++;
}
- unlock_out:
- ovs_rwlock_unlock(&ofgroup->rwlock);
+out:
+ ofproto_group_unref(retiring);
ovs_rwlock_unlock(&ofproto->groups_rwlock);
- free_out:
- ofproto->ofproto_class->group_dealloc(victim);
return error;
}
/* Delete all flow entries containing this group in a group action */
match_init_catchall(&match);
flow_mod_init(&fm, &match, 0, NULL, 0, OFPFC_DELETE);
+ fm.delete_reason = OFPRR_GROUP_DELETE;
fm.out_group = ofgroup->group_id;
- handle_flow_mod__(ofproto, NULL, &fm, NULL);
+ handle_flow_mod__(ofproto, &fm, NULL);
- /* Must wait until existing readers are done,
- * while holding the container's write lock at the same time. */
- ovs_rwlock_wrlock(&ofgroup->rwlock);
hmap_remove(&ofproto->groups, &ofgroup->hmap_node);
/* No-one can find this group any more. */
ofproto->n_groups[ofgroup->type]--;
ovs_rwlock_unlock(&ofproto->groups_rwlock);
-
- ofproto->ofproto_class->group_destruct(ofgroup);
- ofputil_bucket_list_destroy(&ofgroup->buckets);
- ovs_rwlock_unlock(&ofgroup->rwlock);
- ovs_rwlock_destroy(&ofgroup->rwlock);
- ofproto->ofproto_class->group_dealloc(ofgroup);
+ ofproto_group_unref(ofgroup);
}
-/* Implements OFPGC_DELETE. */
+/* Implements OFPGC11_DELETE. */
static void
delete_group(struct ofproto *ofproto, uint32_t group_id)
{
}
}
+/* Returns the current configuration of table 'table_id' in 'ofproto'.  The
+ * config is kept in an atomic, so it may be read without holding any lock.
+ * Caller must ensure 'table_id' is less than ofproto->n_tables; no bounds
+ * check is performed here. */
+enum ofproto_table_config
+ofproto_table_get_config(const struct ofproto *ofproto, uint8_t table_id)
+{
+    unsigned int value;
+    atomic_read(&ofproto->tables[table_id].config, &value);
+    return (enum ofproto_table_config)value;
+}
+
+/* Implements OFPT_TABLE_MOD for 'ofproto': applies the table configuration
+ * in 'tm' to one table, or to every table if tm->table_id is OFPTT_ALL.
+ * Returns 0 on success, otherwise an OpenFlow error code. */
+static enum ofperr
+table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
+{
+    /* Only accept currently supported configurations (table-miss bits). */
+    if (tm->config & ~OFPTC11_TABLE_MISS_MASK) {
+        return OFPERR_OFPTMFC_BAD_CONFIG;
+    }
+
+    if (tm->table_id == OFPTT_ALL) {
+        int i;
+        for (i = 0; i < ofproto->n_tables; i++) {
+            /* atomic_store allows readers to see the new config without
+             * taking a lock. */
+            atomic_store(&ofproto->tables[i].config,
+                         (unsigned int)tm->config);
+        }
+    } else if (!check_table_id(ofproto, tm->table_id)) {
+        return OFPERR_OFPTMFC_BAD_TABLE;
+    } else {
+        atomic_store(&ofproto->tables[tm->table_id].config,
+                     (unsigned int)tm->config);
+    }
+
+    return 0;
+}
+
static enum ofperr
handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_table_mod tm;
enum ofperr error;
return error;
}
- /* XXX Actual table mod support is not implemented yet. */
- return 0;
+ return table_mod(ofproto, &tm);
+}
+
+/* Handles an OFPT_BUNDLE_CONTROL request on 'ofconn': dispatches the open,
+ * close, commit, or discard operation to the bundles module and, on success,
+ * sends the corresponding *_REPLY back to the controller.  Receiving a reply
+ * type from the controller is a protocol violation.  Returns 0 on success or
+ * an OpenFlow error code on failure. */
+static enum ofperr
+handle_bundle_control(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    enum ofperr error;
+    struct ofputil_bundle_ctrl_msg bctrl;
+    struct ofputil_bundle_ctrl_msg reply;
+
+    error = ofputil_decode_bundle_ctrl(oh, &bctrl);
+    if (error) {
+        return error;
+    }
+    reply.flags = 0;
+    reply.bundle_id = bctrl.bundle_id;
+
+    switch (bctrl.type) {
+    case OFPBCT_OPEN_REQUEST:
+        error = ofp_bundle_open(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_OPEN_REPLY;
+        break;
+    case OFPBCT_CLOSE_REQUEST:
+        error = ofp_bundle_close(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_CLOSE_REPLY;
+        break;
+    case OFPBCT_COMMIT_REQUEST:
+        error = ofp_bundle_commit(ofconn, bctrl.bundle_id, bctrl.flags);
+        reply.type = OFPBCT_COMMIT_REPLY;
+        break;
+    case OFPBCT_DISCARD_REQUEST:
+        error = ofp_bundle_discard(ofconn, bctrl.bundle_id);
+        reply.type = OFPBCT_DISCARD_REPLY;
+        break;
+
+    case OFPBCT_OPEN_REPLY:
+    case OFPBCT_CLOSE_REPLY:
+    case OFPBCT_COMMIT_REPLY:
+    case OFPBCT_DISCARD_REPLY:
+        /* Replies are only ever sent by the switch, never received. */
+        return OFPERR_OFPBFC_BAD_TYPE;
+    }
+
+    if (!error) {
+        struct ofpbuf *buf = ofputil_encode_bundle_ctrl_reply(oh, &reply);
+        ofconn_send_reply(ofconn, buf);
+    }
+    return error;
+}
+
+/* Handles an OFPT_BUNDLE_ADD_MESSAGE on 'ofconn': decodes the bundled
+ * message and appends it to the connection's open bundle.  Returns 0 on
+ * success or an OpenFlow error code on failure. */
+static enum ofperr
+handle_bundle_add(struct ofconn *ofconn, const struct ofp_header *oh)
+{
+    enum ofperr error;
+    struct ofputil_bundle_add_msg badd;
+
+    error = ofputil_decode_bundle_add(oh, &badd);
+    if (error) {
+        return error;
+    }
+
+    return ofp_bundle_add_message(ofconn, &badd);
}
static enum ofperr
handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg)
OVS_EXCLUDED(ofproto_mutex)
{
- const struct ofp_header *oh = msg->data;
+ const struct ofp_header *oh = ofpbuf_data(msg);
enum ofptype type;
enum ofperr error;
case OFPTYPE_QUEUE_GET_CONFIG_REQUEST:
return handle_queue_get_config_request(ofconn, oh);
+ case OFPTYPE_BUNDLE_CONTROL:
+ return handle_bundle_control(ofconn, oh);
+
+ case OFPTYPE_BUNDLE_ADD_MESSAGE:
+ return handle_bundle_add(ofconn, oh);
+
case OFPTYPE_HELLO:
case OFPTYPE_ERROR:
case OFPTYPE_FEATURES_REPLY:
}
}
-static bool
+static void
handle_openflow(struct ofconn *ofconn, const struct ofpbuf *ofp_msg)
OVS_EXCLUDED(ofproto_mutex)
{
int error = handle_openflow__(ofconn, ofp_msg);
- if (error && error != OFPROTO_POSTPONE) {
- ofconn_send_error(ofconn, ofp_msg->data, error);
+ if (error) {
+ ofconn_send_error(ofconn, ofpbuf_data(ofp_msg), error);
}
COVERAGE_INC(ofproto_recv_openflow);
- return error != OFPROTO_POSTPONE;
}
\f
/* Asynchronous operations. */
-/* Creates and returns a new ofopgroup that is not associated with any
- * OpenFlow connection.
- *
- * The caller should add operations to the returned group with
- * ofoperation_create() and then submit it with ofopgroup_submit(). */
-static struct ofopgroup *
-ofopgroup_create_unattached(struct ofproto *ofproto)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofopgroup *group = xzalloc(sizeof *group);
- group->ofproto = ofproto;
- list_init(&group->ofproto_node);
- list_init(&group->ops);
- list_init(&group->ofconn_node);
- return group;
-}
-
-/* Creates and returns a new ofopgroup for 'ofproto'.
- *
- * If 'ofconn' is NULL, the new ofopgroup is not associated with any OpenFlow
- * connection. The 'request' and 'buffer_id' arguments are ignored.
- *
- * If 'ofconn' is nonnull, then the new ofopgroup is associated with 'ofconn'.
- * If the ofopgroup eventually fails, then the error reply will include
- * 'request'. If the ofopgroup eventually succeeds, then the packet with
- * buffer id 'buffer_id' on 'ofconn' will be sent by 'ofconn''s ofproto.
- *
- * The caller should add operations to the returned group with
- * ofoperation_create() and then submit it with ofopgroup_submit(). */
-static struct ofopgroup *
-ofopgroup_create(struct ofproto *ofproto, struct ofconn *ofconn,
- const struct ofp_header *request, uint32_t buffer_id)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofopgroup *group = ofopgroup_create_unattached(ofproto);
- if (ofconn) {
- size_t request_len = ntohs(request->length);
-
- ovs_assert(ofconn_get_ofproto(ofconn) == ofproto);
-
- ofconn_add_opgroup(ofconn, &group->ofconn_node);
- group->ofconn = ofconn;
- group->request = xmemdup(request, MIN(request_len, 64));
- group->buffer_id = buffer_id;
- }
- return group;
-}
-
-/* Submits 'group' for processing.
- *
- * If 'group' contains no operations (e.g. none were ever added, or all of the
- * ones that were added completed synchronously), then it is destroyed
- * immediately. Otherwise it is added to the ofproto's list of pending
- * groups. */
-static void
-ofopgroup_submit(struct ofopgroup *group)
- OVS_REQUIRES(ofproto_mutex)
-{
- if (!group->n_running) {
- ofopgroup_complete(group);
- } else {
- list_push_back(&group->ofproto->pending, &group->ofproto_node);
- group->ofproto->n_pending++;
- }
-}
-
-static void
-ofopgroup_complete(struct ofopgroup *group)
+static enum ofperr
+send_buffered_packet(struct ofconn *ofconn, uint32_t buffer_id,
+ struct rule *rule)
OVS_REQUIRES(ofproto_mutex)
{
- struct ofproto *ofproto = group->ofproto;
-
- struct ofconn *abbrev_ofconn;
- ovs_be32 abbrev_xid;
-
- struct ofoperation *op, *next_op;
- int error;
-
- ovs_assert(!group->n_running);
-
- error = 0;
- LIST_FOR_EACH (op, group_node, &group->ops) {
- if (op->error) {
- error = op->error;
- break;
- }
- }
-
- if (!error && group->ofconn && group->buffer_id != UINT32_MAX) {
- LIST_FOR_EACH (op, group_node, &group->ops) {
- if (op->type != OFOPERATION_DELETE) {
- struct ofpbuf *packet;
- ofp_port_t in_port;
-
- error = ofconn_pktbuf_retrieve(group->ofconn, group->buffer_id,
- &packet, &in_port);
- if (packet) {
- struct rule_execute *re;
-
- ovs_assert(!error);
-
- ofproto_rule_ref(op->rule);
-
- re = xmalloc(sizeof *re);
- re->rule = op->rule;
- re->in_port = in_port;
- re->packet = packet;
-
- if (!guarded_list_push_back(&ofproto->rule_executes,
- &re->list_node, 1024)) {
- ofproto_rule_unref(op->rule);
- ofpbuf_delete(re->packet);
- free(re);
- }
- }
- break;
- }
- }
- }
-
- if (!error && !list_is_empty(&group->ofconn_node)) {
- abbrev_ofconn = group->ofconn;
- abbrev_xid = group->request->xid;
- } else {
- abbrev_ofconn = NULL;
- abbrev_xid = htonl(0);
- }
- LIST_FOR_EACH_SAFE (op, next_op, group_node, &group->ops) {
- struct rule *rule = op->rule;
-
- /* We generally want to report the change to active OpenFlow flow
- monitors (e.g. NXST_FLOW_MONITOR). There are three exceptions:
-
- - The operation failed.
-
- - The affected rule is not visible to controllers.
-
- - The operation's only effect was to update rule->modified. */
- if (!(op->error
- || ofproto_rule_is_hidden(rule)
- || (op->type == OFOPERATION_MODIFY
- && op->actions
- && rule->flow_cookie == op->flow_cookie))) {
- /* Check that we can just cast from ofoperation_type to
- * nx_flow_update_event. */
- enum nx_flow_update_event event_type;
-
- switch (op->type) {
- case OFOPERATION_ADD:
- case OFOPERATION_REPLACE:
- event_type = NXFME_ADDED;
- break;
-
- case OFOPERATION_DELETE:
- event_type = NXFME_DELETED;
- break;
+ enum ofperr error = 0;
+ if (ofconn && buffer_id != UINT32_MAX) {
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ struct ofpbuf *packet;
+ ofp_port_t in_port;
- case OFOPERATION_MODIFY:
- event_type = NXFME_MODIFIED;
- break;
+ error = ofconn_pktbuf_retrieve(ofconn, buffer_id, &packet, &in_port);
+ if (packet) {
+ struct rule_execute *re;
- default:
- NOT_REACHED();
- }
+ ofproto_rule_ref(rule);
- ofmonitor_report(ofproto->connmgr, rule, event_type,
- op->reason, abbrev_ofconn, abbrev_xid);
- }
+ re = xmalloc(sizeof *re);
+ re->rule = rule;
+ re->in_port = in_port;
+ re->packet = packet;
- rule->pending = NULL;
-
- switch (op->type) {
- case OFOPERATION_ADD:
- if (!op->error) {
- uint16_t vid_mask;
-
- vid_mask = minimask_get_vid_mask(&rule->cr.match.mask);
- if (vid_mask == VLAN_VID_MASK) {
- if (ofproto->vlan_bitmap) {
- uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
- if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
- bitmap_set1(ofproto->vlan_bitmap, vid);
- ofproto->vlans_changed = true;
- }
- } else {
- ofproto->vlans_changed = true;
- }
- }
- } else {
- oftable_remove_rule(rule);
+ if (!guarded_list_push_back(&ofproto->rule_executes,
+ &re->list_node, 1024)) {
ofproto_rule_unref(rule);
+ ofpbuf_delete(re->packet);
+ free(re);
}
- break;
-
- case OFOPERATION_DELETE:
- ovs_assert(!op->error);
- ofproto_rule_unref(rule);
- op->rule = NULL;
- break;
-
- case OFOPERATION_MODIFY:
- case OFOPERATION_REPLACE:
- if (!op->error) {
- long long int now = time_msec();
-
- rule->modified = now;
- if (op->type == OFOPERATION_REPLACE) {
- rule->created = rule->used = now;
- }
- } else {
- ofproto_rule_change_cookie(ofproto, rule, op->flow_cookie);
- ovs_mutex_lock(&rule->mutex);
- rule->idle_timeout = op->idle_timeout;
- rule->hard_timeout = op->hard_timeout;
- ovs_mutex_unlock(&rule->mutex);
- if (op->actions) {
- struct rule_actions *old_actions;
-
- ovs_mutex_lock(&rule->mutex);
- old_actions = rule->actions;
- rule->actions = op->actions;
- ovs_mutex_unlock(&rule->mutex);
-
- op->actions = NULL;
- rule_actions_unref(old_actions);
- }
- rule->flags = op->flags;
- }
- break;
-
- default:
- NOT_REACHED();
- }
-
- ofoperation_destroy(op);
- }
-
- ofmonitor_flush(ofproto->connmgr);
-
- if (!list_is_empty(&group->ofproto_node)) {
- ovs_assert(ofproto->n_pending > 0);
- ofproto->n_pending--;
- list_remove(&group->ofproto_node);
- }
- if (!list_is_empty(&group->ofconn_node)) {
- list_remove(&group->ofconn_node);
- if (error) {
- ofconn_send_error(group->ofconn, group->request, error);
}
- connmgr_retry(ofproto->connmgr);
- }
- free(group->request);
- free(group);
-}
-
-/* Initiates a new operation on 'rule', of the specified 'type', within
- * 'group'. Prior to calling, 'rule' must not have any pending operation.
- *
- * For a 'type' of OFOPERATION_DELETE, 'reason' should specify the reason that
- * the flow is being deleted. For other 'type's, 'reason' is ignored (use 0).
- *
- * Returns the newly created ofoperation (which is also available as
- * rule->pending). */
-static struct ofoperation *
-ofoperation_create(struct ofopgroup *group, struct rule *rule,
- enum ofoperation_type type,
- enum ofp_flow_removed_reason reason)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofproto *ofproto = group->ofproto;
- struct ofoperation *op;
-
- ovs_assert(!rule->pending);
-
- op = rule->pending = xzalloc(sizeof *op);
- op->group = group;
- list_push_back(&group->ops, &op->group_node);
- op->rule = rule;
- op->type = type;
- op->reason = reason;
- op->flow_cookie = rule->flow_cookie;
- ovs_mutex_lock(&rule->mutex);
- op->idle_timeout = rule->idle_timeout;
- op->hard_timeout = rule->hard_timeout;
- ovs_mutex_unlock(&rule->mutex);
- op->flags = rule->flags;
-
- group->n_running++;
-
- if (type == OFOPERATION_DELETE) {
- hmap_insert(&ofproto->deletions, &op->hmap_node,
- cls_rule_hash(&rule->cr, rule->table_id));
- }
-
- return op;
-}
-
-static void
-ofoperation_destroy(struct ofoperation *op)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofopgroup *group = op->group;
-
- if (op->rule) {
- op->rule->pending = NULL;
- }
- if (op->type == OFOPERATION_DELETE) {
- hmap_remove(&group->ofproto->deletions, &op->hmap_node);
- }
- list_remove(&op->group_node);
- rule_actions_unref(op->actions);
- free(op);
-}
-
-/* Indicates that 'op' completed with status 'error', which is either 0 to
- * indicate success or an OpenFlow error code on failure.
- *
- * If 'error' is 0, indicating success, the operation will be committed
- * permanently to the flow table.
- *
- * If 'error' is nonzero, then generally the operation will be rolled back:
- *
- * - If 'op' is an "add flow" operation, ofproto removes the new rule or
- * restores the original rule. The caller must have uninitialized any
- * derived state in the new rule, as in step 5 of in the "Life Cycle" in
- * ofproto/ofproto-provider.h. ofoperation_complete() performs steps 6 and
- * and 7 for the new rule, calling its ->rule_dealloc() function.
- *
- * - If 'op' is a "modify flow" operation, ofproto restores the original
- * actions.
- *
- * - 'op' must not be a "delete flow" operation. Removing a rule is not
- * allowed to fail. It must always succeed.
- *
- * Please see the large comment in ofproto/ofproto-provider.h titled
- * "Asynchronous Operation Support" for more information. */
-void
-ofoperation_complete(struct ofoperation *op, enum ofperr error)
-{
- struct ofopgroup *group = op->group;
-
- ovs_assert(group->n_running > 0);
- ovs_assert(!error || op->type != OFOPERATION_DELETE);
-
- op->error = error;
- if (!--group->n_running && !list_is_empty(&group->ofproto_node)) {
- /* This function can be called from ->rule_construct(), in which case
- * ofproto_mutex is held, or it can be called from ->run(), in which
- * case ofproto_mutex is not held. But only in the latter case can we
- * arrive here, so we can safely take ofproto_mutex now. */
- ovs_mutex_lock(&ofproto_mutex);
- ovs_assert(op->rule->pending == op);
- ofopgroup_complete(group);
- ovs_mutex_unlock(&ofproto_mutex);
}
+ return error;
}
\f
static uint64_t
return false;
}
-
-/* Searches 'ofproto' for tables that have more flows than their configured
- * maximum and that have flow eviction enabled, and evicts as many flows as
- * necessary and currently feasible from them.
- *
- * This triggers only when an OpenFlow table has N flows in it and then the
- * client configures a maximum number of flows less than N. */
-static void
-ofproto_evict(struct ofproto *ofproto)
-{
- struct oftable *table;
-
- ovs_mutex_lock(&ofproto_mutex);
- OFPROTO_FOR_EACH_TABLE (table, ofproto) {
- evict_rules_from_table(ofproto, table, 0);
- }
- ovs_mutex_unlock(&ofproto_mutex);
-}
\f
/* Eviction groups. */
/* Returns an eviction priority for 'rule'. The return value should be
* interpreted so that higher priorities make a rule more attractive candidates
- * for eviction. */
+ * for eviction.
+ * Called only if have a timeout. */
static uint32_t
-rule_eviction_priority(struct rule *rule)
+rule_eviction_priority(struct ofproto *ofproto, struct rule *rule)
OVS_REQUIRES(ofproto_mutex)
{
- long long int hard_expiration;
- long long int idle_expiration;
- long long int expiration;
+ long long int expiration = LLONG_MAX;
+ long long int modified;
uint32_t expiration_offset;
- /* Calculate time of expiration. */
+ /* 'modified' needs protection even when we hold 'ofproto_mutex'. */
ovs_mutex_lock(&rule->mutex);
- hard_expiration = (rule->hard_timeout
- ? rule->modified + rule->hard_timeout * 1000
- : LLONG_MAX);
- idle_expiration = (rule->idle_timeout
- ? rule->used + rule->idle_timeout * 1000
- : LLONG_MAX);
- expiration = MIN(hard_expiration, idle_expiration);
+ modified = rule->modified;
ovs_mutex_unlock(&rule->mutex);
+
+ if (rule->hard_timeout) {
+ expiration = modified + rule->hard_timeout * 1000;
+ }
+ if (rule->idle_timeout) {
+ uint64_t packets, bytes;
+ long long int used;
+ long long int idle_expiration;
+
+ ofproto->ofproto_class->rule_get_stats(rule, &packets, &bytes, &used);
+ idle_expiration = used + rule->idle_timeout * 1000;
+ expiration = MIN(expiration, idle_expiration);
+ }
+
if (expiration == LLONG_MAX) {
return 0;
}
struct oftable *table = &ofproto->tables[rule->table_id];
bool has_timeout;
- ovs_mutex_lock(&rule->mutex);
+ /* Timeouts may be modified only when holding 'ofproto_mutex'. We have it
+ * so no additional protection is needed. */
has_timeout = rule->hard_timeout || rule->idle_timeout;
- ovs_mutex_unlock(&rule->mutex);
if (table->eviction_fields && has_timeout) {
struct eviction_group *evg;
rule->eviction_group = evg;
heap_insert(&evg->rules, &rule->evg_node,
- rule_eviction_priority(rule));
+ rule_eviction_priority(ofproto, rule));
eviction_group_resized(table, evg);
}
}
memset(table, 0, sizeof *table);
classifier_init(&table->cls, flow_segment_u32s);
table->max_flows = UINT_MAX;
+ atomic_init(&table->config, (unsigned int)OFPROTO_TABLE_MISS_DEFAULT);
+
+ fat_rwlock_wrlock(&table->cls.rwlock);
+ classifier_set_prefix_fields(&table->cls, default_prefix_fields,
+ ARRAY_SIZE(default_prefix_fields));
+ fat_rwlock_unlock(&table->cls.rwlock);
+
+ atomic_init(&table->n_matched, 0);
+ atomic_init(&table->n_missed, 0);
}
/* Destroys 'table', including its classifier and eviction groups.
static void
oftable_destroy(struct oftable *table)
{
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
ovs_assert(classifier_is_empty(&table->cls));
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
oftable_disable_eviction(table);
classifier_destroy(&table->cls);
free(table->name);
hmap_init(&table->eviction_groups_by_id);
heap_init(&table->eviction_groups_by_size);
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ fat_rwlock_rdlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
eviction_group_add_rule(rule);
}
- ovs_rwlock_unlock(&table->cls.rwlock);
+ fat_rwlock_unlock(&table->cls.rwlock);
}
/* Removes 'rule' from the oftable that contains it. */
{
struct classifier *cls = &ofproto->tables[rule->table_id].cls;
- ovs_rwlock_wrlock(&cls->rwlock);
+ fat_rwlock_wrlock(&cls->rwlock);
classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
- ovs_rwlock_unlock(&cls->rwlock);
+ fat_rwlock_unlock(&cls->rwlock);
cookies_remove(ofproto, rule);
{
oftable_remove_rule__(rule->ofproto, rule);
}
-
-/* Inserts 'rule' into its oftable, which must not already contain any rule for
- * the same cls_rule. */
-static void
-oftable_insert_rule(struct rule *rule)
- OVS_REQUIRES(ofproto_mutex)
-{
- struct ofproto *ofproto = rule->ofproto;
- struct oftable *table = &ofproto->tables[rule->table_id];
- bool may_expire;
-
- ovs_mutex_lock(&rule->mutex);
- may_expire = rule->hard_timeout || rule->idle_timeout;
- ovs_mutex_unlock(&rule->mutex);
-
- if (may_expire) {
- list_insert(&ofproto->expirable, &rule->expirable);
- }
-
- cookies_insert(ofproto, rule);
-
- if (rule->actions->provider_meter_id != UINT32_MAX) {
- uint32_t meter_id = ofpacts_get_meter(rule->actions->ofpacts,
- rule->actions->ofpacts_len);
- struct meter *meter = ofproto->meters[meter_id];
- list_insert(&meter->rules, &rule->meter_list_node);
- }
- ovs_rwlock_wrlock(&table->cls.rwlock);
- classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
- ovs_rwlock_unlock(&table->cls.rwlock);
- eviction_group_add_rule(rule);
-}
\f
/* unixctl commands. */
void
ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
{
+ struct match match;
+ struct cls_rule target;
const struct oftable *oftable;
+ match_init_catchall(&match);
+ match_set_vlan_vid_masked(&match, htons(VLAN_CFI), htons(VLAN_CFI));
+ cls_rule_init(&target, &match, 0);
+
free(ofproto->vlan_bitmap);
ofproto->vlan_bitmap = bitmap_allocate(4096);
ofproto->vlans_changed = false;
OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
- const struct cls_subtable *table;
+ struct cls_cursor cursor;
+ struct rule *rule;
- ovs_rwlock_rdlock(&oftable->cls.rwlock);
- HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
- if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
- const struct cls_rule *rule;
+ fat_rwlock_rdlock(&oftable->cls.rwlock);
+ cls_cursor_init(&cursor, &oftable->cls, &target);
+ CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
+ if (minimask_get_vid_mask(&rule->cr.match.mask) == VLAN_VID_MASK) {
+ uint16_t vid = miniflow_get_vid(&rule->cr.match.flow);
- HMAP_FOR_EACH (rule, hmap_node, &table->rules) {
- uint16_t vid = miniflow_get_vid(&rule->match.flow);
- bitmap_set1(vlan_bitmap, vid);
- bitmap_set1(ofproto->vlan_bitmap, vid);
- }
+ bitmap_set1(vlan_bitmap, vid);
+ bitmap_set1(ofproto->vlan_bitmap, vid);
}
}
- ovs_rwlock_unlock(&oftable->cls.rwlock);
+ fat_rwlock_unlock(&oftable->cls.rwlock);
}
+
+ cls_rule_destroy(&target);
}
/* Returns true if new VLANs have come into use by the flow table since the