/*
- * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
struct ofproto up;
struct dpif_backer *backer;
+ /* Unique identifier for this instantiation of this bridge in this running
+ * process. */
+ struct uuid uuid;
+
ATOMIC(cls_version_t) tables_version; /* For classifier lookups. */
uint64_t dump_seq; /* Last read of udpif_dump_seq(). */
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
- struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
+ struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
/* Bridging. */
struct netflow *netflow;
uint64_t change_seq; /* Connectivity status changes. */
/* Work queues. */
- struct guarded_list pins; /* Contains "struct ofputil_packet_in"s. */
- struct seq *pins_seq; /* For notifying 'pins' reception. */
- uint64_t pins_seqno;
+ struct guarded_list ams; /* Contains "struct ofproto_async_msgs"s. */
+ struct seq *ams_seq; /* For notifying 'ams' reception. */
+ uint64_t ams_seqno;
};
/* All existing ofproto_dpif instances, indexed by ->up.name. */
ofproto_flow_mod(&ofproto->up, &ofm);
}
-/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
- * Takes ownership of 'pin' and pin->packet. */
+/* Appends 'am' to the queue of asynchronous messages to be sent to the
+ * controller.  Takes ownership of 'am' and any data it points to.
+ *
+ * If the queue is full (1024 messages already queued), 'am' is freed and
+ * dropped rather than queued; the wakeup below still fires either way. */
 void
-ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
-                            struct ofproto_packet_in *pin)
+ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
+                            struct ofproto_async_msg *am)
 {
-    if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
+    if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
         COVERAGE_INC(packet_in_overflow);
-        free(CONST_CAST(void *, pin->up.packet));
-        free(pin);
+        ofproto_async_msg_free(am);
     }
     /* Wakes up main thread for packet-in I/O. */
-    seq_change(ofproto->pins_seq);
+    seq_change(ofproto->ams_seq);
 }
/* The default "table-miss" behaviour for OpenFlow1.3+ is to drop the
udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
}
- dpif_poll_threads_set(backer->dpif, n_dpdk_rxqs, pmd_cpu_mask);
+ dpif_poll_threads_set(backer->dpif, pmd_cpu_mask);
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
return error;
}
+ uuid_generate(&ofproto->uuid);
atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
- guarded_list_init(&ofproto->pins);
+ guarded_list_init(&ofproto->ams);
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
sset_init(&ofproto->port_poll_set);
ofproto->port_poll_errno = 0;
ofproto->change_seq = 0;
- ofproto->pins_seq = seq_create();
- ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+ ofproto->ams_seq = seq_create();
+ ofproto->ams_seqno = seq_read(ofproto->ams_seq);
SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
controller = ofpact_put_CONTROLLER(&ofpacts);
controller->max_len = UINT16_MAX;
controller->controller_id = 0;
- controller->reason = OFPR_NO_MATCH;
- ofpact_pad(&ofpacts);
+ controller->reason = OFPR_IMPLICIT_MISS;
+ ofpact_finish(&ofpacts, &controller->ofpact);
error = add_internal_miss_flow(ofproto, id++, &ofpacts,
&ofproto->miss_rule);
destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofproto_packet_in *pin;
+ struct ofproto_async_msg *am;
struct rule_dpif *rule;
struct oftable *table;
- struct ovs_list pins;
+ struct ovs_list ams;
ofproto->backer->need_revalidate = REV_RECONFIGURE;
xlate_txn_start();
}
ofproto_group_delete_all(&ofproto->up);
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_POP (pin, list_node, &pins) {
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
+ guarded_list_pop_all(&ofproto->ams, &ams);
+ LIST_FOR_EACH_POP (am, list_node, &ams) {
+ ofproto_async_msg_free(am);
}
- guarded_list_destroy(&ofproto->pins);
+ guarded_list_destroy(&ofproto->ams);
recirc_free_ofproto(ofproto, ofproto->up.name);
ovs_mutex_destroy(&ofproto->stats_mutex);
ovs_mutex_destroy(&ofproto->vsp_mutex);
- seq_destroy(ofproto->pins_seq);
+ seq_destroy(ofproto->ams_seq);
close_dpif_backer(ofproto->backer);
}
mcast_snooping_mdb_flush(ofproto->ms);
}
- /* Always updates the ofproto->pins_seqno to avoid frequent wakeup during
+ /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
* flow restore. Even though nothing is processed during flow restore,
- * all queued 'pins' will be handled immediately when flow restore
+ * all queued 'ams' will be handled immediately when flow restore
* completes. */
- ofproto->pins_seqno = seq_read(ofproto->pins_seq);
+ ofproto->ams_seqno = seq_read(ofproto->ams_seq);
/* Do not perform any periodic activity required by 'ofproto' while
* waiting for flow restore to complete. */
if (!ofproto_get_flow_restore_wait()) {
- struct ofproto_packet_in *pin;
- struct ovs_list pins;
+ struct ofproto_async_msg *am;
+ struct ovs_list ams;
- guarded_list_pop_all(&ofproto->pins, &pins);
- LIST_FOR_EACH_POP (pin, list_node, &pins) {
- connmgr_send_packet_in(ofproto->up.connmgr, pin);
- free(CONST_CAST(void *, pin->up.packet));
- free(pin);
+ guarded_list_pop_all(&ofproto->ams, &ams);
+ LIST_FOR_EACH_POP (am, list_node, &ams) {
+ connmgr_send_async_msg(ofproto->up.connmgr, am);
+ ofproto_async_msg_free(am);
}
}
}
seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
- seq_wait(ofproto->pins_seq, ofproto->pins_seqno);
+ seq_wait(ofproto->ams_seq, ofproto->ams_seqno);
}
static void
}
static void
-port_destruct(struct ofport *port_)
+port_destruct(struct ofport *port_, bool del)
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
sizeof namebuf);
- if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ if (del && dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
/* The underlying device is still there, so delete it. This
* happens when the ofproto is being destroyed, since the caller
* assumes that removal of attached ports will happen as part of
/* We always unwildcard nw_frag (for IP), so they
* need not be unwildcarded here. */
if (flow->nw_frag & FLOW_NW_FRAG_ANY
- && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
- if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ && ofproto->up.frag_handling != OFPUTIL_FRAG_NX_MATCH) {
+ if (ofproto->up.frag_handling == OFPUTIL_FRAG_NORMAL) {
/* We must pretend that transport ports are unavailable. */
flow->tp_src = htons(0);
flow->tp_dst = htons(0);
} else {
- /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+ /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
* Use the drop_frags_rule (which cannot disappear). */
rule = ofproto->drop_frags_rule;
if (stats) {
static bool
set_frag_handling(struct ofproto *ofproto_,
- enum ofp_config_flags frag_handling)
+ enum ofputil_frag_handling frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- if (frag_handling != OFPC_FRAG_REASM) {
+ if (frag_handling != OFPUTIL_FRAG_REASM) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
ofpacts_len, packet);
return 0;
}
+
+/* Handler for NXT_RESUME: resumes processing of a packet-in "continuation"
+ * by re-translating 'pin' into datapath actions and re-injecting the packet
+ * into the datapath.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code from
+ * translation. */
+static enum ofperr
+nxt_resume(struct ofproto *ofproto_,
+           const struct ofputil_packet_in_private *pin)
+{
+    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+
+    /* Translate pin into datapath actions. */
+    uint64_t odp_actions_stub[1024 / 8];
+    struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
+    enum slow_path_reason slow;
+    enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);
+
+    /* Copy 'pin->packet' into a local dp_packet.  (Despite the wording of
+     * the original changelog, dp_packet_put() appends a *copy* of the data;
+     * 'pin' itself is not modified here.) */
+    struct dp_packet packet;
+    dp_packet_init(&packet, pin->public.packet_len);
+    dp_packet_put(&packet, pin->public.packet, pin->public.packet_len);
+
+    /* Execute the datapath actions on the packet.
+     *
+     * NOTE(review): the packet is executed even when xlate_resume() reported
+     * an error -- presumably translation then yields safe (e.g. empty)
+     * actions; confirm.  dpif_execute()'s own return value is not
+     * propagated; only the translation error reaches the caller. */
+    struct dpif_execute execute = {
+        .actions = odp_actions.data,
+        .actions_len = odp_actions.size,
+        .needs_help = (slow & SLOW_ACTION) != 0,
+        .packet = &packet,
+    };
+    dpif_execute(ofproto->backer->dpif, &execute);
+
+    /* Clean up. */
+    ofpbuf_uninit(&odp_actions);
+    dp_packet_uninit(&packet);
+
+    return error;
+}
\f
/* NetFlow. */
struct rule_dpif *rule;
int error;
- ofm.fm.match = *match;
- ofm.fm.priority = priority;
- ofm.fm.new_cookie = htonll(0);
- ofm.fm.cookie = htonll(0);
- ofm.fm.cookie_mask = htonll(0);
- ofm.fm.modify_cookie = false;
- ofm.fm.table_id = TBL_INTERNAL;
- ofm.fm.command = OFPFC_ADD;
- ofm.fm.idle_timeout = idle_timeout;
- ofm.fm.hard_timeout = 0;
- ofm.fm.importance = 0;
- ofm.fm.buffer_id = 0;
- ofm.fm.out_port = 0;
- ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
- ofm.fm.ofpacts = ofpacts->data;
- ofm.fm.ofpacts_len = ofpacts->size;
- ofm.fm.delete_reason = OVS_OFPRR_NONE;
+ ofm.fm = (struct ofputil_flow_mod) {
+ .match = *match,
+ .priority = priority,
+ .table_id = TBL_INTERNAL,
+ .command = OFPFC_ADD,
+ .idle_timeout = idle_timeout,
+ .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+ .ofpacts = ofpacts->data,
+ .ofpacts_len = ofpacts->size,
+ .delete_reason = OVS_OFPRR_NONE,
+ };
error = ofproto_flow_mod(&ofproto->up, &ofm);
if (error) {
struct ofproto_flow_mod ofm;
int error;
- ofm.fm.match = *match;
- ofm.fm.priority = priority;
- ofm.fm.new_cookie = htonll(0);
- ofm.fm.cookie = htonll(0);
- ofm.fm.cookie_mask = htonll(0);
- ofm.fm.modify_cookie = false;
- ofm.fm.table_id = TBL_INTERNAL;
- ofm.fm.flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY;
- ofm.fm.command = OFPFC_DELETE_STRICT;
+ ofm.fm = (struct ofputil_flow_mod) {
+ .match = *match,
+ .priority = priority,
+ .table_id = TBL_INTERNAL,
+ .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
+ .command = OFPFC_DELETE_STRICT,
+ };
error = ofproto_flow_mod(&ofproto->up, &ofm);
if (error) {
return 0;
}
+/* Returns the UUID that uniquely identifies this instantiation of 'ofproto'
+ * (this bridge) within this running process.  The UUID is generated with
+ * uuid_generate() when the bridge is constructed. */
+const struct uuid *
+ofproto_dpif_get_uuid(const struct ofproto_dpif *ofproto)
+{
+    return &ofproto->uuid;
+}
+
const struct ofproto_class ofproto_dpif_class = {
init,
enumerate_types,
rule_execute,
set_frag_handling,
packet_out,
+ nxt_resume,
set_netflow,
get_netflow_ids,
set_sflow,