/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "byte-order.h"
#include "connectivity.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "mac-learning.h"
#include "mcast-snooping.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "odp-execute.h"
#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-mirror.h"
#include "ofproto-dpif-monitor.h"
#include "ofproto-dpif-rid.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "openvswitch/ofp-actions.h"
#include "openvswitch/dynamic-string.h"
#include "openvswitch/meta-flow.h"
#include "openvswitch/ofp-parse.h"
#include "openvswitch/ofp-print.h"
#include "openvswitch/ofp-util.h"
#include "openvswitch/ofpbuf.h"
#include "openvswitch/vlog.h"
#include "ovs-router.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(packet_in_overflow);

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from datapath flows which have not
     *     recently been processed by a revalidator. */
    struct ovs_mutex stats_mutex;
    struct dpif_flow_stats stats OVS_GUARDED;

    /* If non-NULL, will point to a new rule (for which a reference is held) to
     * which all the stats updates should be forwarded.  This exists only
     * transitionally when flows are replaced.
     *
     * Protected by stats_mutex.  If both 'rule->stats_mutex' and
     * 'rule->new_rule->stats_mutex' must be held together, acquire them in
     * that order. */
    struct rule_dpif *new_rule OVS_GUARDED;

    /* If non-zero, the recirculation id that has been allocated for use with
     * this rule.  The recirculation id and associated internal flow should be
     * freed when the rule is freed. */
    uint32_t recirc_id;
};
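
/* A minimal illustrative sketch (not part of the original file) of the lock
 * ordering documented for 'new_rule' above: when stats are folded from a rule
 * into its replacement, 'rule->stats_mutex' is acquired before
 * 'rule->new_rule->stats_mutex'.  The helper name and the exact fields summed
 * are assumptions for illustration only. */
static inline void
rule_fold_stats_into_new_rule(struct rule_dpif *rule)
{
    ovs_mutex_lock(&rule->stats_mutex);
    if (rule->new_rule) {
        /* Acquire in the documented order: 'rule' first, then 'new_rule'. */
        ovs_mutex_lock(&rule->new_rule->stats_mutex);
        rule->new_rule->stats.n_packets += rule->stats.n_packets;
        rule->new_rule->stats.n_bytes += rule->stats.n_bytes;
        ovs_mutex_unlock(&rule->new_rule->stats_mutex);
    }
    ovs_mutex_unlock(&rule->stats_mutex);
}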

/* RULE_CAST() depends on this. */
BUILD_ASSERT_DECL(offsetof(struct rule_dpif, up) == 0);
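
/* The assertion above allows a plain pointer cast from "struct rule *" to
 * "struct rule_dpif *" (which is presumably what RULE_CAST() does) to be
 * valid without going through CONTAINER_OF(): with 'up' as the first member,
 * both structures start at the same address. */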

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
                           long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *);

    /* These statistics:
     *
     *   - Do include packets and bytes from datapath flows which have not
     *     recently been processed by a revalidator. */
    struct ovs_mutex stats_mutex;
    uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
    uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct ovs_list ports;      /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;          /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
};

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static void bundle_flush_macs(struct ofbundle *, bool);
static void bundle_move(struct ofbundle *, struct ofbundle *);

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static void rstp_run(struct ofproto_dpif *ofproto);
static void set_rstp_port(struct ofport *,
                          const struct ofproto_port_rstp_settings *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;
    odp_port_t odp_port;

    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct ovs_list bundle_node; /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    struct lldp *lldp;          /* LLDP, if any. */
    bool may_enable;            /* May be enabled in bonds. */
    bool is_tunnel;             /* This port is a tunnel. */
    bool is_layer3;             /* This is a layer 3 port. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct ofport_dpif *peer;   /* Peer if patch port. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    /* Rapid Spanning Tree. */
    struct rstp_port *rstp_port; /* Rapid Spanning Tree Protocol, if any. */
    enum rstp_state rstp_state; /* Always RSTP_DISABLED if RSTP not in use. */

    /* Queue to DSCP mapping. */
    struct ofproto_port_queue *qdscp;
    size_t n_qdscp;
};

static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                       ofp_port_t);

static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                       odp_port_t);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static int set_lldp(struct ofport *ofport_, const struct smap *cfg);
static void ofport_update_peer(struct ofport_dpif *);

/* Reasons that we might need to revalidate every datapath flow, and
 * corresponding coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_RSTP,                  /* RSTP port status change. */
    REV_BOND,                  /* Bonding changed. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_MAC_LEARNING,          /* MAC learning changed. */
    REV_MCAST_SNOOPING,        /* Multicast snooping changed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_rstp);
COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_mcast_snooping);

/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct udpif *udpif;

    struct ovs_rwlock odp_to_ofport_lock;
    struct hmap odp_to_ofport_map OVS_GUARDED; /* Contains "struct ofport"s. */

    struct simap tnl_backers;  /* Set of dpif ports backing tunnels. */

    enum revalidate_reason need_revalidate; /* Revalidate all flows. */

    bool recv_set_enable; /* Enables or disables receiving packets. */

    /* Version string of the datapath stored in OVSDB. */
    char *dp_version_string;

    /* Datapath feature support. */
    struct dpif_backer_support support;
    struct atomic_count tnl_count;
};

/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Unique identifier for this instantiation of this bridge in this running
     * process.  */
    struct uuid uuid;

    ATOMIC(cls_version_t) tables_version;  /* For classifier lookups. */

    uint64_t dump_seq; /* Last read of udpif_dump_seq(). */

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
    struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct dpif_ipfix *ipfix;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct mcast_snooping *ms;
    bool has_bonded_bundles;
    bool lacp_enabled;
    struct mbridge *mbridge;

    struct ovs_mutex stats_mutex;
    struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
                                            * consumed in userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* Rapid Spanning Tree. */
    struct rstp *rstp;
    long long int rstp_last_tick;

    /* Ports. */
    struct sset ports;          /* Set of standard port names. */
    struct sset ghost_ports;    /* Ports with no datapath port. */
    struct sset port_poll_set;  /* Queued names for port_poll() reply. */
    int port_poll_errno;        /* Last errno for port_poll() reply. */
    uint64_t change_seq;        /* Connectivity status changes. */

    struct guarded_list ams;    /* Contains "struct ofproto_async_msgs"s. */
    struct seq *ams_seq;        /* For notifying 'ams' reception. */
    uint64_t ams_seqno;
};

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static bool ofproto_use_tnl_push_pop = true;
static void ofproto_unixctl_init(void);

static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

bool
ofproto_dpif_get_enable_ufid(const struct dpif_backer *backer)
{
    return backer->support.ufid;
}

struct dpif_backer_support *
ofproto_dpif_get_support(const struct ofproto_dpif *ofproto)
{
    return &ofproto->backer->support;
}

static void ofproto_trace(struct ofproto_dpif *, struct flow *,
                          const struct dp_packet *packet,
                          const struct ofpact[], size_t ofpacts_len,
                          struct ds *);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Executes 'fm'.  The caller retains ownership of 'fm' and everything in
 * it. */
void
ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
                      const struct ofputil_flow_mod *fm)
{
    struct ofproto_flow_mod ofm;

    /* Multiple threads may do this for the same 'fm' at the same time.
     * Allocate ofproto_flow_mod with execution context from stack.
     *
     * Note: This copy could be avoided by making ofproto_flow_mod more
     * complex, but that may not be desirable, and a learn action is not that
     * fast to begin with. */
    ofm.fm = *fm;
    ofproto_flow_mod(&ofproto->up, &ofm);
}

/* Appends 'am' to the queue of asynchronous messages to be sent to the
 * controller.  Takes ownership of 'am' and any data it points to. */
void
ofproto_dpif_send_async_msg(struct ofproto_dpif *ofproto,
                            struct ofproto_async_msg *am)
{
    if (!guarded_list_push_back(&ofproto->ams, &am->list_node, 1024)) {
        COVERAGE_INC(packet_in_overflow);
        ofproto_async_msg_free(am);
    }

    /* Wakes up main thread for packet-in I/O. */
    seq_change(ofproto->ams_seq);
}

/* The default "table-miss" behaviour for OpenFlow 1.3+ is to drop the
 * packet rather than to send the packet to the controller.
 *
 * This function returns true if a packet_in message for a "table-miss"
 * should be sent to at least one controller, false otherwise. */
bool
ofproto_dpif_wants_packet_in_on_miss(struct ofproto_dpif *ofproto)
{
    return connmgr_wants_packet_in_on_miss(ofproto->up.connmgr);
}

/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }

    ofproto_unixctl_init();
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}

static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static void process_dpif_port_changes(struct dpif_backer *);
static void process_dpif_all_ports_changed(struct dpif_backer *);
static void process_dpif_port_change(struct dpif_backer *,
                                     const char *devname);
static void process_dpif_port_error(struct dpif_backer *, int error);

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

bool
ofproto_dpif_backer_enabled(struct dpif_backer *backer)
{
    return backer->recv_set_enable;
}

static int
type_run(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    if (dpif_run(backer->dpif)) {
        backer->need_revalidate = REV_RECONFIGURE;
    }

    udpif_run(backer->udpif);

    /* If vswitchd started with other_config:flow_restore_wait set as "true",
     * and the configuration has now changed to "false", enable receiving
     * packets from the datapath. */
    if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
        int error;

        backer->recv_set_enable = true;

        error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
        if (error) {
            VLOG_ERR("Failed to enable receiving packets in dpif.");
            return error;
        }
        dpif_flow_flush(backer->dpif);
        backer->need_revalidate = REV_RECONFIGURE;
    }

    if (backer->recv_set_enable) {
        udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
    }

    dpif_poll_threads_set(backer->dpif, pmd_cpu_mask);

    if (backer->need_revalidate) {
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);
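
        /* After the swap, 'tmp_backers' holds the previous set of tunnel
         * backing ports.  The loop below moves every port that is still in
         * use back into 'tnl_backers'; whatever is left in 'tmp_backers'
         * afterwards is stale and gets deleted from the datapath. */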

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
                const char *dp_port;

                if (!iter->is_tunnel) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
                                                     namebuf, sizeof namebuf);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        odp_port_t odp_port = ODPP_NONE;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port,
                                      odp_to_u32(odp_port));
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
                if (tnl_port_reconfigure(iter, iter->up.netdev,
                                         iter->odp_port,
                                         ovs_native_tunneling_is_on(ofproto),
                                         dp_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, u32_to_odp(node->data));
        }
        simap_destroy(&tmp_backers);
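
        /* Account the reason for this revalidation round in the matching
         * coverage counter before clearing the flag. */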
        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:    COVERAGE_INC(rev_reconfigure);    break;
        case REV_STP:            COVERAGE_INC(rev_stp);            break;
        case REV_RSTP:           COVERAGE_INC(rev_rstp);           break;
        case REV_BOND:           COVERAGE_INC(rev_bond);           break;
        case REV_PORT_TOGGLED:   COVERAGE_INC(rev_port_toggled);   break;
        case REV_FLOW_TABLE:     COVERAGE_INC(rev_flow_table);     break;
        case REV_MAC_LEARNING:   COVERAGE_INC(rev_mac_learning);   break;
        case REV_MCAST_SNOOPING: COVERAGE_INC(rev_mcast_snooping); break;
        }
        backer->need_revalidate = 0;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *ofport;
            struct ofbundle *bundle;

            if (ofproto->backer != backer) {
                continue;
            }

            xlate_txn_start();
            xlate_ofproto_set(ofproto, ofproto->up.name,
                              ofproto->backer->dpif, ofproto->ml,
                              ofproto->stp, ofproto->rstp, ofproto->ms,
                              ofproto->mbridge, ofproto->sflow, ofproto->ipfix,
                              ofproto->netflow,
                              ofproto->up.forward_bpdu,
                              connmgr_has_in_band(ofproto->up.connmgr),
                              &ofproto->backer->support);

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                xlate_bundle_set(ofproto, bundle, bundle->name,
                                 bundle->vlan_mode, bundle->vlan,
                                 bundle->trunks, bundle->use_priority_tags,
                                 bundle->bond, bundle->lacp,
                                 bundle->floodable);
            }

            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                int stp_port = ofport->stp_port
                    ? stp_port_no(ofport->stp_port)
                    : -1;
                xlate_ofport_set(ofproto, ofport->bundle, ofport,
                                 ofport->up.ofp_port, ofport->odp_port,
                                 ofport->up.netdev, ofport->cfm, ofport->bfd,
                                 ofport->lldp, ofport->peer, stp_port,
                                 ofport->rstp_port, ofport->qdscp,
                                 ofport->n_qdscp, ofport->up.pp.config,
                                 ofport->up.pp.state, ofport->is_tunnel,
                                 ofport->may_enable);
            }
            xlate_txn_commit();
        }

        udpif_revalidate(backer->udpif);
    }

    process_dpif_port_changes(backer);

    return 0;
}

/* Check for and handle port changes in 'backer''s dpif. */
static void
process_dpif_port_changes(struct dpif_backer *backer)
{
    for (;;) {
        char *devname;
        int error;

        error = dpif_port_poll(backer->dpif, &devname);
        switch (error) {
        case EAGAIN:
            return;

        case ENOBUFS:
            process_dpif_all_ports_changed(backer);
            break;

        case 0:
            process_dpif_port_change(backer, devname);
            free(devname);
            break;

        default:
            process_dpif_port_error(backer, error);
            break;
        }
    }
}

static void
process_dpif_all_ports_changed(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    struct dpif_port dpif_port;
    struct dpif_port_dump dump;
    struct sset devnames;
    const char *devname;

    sset_init(&devnames);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            struct ofport *ofport;

            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
                sset_add(&devnames, netdev_get_name(ofport->netdev));
            }
        }
    }

    DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
        sset_add(&devnames, dpif_port.name);
    }

    SSET_FOR_EACH (devname, &devnames) {
        process_dpif_port_change(backer, devname);
    }
    sset_destroy(&devnames);
}

static void
process_dpif_port_change(struct dpif_backer *backer, const char *devname)
{
    struct ofproto_dpif *ofproto;
    struct dpif_port port;

    /* Don't report on the datapath's device. */
    if (!strcmp(devname, dpif_base_name(backer->dpif))) {
        return;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                   &all_ofproto_dpifs) {
        if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
            return;
        }
    }

    ofproto = lookup_ofproto_dpif_by_port_name(devname);
    if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
        /* The port was removed.  If we know the datapath,
         * report it through poll_set().  If we don't, it may be
         * notifying us of a removal we initiated, so ignore it.
         * If there's a pending ENOBUFS, let it stand, since
         * everything will be reevaluated. */
        if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
            sset_add(&ofproto->port_poll_set, devname);
            ofproto->port_poll_errno = 0;
        }
    } else if (!ofproto) {
        /* The port was added, but we don't know with which
         * ofproto we should associate it.  Delete it. */
        dpif_port_del(backer->dpif, port.port_no);
    } else {
        struct ofport_dpif *ofport;

        ofport = ofport_dpif_cast(shash_find_data(
                                      &ofproto->up.port_by_name, devname));
        if (ofport
            && ofport->odp_port != port.port_no
            && !odp_port_to_ofport(backer, port.port_no))
        {
            /* 'ofport''s datapath port number has changed from
             * 'ofport->odp_port' to 'port.port_no'.  Update our internal data
             * structures to match. */
            ovs_rwlock_wrlock(&backer->odp_to_ofport_lock);
            hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
            ofport->odp_port = port.port_no;
            hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
                        hash_odp_port(port.port_no));
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            backer->need_revalidate = REV_RECONFIGURE;
        }
    }
    dpif_port_destroy(&port);
}

/* Propagate 'error' to all ofprotos based on 'backer'. */
static void
process_dpif_port_error(struct dpif_backer *backer, int error)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            sset_clear(&ofproto->port_poll_set);
            ofproto->port_poll_errno = error;
        }
    }
}

static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    dpif_wait(backer->dpif);
}

/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xzalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    udpif_destroy(backer->udpif);

    simap_destroy(&backer->tnl_backers);
    ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
    hmap_destroy(&backer->odp_to_ofport_map);
    shash_find_and_delete(&all_dpif_backers, backer->type);
    free(backer->type);
    free(backer->dp_version_string);
    dpif_close(backer->dpif);
    free(backer);
}

/* Datapath port slated for removal from datapath. */
struct odp_garbage {
    struct ovs_list list_node;
    odp_port_t odp_port;
};

static bool check_variable_length_userdata(struct dpif_backer *backer);
static void check_support(struct dpif_backer *backer);

static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct ovs_list garbage_list;
    struct odp_garbage *garbage;

    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 ovs_strerror(error));
        free(backer);
        return error;
    }
    backer->udpif = udpif_create(backer, backer->dpif);

    backer->type = xstrdup(type);
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    ovs_rwlock_init(&backer->odp_to_ofport_lock);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    backer->recv_set_enable = !ofproto_get_flow_restore_wait();
    *backerp = backer;

    if (backer->recv_set_enable) {
        dpif_flow_flush(backer->dpif);
    }

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    ovs_list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            ovs_list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_POP (garbage, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    check_support(backer);
    atomic_count_init(&backer->tnl_count, 0);

    error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, ovs_strerror(error));
        close_dpif_backer(backer);
        return error;
    }

    if (backer->recv_set_enable) {
        udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
    }

    /* This check fails if performed before udpif threads have been set,
     * as the kernel module checks that the 'pid' in userspace action
     * is non-zero. */
    backer->support.variable_length_userdata
        = check_variable_length_userdata(backer);
    backer->dp_version_string = dpif_get_dp_version(backer->dpif);

    return error;
}

bool
ovs_native_tunneling_is_on(struct ofproto_dpif *ofproto)
{
    return ofproto_use_tnl_push_pop && ofproto->backer->support.tnl_push_pop &&
           atomic_count_get(&ofproto->backer->tnl_count);
}

/* Tests whether 'backer''s datapath supports recirculation.  Only newer
 * datapaths support OVS_KEY_ATTR_RECIRC_ID in keys.  We need to disable some
 * features on older datapaths that don't support this feature.
 *
 * Returns false if 'backer' definitely does not support recirculation, true if
 * it seems to support recirculation or if at least the error we get is
 * ambiguous. */
static bool
check_recirc(struct dpif_backer *backer)
{
    struct flow flow;
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    bool enable_recirc;
    struct odp_flow_key_parms odp_parms = {
        .flow = &flow,
        .support = {
            .recirc = true,
        },
    };

    memset(&flow, 0, sizeof flow);
    flow.recirc_id = 1;
    flow.dp_hash = 1;

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&odp_parms, &key);
    enable_recirc = dpif_probe_feature(backer->dpif, "recirculation", &key,
                                       NULL);

    if (enable_recirc) {
        VLOG_INFO("%s: Datapath supports recirculation",
                  dpif_name(backer->dpif));
    } else {
        VLOG_INFO("%s: Datapath does not support recirculation",
                  dpif_name(backer->dpif));
    }

    return enable_recirc;
}

/* Tests whether 'dpif' supports unique flow ids.  We can skip serializing
 * some flow attributes for datapaths that support this feature.
 *
 * Returns true if 'dpif' supports UFID for flow operations.
 * Returns false if 'dpif' does not support UFID. */
static bool
check_ufid(struct dpif_backer *backer)
{
    struct flow flow;
    struct odputil_keybuf keybuf;
    struct ofpbuf key;
    ovs_u128 ufid;
    bool enable_ufid;
    struct odp_flow_key_parms odp_parms = {
        .flow = &flow,
    };

    memset(&flow, 0, sizeof flow);
    flow.dl_type = htons(0x1234);

    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
    odp_flow_key_from_flow(&odp_parms, &key);
    dpif_flow_hash(backer->dpif, key.data, key.size, &ufid);

    enable_ufid = dpif_probe_feature(backer->dpif, "UFID", &key, &ufid);

    if (enable_ufid) {
        VLOG_INFO("%s: Datapath supports unique flow ids",
                  dpif_name(backer->dpif));
    } else {
        VLOG_INFO("%s: Datapath does not support unique flow ids",
                  dpif_name(backer->dpif));
    }
    return enable_ufid;
}

/* Tests whether 'backer''s datapath supports variable-length
 * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.  We need
 * to disable some features on older datapaths that don't support this
 * feature.
 *
 * Returns false if 'backer' definitely does not support variable-length
 * userdata, true if it seems to support them or if at least the error we get
 * is ambiguous. */
static bool
check_variable_length_userdata(struct dpif_backer *backer)
{
    struct eth_header *eth;
    struct ofpbuf actions;
    struct dpif_execute execute;
    struct dp_packet packet;
    size_t start;
    int error;

    /* Compose a userspace action that will cause an ERANGE error on older
     * datapaths that don't support variable-length userdata.
     *
     * We really test for using userdata longer than 8 bytes, but older
     * datapaths accepted these, silently truncating the userdata to 8 bytes.
     * The same older datapaths rejected userdata shorter than 8 bytes, so we
     * test for that instead as a proxy for longer userdata support. */
    ofpbuf_init(&actions, 64);
    start = nl_msg_start_nested(&actions, OVS_ACTION_ATTR_USERSPACE);
    nl_msg_put_u32(&actions, OVS_USERSPACE_ATTR_PID,
                   dpif_port_get_pid(backer->dpif, ODPP_NONE, 0));
    nl_msg_put_unspec_zero(&actions, OVS_USERSPACE_ATTR_USERDATA, 4);
    nl_msg_end_nested(&actions, start);

    /* Compose a dummy ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    /* Execute the actions.  On older datapaths this fails with ERANGE, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

    switch (error) {
    case 0:
        return true;

    case ERANGE:
        /* Variable-length userdata is not supported. */
        VLOG_WARN("%s: datapath does not support variable-length userdata "
                  "feature (needs Linux 3.10+ or kernel module from OVS "
                  "1.11+).  The NXAST_SAMPLE action will be ignored.",
                  dpif_name(backer->dpif));
        return false;

    default:
        /* Something odd happened.  We're not sure whether variable-length
         * userdata is supported.  Default to "yes". */
        VLOG_WARN("%s: variable-length userdata feature probe failed (%s)",
                  dpif_name(backer->dpif), ovs_strerror(error));
        return true;
    }
}

/* Tests the MPLS label stack depth supported by 'backer''s datapath.
 *
 * Returns the number of elements in a struct flow's mpls_lse field
 * if the datapath supports at least that many entries in an
 * MPLS label stack.
 * Otherwise returns the number of MPLS push actions supported by
 * the datapath. */
static size_t
check_max_mpls_depth(struct dpif_backer *backer)
{
    struct flow flow;
    int n;

    for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
        struct odputil_keybuf keybuf;
        struct ofpbuf key;
        struct odp_flow_key_parms odp_parms = {
            .flow = &flow,
        };

        memset(&flow, 0, sizeof flow);
        flow.dl_type = htons(ETH_TYPE_MPLS);
        flow_set_mpls_bos(&flow, n, 1);

        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
        odp_flow_key_from_flow(&odp_parms, &key);
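        /* Stop at the first MPLS stack depth the datapath's key parser
         * rejects; 'n' then holds the supported depth reported below. */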
        if (!dpif_probe_feature(backer->dpif, "MPLS", &key, NULL)) {
            break;
        }
    }

    VLOG_INFO("%s: MPLS label stack length probed as %d",
              dpif_name(backer->dpif), n);
    return n;
}

/* Tests whether 'backer''s datapath supports masked data in
 * OVS_ACTION_ATTR_SET actions.  We need to disable some features on older
 * datapaths that don't support this feature. */
static bool
check_masked_set_action(struct dpif_backer *backer)
{
    struct eth_header *eth;
    struct ofpbuf actions;
    struct dpif_execute execute;
    struct dp_packet packet;
    int error;
    struct ovs_key_ethernet key, mask;

    /* Compose a set action that will cause an EINVAL error on older
     * datapaths that don't support masked set actions.
     * Avoid using a full mask, as it could be translated to a non-masked
     * set action instead. */
    ofpbuf_init(&actions, 64);
    memset(&key, 0x53, sizeof key);
    memset(&mask, 0x7f, sizeof mask);
    commit_masked_set_action(&actions, OVS_KEY_ATTR_ETHERNET, &key, &mask,
                             sizeof key);

    /* Compose a dummy ethernet packet. */
    dp_packet_init(&packet, ETH_HEADER_LEN);
    eth = dp_packet_put_zeros(&packet, ETH_HEADER_LEN);
    eth->eth_type = htons(0x1234);

    /* Execute the actions.  On older datapaths this fails with EINVAL, on
     * newer datapaths it succeeds. */
    execute.actions = actions.data;
    execute.actions_len = actions.size;
    execute.packet = &packet;
    execute.needs_help = false;
    execute.probe = true;

    error = dpif_execute(backer->dpif, &execute);

    dp_packet_uninit(&packet);
    ofpbuf_uninit(&actions);

    if (error) {
        /* Masked set action is not supported. */
        VLOG_INFO("%s: datapath does not support masked set action feature.",
                  dpif_name(backer->dpif));
    }
    return !error;
}

#define CHECK_FEATURE__(NAME, SUPPORT, FIELD, VALUE)                        \
static bool                                                                 \
check_##NAME(struct dpif_backer *backer)                                    \
{                                                                           \
    struct flow flow;                                                       \
    struct odputil_keybuf keybuf;                                           \
    struct ofpbuf key;                                                      \
    bool enable;                                                            \
    struct odp_flow_key_parms odp_parms = {                                 \
        .flow = &flow,                                                      \
        .support = {                                                        \
            .SUPPORT = true,                                                \
        },                                                                  \
    };                                                                      \
                                                                            \
    memset(&flow, 0, sizeof flow);                                          \
    flow.FIELD = VALUE;                                                     \
                                                                            \
    ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);                         \
    odp_flow_key_from_flow(&odp_parms, &key);                               \
    enable = dpif_probe_feature(backer->dpif, #NAME, &key, NULL);           \
                                                                            \
    if (enable) {                                                           \
        VLOG_INFO("%s: Datapath supports "#NAME, dpif_name(backer->dpif));  \
    } else {                                                                \
        VLOG_INFO("%s: Datapath does not support "#NAME,                    \
                  dpif_name(backer->dpif));                                 \
    }                                                                       \
                                                                            \
    return enable;                                                          \
}

#define CHECK_FEATURE(FIELD) CHECK_FEATURE__(FIELD, FIELD, FIELD, 1)

CHECK_FEATURE(ct_state)
CHECK_FEATURE(ct_zone)
CHECK_FEATURE(ct_mark)
CHECK_FEATURE__(ct_label, ct_label, ct_label.u64.lo, 1)
CHECK_FEATURE__(ct_state_nat, ct_state, ct_state, CS_TRACKED|CS_SRC_NAT)

#undef CHECK_FEATURE
#undef CHECK_FEATURE__
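
/* For reference, a sketch (kept in a comment so it does not redefine the
 * real probe) of what CHECK_FEATURE(ct_state) above expands to:
 *
 *     static bool
 *     check_ct_state(struct dpif_backer *backer)
 *     {
 *         struct flow flow;
 *         ...
 *         memset(&flow, 0, sizeof flow);
 *         flow.ct_state = 1;
 *         ...
 *         enable = dpif_probe_feature(backer->dpif, "ct_state", &key, NULL);
 *         ...
 *         return enable;
 *     }
 */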

static void
check_support(struct dpif_backer *backer)
{
    /* This feature needs to be tested after udpif threads are set. */
    backer->support.variable_length_userdata = false;

    backer->support.odp.recirc = check_recirc(backer);
    backer->support.odp.max_mpls_depth = check_max_mpls_depth(backer);
    backer->support.masked_set_action = check_masked_set_action(backer);
    backer->support.ufid = check_ufid(backer);
    backer->support.tnl_push_pop = dpif_supports_tnl_push_pop(backer->dpif);

    backer->support.odp.ct_state = check_ct_state(backer);
    backer->support.odp.ct_zone = check_ct_zone(backer);
    backer->support.odp.ct_mark = check_ct_mark(backer);
    backer->support.odp.ct_label = check_ct_label(backer);

    backer->support.odp.ct_state_nat = check_ct_state_nat(backer);
}

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int error;

    /* Tunnel module can get used right after the udpif threads are running. */
    ofproto_tunnel_init();

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    uuid_generate(&ofproto->uuid);
    atomic_init(&ofproto->tables_version, CLS_MIN_VERSION);
    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->ipfix = NULL;
    ofproto->stp = NULL;
    ofproto->rstp = NULL;
    ofproto->dump_seq = 0;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    ofproto->ms = NULL;
    ofproto->mbridge = mbridge_create();
    ofproto->has_bonded_bundles = false;
    ofproto->lacp_enabled = false;
    ovs_mutex_init_adaptive(&ofproto->stats_mutex);

    guarded_list_init(&ofproto->ams);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;
    ofproto->change_seq = 0;
    ofproto->ams_seq = seq_create();
    ofproto->ams_seqno = seq_read(ofproto->ams_seq);

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);

    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    return error;
}

static int
add_internal_miss_flow(struct ofproto_dpif *ofproto, int id,
                       const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct match match;
    int error;
    struct rule *rule;

    match_init_catchall(&match);
    match_set_reg(&match, 0, id);

    error = ofproto_dpif_add_internal_flow(ofproto, &match, 0, 0, ofpacts,
                                           &rule);
    *rulep = error ? NULL : rule_dpif_cast(rule);

    return error;
}

static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    struct rule *unused_rulep OVS_UNUSED;
    struct match match;
    int error;
    int id = 1;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_IMPLICIT_MISS;
    ofpact_finish_CONTROLLER(&ofpacts, &controller);

    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->no_packet_in_rule);
    if (error) {
        return error;
    }

    error = add_internal_miss_flow(ofproto, id++, &ofpacts,
                                   &ofproto->drop_frags_rule);
    if (error) {
        return error;
    }

    /* Drop any runaway non-recirc rule lookups.  Recirc_id has to be
     * zero when reaching this rule.
     *
     * (priority=2), recirc_id=0, actions=drop
     */
    ofpbuf_clear(&ofpacts);
    match_init_catchall(&match);
    match_set_recirc_id(&match, 0);
    error = ofproto_dpif_add_internal_flow(ofproto, &match, 2, 0, &ofpacts,
                                           &unused_rulep);
    return error;
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofproto_async_msg *am;
    struct rule_dpif *rule;
    struct oftable *table;
    struct ovs_list ams;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    xlate_txn_start();
    xlate_remove_ofproto(ofproto);
    xlate_txn_commit();

    /* Ensure that the upcall processing threads have no remaining references
     * to the ofproto or anything in it. */
    udpif_synchronize(ofproto->backer->udpif);

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        CLS_FOR_EACH (rule, up.cr, &table->cls) {
            ofproto_rule_delete(&ofproto->up, &rule->up);
        }
    }
    ofproto_group_delete_all(&ofproto->up);

    guarded_list_pop_all(&ofproto->ams, &ams);
    LIST_FOR_EACH_POP (am, list_node, &ams) {
        ofproto_async_msg_free(am);
    }
    guarded_list_destroy(&ofproto->ams);

    recirc_free_ofproto(ofproto, ofproto->up.name);

    mbridge_unref(ofproto->mbridge);

    netflow_unref(ofproto->netflow);
    dpif_sflow_unref(ofproto->sflow);
    dpif_ipfix_unref(ofproto->ipfix);
    hmap_destroy(&ofproto->bundles);
    mac_learning_unref(ofproto->ml);
    mcast_snooping_unref(ofproto->ms);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    ovs_mutex_destroy(&ofproto->stats_mutex);

    seq_destroy(ofproto->ams_seq);

    close_dpif_backer(ofproto->backer);
}

static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    uint64_t new_seq, new_dump_seq;

    if (mbridge_need_revalidate(ofproto->mbridge)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
        mac_learning_flush(ofproto->ml);
        ovs_rwlock_unlock(&ofproto->ml->rwlock);
        mcast_snooping_mdb_flush(ofproto->ms);
    }

    /* Always updates the ofproto->ams_seqno to avoid frequent wakeup during
     * flow restore.  Even though nothing is processed during flow restore,
     * all queued 'ams' will be handled immediately when flow restore
     * completes. */
    ofproto->ams_seqno = seq_read(ofproto->ams_seq);

    /* Do not perform any periodic activity required by 'ofproto' while
     * waiting for flow restore to complete. */
    if (!ofproto_get_flow_restore_wait()) {
        struct ofproto_async_msg *am;
        struct ovs_list ams;

        guarded_list_pop_all(&ofproto->ams, &ams);
        LIST_FOR_EACH_POP (am, list_node, &ams) {
            connmgr_send_async_msg(ofproto->up.connmgr, am);
            ofproto_async_msg_free(am);
        }
    }

    if (ofproto->netflow) {
        netflow_run(ofproto->netflow);
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }
    if (ofproto->ipfix) {
        dpif_ipfix_run(ofproto->ipfix);
    }

    new_seq = seq_read(connectivity_seq_get());
    if (ofproto->change_seq != new_seq) {
        struct ofport_dpif *ofport;

        HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
            port_run(ofport);
        }

        ofproto->change_seq = new_seq;
    }

    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            bundle_run(bundle);
        }
    }

    stp_run(ofproto);
    rstp_run(ofproto);
    ovs_rwlock_wrlock(&ofproto->ml->rwlock);
    if (mac_learning_run(ofproto->ml)) {
        ofproto->backer->need_revalidate = REV_MAC_LEARNING;
    }
    ovs_rwlock_unlock(&ofproto->ml->rwlock);

    if (mcast_snooping_run(ofproto->ms)) {
        ofproto->backer->need_revalidate = REV_MCAST_SNOOPING;
    }

    new_dump_seq = seq_read(udpif_dump_seq(ofproto->backer->udpif));
    if (ofproto->dump_seq != new_dump_seq) {
        struct rule *rule, *next_rule;

        /* We know stats are relatively fresh, so now is a good time to do some
         * periodic work. */
        ofproto->dump_seq = new_dump_seq;

        /* Expire OpenFlow flows whose idle_timeout or hard_timeout
         * has passed. */
        ovs_mutex_lock(&ofproto_mutex);
        LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
                            &ofproto->up.expirable) {
            rule_expire(rule_dpif_cast(rule));
        }
        ovs_mutex_unlock(&ofproto_mutex);

        /* All outstanding data in existing flows has been accounted, so it's a
         * good time to do bond rebalancing. */
        if (ofproto->has_bonded_bundles) {
            struct ofbundle *bundle;

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                if (bundle->bond) {
                    bond_rebalance(bundle->bond);
                }
            }
        }
    }
    return 0;
}

static void
ofproto_dpif_wait(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    if (ofproto_get_flow_restore_wait()) {
        return;
    }

    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    }
    if (ofproto->ipfix) {
        dpif_ipfix_wait(ofproto->ipfix);
    }
    if (ofproto->lacp_enabled || ofproto->has_bonded_bundles) {
        struct ofbundle *bundle;

        HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
            bundle_wait(bundle);
        }
    }
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    }
    ovs_rwlock_rdlock(&ofproto->ml->rwlock);
    mac_learning_wait(ofproto->ml);
    ovs_rwlock_unlock(&ofproto->ml->rwlock);
    mcast_snooping_wait(ofproto->ms);
    stp_wait(ofproto);
    if (ofproto->backer->need_revalidate) {
        poll_immediate_wake();
    }

    seq_wait(udpif_dump_seq(ofproto->backer->udpif), ofproto->dump_seq);
    seq_wait(ofproto->ams_seq, ofproto->ams_seqno);
}

static void
type_get_memory_usage(const char *type, struct simap *usage)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        udpif_get_memory_usage(backer->udpif, usage);
    }
}

static void
flush(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_backer *backer = ofproto->backer;

    if (backer) {
        udpif_flush(backer->udpif);
    }
}

static void
query_tables(struct ofproto *ofproto,
             struct ofputil_table_features *features,
             struct ofputil_table_stats *stats)
{
    strcpy(features->name, "classifier");

    if (stats) {
        int i;

        for (i = 0; i < ofproto->n_tables; i++) {
            unsigned long missed, matched;

            atomic_read_relaxed(&ofproto->tables[i].n_matched, &matched);
            atomic_read_relaxed(&ofproto->tables[i].n_missed, &missed);

            stats[i].matched_count = matched;
            stats[i].lookup_count = matched + missed;
        }
    }
}

static void
set_tables_version(struct ofproto *ofproto_, cls_version_t version)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    atomic_store_relaxed(&ofproto->tables_version, version);
}

static struct ofport *
port_alloc(void)
{
    struct ofport_dpif *port = xzalloc(sizeof *port);
    return &port->up;
}

static void
port_dealloc(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    free(port);
}

static int
port_construct(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const struct netdev *netdev = port->up.netdev;
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;
    struct dpif_port dpif_port;
    int error;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    port->bundle = NULL;
    port->cfm = NULL;
    port->bfd = NULL;
    port->lldp = NULL;
    port->may_enable = false;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    port->rstp_port = NULL;
    port->rstp_state = RSTP_DISABLED;
    port->is_tunnel = false;
    port->peer = NULL;
    port->qdscp = NULL;
    port->n_qdscp = 0;
    port->carrier_seq = netdev_get_carrier_resets(netdev);
    port->is_layer3 = netdev_vport_is_layer3(netdev);

    if (netdev_vport_is_patch(netdev)) {
        /* By bailing out here, we don't submit the port to the sFlow module
         * to be considered for counter polling export.  This is correct
         * because the patch port represents an interface that sFlow considers
         * to be "internal" to the switch as a whole, and therefore not a
         * candidate for counter polling. */
        port->odp_port = ODPP_NONE;
        ofport_update_peer(port);
        return 0;
    }

    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
    error = dpif_port_query_by_name(ofproto->backer->dpif, dp_port_name,
                                    &dpif_port);
    if (error) {
        return error;
    }

    port->odp_port = dpif_port.port_no;

    if (netdev_get_tunnel_config(netdev)) {
        atomic_count_inc(&ofproto->backer->tnl_count);
        error = tnl_port_add(port, port->up.netdev, port->odp_port,
                             ovs_native_tunneling_is_on(ofproto), dp_port_name);
        if (error) {
            atomic_count_dec(&ofproto->backer->tnl_count);
            dpif_port_destroy(&dpif_port);
            return error;
        }

        port->is_tunnel = true;
        if (ofproto->ipfix) {
            dpif_ipfix_add_tunnel_port(ofproto->ipfix, port_, port->odp_port);
        }
    } else {
        /* Sanity-check that a mapping doesn't already exist.  This
         * shouldn't happen for non-tunnel ports. */
        if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
            VLOG_ERR("port %s already has an OpenFlow port number",
                     dpif_port.name);
            dpif_port_destroy(&dpif_port);
            return EBUSY;
        }

        ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
        hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
                    hash_odp_port(port->odp_port));
        ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
    }
    dpif_port_destroy(&dpif_port);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
    }

    return 0;
}

static void
port_destruct(struct ofport *port_, bool del)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    const char *devname = netdev_get_name(port->up.netdev);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    xlate_txn_start();
    xlate_ofport_remove(port);
    xlate_txn_commit();

    dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
                                              sizeof namebuf);
    if (del && dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
        /* The underlying device is still there, so delete it.  This
         * happens when the ofproto is being destroyed, since the caller
         * assumes that removal of attached ports will happen as part of
         * destruction. */
        if (!port->is_tunnel) {
            dpif_port_del(ofproto->backer->dpif, port->odp_port);
        }
    }

    if (port->peer) {
        port->peer->peer = NULL;
        port->peer = NULL;
    }

    if (port->odp_port != ODPP_NONE && !port->is_tunnel) {
        ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
        hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
        ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
    }

    if (port->is_tunnel) {
        atomic_count_dec(&ofproto->backer->tnl_count);
    }

    if (port->is_tunnel && ofproto->ipfix) {
        dpif_ipfix_del_tunnel_port(ofproto->ipfix, port->odp_port);
    }

    tnl_port_del(port);
    sset_find_and_delete(&ofproto->ports, devname);
    sset_find_and_delete(&ofproto->ghost_ports, devname);
    bundle_remove(port_);
    set_cfm(port_, NULL);
    set_bfd(port_, NULL);
    set_lldp(port_, NULL);
    if (port->stp_port) {
        stp_port_disable(port->stp_port);
    }
    set_rstp_port(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);
    }

    free(port->qdscp);
}

static void
port_modified(struct ofport *port_)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
    const char *dp_port_name;
    struct netdev *netdev = port->up.netdev;

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, netdev);
    }

    if (port->cfm) {
        cfm_set_netdev(port->cfm, netdev);
    }

    if (port->bfd) {
        bfd_set_netdev(port->bfd, netdev);
    }

    ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
                                     port->lldp, &port->up.pp.hw_addr);

    dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);

    if (port->is_tunnel) {
        struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

        if (tnl_port_reconfigure(port, netdev, port->odp_port,
                                 ovs_native_tunneling_is_on(ofproto),
                                 dp_port_name)) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
    }

    ofport_update_peer(port);
}

static void
port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
{
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
                   OFPUTIL_PC_NO_PACKET_IN)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);
        }
    }
}

static int
set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
        uint32_t old_probability = ds ? dpif_sflow_get_probability(ds) : 0;
        if (!ds) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create();
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
            }
        }
        dpif_sflow_set_options(ds, sflow_options);
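        /* The sampling probability is encoded in the installed datapath flow
         * actions, so only a probability change forces revalidation below. */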
        if (dpif_sflow_get_probability(ds) != old_probability) {
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
        }
    } else {
        if (ds) {
            dpif_sflow_unref(ds);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofproto->sflow = NULL;
        }
    }
    return 0;
}

static int
set_ipfix(
    struct ofproto *ofproto_,
    const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
    const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
    size_t n_flow_exporters_options)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_ipfix *di = ofproto->ipfix;
    bool has_options = bridge_exporter_options || flow_exporters_options;
    bool new_di = false;

    if (has_options && !di) {
        di = ofproto->ipfix = dpif_ipfix_create();
        new_di = true;
    }

    if (di) {
        /* Call set_options in any case to cleanly flush the flow
         * caches in the last exporters that are to be destroyed. */
        dpif_ipfix_set_options(
            di, bridge_exporter_options, flow_exporters_options,
            n_flow_exporters_options);

        /* Add tunnel ports only when a new ipfix is created. */
        if (new_di == true) {
            struct ofport_dpif *ofport;
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                if (ofport->is_tunnel == true) {
                    dpif_ipfix_add_tunnel_port(di, &ofport->up,
                                               ofport->odp_port);
                }
            }
        }

        if (!has_options) {
            dpif_ipfix_unref(di);
            ofproto->ipfix = NULL;
        }
    }

    return 0;
}

static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct cfm *old = ofport->cfm;
    int error = 0;

    if (s) {
        if (!ofport->cfm) {
            ofport->cfm = cfm_create(ofport->up.netdev);
        }

        if (cfm_configure(ofport->cfm, s)) {
            error = 0;
            goto out;
        }

        error = EINVAL;
    }
    cfm_unref(ofport->cfm);
    ofport->cfm = NULL;
out:
    if (ofport->cfm != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }
    ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
                                     ofport->lldp, &ofport->up.pp.hw_addr);
    return error;
}

static bool
cfm_status_changed(struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_check_status_change(ofport->cfm) : true;
}

static int
get_cfm_status(const struct ofport *ofport_,
               struct cfm_status *status)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int ret = 0;

    if (!ofport->cfm) {
        ret = ENOENT;
    } else {
        cfm_get_status(ofport->cfm, status);
    }
    return ret;
}

static int
set_bfd(struct ofport *ofport_, const struct smap *cfg)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct bfd *old;

    old = ofport->bfd;
    ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev),
                                cfg, ofport->up.netdev);
    if (ofport->bfd != old) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }
    ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
                                     ofport->lldp, &ofport->up.pp.hw_addr);
    return 0;
}

static bool
bfd_status_changed(struct ofport *ofport_)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->bfd ? bfd_check_status_change(ofport->bfd) : true;
}

static int
get_bfd_status(struct ofport *ofport_, struct smap *smap)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int ret = 0;

    if (!ofport->bfd) {
        ret = ENOENT;
    } else {
        bfd_get_status(ofport->bfd, smap);
    }
    return ret;
}

static int
set_lldp(struct ofport *ofport_,
         const struct smap *cfg)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    int error = 0;

    if (cfg) {
        if (!ofport->lldp) {
            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->backer->need_revalidate = REV_RECONFIGURE;
            ofport->lldp = lldp_create(ofport->up.netdev, ofport_->mtu, cfg);
        }

        if (!lldp_configure(ofport->lldp, cfg)) {
            error = EINVAL;
        }
    }
    if (error) {
        lldp_unref(ofport->lldp);
        ofport->lldp = NULL;
    }

    ofproto_dpif_monitor_port_update(ofport,
                                     ofport->bfd,
                                     ofport->cfm,
                                     ofport->lldp,
                                     &ofport->up.pp.hw_addr);
    return error;
}

static bool
get_lldp_status(const struct ofport *ofport_,
                struct lldp_status *status OVS_UNUSED)
{
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->lldp ? true : false;
}

static int
set_aa(struct ofproto *ofproto OVS_UNUSED,
       const struct aa_settings *s)
{
    return aa_configure(s);
}

static int
aa_mapping_set(struct ofproto *ofproto_ OVS_UNUSED, void *aux,
               const struct aa_mapping_settings *s)
{
    return aa_mapping_register(aux, s);
}

static int
aa_mapping_unset(struct ofproto *ofproto OVS_UNUSED, void *aux)
{
    return aa_mapping_unregister(aux);
}

static int
aa_vlan_get_queued(struct ofproto *ofproto OVS_UNUSED, struct ovs_list *list)
{
    return aa_get_vlan_queued(list);
}

static unsigned int
aa_vlan_get_queue_size(struct ofproto *ofproto OVS_UNUSED)
{
    return aa_get_vlan_queue_size();
}

/* Spanning Tree. */

/* Called while rstp_mutex is held. */
static void
rstp_send_bpdu_cb(struct dp_packet *pkt, void *ofport_, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct ofport_dpif *ofport = ofport_;
    struct eth_header *eth = dp_packet_l2(pkt);

    netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
    if (eth_addr_is_zero(eth->eth_src)) {
        VLOG_WARN_RL(&rl, "%s port %d: cannot send RSTP BPDU on a port which "
                     "does not have a configured source MAC address.",
                     ofproto->up.name, ofp_to_u16(ofport->up.ofp_port));
    } else {
        ofproto_dpif_send_packet(ofport, pkt);
    }
    dp_packet_delete(pkt);
}

static void
send_bpdu_cb(struct dp_packet *pkt, int port_num, void *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
    if (!ofport) {
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
    } else {
        struct eth_header *eth = dp_packet_l2(pkt);

        netdev_get_etheraddr(ofport->up.netdev, &eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
        } else {
            ofproto_dpif_send_packet(ofport, pkt);
        }
    }
    dp_packet_delete(pkt);
}

/* Configure RSTP on 'ofproto_' using the settings defined in 's'. */
static void
set_rstp(struct ofproto *ofproto_, const struct ofproto_rstp_settings *s)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->rstp) {
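        /* I.e., RSTP is being switched on or off; parameter-only changes
         * are handled below without revalidation. */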
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
    }

    if (s) {
        if (!ofproto->rstp) {
            ofproto->rstp = rstp_create(ofproto_->name, s->address,
                                        rstp_send_bpdu_cb, ofproto);
            ofproto->rstp_last_tick = time_msec();
        }
        rstp_set_bridge_address(ofproto->rstp, s->address);
        rstp_set_bridge_priority(ofproto->rstp, s->priority);
        rstp_set_bridge_ageing_time(ofproto->rstp, s->ageing_time);
        rstp_set_bridge_force_protocol_version(ofproto->rstp,
                                               s->force_protocol_version);
        rstp_set_bridge_max_age(ofproto->rstp, s->bridge_max_age);
        rstp_set_bridge_forward_delay(ofproto->rstp, s->bridge_forward_delay);
        rstp_set_bridge_transmit_hold_count(ofproto->rstp,
                                            s->transmit_hold_count);
    } else {
        struct ofport *ofport;
        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_rstp_port(ofport, NULL);
        }
        rstp_unref(ofproto->rstp);
        ofproto->rstp = NULL;
    }
}
2207 get_rstp_status(struct ofproto *ofproto_, struct ofproto_rstp_status *s)
2209 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2211 if (ofproto->rstp) {
2213 s->root_id = rstp_get_root_id(ofproto->rstp);
2214 s->bridge_id = rstp_get_bridge_id(ofproto->rstp);
2215 s->designated_id = rstp_get_designated_id(ofproto->rstp);
2216 s->root_path_cost = rstp_get_root_path_cost(ofproto->rstp);
2217 s->designated_port_id = rstp_get_designated_port_id(ofproto->rstp);
2218 s->bridge_port_id = rstp_get_bridge_port_id(ofproto->rstp);
2225 update_rstp_port_state(struct ofport_dpif *ofport)
2227 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2228 enum rstp_state state;
2230 /* Figure out new state. */
2231 state = ofport->rstp_port ? rstp_port_get_state(ofport->rstp_port)
2235 if (ofport->rstp_state != state) {
2236 enum ofputil_port_state of_state;
2239 VLOG_DBG("port %s: RSTP state changed from %s to %s",
2240 netdev_get_name(ofport->up.netdev),
2241 rstp_state_name(ofport->rstp_state),
2242 rstp_state_name(state));
2244 if (rstp_learn_in_state(ofport->rstp_state)
2245 != rstp_learn_in_state(state)) {
2246 /* XXX: Learning action flows should also be flushed. */
2247 if (ofport->bundle) {
2248 if (!rstp_shift_root_learned_address(ofproto->rstp)
2249 || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
2250 bundle_flush_macs(ofport->bundle, false);
2254 fwd_change = rstp_forward_in_state(ofport->rstp_state)
2255 != rstp_forward_in_state(state);
2257 ofproto->backer->need_revalidate = REV_RSTP;
2258 ofport->rstp_state = state;
2260 if (fwd_change && ofport->bundle) {
2261 bundle_update(ofport->bundle);
2264 /* Update the RSTP state bits in the OpenFlow port description. */
2265 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
2266 of_state |= (state == RSTP_LEARNING ? OFPUTIL_PS_STP_LEARN
2267 : state == RSTP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
2268 : state == RSTP_DISCARDING ? OFPUTIL_PS_STP_LISTEN
2270 ofproto_port_set_state(&ofport->up, of_state);
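/* Illustrative sketch (hypothetical helper, not part of the build): the
 * RSTP-to-OpenFlow mapping applied above, factored out.  OpenFlow has no
 * RSTP-specific port-state bits, which is why RSTP_DISCARDING is reported
 * as OFPUTIL_PS_STP_LISTEN. */
static inline enum ofputil_port_state
rstp_state_to_of_state(enum rstp_state state, enum ofputil_port_state base)
{
    return (base & ~OFPUTIL_PS_STP_MASK)
           | (state == RSTP_LEARNING ? OFPUTIL_PS_STP_LEARN
              : state == RSTP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
              : state == RSTP_DISCARDING ? OFPUTIL_PS_STP_LISTEN
              : 0);
}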
2275 rstp_run(struct ofproto_dpif *ofproto)
2277 if (ofproto->rstp) {
2278 long long int now = time_msec();
2279 long long int elapsed = now - ofproto->rstp_last_tick;
2280 struct rstp_port *rp;
2281 struct ofport_dpif *ofport;
2283 /* Every second, decrease the values of the timers. */
2284 if (elapsed >= 1000) {
2285 rstp_tick_timers(ofproto->rstp);
2286 ofproto->rstp_last_tick = now;
2289 while ((ofport = rstp_get_next_changed_port_aux(ofproto->rstp, &rp))) {
2290 update_rstp_port_state(ofport);
2294 /* FIXME: This check should be done on-event (i.e., when setting
2295  * p->fdb_flush) and not periodically. */
2297 while ((ofport = rstp_check_and_reset_fdb_flush(ofproto->rstp, &rp))) {
2298 if (!rstp_shift_root_learned_address(ofproto->rstp)
2299 || rstp_get_old_root_aux(ofproto->rstp) != ofport) {
2300 bundle_flush_macs(ofport->bundle, false);
2304 if (rstp_shift_root_learned_address(ofproto->rstp)) {
2305 struct ofport_dpif *old_root_aux =
2306 (struct ofport_dpif *)rstp_get_old_root_aux(ofproto->rstp);
2307 struct ofport_dpif *new_root_aux =
2308 (struct ofport_dpif *)rstp_get_new_root_aux(ofproto->rstp);
2309 if (old_root_aux != NULL && new_root_aux != NULL) {
2310 bundle_move(old_root_aux->bundle, new_root_aux->bundle);
2311 rstp_reset_root_changed(ofproto->rstp);
2317 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
2319 set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
2321 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2323 /* Only revalidate flows if the configuration changed. */
2324 if (!s != !ofproto->stp) {
2325 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2329 if (!ofproto->stp) {
2330 ofproto->stp = stp_create(ofproto_->name, s->system_id,
2331 send_bpdu_cb, ofproto);
2332 ofproto->stp_last_tick = time_msec();
2335 stp_set_bridge_id(ofproto->stp, s->system_id);
2336 stp_set_bridge_priority(ofproto->stp, s->priority);
2337 stp_set_hello_time(ofproto->stp, s->hello_time);
2338 stp_set_max_age(ofproto->stp, s->max_age);
2339 stp_set_forward_delay(ofproto->stp, s->fwd_delay);
2341 struct ofport *ofport;
2343 HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
2344 set_stp_port(ofport, NULL);
2347 stp_unref(ofproto->stp);
2348 ofproto->stp = NULL;
2355 get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
2357 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2361 s->bridge_id = stp_get_bridge_id(ofproto->stp);
2362 s->designated_root = stp_get_designated_root(ofproto->stp);
2363 s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
2372 update_stp_port_state(struct ofport_dpif *ofport)
2374 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2375 enum stp_state state;
2377 /* Figure out new state. */
2378 state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
2382 if (ofport->stp_state != state) {
2383 enum ofputil_port_state of_state;
2386 VLOG_DBG("port %s: STP state changed from %s to %s",
2387 netdev_get_name(ofport->up.netdev),
2388 stp_state_name(ofport->stp_state),
2389 stp_state_name(state));
2390 if (stp_learn_in_state(ofport->stp_state)
2391 != stp_learn_in_state(state)) {
2392             /* XXX: Learning action flows should also be flushed. */
2393 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2394 mac_learning_flush(ofproto->ml);
2395 ovs_rwlock_unlock(&ofproto->ml->rwlock);
2396 mcast_snooping_mdb_flush(ofproto->ms);
2398 fwd_change = stp_forward_in_state(ofport->stp_state)
2399 != stp_forward_in_state(state);
2401 ofproto->backer->need_revalidate = REV_STP;
2402 ofport->stp_state = state;
2403 ofport->stp_state_entered = time_msec();
2405 if (fwd_change && ofport->bundle) {
2406 bundle_update(ofport->bundle);
2409 /* Update the STP state bits in the OpenFlow port description. */
2410 of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
2411 of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
2412 : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
2413 : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
2414 : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
2416 ofproto_port_set_state(&ofport->up, of_state);
2420 /* Configures STP on 'ofport_' using the settings defined in 's'. The
2421 * caller is responsible for assigning STP port numbers and ensuring
2422 * there are no duplicates. */
2424 set_stp_port(struct ofport *ofport_,
2425 const struct ofproto_port_stp_settings *s)
2427 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2428 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2429 struct stp_port *sp = ofport->stp_port;
2431 if (!s || !s->enable) {
2433 ofport->stp_port = NULL;
2434 stp_port_disable(sp);
2435 update_stp_port_state(ofport);
2438 } else if (sp && stp_port_no(sp) != s->port_num
2439 && ofport == stp_port_get_aux(sp)) {
2440 /* The port-id changed, so disable the old one if it's not
2441 * already in use by another port. */
2442 stp_port_disable(sp);
2445 sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
2447     /* Set name before enabling the port so that debugging messages can print
2448      * the port name. */
2449 stp_port_set_name(sp, netdev_get_name(ofport->up.netdev));
2450 stp_port_enable(sp);
2452 stp_port_set_aux(sp, ofport);
2453 stp_port_set_priority(sp, s->priority);
2454 stp_port_set_path_cost(sp, s->path_cost);
2456 update_stp_port_state(ofport);
2462 get_stp_port_status(struct ofport *ofport_,
2463 struct ofproto_port_stp_status *s)
2465 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2466 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2467 struct stp_port *sp = ofport->stp_port;
2469 if (!ofproto->stp || !sp) {
2475 s->port_id = stp_port_get_id(sp);
2476 s->state = stp_port_get_state(sp);
2477 s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
2478 s->role = stp_port_get_role(sp);
2484 get_stp_port_stats(struct ofport *ofport_,
2485 struct ofproto_port_stp_stats *s)
2487 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2488 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2489 struct stp_port *sp = ofport->stp_port;
2491 if (!ofproto->stp || !sp) {
2497 stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
2503 stp_run(struct ofproto_dpif *ofproto)
2506 long long int now = time_msec();
2507 long long int elapsed = now - ofproto->stp_last_tick;
2508 struct stp_port *sp;
2511 stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
2512 ofproto->stp_last_tick = now;
2514 while (stp_get_changed_port(ofproto->stp, &sp)) {
2515 struct ofport_dpif *ofport = stp_port_get_aux(sp);
2518 update_stp_port_state(ofport);
2522 if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
2523 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2524 mac_learning_flush(ofproto->ml);
2525 ovs_rwlock_unlock(&ofproto->ml->rwlock);
2526 mcast_snooping_mdb_flush(ofproto->ms);
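/* Note on stp_run() above: 'elapsed' is a long long but is clamped with
 * MIN(INT_MAX, elapsed) before being passed to stp_tick(), which keeps the
 * value representable as an int even after very long idle periods. */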
2532 stp_wait(struct ofproto_dpif *ofproto)
2535 poll_timer_wait(1000);
2539 /* Configures RSTP on 'ofport_' using the settings defined in 's'. The
2540 * caller is responsible for assigning RSTP port numbers and ensuring
2541 * there are no duplicates. */
2543 set_rstp_port(struct ofport *ofport_,
2544 const struct ofproto_port_rstp_settings *s)
2546 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2547 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2548 struct rstp_port *rp = ofport->rstp_port;
2550 if (!s || !s->enable) {
2552 rstp_port_set_aux(rp, NULL);
2553 rstp_port_set_state(rp, RSTP_DISABLED);
2554 rstp_port_set_mac_operational(rp, false);
2555 ofport->rstp_port = NULL;
2556 rstp_port_unref(rp);
2557 update_rstp_port_state(ofport);
2562     /* Check whether we need to add a new port. */
2564 rp = ofport->rstp_port = rstp_add_port(ofproto->rstp);
2567 rstp_port_set(rp, s->port_num, s->priority, s->path_cost,
2568 s->admin_edge_port, s->auto_edge,
2569 s->admin_p2p_mac_state, s->admin_port_state, s->mcheck,
2571 update_rstp_port_state(ofport);
2572 /* Synchronize operational status. */
2573 rstp_port_set_mac_operational(rp, ofport->may_enable);
2577 get_rstp_port_status(struct ofport *ofport_,
2578 struct ofproto_port_rstp_status *s)
2580 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2581 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2582 struct rstp_port *rp = ofport->rstp_port;
2584 if (!ofproto->rstp || !rp) {
2590 rstp_port_get_status(rp, &s->port_id, &s->state, &s->role,
2591 &s->designated_bridge_id, &s->designated_port_id,
2592 &s->designated_path_cost, &s->tx_count,
2593 &s->rx_count, &s->error_count, &s->uptime);
2598 set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp,
2601 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2602 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2604 if (ofport->n_qdscp != n_qdscp
2605 || (n_qdscp && memcmp(ofport->qdscp, qdscp,
2606 n_qdscp * sizeof *qdscp))) {
2607 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2608 free(ofport->qdscp);
2609 ofport->qdscp = n_qdscp
2610 ? xmemdup(qdscp, n_qdscp * sizeof *qdscp)
2612 ofport->n_qdscp = n_qdscp;
2620 /* Expires all MAC learning entries associated with 'bundle' and forces its
2621 * ofproto to revalidate every flow.
2623 * Normally MAC learning entries are removed only from the ofproto associated
2624 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
2625 * are removed from every ofproto. When patch ports and SLB bonds are in
2626 * use, a VM migration happens, and the gratuitous ARPs are somehow lost,
2627 * this avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can
2628 * communicate with the host from which it migrated. */
2630 bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
2632 struct ofproto_dpif *ofproto = bundle->ofproto;
2633 struct mac_learning *ml = ofproto->ml;
2634 struct mac_entry *mac, *next_mac;
2636 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2637 ovs_rwlock_wrlock(&ml->rwlock);
2638 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2639 if (mac_entry_get_port(ml, mac) == bundle) {
2641 struct ofproto_dpif *o;
2643 HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2645 struct mac_entry *e;
2647 ovs_rwlock_wrlock(&o->ml->rwlock);
2648 e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
2650 mac_learning_expire(o->ml, e);
2652 ovs_rwlock_unlock(&o->ml->rwlock);
2657 mac_learning_expire(ml, mac);
2660 ovs_rwlock_unlock(&ml->rwlock);
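/* Usage sketch for bundle_flush_macs(): bundle_destroy() below passes
 * 'all_ofprotos' as true so that, e.g., after a VM migration over patch
 * ports, stale entries in other bridges expire immediately instead of
 * after MAC_ENTRY_IDLE_TIME:
 *
 *     bundle_flush_macs(bundle, true);     Flush in every ofproto.
 *     bundle_flush_macs(bundle, false);    Flush only in bundle->ofproto.
 */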
2664 bundle_move(struct ofbundle *old, struct ofbundle *new)
2666 struct ofproto_dpif *ofproto = old->ofproto;
2667 struct mac_learning *ml = ofproto->ml;
2668 struct mac_entry *mac, *next_mac;
2670 ovs_assert(new->ofproto == old->ofproto);
2672 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2673 ovs_rwlock_wrlock(&ml->rwlock);
2674 LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2675 if (mac_entry_get_port(ml, mac) == old) {
2676 mac_entry_set_port(ml, mac, new);
2679 ovs_rwlock_unlock(&ml->rwlock);
2682 static struct ofbundle *
2683 bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
2685 struct ofbundle *bundle;
2687 HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
2688 &ofproto->bundles) {
2689 if (bundle->aux == aux) {
2697 bundle_update(struct ofbundle *bundle)
2699 struct ofport_dpif *port;
2701 bundle->floodable = true;
2702 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2703 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2705 || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
2706 || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
2707 bundle->floodable = false;
2714 bundle_del_port(struct ofport_dpif *port)
2716 struct ofbundle *bundle = port->bundle;
2718 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2720 ovs_list_remove(&port->bundle_node);
2721 port->bundle = NULL;
2724 lacp_slave_unregister(bundle->lacp, port);
2727 bond_slave_unregister(bundle->bond, port);
2730 bundle_update(bundle);
2734 bundle_add_port(struct ofbundle *bundle, ofp_port_t ofp_port,
2735 struct lacp_slave_settings *lacp)
2737 struct ofport_dpif *port;
2739 port = ofp_port_to_ofport(bundle->ofproto, ofp_port);
2744 if (port->bundle != bundle) {
2745 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2747 bundle_remove(&port->up);
2750 port->bundle = bundle;
2751 ovs_list_push_back(&bundle->ports, &port->bundle_node);
2752 if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2754 || (bundle->ofproto->stp && !stp_forward_in_state(port->stp_state))
2755 || (bundle->ofproto->rstp && !rstp_forward_in_state(port->rstp_state))) {
2756 bundle->floodable = false;
2760 bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2761 lacp_slave_register(bundle->lacp, port, lacp);
2768 bundle_destroy(struct ofbundle *bundle)
2770 struct ofproto_dpif *ofproto;
2771 struct ofport_dpif *port, *next_port;
2777 ofproto = bundle->ofproto;
2778 mbridge_unregister_bundle(ofproto->mbridge, bundle);
2781 xlate_bundle_remove(bundle);
2784 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2785 bundle_del_port(port);
2788 bundle_flush_macs(bundle, true);
2789 hmap_remove(&ofproto->bundles, &bundle->hmap_node);
2791 free(bundle->trunks);
2792 lacp_unref(bundle->lacp);
2793 bond_unref(bundle->bond);
2798 bundle_set(struct ofproto *ofproto_, void *aux,
2799 const struct ofproto_bundle_settings *s)
2801 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2802 bool need_flush = false;
2803 struct ofport_dpif *port;
2804 struct ofbundle *bundle;
2805 unsigned long *trunks;
2811 bundle_destroy(bundle_lookup(ofproto, aux));
2815 ovs_assert(s->n_slaves == 1 || s->bond != NULL);
2816 ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
2818 bundle = bundle_lookup(ofproto, aux);
2820 bundle = xmalloc(sizeof *bundle);
2822 bundle->ofproto = ofproto;
2823 hmap_insert(&ofproto->bundles, &bundle->hmap_node,
2824 hash_pointer(aux, 0));
2826 bundle->name = NULL;
2828 ovs_list_init(&bundle->ports);
2829 bundle->vlan_mode = PORT_VLAN_TRUNK;
2831 bundle->trunks = NULL;
2832 bundle->use_priority_tags = s->use_priority_tags;
2833 bundle->lacp = NULL;
2834 bundle->bond = NULL;
2836 bundle->floodable = true;
2837 mbridge_register_bundle(ofproto->mbridge, bundle);
2840 if (!bundle->name || strcmp(s->name, bundle->name)) {
2842 bundle->name = xstrdup(s->name);
2847 ofproto->lacp_enabled = true;
2848 if (!bundle->lacp) {
2849 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2850 bundle->lacp = lacp_create();
2852 lacp_configure(bundle->lacp, s->lacp);
2854 lacp_unref(bundle->lacp);
2855 bundle->lacp = NULL;
2858 /* Update set of ports. */
2860 for (i = 0; i < s->n_slaves; i++) {
2861 if (!bundle_add_port(bundle, s->slaves[i],
2862 s->lacp ? &s->lacp_slaves[i] : NULL)) {
2866 if (!ok || ovs_list_size(&bundle->ports) != s->n_slaves) {
2867 struct ofport_dpif *next_port;
2869 LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2870 for (i = 0; i < s->n_slaves; i++) {
2871 if (s->slaves[i] == port->up.ofp_port) {
2876 bundle_del_port(port);
2880 ovs_assert(ovs_list_size(&bundle->ports) <= s->n_slaves);
2882 if (ovs_list_is_empty(&bundle->ports)) {
2883 bundle_destroy(bundle);
2887     /* Set VLAN tagging mode. */
2888 if (s->vlan_mode != bundle->vlan_mode
2889 || s->use_priority_tags != bundle->use_priority_tags) {
2890 bundle->vlan_mode = s->vlan_mode;
2891 bundle->use_priority_tags = s->use_priority_tags;
2896 vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
2897 : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
2899 if (vlan != bundle->vlan) {
2900 bundle->vlan = vlan;
2904 /* Get trunked VLANs. */
2905 switch (s->vlan_mode) {
2906 case PORT_VLAN_ACCESS:
2910 case PORT_VLAN_TRUNK:
2911 trunks = CONST_CAST(unsigned long *, s->trunks);
2914 case PORT_VLAN_NATIVE_UNTAGGED:
2915 case PORT_VLAN_NATIVE_TAGGED:
2916 if (vlan != 0 && (!s->trunks
2917 || !bitmap_is_set(s->trunks, vlan)
2918 || bitmap_is_set(s->trunks, 0))) {
2919 /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2921 trunks = bitmap_clone(s->trunks, 4096);
2923 trunks = bitmap_allocate1(4096);
2925 bitmap_set1(trunks, vlan);
2926 bitmap_set0(trunks, 0);
2928 trunks = CONST_CAST(unsigned long *, s->trunks);
2935 if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
2936 free(bundle->trunks);
2937 if (trunks == s->trunks) {
2938 bundle->trunks = vlan_bitmap_clone(trunks);
2940 bundle->trunks = trunks;
2945 if (trunks != s->trunks) {
2950 if (!ovs_list_is_short(&bundle->ports)) {
2951 bundle->ofproto->has_bonded_bundles = true;
2953 if (bond_reconfigure(bundle->bond, s->bond)) {
2954 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2957 bundle->bond = bond_create(s->bond, ofproto);
2958 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2961 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2962 bond_slave_register(bundle->bond, port,
2963 port->up.ofp_port, port->up.netdev);
2966 bond_unref(bundle->bond);
2967 bundle->bond = NULL;
2970 /* If we changed something that would affect MAC learning, un-learn
2971 * everything on this port and force flow revalidation. */
2973 bundle_flush_macs(bundle, false);
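/* Illustrative sketch (hypothetical helper): the native-VLAN trunk
 * normalization that bundle_set() performs above, in isolation.  It forces
 * the native VLAN into the trunk set and prohibits trunking VLAN 0. */
static unsigned long *
normalize_native_trunks(const unsigned long *trunks, int native_vlan)
{
    unsigned long *copy = trunks
                          ? bitmap_clone(trunks, 4096)
                          : bitmap_allocate1(4096);

    bitmap_set1(copy, native_vlan);     /* Always trunk the native VLAN. */
    bitmap_set0(copy, 0);               /* Never trunk VLAN 0. */
    return copy;
}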
2980 bundle_remove(struct ofport *port_)
2982 struct ofport_dpif *port = ofport_dpif_cast(port_);
2983 struct ofbundle *bundle = port->bundle;
2986 bundle_del_port(port);
2987 if (ovs_list_is_empty(&bundle->ports)) {
2988 bundle_destroy(bundle);
2989 } else if (ovs_list_is_short(&bundle->ports)) {
2990 bond_unref(bundle->bond);
2991 bundle->bond = NULL;
2997 send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
2999 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
3000 struct ofport_dpif *port = port_;
3004 error = netdev_get_etheraddr(port->up.netdev, &ea);
3006 struct dp_packet packet;
3009 dp_packet_init(&packet, 0);
3010 packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
3012 memcpy(packet_pdu, pdu, pdu_size);
3014 ofproto_dpif_send_packet(port, &packet);
3015 dp_packet_uninit(&packet);
3017 VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
3018 "%s (%s)", port->bundle->name,
3019 netdev_get_name(port->up.netdev), ovs_strerror(error));
3024 bundle_send_learning_packets(struct ofbundle *bundle)
3026 struct ofproto_dpif *ofproto = bundle->ofproto;
3027 int error, n_packets, n_errors;
3028 struct mac_entry *e;
3030 struct ovs_list list_node;
3031 struct ofport_dpif *port;
3032 struct dp_packet *pkt;
3034 struct ovs_list packets;
3036 ovs_list_init(&packets);
3037 ovs_rwlock_rdlock(&ofproto->ml->rwlock);
3038 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
3039 if (mac_entry_get_port(ofproto->ml, e) != bundle) {
3040 pkt_node = xmalloc(sizeof *pkt_node);
3041 pkt_node->pkt = bond_compose_learning_packet(bundle->bond,
3043 (void **)&pkt_node->port);
3044 ovs_list_push_back(&packets, &pkt_node->list_node);
3047 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3049 error = n_packets = n_errors = 0;
3050 LIST_FOR_EACH_POP (pkt_node, list_node, &packets) {
3053 ret = ofproto_dpif_send_packet(pkt_node->port, pkt_node->pkt);
3054 dp_packet_delete(pkt_node->pkt);
3064 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
3065 VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
3066 "packets, last error was: %s",
3067 bundle->name, n_errors, n_packets, ovs_strerror(error));
3069 VLOG_DBG("bond %s: sent %d gratuitous learning packets",
3070 bundle->name, n_packets);
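/* Sketch of the drain pattern used above (struct name hypothetical):
 * LIST_FOR_EACH_POP removes each node from the list as it iterates, so the
 * list ends up empty and each node can be freed inside the loop body:
 *
 *     struct pkt_node *n;
 *     LIST_FOR_EACH_POP (n, list_node, &packets) {
 *         ...send and delete n->pkt...
 *         free(n);
 *     }
 */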
3075 bundle_run(struct ofbundle *bundle)
3078 lacp_run(bundle->lacp, send_pdu_cb);
3081 struct ofport_dpif *port;
3083 LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
3084 bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
3087 if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
3088 bundle->ofproto->backer->need_revalidate = REV_BOND;
3091 if (bond_should_send_learning_packets(bundle->bond)) {
3092 bundle_send_learning_packets(bundle);
3098 bundle_wait(struct ofbundle *bundle)
3101 lacp_wait(bundle->lacp);
3104 bond_wait(bundle->bond);
3111 mirror_set__(struct ofproto *ofproto_, void *aux,
3112 const struct ofproto_mirror_settings *s)
3114 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3115 struct ofbundle **srcs, **dsts;
3120 mirror_destroy(ofproto->mbridge, aux);
3124 srcs = xmalloc(s->n_srcs * sizeof *srcs);
3125 dsts = xmalloc(s->n_dsts * sizeof *dsts);
3127 for (i = 0; i < s->n_srcs; i++) {
3128 srcs[i] = bundle_lookup(ofproto, s->srcs[i]);
3131 for (i = 0; i < s->n_dsts; i++) {
3132 dsts[i] = bundle_lookup(ofproto, s->dsts[i]);
3135 error = mirror_set(ofproto->mbridge, aux, s->name, srcs, s->n_srcs, dsts,
3136 s->n_dsts, s->src_vlans,
3137 bundle_lookup(ofproto, s->out_bundle), s->out_vlan);
3144 mirror_get_stats__(struct ofproto *ofproto, void *aux,
3145 uint64_t *packets, uint64_t *bytes)
3147 return mirror_get_stats(ofproto_dpif_cast(ofproto)->mbridge, aux, packets,
3152 set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
3154 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3155 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
3156 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
3157 mac_learning_flush(ofproto->ml);
3159 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3164 is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
3166 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3167 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
3168 return bundle && mirror_bundle_out(ofproto->mbridge, bundle) != 0;
3172 forward_bpdu_changed(struct ofproto *ofproto_)
3174 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3175 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3179 set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
3182 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3183 ovs_rwlock_wrlock(&ofproto->ml->rwlock);
3184 mac_learning_set_idle_time(ofproto->ml, idle_time);
3185 mac_learning_set_max_entries(ofproto->ml, max_entries);
3186 ovs_rwlock_unlock(&ofproto->ml->rwlock);
3189 /* Configures multicast snooping on 'ofproto' using the settings
3190 * defined in 's'. */
3192 set_mcast_snooping(struct ofproto *ofproto_,
3193 const struct ofproto_mcast_snooping_settings *s)
3195 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3197 /* Only revalidate flows if the configuration changed. */
3198 if (!s != !ofproto->ms) {
3199 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3204 ofproto->ms = mcast_snooping_create();
3207 ovs_rwlock_wrlock(&ofproto->ms->rwlock);
3208 mcast_snooping_set_idle_time(ofproto->ms, s->idle_time);
3209 mcast_snooping_set_max_entries(ofproto->ms, s->max_entries);
3210 if (mcast_snooping_set_flood_unreg(ofproto->ms, s->flood_unreg)) {
3211 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3213 ovs_rwlock_unlock(&ofproto->ms->rwlock);
3215 mcast_snooping_unref(ofproto->ms);
3222 /* Configures multicast snooping port's flood settings on 'ofproto'. */
3224 set_mcast_snooping_port(struct ofproto *ofproto_, void *aux,
3225 const struct ofproto_mcast_snooping_port_settings *s)
3227 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3228 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
3230 if (ofproto->ms && s) {
3231 ovs_rwlock_wrlock(&ofproto->ms->rwlock);
3232 mcast_snooping_set_port_flood(ofproto->ms, bundle, s->flood);
3233 mcast_snooping_set_port_flood_reports(ofproto->ms, bundle,
3235 ovs_rwlock_unlock(&ofproto->ms->rwlock);
3243 struct ofport_dpif *
3244 ofp_port_to_ofport(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
3246 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
3247 return ofport ? ofport_dpif_cast(ofport) : NULL;
3251 ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
3252 struct ofproto_port *ofproto_port,
3253 struct dpif_port *dpif_port)
3255 ofproto_port->name = dpif_port->name;
3256 ofproto_port->type = dpif_port->type;
3257 ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
3261 ofport_update_peer(struct ofport_dpif *ofport)
3263 const struct ofproto_dpif *ofproto;
3264 struct dpif_backer *backer;
3267 if (!netdev_vport_is_patch(ofport->up.netdev)) {
3271 backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
3272 backer->need_revalidate = REV_RECONFIGURE;
3275 ofport->peer->peer = NULL;
3276 ofport->peer = NULL;
3279 peer_name = netdev_vport_patch_peer(ofport->up.netdev);
3284 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
3285 struct ofport *peer_ofport;
3286 struct ofport_dpif *peer;
3289 if (ofproto->backer != backer) {
3293 peer_ofport = shash_find_data(&ofproto->up.port_by_name, peer_name);
3298 peer = ofport_dpif_cast(peer_ofport);
3299 peer_peer = netdev_vport_patch_peer(peer->up.netdev);
3300 if (peer_peer && !strcmp(netdev_get_name(ofport->up.netdev),
3302 ofport->peer = peer;
3303 ofport->peer->peer = ofport;
3313 port_run(struct ofport_dpif *ofport)
3315 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
3316 bool carrier_changed = carrier_seq != ofport->carrier_seq;
3317 bool enable = netdev_get_carrier(ofport->up.netdev);
3318 bool cfm_enable = false;
3319 bool bfd_enable = false;
3321 ofport->carrier_seq = carrier_seq;
3324 int cfm_opup = cfm_get_opup(ofport->cfm);
3326 cfm_enable = !cfm_get_fault(ofport->cfm);
3328 if (cfm_opup >= 0) {
3329 cfm_enable = cfm_enable && cfm_opup;
3334 bfd_enable = bfd_forwarding(ofport->bfd);
3337 if (ofport->bfd || ofport->cfm) {
3338 enable = enable && (cfm_enable || bfd_enable);
3341 if (ofport->bundle) {
3342 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
3343 if (carrier_changed) {
3344 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
3348 if (ofport->may_enable != enable) {
3349 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3351 ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
3353 if (ofport->rstp_port) {
3354 rstp_port_set_mac_operational(ofport->rstp_port, enable);
3358 ofport->may_enable = enable;
3362 port_query_by_name(const struct ofproto *ofproto_, const char *devname,
3363 struct ofproto_port *ofproto_port)
3365 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3366 struct dpif_port dpif_port;
3369 if (sset_contains(&ofproto->ghost_ports, devname)) {
3370 const char *type = netdev_get_type_from_name(devname);
3372 /* We may be called before ofproto->up.port_by_name is populated with
3373 * the appropriate ofport. For this reason, we must get the name and
3374 * type from the netdev layer directly. */
3376 const struct ofport *ofport;
3378 ofport = shash_find_data(&ofproto->up.port_by_name, devname);
3379 ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
3380 ofproto_port->name = xstrdup(devname);
3381 ofproto_port->type = xstrdup(type);
3387 if (!sset_contains(&ofproto->ports, devname)) {
3390 error = dpif_port_query_by_name(ofproto->backer->dpif,
3391 devname, &dpif_port);
3393 ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
3399 port_add(struct ofproto *ofproto_, struct netdev *netdev)
3401 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3402 const char *devname = netdev_get_name(netdev);
3403 char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
3404 const char *dp_port_name;
3406 if (netdev_vport_is_patch(netdev)) {
3407 sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
3411 dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
3412 if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
3413 odp_port_t port_no = ODPP_NONE;
3416 error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
3420 if (netdev_get_tunnel_config(netdev)) {
3421 simap_put(&ofproto->backer->tnl_backers,
3422 dp_port_name, odp_to_u32(port_no));
3426 if (netdev_get_tunnel_config(netdev)) {
3427 sset_add(&ofproto->ghost_ports, devname);
3429 sset_add(&ofproto->ports, devname);
3435 port_del(struct ofproto *ofproto_, ofp_port_t ofp_port)
3437 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3438 struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
3445 sset_find_and_delete(&ofproto->ghost_ports,
3446 netdev_get_name(ofport->up.netdev));
3447 ofproto->backer->need_revalidate = REV_RECONFIGURE;
3448 if (!ofport->is_tunnel && !netdev_vport_is_patch(ofport->up.netdev)) {
3449 error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
3451 /* The caller is going to close ofport->up.netdev. If this is a
3452 * bonded port, then the bond is using that netdev, so remove it
3453 * from the bond. The client will need to reconfigure everything
3454 * after deleting ports, so then the slave will get re-added. */
3455 bundle_remove(&ofport->up);
3462 port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
3464 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3467 error = netdev_get_stats(ofport->up.netdev, stats);
3469 if (!error && ofport_->ofp_port == OFPP_LOCAL) {
3470 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3472 ovs_mutex_lock(&ofproto->stats_mutex);
3473 /* ofproto->stats.tx_packets represents packets that we created
3474 * internally and sent to some port (e.g. packets sent with
3475 * ofproto_dpif_send_packet()). Account for them as if they had
3476 * come from OFPP_LOCAL and got forwarded. */
3478 if (stats->rx_packets != UINT64_MAX) {
3479 stats->rx_packets += ofproto->stats.tx_packets;
3482 if (stats->rx_bytes != UINT64_MAX) {
3483 stats->rx_bytes += ofproto->stats.tx_bytes;
3486 /* ofproto->stats.rx_packets represents packets that were received on
3487 * some port and we processed internally and dropped (e.g. STP).
3488 * Account for them as if they had been forwarded to OFPP_LOCAL. */
3490 if (stats->tx_packets != UINT64_MAX) {
3491 stats->tx_packets += ofproto->stats.rx_packets;
3494 if (stats->tx_bytes != UINT64_MAX) {
3495 stats->tx_bytes += ofproto->stats.rx_bytes;
3497 ovs_mutex_unlock(&ofproto->stats_mutex);
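/* Illustrative sketch (hypothetical helper): netdev counters report
 * UINT64_MAX to mean "statistic not supported", which is why every
 * adjustment above is guarded before adding to it. */
static inline void
stats_add_if_supported(uint64_t *counter, uint64_t delta)
{
    if (*counter != UINT64_MAX) {
        *counter += delta;
    }
}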
3504 port_get_lacp_stats(const struct ofport *ofport_, struct lacp_slave_stats *stats)
3506 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3507 if (ofport->bundle && ofport->bundle->lacp) {
3508 if (lacp_get_slave_stats(ofport->bundle->lacp, ofport, stats)) {
3515 struct port_dump_state {
3520 struct ofproto_port port;
3525 port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
3527 *statep = xzalloc(sizeof(struct port_dump_state));
3532 port_dump_next(const struct ofproto *ofproto_, void *state_,
3533 struct ofproto_port *port)
3535 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3536 struct port_dump_state *state = state_;
3537 const struct sset *sset;
3538 struct sset_node *node;
3540 if (state->has_port) {
3541 ofproto_port_destroy(&state->port);
3542 state->has_port = false;
3544 sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
3545 while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
3548 error = port_query_by_name(ofproto_, node->name, &state->port);
3550 *port = state->port;
3551 state->has_port = true;
3553 } else if (error != ENODEV) {
3558 if (!state->ghost) {
3559 state->ghost = true;
3562 return port_dump_next(ofproto_, state_, port);
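/* Hypothetical caller of the dump interface above, error handling elided.
 * port_dump_next() fills in '*port' and returns 0 while ports remain, and
 * returns nonzero once both the regular and ghost port sets are
 * exhausted. */
static void
example_dump_ports(const struct ofproto *ofproto)
{
    struct ofproto_port port;
    void *state;

    port_dump_start(ofproto, &state);
    while (!port_dump_next(ofproto, state, &port)) {
        VLOG_DBG("port %s", port.name);
    }
    port_dump_done(ofproto, state);
}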
3569 port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
3571 struct port_dump_state *state = state_;
3573 if (state->has_port) {
3574 ofproto_port_destroy(&state->port);
3581 port_poll(const struct ofproto *ofproto_, char **devnamep)
3583 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3585 if (ofproto->port_poll_errno) {
3586 int error = ofproto->port_poll_errno;
3587 ofproto->port_poll_errno = 0;
3591 if (sset_is_empty(&ofproto->port_poll_set)) {
3595 *devnamep = sset_pop(&ofproto->port_poll_set);
3600 port_poll_wait(const struct ofproto *ofproto_)
3602 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3603 dpif_port_poll_wait(ofproto->backer->dpif);
3607 port_is_lacp_current(const struct ofport *ofport_)
3609 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3610 return (ofport->bundle && ofport->bundle->lacp
3611 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
3615 /* If 'rule' is an OpenFlow rule that has expired according to OpenFlow
3616 * rules, then delete it entirely. */
3618 rule_expire(struct rule_dpif *rule)
3619 OVS_REQUIRES(ofproto_mutex)
3621 uint16_t hard_timeout, idle_timeout;
3622 long long int now = time_msec();
3625 hard_timeout = rule->up.hard_timeout;
3626 idle_timeout = rule->up.idle_timeout;
3628 /* Has 'rule' expired? */
3630 long long int modified;
3632 ovs_mutex_lock(&rule->up.mutex);
3633 modified = rule->up.modified;
3634 ovs_mutex_unlock(&rule->up.mutex);
3636 if (now > modified + hard_timeout * 1000) {
3637 reason = OFPRR_HARD_TIMEOUT;
3641 if (reason < 0 && idle_timeout) {
3644 ovs_mutex_lock(&rule->stats_mutex);
3645 used = rule->stats.used;
3646 ovs_mutex_unlock(&rule->stats_mutex);
3648 if (now > used + idle_timeout * 1000) {
3649 reason = OFPRR_IDLE_TIMEOUT;
3654 COVERAGE_INC(ofproto_dpif_expired);
3655 ofproto_rule_expire(&rule->up, reason);
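/* Sketch of the timeout arithmetic above (hypothetical helper): OpenFlow
 * timeouts are in seconds while time_msec() is in milliseconds, and a zero
 * timeout means "never expire". */
static inline bool
timeout_expired(long long int now, long long int since, uint16_t timeout_sec)
{
    return timeout_sec && now > since + timeout_sec * 1000LL;
}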
3660 ofproto_dpif_set_packet_odp_port(const struct ofproto_dpif *ofproto,
3661 ofp_port_t in_port, struct dp_packet *packet)
3663 if (in_port == OFPP_NONE) {
3664 in_port = OFPP_LOCAL;
3666 packet->md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
3670 ofproto_dpif_execute_actions__(struct ofproto_dpif *ofproto,
3671 const struct flow *flow,
3672 struct rule_dpif *rule,
3673 const struct ofpact *ofpacts, size_t ofpacts_len,
3674 int indentation, int depth, int resubmits,
3675 struct dp_packet *packet)
3677 struct dpif_flow_stats stats;
3678 struct xlate_out xout;
3679 struct xlate_in xin;
3680 struct dpif_execute execute;
3683 ovs_assert((rule != NULL) != (ofpacts != NULL));
3685 dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
3688 rule_dpif_credit_stats(rule, &stats);
3691 uint64_t odp_actions_stub[1024 / 8];
3692 struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
3693 xlate_in_init(&xin, ofproto, flow, flow->in_port.ofp_port, rule,
3694 stats.tcp_flags, packet, NULL, &odp_actions);
3695 xin.ofpacts = ofpacts;
3696 xin.ofpacts_len = ofpacts_len;
3697 xin.resubmit_stats = &stats;
3698 xin.indentation = indentation;
3700 xin.resubmits = resubmits;
3701 if (xlate_actions(&xin, &xout) != XLATE_OK) {
3706 execute.actions = odp_actions.data;
3707 execute.actions_len = odp_actions.size;
3709 pkt_metadata_from_flow(&packet->md, flow);
3710 execute.packet = packet;
3711 execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
3712 execute.probe = false;
3715 /* Fix up in_port. */
3716 ofproto_dpif_set_packet_odp_port(ofproto, flow->in_port.ofp_port, packet);
3718 error = dpif_execute(ofproto->backer->dpif, &execute);
3720 xlate_out_uninit(&xout);
3721 ofpbuf_uninit(&odp_actions);
3726 /* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
3727 * 'flow' must reflect the data in 'packet'. */
3729 ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
3730 const struct flow *flow,
3731 struct rule_dpif *rule,
3732 const struct ofpact *ofpacts, size_t ofpacts_len,
3733 struct dp_packet *packet)
3735 return ofproto_dpif_execute_actions__(ofproto, flow, rule, ofpacts,
3736 ofpacts_len, 0, 0, 0, packet);
3740 rule_dpif_credit_stats(struct rule_dpif *rule,
3741 const struct dpif_flow_stats *stats)
3743 ovs_mutex_lock(&rule->stats_mutex);
3744 if (OVS_UNLIKELY(rule->new_rule)) {
3745 rule_dpif_credit_stats(rule->new_rule, stats);
3747 rule->stats.n_packets += stats->n_packets;
3748 rule->stats.n_bytes += stats->n_bytes;
3749 rule->stats.used = MAX(rule->stats.used, stats->used);
3751 ovs_mutex_unlock(&rule->stats_mutex);
3755 rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
3756 OVS_REQUIRES(rule->up.mutex)
3758 return rule->up.flow_cookie;
3762 rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
3763 uint16_t hard_timeout)
3765 ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
3768 /* Returns 'rule''s actions. The returned actions are RCU-protected, and can
3769 * be read until the calling thread quiesces. */
3770 const struct rule_actions *
3771 rule_dpif_get_actions(const struct rule_dpif *rule)
3773 return rule_get_actions(&rule->up);
3776 /* Sets 'rule''s recirculation id. */
3778 rule_dpif_set_recirc_id(struct rule_dpif *rule, uint32_t id)
3779 OVS_REQUIRES(rule->up.mutex)
3781 ovs_assert(!rule->recirc_id || rule->recirc_id == id);
3782 if (rule->recirc_id == id) {
3783 /* Release the new reference to the same id. */
3786 rule->recirc_id = id;
3790 /* Sets 'rule''s recirculation id. */
3792 rule_set_recirc_id(struct rule *rule_, uint32_t id)
3794 struct rule_dpif *rule = rule_dpif_cast(rule_);
3796 ovs_mutex_lock(&rule->up.mutex);
3797 rule_dpif_set_recirc_id(rule, id);
3798 ovs_mutex_unlock(&rule->up.mutex);
3802 ofproto_dpif_get_tables_version(struct ofproto_dpif *ofproto OVS_UNUSED)
3804 cls_version_t version;
3806 atomic_read_relaxed(&ofproto->tables_version, &version);
3811 /* The returned rule (if any) is valid at least until the next RCU quiescent
3812 * period. If the rule needs to stay around longer, the caller should take
3813 * a reference.
3815 * 'flow' is non-const to allow for temporary modifications during the lookup.
3816 * Any changes are restored before returning. */
3817 static struct rule_dpif *
3818 rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto, cls_version_t version,
3819 uint8_t table_id, struct flow *flow,
3820 struct flow_wildcards *wc)
3822 struct classifier *cls = &ofproto->up.tables[table_id].cls;
3823 return rule_dpif_cast(rule_from_cls_rule(classifier_lookup(cls, version,
3827 /* Look up 'flow' in 'ofproto''s classifier version 'version', starting from
3828 * table '*table_id'. Returns the rule that was found, which may be one of the
3829 * special rules according to packet miss handling. If 'may_packet_in' is
3830 * false, the miss_rule (which issues packet-ins for the controller) is never
3831 * returned. Updates 'wc', if nonnull, to reflect the fields
3832 * that were used during the lookup.
3834 * If 'honor_table_miss' is true, the first lookup occurs in '*table_id', but
3835 * if none is found then the table miss configuration for that table is
3836 * honored, which can result in additional lookups in other OpenFlow tables.
3837 * In this case the function updates '*table_id' to reflect the final OpenFlow
3838 * table that was searched.
3840 * If 'honor_table_miss' is false, then only one table lookup occurs, in
3843 * The rule is returned in '*rule', which is valid at least until the next
3844 * RCU quiescent period. If the '*rule' needs to stay around longer, the
3845 * caller must take a reference.
3847 * 'in_port' allows the lookup to take place as if the in port had the value
3848 * 'in_port'. This is needed for resubmit action support.
3850 * 'flow' is non-const to allow for temporary modifications during the lookup.
3851 * Any changes are restored before returning. */
3853 rule_dpif_lookup_from_table(struct ofproto_dpif *ofproto,
3854 cls_version_t version, struct flow *flow,
3855 struct flow_wildcards *wc,
3856 const struct dpif_flow_stats *stats,
3857 uint8_t *table_id, ofp_port_t in_port,
3858 bool may_packet_in, bool honor_table_miss)
3860 ovs_be16 old_tp_src = flow->tp_src, old_tp_dst = flow->tp_dst;
3861 ofp_port_t old_in_port = flow->in_port.ofp_port;
3862 enum ofputil_table_miss miss_config;
3863 struct rule_dpif *rule;
3866     /* We always unwildcard nw_frag (for IP), so it
3867      * need not be unwildcarded here. */
3868 if (flow->nw_frag & FLOW_NW_FRAG_ANY
3869 && ofproto->up.frag_handling != OFPUTIL_FRAG_NX_MATCH) {
3870 if (ofproto->up.frag_handling == OFPUTIL_FRAG_NORMAL) {
3871 /* We must pretend that transport ports are unavailable. */
3872 flow->tp_src = htons(0);
3873 flow->tp_dst = htons(0);
3875 /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
3876 * Use the drop_frags_rule (which cannot disappear). */
3877 rule = ofproto->drop_frags_rule;
3879 struct oftable *tbl = &ofproto->up.tables[*table_id];
3882 atomic_add_relaxed(&tbl->n_matched, stats->n_packets, &orig);
3888 /* Look up a flow with 'in_port' as the input port. Then restore the
3889 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
3890 * have surprising behavior). */
3891 flow->in_port.ofp_port = in_port;
3893 /* Our current implementation depends on n_tables == N_TABLES, and
3894 * TBL_INTERNAL being the last table. */
3895 BUILD_ASSERT_DECL(N_TABLES == TBL_INTERNAL + 1);
3897 miss_config = OFPUTIL_TABLE_MISS_CONTINUE;
3899 for (next_id = *table_id;
3900 next_id < ofproto->up.n_tables;
3901 next_id++, next_id += (next_id == TBL_INTERNAL))
3903 *table_id = next_id;
3904 rule = rule_dpif_lookup_in_table(ofproto, version, next_id, flow, wc);
3906 struct oftable *tbl = &ofproto->up.tables[next_id];
3909 atomic_add_relaxed(rule ? &tbl->n_matched : &tbl->n_missed,
3910 stats->n_packets, &orig);
3913 goto out; /* Match. */
3915 if (honor_table_miss) {
3916 miss_config = ofproto_table_get_miss_config(&ofproto->up,
3918 if (miss_config == OFPUTIL_TABLE_MISS_CONTINUE) {
3925 rule = ofproto->no_packet_in_rule;
3926 if (may_packet_in) {
3927 if (miss_config == OFPUTIL_TABLE_MISS_CONTINUE
3928 || miss_config == OFPUTIL_TABLE_MISS_CONTROLLER) {
3929 struct ofport_dpif *port;
3931 port = ofp_port_to_ofport(ofproto, old_in_port);
3933 VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
3935 } else if (!(port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN)) {
3936 rule = ofproto->miss_rule;
3938 } else if (miss_config == OFPUTIL_TABLE_MISS_DEFAULT &&
3939 connmgr_wants_packet_in_on_miss(ofproto->up.connmgr)) {
3940 rule = ofproto->miss_rule;
3944 /* Restore port numbers, as they may have been modified above. */
3945 flow->tp_src = old_tp_src;
3946 flow->tp_dst = old_tp_dst;
3947 /* Restore the old in port. */
3948 flow->in_port.ofp_port = old_in_port;
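/* Note on the table walk above: the loop increment
 *
 *     next_id++, next_id += (next_id == TBL_INTERNAL)
 *
 * is a compact way of skipping the internal table; it is equivalent to
 * incrementing 'next_id' and then incrementing it once more if it landed on
 * TBL_INTERNAL. */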
3954 complete_operation(struct rule_dpif *rule)
3955 OVS_REQUIRES(ofproto_mutex)
3957 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3959 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
3962 static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
3964 return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
3967 static struct rule *
3970 struct rule_dpif *rule = xzalloc(sizeof *rule);
3975 rule_dealloc(struct rule *rule_)
3977 struct rule_dpif *rule = rule_dpif_cast(rule_);
3982 check_mask(struct ofproto_dpif *ofproto, const struct miniflow *flow)
3984 const struct odp_support *support;
3985 uint16_t ct_state, ct_zone;
3989 support = &ofproto_dpif_get_support(ofproto)->odp;
3990 ct_state = MINIFLOW_GET_U16(flow, ct_state);
3991 if (support->ct_state && support->ct_zone && support->ct_mark
3992 && support->ct_label && support->ct_state_nat) {
3993 return ct_state & CS_UNSUPPORTED_MASK ? OFPERR_OFPBMC_BAD_MASK : 0;
3996 ct_zone = MINIFLOW_GET_U16(flow, ct_zone);
3997 ct_mark = MINIFLOW_GET_U32(flow, ct_mark);
3998 ct_label = MINIFLOW_GET_U128(flow, ct_label);
4000 if ((ct_state && !support->ct_state)
4001 || (ct_state & CS_UNSUPPORTED_MASK)
4002 || ((ct_state & (CS_SRC_NAT | CS_DST_NAT)) && !support->ct_state_nat)
4003 || (ct_zone && !support->ct_zone)
4004 || (ct_mark && !support->ct_mark)
4005 || (!ovs_u128_is_zero(ct_label) && !support->ct_label)) {
4006 return OFPERR_OFPBMC_BAD_MASK;
4013 check_actions(const struct ofproto_dpif *ofproto,
4014 const struct rule_actions *const actions)
4016 const struct ofpact *ofpact;
4018 OFPACT_FOR_EACH (ofpact, actions->ofpacts, actions->ofpacts_len) {
4019 const struct odp_support *support;
4020 const struct ofpact_conntrack *ct;
4021 const struct ofpact *a;
4023 if (ofpact->type != OFPACT_CT) {
4027 ct = CONTAINER_OF(ofpact, struct ofpact_conntrack, ofpact);
4028 support = &ofproto_dpif_get_support(ofproto)->odp;
4030 if (!support->ct_state) {
4031 return OFPERR_OFPBAC_BAD_TYPE;
4033 if ((ct->zone_imm || ct->zone_src.field) && !support->ct_zone) {
4034 return OFPERR_OFPBAC_BAD_ARGUMENT;
4037 OFPACT_FOR_EACH(a, ct->actions, ofpact_ct_get_action_len(ct)) {
4038 const struct mf_field *dst = ofpact_get_mf_dst(a);
4040 if (a->type == OFPACT_NAT && !support->ct_state_nat) {
4041 /* The backer doesn't seem to support the NAT bits in
4042                  * 'ct_state': assume that it doesn't support the NAT
4043                  * action. */
4044 return OFPERR_OFPBAC_BAD_TYPE;
4046 if (dst && ((dst->id == MFF_CT_MARK && !support->ct_mark)
4047 || (dst->id == MFF_CT_LABEL && !support->ct_label))) {
4048 return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
4057 rule_check(struct rule *rule)
4059 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->ofproto);
4062 err = check_mask(ofproto, &rule->cr.match.mask->masks);
4066 return check_actions(ofproto, rule->actions);
4070 rule_construct(struct rule *rule_)
4071 OVS_NO_THREAD_SAFETY_ANALYSIS
4073 struct rule_dpif *rule = rule_dpif_cast(rule_);
4076 error = rule_check(rule_);
4081 ovs_mutex_init_adaptive(&rule->stats_mutex);
4082 rule->stats.n_packets = 0;
4083 rule->stats.n_bytes = 0;
4084 rule->stats.used = rule->up.modified;
4085 rule->recirc_id = 0;
4086 rule->new_rule = NULL;
4092 rule_insert(struct rule *rule_, struct rule *old_rule_, bool forward_stats)
4093 OVS_REQUIRES(ofproto_mutex)
4095 struct rule_dpif *rule = rule_dpif_cast(rule_);
4097 if (old_rule_ && forward_stats) {
4098 struct rule_dpif *old_rule = rule_dpif_cast(old_rule_);
4100 ovs_assert(!old_rule->new_rule);
4102 /* Take a reference to the new rule, and refer all stats updates from
4103 * the old rule to the new rule. */
4104 rule_dpif_ref(rule);
4106 ovs_mutex_lock(&old_rule->stats_mutex);
4107 ovs_mutex_lock(&rule->stats_mutex);
4108 old_rule->new_rule = rule; /* Forward future stats. */
4109 rule->stats = old_rule->stats; /* Transfer stats to the new rule. */
4110 ovs_mutex_unlock(&rule->stats_mutex);
4111 ovs_mutex_unlock(&old_rule->stats_mutex);
4114 complete_operation(rule);
4118 rule_delete(struct rule *rule_)
4119 OVS_REQUIRES(ofproto_mutex)
4121 struct rule_dpif *rule = rule_dpif_cast(rule_);
4122 complete_operation(rule);
4126 rule_destruct(struct rule *rule_)
4127 OVS_NO_THREAD_SAFETY_ANALYSIS
4129 struct rule_dpif *rule = rule_dpif_cast(rule_);
4131 ovs_mutex_destroy(&rule->stats_mutex);
4132 /* Release reference to the new rule, if any. */
4133 if (rule->new_rule) {
4134 rule_dpif_unref(rule->new_rule);
4136 if (rule->recirc_id) {
4137 recirc_free_id(rule->recirc_id);
4142 rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
4143 long long int *used)
4145 struct rule_dpif *rule = rule_dpif_cast(rule_);
4147 ovs_mutex_lock(&rule->stats_mutex);
4148 if (OVS_UNLIKELY(rule->new_rule)) {
4149 rule_get_stats(&rule->new_rule->up, packets, bytes, used);
4151 *packets = rule->stats.n_packets;
4152 *bytes = rule->stats.n_bytes;
4153 *used = rule->stats.used;
4155 ovs_mutex_unlock(&rule->stats_mutex);
4159 rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
4160 struct dp_packet *packet)
4162 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4164 ofproto_dpif_execute_actions(ofproto, flow, rule, NULL, 0, packet);
4168 rule_execute(struct rule *rule, const struct flow *flow,
4169 struct dp_packet *packet)
4171 rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
4172 dp_packet_delete(packet);
4176 static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
4178 return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
4181 static struct ofgroup *
4184 struct group_dpif *group = xzalloc(sizeof *group);
4189 group_dealloc(struct ofgroup *group_)
4191 struct group_dpif *group = group_dpif_cast(group_);
4196 group_construct_stats(struct group_dpif *group)
4197 OVS_REQUIRES(group->stats_mutex)
4199 struct ofputil_bucket *bucket;
4200 const struct ovs_list *buckets;
4202 group->packet_count = 0;
4203 group->byte_count = 0;
4205 group_dpif_get_buckets(group, &buckets);
4206 LIST_FOR_EACH (bucket, list_node, buckets) {
4207 bucket->stats.packet_count = 0;
4208 bucket->stats.byte_count = 0;
4213 group_dpif_credit_stats(struct group_dpif *group,
4214 struct ofputil_bucket *bucket,
4215 const struct dpif_flow_stats *stats)
4217 ovs_mutex_lock(&group->stats_mutex);
4218 group->packet_count += stats->n_packets;
4219 group->byte_count += stats->n_bytes;
4221 bucket->stats.packet_count += stats->n_packets;
4222 bucket->stats.byte_count += stats->n_bytes;
4223 } else { /* Credit to all buckets */
4224 const struct ovs_list *buckets;
4226 group_dpif_get_buckets(group, &buckets);
4227 LIST_FOR_EACH (bucket, list_node, buckets) {
4228 bucket->stats.packet_count += stats->n_packets;
4229 bucket->stats.byte_count += stats->n_bytes;
4232 ovs_mutex_unlock(&group->stats_mutex);
4236 group_construct(struct ofgroup *group_)
4238 struct group_dpif *group = group_dpif_cast(group_);
4240 ovs_mutex_init_adaptive(&group->stats_mutex);
4241 ovs_mutex_lock(&group->stats_mutex);
4242 group_construct_stats(group);
4243 ovs_mutex_unlock(&group->stats_mutex);
4248 group_destruct(struct ofgroup *group_)
4250 struct group_dpif *group = group_dpif_cast(group_);
4251 ovs_mutex_destroy(&group->stats_mutex);
4255 group_modify(struct ofgroup *group_)
4257 struct ofproto_dpif *ofproto = ofproto_dpif_cast(group_->ofproto);
4259 ofproto->backer->need_revalidate = REV_FLOW_TABLE;
4265 group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
4267 struct group_dpif *group = group_dpif_cast(group_);
4268 struct ofputil_bucket *bucket;
4269 const struct ovs_list *buckets;
4270 struct bucket_counter *bucket_stats;
4272 ovs_mutex_lock(&group->stats_mutex);
4273 ogs->packet_count = group->packet_count;
4274 ogs->byte_count = group->byte_count;
4276 group_dpif_get_buckets(group, &buckets);
4277 bucket_stats = ogs->bucket_stats;
4278 LIST_FOR_EACH (bucket, list_node, buckets) {
4279 bucket_stats->packet_count = bucket->stats.packet_count;
4280 bucket_stats->byte_count = bucket->stats.byte_count;
4283 ovs_mutex_unlock(&group->stats_mutex);
4288 /* If the group exists, this function increments the group's reference count.
4290 * Make sure to call group_dpif_unref() after no longer needing to maintain
4291 * a reference to the group. */
4293 group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
4294 struct group_dpif **group)
4296 struct ofgroup *ofgroup;
4299 found = ofproto_group_lookup(&ofproto->up, group_id, &ofgroup);
4300 *group = found ? group_dpif_cast(ofgroup) : NULL;
4306 group_dpif_get_buckets(const struct group_dpif *group,
4307 const struct ovs_list **buckets)
4309 *buckets = &group->up.buckets;
4312 enum ofp11_group_type
4313 group_dpif_get_type(const struct group_dpif *group)
4315 return group->up.type;
4319 group_dpif_get_selection_method(const struct group_dpif *group)
4321 return group->up.props.selection_method;
4324 /* Sends 'packet' out 'ofport'.
4325 * May modify 'packet'.
4326 * Returns 0 if successful, otherwise a positive errno value. */
4328 ofproto_dpif_send_packet(const struct ofport_dpif *ofport, struct dp_packet *packet)
4330 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
4333 error = xlate_send_packet(ofport, packet);
4335 ovs_mutex_lock(&ofproto->stats_mutex);
4336 ofproto->stats.tx_packets++;
4337 ofproto->stats.tx_bytes += dp_packet_size(packet);
4338 ovs_mutex_unlock(&ofproto->stats_mutex);
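/* Hypothetical caller, mirroring send_pdu_cb() above: compose an Ethernet
 * frame, send it out the port, and release the buffer.  The ethertype is
 * just the one already used in this file. */
static void
example_send_frame(struct ofport_dpif *ofport,
                   struct eth_addr dst, struct eth_addr src,
                   const void *payload, size_t len)
{
    struct dp_packet packet;

    dp_packet_init(&packet, 0);
    memcpy(eth_compose(&packet, dst, src, ETH_TYPE_LACP, len), payload, len);
    ofproto_dpif_send_packet(ofport, &packet);   /* May modify 'packet'. */
    dp_packet_uninit(&packet);
}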
4343 group_dpif_get_selection_method_param(const struct group_dpif *group)
4345 return group->up.props.selection_method_param;
4348 const struct field_array *
4349 group_dpif_get_fields(const struct group_dpif *group)
4351 return &group->up.props.fields;
4354 /* Return the version string of the datapath that backs up
4355  * this 'ofproto'. */
4358 get_datapath_version(const struct ofproto *ofproto_)
4360 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4362 return ofproto->backer->dp_version_string;
4366 set_frag_handling(struct ofproto *ofproto_,
4367 enum ofputil_frag_handling frag_handling)
4369 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4370 if (frag_handling != OFPUTIL_FRAG_REASM) {
4371 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4379 packet_out(struct ofproto *ofproto_, struct dp_packet *packet,
4380 const struct flow *flow,
4381 const struct ofpact *ofpacts, size_t ofpacts_len)
4383 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4385 ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
4386 ofpacts_len, packet);
4391 nxt_resume(struct ofproto *ofproto_,
4392 const struct ofputil_packet_in_private *pin)
4394 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4396 /* Translate pin into datapath actions. */
4397 uint64_t odp_actions_stub[1024 / 8];
4398 struct ofpbuf odp_actions = OFPBUF_STUB_INITIALIZER(odp_actions_stub);
4399 enum slow_path_reason slow;
4400 enum ofperr error = xlate_resume(ofproto, pin, &odp_actions, &slow);
4402 /* Steal 'pin->packet' and put it into a dp_packet. */
4403 struct dp_packet packet;
4404 dp_packet_init(&packet, pin->public.packet_len);
4405 dp_packet_put(&packet, pin->public.packet, pin->public.packet_len);
4407 pkt_metadata_from_flow(&packet.md, &pin->public.flow_metadata.flow);
4409 /* Fix up in_port. */
4410 ofproto_dpif_set_packet_odp_port(ofproto,
4411 pin->public.flow_metadata.flow.in_port.ofp_port,
4414 struct flow headers;
4415 flow_extract(&packet, &headers);
4417 /* Execute the datapath actions on the packet. */
4418 struct dpif_execute execute = {
4419 .actions = odp_actions.data,
4420 .actions_len = odp_actions.size,
4421 .needs_help = (slow & SLOW_ACTION) != 0,
4424 dpif_execute(ofproto->backer->dpif, &execute);
4427 ofpbuf_uninit(&odp_actions);
4428 dp_packet_uninit(&packet);
4436 set_netflow(struct ofproto *ofproto_,
4437 const struct netflow_options *netflow_options)
4439 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4441 if (netflow_options) {
4442 if (!ofproto->netflow) {
4443 ofproto->netflow = netflow_create();
4444 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4446 return netflow_set_options(ofproto->netflow, netflow_options);
4447 } else if (ofproto->netflow) {
4448 ofproto->backer->need_revalidate = REV_RECONFIGURE;
4449 netflow_unref(ofproto->netflow);
4450 ofproto->netflow = NULL;
4457 get_netflow_ids(const struct ofproto *ofproto_,
4458 uint8_t *engine_type, uint8_t *engine_id)
4460 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
4462 dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
4465 static struct ofproto_dpif *
4466 ofproto_dpif_lookup(const char *name)
4468 struct ofproto_dpif *ofproto;
4470 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
4471 hash_string(name, 0), &all_ofproto_dpifs) {
4472 if (!strcmp(ofproto->up.name, name)) {
static void
ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
                          const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }
        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
        mac_learning_flush(ofproto->ml);
        ovs_rwlock_unlock(&ofproto->ml->rwlock);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ovs_rwlock_wrlock(&ofproto->ml->rwlock);
            mac_learning_flush(ofproto->ml);
            ovs_rwlock_unlock(&ofproto->ml->rwlock);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}

static void
ofproto_unixctl_mcast_snooping_flush(struct unixctl_conn *conn, int argc,
                                     const char *argv[], void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "no such bridge");
            return;
        }

        if (!mcast_snooping_enabled(ofproto->ms)) {
            unixctl_command_reply_error(conn, "multicast snooping is disabled");
            return;
        }
        mcast_snooping_mdb_flush(ofproto->ms);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            if (!mcast_snooping_enabled(ofproto->ms)) {
                continue;
            }
            mcast_snooping_mdb_flush(ofproto->ms);
        }
    }

    unixctl_command_reply(conn, "table successfully flushed");
}

static struct ofport_dpif *
ofbundle_get_a_port(const struct ofbundle *bundle)
{
    return CONTAINER_OF(ovs_list_front(&bundle->ports), struct ofport_dpif,
                        bundle_node);
}

static void
ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                         const char *argv[], void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct mac_entry *e;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
    ovs_rwlock_rdlock(&ofproto->ml->rwlock);
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        struct ofbundle *bundle = mac_entry_get_port(ofproto->ml, e);
        char name[OFP_MAX_PORT_NAME_LEN];

        ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                               name, sizeof name);
        ds_put_format(&ds, "%5s  %4d  "ETH_ADDR_FMT"  %3d\n",
                      name, e->vlan, ETH_ADDR_ARGS(e->mac),
                      mac_entry_age(ofproto->ml, e));
    }
    ovs_rwlock_unlock(&ofproto->ml->rwlock);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

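/* Illustrative "fdb/show" exchange (added commentary; the bridge, port, and
 * address shown are hypothetical):
 *
 *     $ ovs-appctl fdb/show br0
 *      port  VLAN  MAC                Age
 *      eth1     0  50:54:00:00:00:05    3
 *
 * "Age" is the entry's age in seconds as reported by mac_entry_age(). */
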
static void
ofproto_unixctl_mcast_snooping_show(struct unixctl_conn *conn,
                                    int argc OVS_UNUSED,
                                    const char *argv[],
                                    void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct ofproto_dpif *ofproto;
    const struct ofbundle *bundle;
    const struct mcast_group *grp;
    struct mcast_group_bundle *b;
    struct mcast_mrouter_bundle *mrouter;

    ofproto = ofproto_dpif_lookup(argv[1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    if (!mcast_snooping_enabled(ofproto->ms)) {
        unixctl_command_reply_error(conn, "multicast snooping is disabled");
        return;
    }

    ds_put_cstr(&ds, " port  VLAN  GROUP                Age\n");
    ovs_rwlock_rdlock(&ofproto->ms->rwlock);
    LIST_FOR_EACH (grp, group_node, &ofproto->ms->group_lru) {
        LIST_FOR_EACH (b, bundle_node, &grp->bundle_lru) {
            char name[OFP_MAX_PORT_NAME_LEN];

            bundle = b->port;
            ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                                   name, sizeof name);
            ds_put_format(&ds, "%5s  %4d  ", name, grp->vlan);
            ipv6_format_mapped(&grp->addr, &ds);
            ds_put_format(&ds, " %3d\n",
                          mcast_bundle_age(ofproto->ms, b));
        }
    }

    /* Ports connected to multicast routers. */
    LIST_FOR_EACH (mrouter, mrouter_node, &ofproto->ms->mrouter_lru) {
        char name[OFP_MAX_PORT_NAME_LEN];

        bundle = mrouter->port;
        ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
                               name, sizeof name);
        ds_put_format(&ds, "%5s  %4d  querier  %3d\n",
                      name, mrouter->vlan,
                      mcast_mrouter_age(ofproto->ms, mrouter));
    }
    ovs_rwlock_unlock(&ofproto->ms->rwlock);
    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

struct trace_ctx {
    struct xlate_out xout;
    struct xlate_in xin;
    const struct flow *key;
    struct flow flow;
    struct ds *result;
    struct flow_wildcards wc;
    struct ofpbuf odp_actions;
};

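/* Added commentary: 'key' stays fixed at the original flow (it is what the
 * megaflow output is computed against), while 'flow' is the running copy
 * that translation modifies; trace_format_flow() below compares the two so
 * that resubmits which leave the flow untouched print "unchanged".  'wc'
 * accumulates the megaflow wildcards and 'odp_actions' the translated
 * datapath actions. */
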
static void
trace_format_rule(struct ds *result, int level, const struct rule_dpif *rule)
{
    const struct rule_actions *actions;
    ovs_be64 cookie;

    ds_put_char_multiple(result, '\t', level);
    if (!rule) {
        ds_put_cstr(result, "No match\n");
        return;
    }

    ovs_mutex_lock(&rule->up.mutex);
    cookie = rule->up.flow_cookie;
    ovs_mutex_unlock(&rule->up.mutex);

    ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
                  rule->up.table_id, ntohll(cookie));
    cls_rule_format(&rule->up.cr, result);
    ds_put_char(result, '\n');

    actions = rule_dpif_get_actions(rule);

    ds_put_char_multiple(result, '\t', level);
    ds_put_cstr(result, "OpenFlow actions=");
    ofpacts_format(actions->ofpacts, actions->ofpacts_len, result);
    ds_put_char(result, '\n');
}

static void
trace_format_flow(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    /* Do not report unchanged flows for resubmits. */
    if ((level > 0 && flow_equal(&trace->xin.flow, &trace->flow))
        || (level == 0 && flow_equal(&trace->xin.flow, trace->key))) {
        ds_put_cstr(result, "unchanged");
    } else {
        flow_format(result, &trace->xin.flow);
        trace->flow = trace->xin.flow;
    }
    ds_put_char(result, '\n');
}

static void
trace_format_regs(struct ds *result, int level, const char *title,
                  struct trace_ctx *trace)
{
    size_t i;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s:", title);
    for (i = 0; i < FLOW_N_REGS; i++) {
        ds_put_format(result, " reg%"PRIuSIZE"=0x%"PRIx32,
                      i, trace->flow.regs[i]);
    }
    ds_put_char(result, '\n');
}

static void
trace_format_odp(struct ds *result, int level, const char *title,
                 struct trace_ctx *trace)
{
    struct ofpbuf *odp_actions = &trace->odp_actions;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    format_odp_actions(result, odp_actions->data, odp_actions->size);
    ds_put_char(result, '\n');
}

static void
trace_format_megaflow(struct ds *result, int level, const char *title,
                      struct trace_ctx *trace)
{
    struct match match;

    ds_put_char_multiple(result, '\t', level);
    ds_put_format(result, "%s: ", title);
    match_init(&match, trace->key, &trace->wc);
    match_format(&match, result, OFP_DEFAULT_PRIORITY);
    ds_put_char(result, '\n');
}

static void trace_report(struct xlate_in *, int indentation,
                         const char *format, ...)
    OVS_PRINTF_FORMAT(3, 4);
static void trace_report_valist(struct xlate_in *, int indentation,
                                const char *format, va_list args)
    OVS_PRINTF_FORMAT(3, 0);

static void
trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int indentation)
{
    struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
    struct ds *result = trace->result;

    if (!indentation) {
        if (rule == xin->ofproto->miss_rule) {
            trace_report(xin, indentation,
                         "No match, flow generates \"packet in\"s.");
        } else if (rule == xin->ofproto->no_packet_in_rule) {
            trace_report(xin, indentation, "No match, packets dropped because "
                         "OFPPC_NO_PACKET_IN is set on in_port.");
        } else if (rule == xin->ofproto->drop_frags_rule) {
            trace_report(xin, indentation,
                         "Packets dropped because they are IP fragments and "
                         "the fragment handling mode is \"drop\".");
        }
    }

    ds_put_char(result, '\n');
    if (indentation) {
        trace_format_flow(result, indentation, "Resubmitted flow", trace);
        trace_format_regs(result, indentation, "Resubmitted regs", trace);
        trace_format_odp(result, indentation, "Resubmitted odp", trace);
        trace_format_megaflow(result, indentation, "Resubmitted megaflow",
                              trace);
    }
    trace_format_rule(result, indentation, rule);
}

static void
trace_report_valist(struct xlate_in *xin, int indentation,
                    const char *format, va_list args)
{
    struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
    struct ds *result = trace->result;

    ds_put_char_multiple(result, '\t', indentation);
    ds_put_format_valist(result, format, args);
    ds_put_char(result, '\n');
}

static void
trace_report(struct xlate_in *xin, int indentation, const char *format, ...)
{
    va_list args;

    va_start(args, format);
    trace_report_valist(xin, indentation, format, args);
    va_end(args);
}

/* Parses the 'argc' elements of 'argv', ignoring argv[0].  The following
 * forms are supported:
 *
 *     - [dpname] odp_flow [-generate | packet]
 *     - bridge br_flow [-generate | packet]
 *
 * On success, initializes '*ofprotop' and 'flow' and returns NULL.  On
 * failure, returns a nonnull, malloc'd error message. */
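/* Illustrative invocations of the two forms above (added commentary; the
 * bridge, datapath, and flow values are hypothetical):
 *
 *     ofproto/trace br0 in_port=1 -generate          (bridge + br_flow)
 *     ofproto/trace ovs-system 'in_port(1)'          (dpname + odp_flow)
 *
 * A leading "ovs-" on the datapath name is stripped before the backer
 * lookup, as the dp_type handling below shows. */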
static char * OVS_WARN_UNUSED_RESULT
parse_flow_and_packet(int argc, const char *argv[],
                      struct ofproto_dpif **ofprotop, struct flow *flow,
                      struct dp_packet **packetp)
{
    const struct dpif_backer *backer = NULL;
    const char *error = NULL;
    char *m_err = NULL;
    struct simap port_names = SIMAP_INITIALIZER(&port_names);
    struct dp_packet *packet;
    struct ofpbuf odp_key;
    struct ofpbuf odp_mask;

    ofpbuf_init(&odp_key, 0);
    ofpbuf_init(&odp_mask, 0);

    /* Handle "-generate" or a hex string as the last argument. */
    if (!strcmp(argv[argc - 1], "-generate")) {
        packet = dp_packet_new(0);
        argc--;
    } else {
        error = eth_from_hex(argv[argc - 1], &packet);
        if (!error) {
            argc--;
        } else if (argc == 4) {
            /* The 3-argument form must end in "-generate" or a hex string. */
            goto exit;
        }
        error = NULL;
    }

    /* odp_flow can have its in_port specified as a name instead of port no.
     * We do not yet know whether a given flow is an odp_flow or a br_flow.
     * But, to know whether a flow is an odp_flow through
     * odp_flow_from_string(), we need to create a simap of name to port
     * no. */
    if (argc == 3) {
        const char *dp_type;
        if (!strncmp(argv[1], "ovs-", 4)) {
            dp_type = argv[1] + 4;
        } else {
            dp_type = argv[1];
        }
        backer = shash_find_data(&all_dpif_backers, dp_type);
    } else if (argc == 2) {
        struct shash_node *node;
        if (shash_count(&all_dpif_backers) == 1) {
            node = shash_first(&all_dpif_backers);
            backer = node->data;
        }
    } else {
        error = "Syntax error";
        goto exit;
    }

    if (backer && backer->dpif) {
        struct dpif_port dpif_port;
        struct dpif_port_dump port_dump;
        DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, backer->dpif) {
            simap_put(&port_names, dpif_port.name,
                      odp_to_u32(dpif_port.port_no));
        }
    }

    /* Parse the flow and determine whether a datapath or bridge is
     * specified.  If odp_flow_from_string() returns 0, the flow is an
     * odp_flow.  If parse_ofp_exact_flow() returns NULL, the flow is a
     * br_flow. */
    if (!odp_flow_from_string(argv[argc - 1], &port_names,
                              &odp_key, &odp_mask)) {
        if (!backer) {
            error = "Cannot find the datapath";
            goto exit;
        }

        if (odp_flow_key_to_flow(odp_key.data, odp_key.size, flow)
            == ODP_FIT_ERROR) {
            error = "Failed to parse datapath flow key";
            goto exit;
        }

        *ofprotop = xlate_lookup_ofproto(backer, flow,
                                         &flow->in_port.ofp_port);
        if (*ofprotop == NULL) {
            error = "Invalid datapath flow";
            goto exit;
        }
    } else {
        char *err = parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL);

        if (err) {
            m_err = xasprintf("Bad openflow flow syntax: %s", err);
            free(err);
            goto exit;
        } else if (argc != 3) {
            error = "Must specify bridge name";
            goto exit;
        }

        *ofprotop = ofproto_dpif_lookup(argv[1]);
        if (!*ofprotop) {
            error = "Unknown bridge name";
            goto exit;
        }
    }

    /* Generate a packet, if requested. */
    if (packet) {
        if (!dp_packet_size(packet)) {
            flow_compose(packet, flow);
        } else {
            /* Use the metadata from the flow and the packet argument to
             * reconstruct the flow. */
            pkt_metadata_from_flow(&packet->md, flow);
            flow_extract(packet, flow);
        }
    }

exit:
    if (error && !m_err) {
        m_err = xstrdup(error);
    }
    if (m_err) {
        dp_packet_delete(packet);
        packet = NULL;
    }
    *packetp = packet;
    ofpbuf_uninit(&odp_key);
    ofpbuf_uninit(&odp_mask);
    simap_destroy(&port_names);
    return m_err;
}

static void
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
                      void *aux OVS_UNUSED)
{
    struct ofproto_dpif *ofproto;
    struct dp_packet *packet;
    char *error;
    struct flow flow;

    error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
    if (!error) {
        struct ds result;

        ds_init(&result);
        ofproto_trace(ofproto, &flow, packet, NULL, 0, &result);
        unixctl_command_reply(conn, ds_cstr(&result));
        ds_destroy(&result);
        dp_packet_delete(packet);
    } else {
        unixctl_command_reply_error(conn, error);
        free(error);
    }
}

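/* Illustrative invocation (added commentary; the bridge and flow are
 * hypothetical):
 *
 *     $ ovs-appctl ofproto/trace br0 in_port=1,dl_type=0x0806
 *
 * The reply is the textual report built by ofproto_trace() below. */
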
static void
ofproto_unixctl_trace_actions(struct unixctl_conn *conn, int argc,
                              const char *argv[], void *aux OVS_UNUSED)
{
    enum ofputil_protocol usable_protocols;
    struct ofproto_dpif *ofproto;
    bool enforce_consistency;
    struct ofpbuf ofpacts;
    struct dp_packet *packet;
    struct ds result;
    struct flow flow;
    uint16_t in_port;

    /* Three kinds of error return values! */
    enum ofperr retval;
    char *error;

    packet = NULL;
    ds_init(&result);
    ofpbuf_init(&ofpacts, 0);

    /* Parse actions. */
    error = ofpacts_parse_actions(argv[--argc], &ofpacts, &usable_protocols);
    if (error) {
        unixctl_command_reply_error(conn, error);
        free(error);
        goto exit;
    }

    /* OpenFlow 1.1 and later suggest that the switch enforces certain forms
     * of consistency between the flow and the actions.  With -consistent, we
     * enforce consistency even for a flow supported in OpenFlow 1.0. */
    if (!strcmp(argv[1], "-consistent")) {
        enforce_consistency = true;
        argv++;
        argc--;
    } else {
        enforce_consistency = false;
    }

    error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
    if (error) {
        unixctl_command_reply_error(conn, error);
        free(error);
        goto exit;
    }

    /* Do the same checks as handle_packet_out() in ofproto.c.
     *
     * We pass a 'table_id' of 0 to ofpacts_check(), which isn't strictly
     * correct because these actions aren't in any table.  This is OK because
     * 'table_id' is used only to check goto_table instructions, and
     * packet-outs take a list of actions and therefore can't include
     * instructions.
     *
     * We skip the "meter" check here because meter is an instruction, not an
     * action, and thus cannot appear in ofpacts. */
    in_port = ofp_to_u16(flow.in_port.ofp_port);
    if (in_port >= ofproto->up.max_ports && in_port < ofp_to_u16(OFPP_MAX)) {
        unixctl_command_reply_error(conn, "invalid in_port");
        goto exit;
    }
    if (enforce_consistency) {
        retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
                                           u16_to_ofp(ofproto->up.max_ports),
                                           0, ofproto->up.n_tables,
                                           usable_protocols);
    } else {
        retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
                               u16_to_ofp(ofproto->up.max_ports), 0,
                               ofproto->up.n_tables, &usable_protocols);
    }
    if (!retval) {
        retval = ofproto_check_ofpacts(&ofproto->up, ofpacts.data,
                                       ofpacts.size);
    }

    if (retval) {
        ds_clear(&result);
        ds_put_format(&result, "Bad actions: %s", ofperr_to_string(retval));
        unixctl_command_reply_error(conn, ds_cstr(&result));
        goto exit;
    }

    ofproto_trace(ofproto, &flow, packet,
                  ofpacts.data, ofpacts.size, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    dp_packet_delete(packet);
    ofpbuf_uninit(&ofpacts);
}

/* Implements a "trace" through 'ofproto''s flow table, appending a textual
 * description of the results to 'ds'.
 *
 * The trace follows a packet with the specified 'flow' through the flow
 * table.  'packet' may be nonnull to trace an actual packet, with consequent
 * side effects (if it is nonnull then its flow must be 'flow').
 *
 * If 'ofpacts' is nonnull then its 'ofpacts_len' bytes specify the actions to
 * trace, otherwise the actions are determined by a flow table lookup. */
static void
ofproto_trace(struct ofproto_dpif *ofproto, struct flow *flow,
              const struct dp_packet *packet,
              const struct ofpact ofpacts[], size_t ofpacts_len,
              struct ds *ds)
{
    struct trace_ctx trace;
    enum xlate_error error;

    ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    ofpbuf_init(&trace.odp_actions, 0);

    trace.result = ds;
    trace.key = flow; /* Original flow key, used for megaflow. */
    trace.flow = *flow; /* May be modified by actions. */
    xlate_in_init(&trace.xin, ofproto, flow, flow->in_port.ofp_port, NULL,
                  ntohs(flow->tcp_flags), packet, &trace.wc,
                  &trace.odp_actions);
    trace.xin.ofpacts = ofpacts;
    trace.xin.ofpacts_len = ofpacts_len;
    trace.xin.resubmit_hook = trace_resubmit;
    trace.xin.report_hook = trace_report_valist;

    error = xlate_actions(&trace.xin, &trace.xout);
    ds_put_char(ds, '\n');
    trace.xin.flow.actset_output = 0;
    trace_format_flow(ds, 0, "Final flow", &trace);
    trace_format_megaflow(ds, 0, "Megaflow", &trace);

    ds_put_cstr(ds, "Datapath actions: ");
    format_odp_actions(ds, trace.odp_actions.data, trace.odp_actions.size);

    if (error != XLATE_OK) {
        ds_put_format(ds, "\nTranslation failed (%s), packet is dropped.\n",
                      xlate_strerror(error));
    } else if (trace.xout.slow) {
        enum slow_path_reason slow;

        ds_put_cstr(ds, "\nThis flow is handled by the userspace "
                    "slow path because it:");

        slow = trace.xout.slow;
        while (slow) {
            enum slow_path_reason bit = rightmost_1bit(slow);

            ds_put_format(ds, "\n\t- %s.",
                          slow_path_reason_to_explanation(bit));

            slow &= ~bit;
        }
    }

    xlate_out_uninit(&trace.xout);
    ofpbuf_uninit(&trace.odp_actions);
}

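/* The report produced above has roughly this shape (illustrative sketch,
 * not verbatim output; field values are hypothetical):
 *
 *     Bridge: br0
 *     Flow: in_port=1,vlan_tci=0x0000,...
 *     Rule: table=0 cookie=0x0 priority=0
 *     OpenFlow actions=NORMAL
 *     Final flow: unchanged
 *     Megaflow: recirc_id=0,in_port=1,...
 *     Datapath actions: 2
 */
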
/* Stores the current ofprotos in 'ofproto_shash'.  Returns a sorted list
 * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
 * to destroy 'ofproto_shash' and free the returned value. */
static const struct shash_node **
get_ofprotos(struct shash *ofproto_shash)
{
    const struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
        shash_add_nocopy(ofproto_shash, name, ofproto);
    }

    return shash_sort(ofproto_shash);
}

static void
ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
                              const char *argv[] OVS_UNUSED,
                              void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    struct shash ofproto_shash;
    const struct shash_node **sorted_ofprotos;
    int i;

    shash_init(&ofproto_shash);
    sorted_ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        const struct shash_node *node = sorted_ofprotos[i];
        ds_put_format(&ds, "%s\n", node->name);
    }

    shash_destroy(&ofproto_shash);
    free(sorted_ofprotos);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
{
    const struct shash_node **ofprotos;
    struct dpif_dp_stats dp_stats;
    struct shash ofproto_shash;
    size_t i;

    dpif_get_dp_stats(backer->dpif, &dp_stats);

    ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
                  dpif_name(backer->dpif), dp_stats.n_hit, dp_stats.n_missed);

    shash_init(&ofproto_shash);
    ofprotos = get_ofprotos(&ofproto_shash);
    for (i = 0; i < shash_count(&ofproto_shash); i++) {
        struct ofproto_dpif *ofproto = ofprotos[i]->data;
        const struct shash_node **ports;
        size_t j;

        if (ofproto->backer != backer) {
            continue;
        }

        ds_put_format(ds, "\t%s:\n", ofproto->up.name);

        ports = shash_sort(&ofproto->up.port_by_name);
        for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
            const struct shash_node *node = ports[j];
            struct ofport *ofport = node->data;
            struct smap config;
            odp_port_t odp_port;

            ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
                          ofport->ofp_port);

            odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
            if (odp_port != ODPP_NONE) {
                ds_put_format(ds, "%"PRIu32":", odp_port);
            } else {
                ds_put_cstr(ds, "none:");
            }

            ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));

            smap_init(&config);
            if (!netdev_get_config(ofport->netdev, &config)) {
                const struct smap_node **nodes;
                size_t k;

                nodes = smap_sort(&config);
                for (k = 0; k < smap_count(&config); k++) {
                    const struct smap_node *smap_node = nodes[k];
                    ds_put_format(ds, "%c %s=%s", k ? ',' : ':',
                                  smap_node->key, smap_node->value);
                }
                free(nodes);
            }
            smap_destroy(&config);

            ds_put_char(ds, ')');
            ds_put_char(ds, '\n');
        }
        free(ports);
    }
    shash_destroy(&ofproto_shash);
    free(ofprotos);
}

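/* Sample "dpif/show" output for one backer (illustrative, not verbatim;
 * names, counters, and port numbers are hypothetical):
 *
 *     system@ovs-system: hit:1024 missed:12
 *         br0:
 *             br0 65534/1: (internal)
 *             eth1 1/2: (system)
 *
 * Each port line is "name ofp_port/odp_port: (type, config)", as built in
 * the inner loop above. */
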
static void
ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
                          const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct shash_node **backers;
    int i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        dpif_show_backer(backers[i]->data, &ds);
    }
    free(backers);

    unixctl_command_reply(conn, ds_cstr(&ds));
    ds_destroy(&ds);
}

static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
                                int argc, const char *argv[],
                                void *aux OVS_UNUSED)
{
    const struct ofproto_dpif *ofproto;

    struct ds ds = DS_EMPTY_INITIALIZER;
    bool verbosity = false;

    struct dpif_port dpif_port;
    struct dpif_port_dump port_dump;
    struct hmap portno_names;

    struct dpif_flow_dump *flow_dump;
    struct dpif_flow_dump_thread *flow_dump_thread;
    struct dpif_flow f;
    int error;

    ofproto = ofproto_dpif_lookup(argv[argc - 1]);
    if (!ofproto) {
        unixctl_command_reply_error(conn, "no such bridge");
        return;
    }

    if (argc > 2 && !strcmp(argv[1], "-m")) {
        verbosity = true;
    }

    hmap_init(&portno_names);
    DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, ofproto->backer->dpif) {
        odp_portno_names_set(&portno_names, dpif_port.port_no, dpif_port.name);
    }

    flow_dump = dpif_flow_dump_create(ofproto->backer->dpif, false);
    flow_dump_thread = dpif_flow_dump_thread_create(flow_dump);
    while (dpif_flow_dump_next(flow_dump_thread, &f, 1)) {
        struct flow flow;

        if (odp_flow_key_to_flow(f.key, f.key_len, &flow) == ODP_FIT_ERROR
            || xlate_lookup_ofproto(ofproto->backer, &flow, NULL) != ofproto) {
            continue;
        }

        if (verbosity) {
            odp_format_ufid(&f.ufid, &ds);
            ds_put_cstr(&ds, " ");
        }
        odp_flow_format(f.key, f.key_len, f.mask, f.mask_len,
                        &portno_names, &ds, verbosity);
        ds_put_cstr(&ds, ", ");
        dpif_flow_stats_format(&f.stats, &ds);
        ds_put_cstr(&ds, ", actions:");
        format_odp_actions(&ds, f.actions, f.actions_len);
        ds_put_char(&ds, '\n');
    }
    dpif_flow_dump_thread_destroy(flow_dump_thread);
    error = dpif_flow_dump_destroy(flow_dump);

    if (error) {
        ds_clear(&ds);
        ds_put_format(&ds, "dpif/dump_flows failed: %s", ovs_strerror(errno));
        unixctl_command_reply_error(conn, ds_cstr(&ds));
    } else {
        unixctl_command_reply(conn, ds_cstr(&ds));
    }
    odp_portno_names_destroy(&portno_names);
    hmap_destroy(&portno_names);
    ds_destroy(&ds);
}

static void
ofproto_revalidate_all_backers(void)
{
    const struct shash_node **backers;
    int i;

    backers = shash_sort(&all_dpif_backers);
    for (i = 0; i < shash_count(&all_dpif_backers); i++) {
        struct dpif_backer *backer = backers[i]->data;
        backer->need_revalidate = REV_RECONFIGURE;
    }
    free(backers);
}

static void
disable_tnl_push_pop(struct unixctl_conn *conn, int argc OVS_UNUSED,
                     const char *argv[], void *aux OVS_UNUSED)
{
    if (!strcasecmp(argv[1], "off")) {
        ofproto_use_tnl_push_pop = false;
        unixctl_command_reply(conn, "Tunnel push-pop off");
        ofproto_revalidate_all_backers();
    } else if (!strcasecmp(argv[1], "on")) {
        ofproto_use_tnl_push_pop = true;
        unixctl_command_reply(conn, "Tunnel push-pop on");
        ofproto_revalidate_all_backers();
    } else {
        unixctl_command_reply_error(conn, "Invalid argument");
    }
}

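/* Illustrative use (added commentary): "ovs-appctl ofproto/tnl-push-pop off"
 * disables combined tunnel push/pop translation and, via
 * ofproto_revalidate_all_backers() above, marks every backer with
 * REV_RECONFIGURE so that its datapath flows are revalidated. */
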
static void
ofproto_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "{[dp_name] odp_flow | bridge br_flow} [-generate|packet]",
        1, 3, ofproto_unixctl_trace, NULL);
    unixctl_command_register(
        "ofproto/trace-packet-out",
        "[-consistent] {[dp_name] odp_flow | bridge br_flow} [-generate|packet] actions",
        2, 6, ofproto_unixctl_trace_actions, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("mdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_mcast_snooping_flush, NULL);
    unixctl_command_register("mdb/show", "bridge", 1, 1,
                             ofproto_unixctl_mcast_snooping_show, NULL);
    unixctl_command_register("dpif/dump-dps", "", 0, 0,
                             ofproto_unixctl_dpif_dump_dps, NULL);
    unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
                             NULL);
    unixctl_command_register("dpif/dump-flows", "[-m] bridge", 1, 2,
                             ofproto_unixctl_dpif_dump_flows, NULL);

    unixctl_command_register("ofproto/tnl-push-pop", "[on]|[off]", 1, 1,
                             disable_tnl_push_pop, NULL);
}

/* Returns true if 'table_id' is the table used for internal rules,
 * false otherwise. */
bool
table_is_internal(uint8_t table_id)
{
    return table_id == TBL_INTERNAL;
}

odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
    const struct ofport_dpif *ofport = ofp_port_to_ofport(ofproto, ofp_port);
    return ofport ? ofport->odp_port : ODPP_NONE;
}

struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    ovs_rwlock_rdlock(&backer->odp_to_ofport_lock);
    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            return port;
        }
    }
    ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
    return NULL;
}

ofp_port_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}

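/* Minimal usage sketch (added for illustration; 'ofproto' and 'odp_port'
 * are assumed to come from the caller's context):
 *
 *     ofp_port_t ofp_port = odp_port_to_ofp_port(ofproto, odp_port);
 *     if (ofp_port != OFPP_NONE) {
 *         // The datapath port maps to an OpenFlow port on this bridge.
 *     }
 *
 * The reverse mapping is ofp_port_to_odp_port() above; each direction
 * returns its own sentinel (OFPP_NONE or ODPP_NONE) when no mapping
 * exists. */
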
int
ofproto_dpif_add_internal_flow(struct ofproto_dpif *ofproto,
                               const struct match *match, int priority,
                               uint16_t idle_timeout,
                               const struct ofpbuf *ofpacts,
                               struct rule **rulep)
{
    struct ofproto_flow_mod ofm;
    struct rule_dpif *rule;
    int error;

    ofm.fm = (struct ofputil_flow_mod) {
        .match = *match,
        .priority = priority,
        .table_id = TBL_INTERNAL,
        .command = OFPFC_ADD,
        .idle_timeout = idle_timeout,
        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
        .ofpacts = ofpacts->data,
        .ofpacts_len = ofpacts->size,
        .delete_reason = OVS_OFPRR_NONE,
    };

    error = ofproto_flow_mod(&ofproto->up, &ofm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow (%s)",
                    ofperr_to_string(error));
        *rulep = NULL;
        return error;
    }

    rule = rule_dpif_lookup_in_table(ofproto,
                                     ofproto_dpif_get_tables_version(ofproto),
                                     TBL_INTERNAL, &ofm.fm.match.flow,
                                     &ofm.fm.match.wc);
    if (rule) {
        *rulep = &rule->up;
    } else {
        OVS_NOT_REACHED();
    }
    return 0;
}

int
ofproto_dpif_delete_internal_flow(struct ofproto_dpif *ofproto,
                                  struct match *match, int priority)
{
    struct ofproto_flow_mod ofm;
    int error;

    ofm.fm = (struct ofputil_flow_mod) {
        .match = *match,
        .priority = priority,
        .table_id = TBL_INTERNAL,
        .flags = OFPUTIL_FF_HIDDEN_FIELDS | OFPUTIL_FF_NO_READONLY,
        .command = OFPFC_DELETE_STRICT,
    };

    error = ofproto_flow_mod(&ofproto->up, &ofm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to delete internal flow (%s)",
                    ofperr_to_string(error));
        return error;
    }

    return 0;
}

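/* Hedged usage sketch for the pair of functions above (illustrative only;
 * error handling is elided and the catch-all match, priority 100, and empty
 * action list are hypothetical choices):
 *
 *     struct match match;
 *     struct ofpbuf ofpacts;
 *     struct rule *rule;
 *
 *     match_init_catchall(&match);
 *     ofpbuf_init(&ofpacts, 0);
 *     ofproto_dpif_add_internal_flow(ofproto, &match, 100, 0,
 *                                    &ofpacts, &rule);
 *     ...
 *     ofproto_dpif_delete_internal_flow(ofproto, &match, 100);
 *     ofpbuf_uninit(&ofpacts);
 *
 * The same 'match' and 'priority' must be passed to the delete, since it
 * issues OFPFC_DELETE_STRICT. */
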
const struct uuid *
ofproto_dpif_get_uuid(const struct ofproto_dpif *ofproto)
{
    return &ofproto->uuid;
}

const struct ofproto_class ofproto_dpif_class = {
    NULL,                       /* get_memory_usage. */
    type_get_memory_usage,
    port_is_lacp_current,
    port_get_lacp_stats,
    NULL,                       /* rule_choose_table */
    aa_vlan_get_queue_size,
    get_stp_port_status,
    get_rstp_port_status,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,
    set_mcast_snooping,
    set_mcast_snooping_port,
    NULL,                       /* meter_get_features */
    NULL,                       /* meter_set */
    NULL,                       /* meter_get */
    NULL,                       /* meter_del */
    group_alloc,                /* group_alloc */
    group_construct,            /* group_construct */
    group_destruct,             /* group_destruct */
    group_dealloc,              /* group_dealloc */
    group_modify,               /* group_modify */
    group_get_stats,            /* group_get_stats */
    get_datapath_version,       /* get_datapath_version */
};