Krishna Kondaka kkondaka@vmware.com
Kyle Mestery mestery@mestery.com
Kyle Upton kupton@baymicrosystems.com
+Lance Richardson lrichard@redhat.com
Lars Kellogg-Stedman lars@redhat.com
Leo Alterman lalterman@nicira.com
Lilijun jerry.lilijun@huawei.com
Reported-at: http://openvswitch.org/pipermail/dev/2014-June/040952.html
+ Submitted-at: <URL>
+
+ If a patch was submitted somewhere other than the Open vSwitch
+ development mailing list, such as a GitHub pull request, this header can
+ be used to reference the source.
+
+ Submitted-at: https://github.com/openvswitch/ovs/pull/92
+
VMware-BZ: #1234567
ONF-JIRA: EXT-12345
- clang, version 3.4 or later
+ - flake8 (for Python code)
+
Also, you may find the ovs-dev script found in utilities/ovs-dev.py useful.
Installation Requirements
-# Copyright (C) 2007-2015 Nicira, Inc.
+# Copyright (C) 2007-2016 Nicira, Inc.
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
check_DATA =
check_SCRIPTS =
pkgconfig_DATA =
+FLAKE8_PYFILES =
scriptsdir = $(pkgdatadir)/scripts
completiondir = $(sysconfdir)/bash_completion.d
SUFFIXES += .xml
%: %.xml
$(AM_V_GEN)$(run_python) $(srcdir)/build-aux/xml2nroff $< > $@.tmp \
+ -I $(srcdir) \
--version=$(VERSION) \
PKIDIR='$(PKIDIR)' \
LOGDIR='$(LOGDIR)' \
CLEANFILES += manpage-check
endif
+if HAVE_FLAKE8
+ALL_LOCAL += flake8-check
+# http://flake8.readthedocs.org/en/latest/warnings.html
+# All warnings explicitly selected or ignored should be listed below.
+#
+# E***, W*** -- warnings from pep8
+# E121 continuation line under-indented for hanging indent (only from flake8 v2.0)
+# E123 closing bracket does not match indentation of opening bracket's line
+# E125 continuation line with same indent as next logical line (only from flake8 v2.0)
+# E126 continuation line over-indented for hanging indent
+# E127 continuation line over-indented for visual indent
+# E128 continuation line under-indented for visual indent
+# E129 visually indented line with same indent as next logical line
+# E131 continuation line unaligned for hanging indent
+# W503 line break before binary operator
+# F*** -- warnings native to flake8
+# F811 redefinition of unused <name> from line <N> (only from flake8 v2.0)
+# D*** -- warnings from flake8-docstrings plugin
+# H*** -- warnings from flake8 hacking plugin (custom style checks beyond PEP8)
+# H231 Python 3.x incompatible 'except x,y:' construct
+# H233 Python 3.x incompatible use of print operator
+flake8-check: $(FLAKE8_PYFILES)
+ $(AM_V_GEN) if flake8 $^ --select=H231,H233 --ignore=E121,E123,E125,E126,E127,E128,E129,E131,W503,F811,D,H ${FLAKE8_FLAGS}; then touch $@; else exit 1; fi
+endif
+
include $(srcdir)/manpages.mk
$(srcdir)/manpages.mk: $(MAN_ROOTS) build-aux/sodepends.pl
@$(PERL) $(srcdir)/build-aux/sodepends.pl -I. -I$(srcdir) $(MAN_ROOTS) >$(@F).tmp
---------------------
- ovsdb-server:
* New "monitor2" and "update2" extensions to RFC 7047.
+ - OpenFlow:
+ * OpenFlow 1.1+ OFPT_QUEUE_GET_CONFIG_REQUEST now supports OFPP_ANY.
+
v2.5.0 - xx xxx xxxx
---------------------
CFLAGS="$ovs_save_CFLAGS"
LDFLAGS="$ovs_save_LDFLAGS"
OVS_LDFLAGS="$OVS_LDFLAGS -L$DPDK_LIB_DIR"
- OVS_CFLAGS="$OVS_CFLAGS -I$DPDK_INCLUDE -mssse3"
+ OVS_CFLAGS="$OVS_CFLAGS -I$DPDK_INCLUDE"
+ OVS_ENABLE_OPTION([-mssse3])
# DPDK pmd drivers are not linked unless --whole-archive is used.
#
#! /usr/bin/python
-# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+# Copyright (c) 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
corresponding VALUE, with characters &<>"' in VALUE escaped.
The following options are also available:
+ -I, --include=DIR search DIR for include files (default: .)
--version=VERSION use VERSION to display on document footer
-h, --help display this help message\
""" % {'argv0': argv0}
sys.exit(0)
-def manpage_to_nroff(xml_file, subst, version=None):
+def manpage_to_nroff(xml_file, subst, include_path, version=None):
with open(xml_file) as f:
content = f.read()
for k, v in subst.iteritems():
content = content.replace(k, v)
doc = xml.dom.minidom.parseString(content).documentElement
+ xi_nodes = doc.getElementsByTagName("xi:include")
+ for node in xi_nodes:
+ fn = node.getAttribute("href")
+ content = None
+ for dir in include_path:
+ try:
+ with open("%s/%s" % (dir, fn)) as xi_f:
+ content = xi_f.read()
+ except IOError:
+ pass
+ if not content:
+ sys.stderr.write("%s: could not open include file %s\n"
+ % (argv0, fn))
+ sys.exit(1)
+ for k, v in subst.iteritems():
+ content = content.replace(k, v)
+ xi_doc = xml.dom.minidom.parseString(content).documentElement
+ doc.replaceChild(xi_doc, node)
+
if version is None:
version = "UNKNOWN"
program = doc.attributes['program'].nodeValue
if __name__ == "__main__":
try:
- options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
- ['version=', 'help'])
+ options, args = getopt.gnu_getopt(sys.argv[1:], 'hVI:',
+ ['version=', 'help', 'include='])
except getopt.GetoptError, geo:
sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
sys.exit(1)
er_diagram = None
title = None
version = None
+ include_path = []
for key, value in options:
if key == '--version':
version = value
elif key in ['-h', '--help']:
usage()
+ elif key in ['-I', '--include']:
+ include_path.append(value)
else:
sys.exit(0)
+ if not include_path:
+ include_path = ['.']
if len(args) < 1:
sys.stderr.write("%s: exactly 1 non-option arguments required "
subst['@%s@' % var] = value
try:
- s = manpage_to_nroff(args[0], subst, version)
+ s = manpage_to_nroff(args[0], subst, include_path, version)
except build.nroff.error.Error, e:
sys.stderr.write("%s: %s\n" % (argv0, e.msg))
sys.exit(1)
OVS_CHECK_LIBCAPNG
OVS_CHECK_LOGDIR
OVS_CHECK_PYTHON
+OVS_CHECK_FLAKE8
OVS_CHECK_DOT
OVS_CHECK_IF_PACKET
OVS_CHECK_IF_DL
default:
OVS_LOG_ERROR("Unknown LSO transmit type:%d",
tsoInfo.Transmit.Type);
- return NDIS_STATUS_FAILURE;
}
OVS_LOG_TRACE("MSS %u packet len %u", mss,
packetLength);
POVS_VXLAN_VPORT vportVxlan;
UINT32 headRoom = OvsGetVxlanTunHdrSize();
UINT32 packetLength;
+ ULONG mss = 0;
/*
* XXX: the assumption currently is that the NBL is owned by OVS, and
tsoInfo.Value = NET_BUFFER_LIST_INFO(curNbl,
TcpLargeSendNetBufferListInfo);
- OVS_LOG_TRACE("MSS %u packet len %u", tsoInfo.LsoV1Transmit.MSS,
+ switch (tsoInfo.Transmit.Type) {
+ case NDIS_TCP_LARGE_SEND_OFFLOAD_V1_TYPE:
+ mss = tsoInfo.LsoV1Transmit.MSS;
+ break;
+ case NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE:
+ mss = tsoInfo.LsoV2Transmit.MSS;
+ break;
+ default:
+ OVS_LOG_ERROR("Unknown LSO transmit type:%d",
+ tsoInfo.Transmit.Type);
+ }
+ OVS_LOG_TRACE("MSS %u packet len %u", mss,
packetLength);
- if (tsoInfo.LsoV1Transmit.MSS) {
+ if (mss) {
OVS_LOG_TRACE("l4Offset %d", layers->l4Offset);
*newNbl = OvsTcpSegmentNBL(switchContext, curNbl, layers,
- tsoInfo.LsoV1Transmit.MSS, headRoom);
+ mss, headRoom);
if (*newNbl == NULL) {
OVS_LOG_ERROR("Unable to segment NBL");
return NDIS_STATUS_FAILURE;
sn->n_tunnels--;
if (sn->n_tunnels)
goto out;
-#ifdef HAVE_NF_REGISTER_NET_HOOK
- nf_unregister_net_hook(net, &nf_hook_ops);
-#else
- nf_unregister_hook(&nf_hook_ops);
-#endif
-
out:
n_tunnels--;
if (n_tunnels)
struct net *net = stt_dev->net;
list_del_rcu(&stt_dev->up_next);
+ synchronize_net();
tcp_sock_release(stt_dev->sock);
stt_dev->sock = NULL;
stt_cleanup(net);
struct net_device *dev, *aux;
LIST_HEAD(list);
+#ifdef HAVE_NF_REGISTER_NET_HOOK
+ /* Ideally this should be done from stt_stop(), but on some kernels
+ * the nf-unreg operation needs the RTNL lock, which can cause deadlock.
+ * So it is done from here. */
+ if (!list_empty(&nf_hook_ops.list))
+ nf_unregister_net_hook(net, &nf_hook_ops);
+#endif
+
rtnl_lock();
/* gather any stt devices that were moved into this ns */
if (rc)
goto out2;
+ INIT_LIST_HEAD(&nf_hook_ops.list);
pr_info("STT tunneling driver\n");
return 0;
out2:
void stt_cleanup_module(void)
{
+#ifndef HAVE_NF_REGISTER_NET_HOOK
+ if (!list_empty(&nf_hook_ops.list))
+ nf_unregister_hook(&nf_hook_ops);
+#endif
rtnl_link_unregister(&stt_link_ops);
unregister_pernet_subsys(&stt_net_ops);
}
debian/ifupdown.sh \
debian/source/format
+FLAKE8_PYFILES += \
+ debian/ovs-monitor-ipsec
+
check-debian-changelog-version:
@DEB_VERSION=`echo '$(VERSION)' | sed 's/pre/~pre/'`; \
if $(FGREP) '($(DEB_VERSION)' $(srcdir)/debian/changelog >/dev/null; \
import ovs.dirs
from ovs.db import error
-from ovs.db import types
import ovs.util
import ovs.daemon
import ovs.db.idl
if host in self.psk_hosts:
raise error.Error("host %s already defined for psk" % host)
- if vals["certificate"] == None:
+ if vals["certificate"] is None:
raise error.Error("'certificate' not defined for %s" % host)
- elif vals["private_key"] == None:
+ elif vals["private_key"] is None:
# Assume the private key is stored in the same PEM file as
# the certificate. We make a copy of "vals" so that we don't
# modify the original "vals", which would cause the script
try:
ipsec.add_entry(vals["local_ip"], vals["remote_ip"], vals)
- except error.Error, msg:
+ except error.Error as msg:
vlog.warn("skipping ipsec config for %s: %s" % (name, msg))
lib/dh1024.pem \
lib/dh2048.pem \
lib/dh4096.pem \
- lib/dirs.c.in
+ lib/dirs.c.in \
+ lib/db-ctl-base.xml
MAN_FRAGMENTS += \
lib/common.man \
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>
+<p>
+ <p><var>Database Values</var></p>
+
+ <p>
+ Each column in the database accepts a fixed type of data. The
+ currently defined basic types, and their representations, are:
+ </p>
+
+ <dl>
+ <dt>integer</dt>
+ <dd>
+ A decimal integer in the range -2**63 to 2**63-1, inclusive.
+ </dd>
+
+ <dt>real</dt>
+ <dd>
+ A floating-point number.
+ </dd>
+
+ <dt>Boolean</dt>
+ <dd>
+ True or false, written <code>true</code> or <code>false</code>, respectively.
+ </dd>
+
+ <dt>string</dt>
+ <dd>
+ An arbitrary Unicode string, except that null bytes are not allowed.
+ Quotes are optional for most strings that begin with an English letter
+ or underscore and consist only of letters, underscores, hyphens, and
+ periods. However, <code>true</code> and <code>false</code> and strings that match
+ the syntax of UUIDs (see below) must be enclosed in double quotes to
+ distinguish them from other basic types. When double quotes are used,
+ the syntax is that of strings in JSON, e.g. backslashes may be used to
+ escape special characters. The empty string must be represented as a
+ pair of double quotes (<code>""</code>).
+ </dd>
+
+ <dt>UUID</dt>
+ <dd>
+ Either a universally unique identifier in the style of RFC 4122,
+ e.g. <code>f81d4fae-7dec-11d0-a765-00a0c91e6bf6</code>, or an <code>@</code><var>name</var>
+ defined by a <code>get</code> or <code>create</code> command within the same <code>ovn-nbctl</code>
+ invocation.
+ </dd>
+
+ </dl>
+
+ <p>
+ Multiple values in a single column may be separated by spaces or a
+ single comma. When multiple values are present, duplicates are not
+ allowed, and order is not important. Conversely, some database
+ columns can have an empty set of values, represented as <code>[]</code>, and
+ square brackets may optionally enclose other non-empty sets or single
+ values as well.
+ </p>
+
+ <p>
+ A few database columns are "maps" of key-value pairs, where the key
+ and the value are each some fixed database type. These are specified
+ in the form <var>key</var><code>=</code><var>value</var>, where <var>key</var> and <var>value</var>
+ follow the syntax for the column's key type and value type,
+ respectively. When multiple pairs are present (separated by spaces or
+ a comma), duplicate keys are not allowed, and again the order is not
+ important. Duplicate values are allowed. An empty map is represented
+ as <code>{}</code>. Curly braces may optionally enclose non-empty maps as
+ well (but use quotes to prevent the shell from expanding
+ <code>other-config={0=x,1=y}</code> into <code>other-config=0=x
+ other-config=1=y</code>, which may not have the desired effect).
+ </p>
+
+ <p><var>Database Command Syntax</var></p>
+
+ <dl>
+ <dt>[<code>--if-exists</code>] [<code>--columns=</code><var>column</var>[<code>,</code><var>column</var>]...] <code>list</code> <var>table</var> [<var>record</var>]...</dt>
+ <dd>
+ <p>
+ Lists the data in each specified <var>record</var>. If no
+ records are specified, lists all the records in <var>table</var>.
+ </p>
+ <p>
+ If <code>--columns</code> is specified, only the requested columns are
+ listed, in the specified order. Otherwise, all columns are listed, in
+ alphabetical order by column name.
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if any specified
+ <var>record</var> does not exist. With <code>--if-exists</code>, the command
+ ignores any <var>record</var> that does not exist, without producing any
+ output.
+ </p>
+ </dd>
+
+ <dt>[<code>--columns=</code><var>column</var>[<code>,</code><var>column</var>]...] <code>find</code> <var>table</var> [<var>column</var>[<code>:</code><var>key</var>]<code>=</code><var>value</var>]...</dt>
+ <dd>
+ <p>
+ Lists the data in each record in <var>table</var> whose <var>column</var> equals
+ <var>value</var> or, if <var>key</var> is specified, whose <var>column</var> contains
+ a <var>key</var> with the specified <var>value</var>. The following operators
+ may be used where <code>=</code> is written in the syntax summary:
+ </p>
+ <dl>
+ <dt><code>= != < > <= >=</code></dt>
+ <dd>
+ <p>
+ Selects records in which <var>column</var>[<code>:</code><var>key</var>] equals, does not
+ equal, is less than, is greater than, is less than or equal to, or is
+ greater than or equal to <var>value</var>, respectively.</p>
+ <p>Consider <var>column</var>[<code>:</code><var>key</var>] and <var>value</var> as sets of
+ elements. Identical sets are considered equal. Otherwise, if the
+ sets have different numbers of elements, then the set with more
+ elements is considered to be larger. Otherwise, consider an element
+ from each set pairwise, in increasing order within each set. The
+ first pair that differs determines the result. (For a column that
+ contains key-value pairs, first all the keys are compared, and values
+ are considered only if the two sets contain identical keys.)
+ </p>
+ </dd>
+
+ <dt><code>{=} {!=}</code></dt>
+ <dd>
+ Test for set equality or inequality, respectively.
+ </dd>
+
+ <dt><code>{<=}</code></dt>
+ <dd>
+ Selects records in which <var>column</var>[<code>:</code><var>key</var>] is a subset of
+ <var>value</var>. For example, <code>flood-vlans{<=}1,2</code> selects records in
+ which the <code>flood-vlans</code> column is the empty set or contains 1 or 2
+ or both.
+ </dd>
+
+ <dt><code>{<}</code></dt>
+ <dd>
+ Selects records in which <var>column</var>[<code>:</code><var>key</var>] is a proper
+ subset of <var>value</var>. For example, <code>flood-vlans{<}1,2</code> selects
+ records in which the <code>flood-vlans</code> column is the empty set or
+ contains 1 or 2 but not both.
+ </dd>
+
+ <dt><code>{>=} {>}</code></dt>
+ <dd>
+ Same as <code>{<=}</code> and <code>{<}</code>, respectively, except that the
+ relationship is reversed. For example, <code>flood-vlans{>=}1,2</code>
+ selects records in which the <code>flood-vlans</code> column contains both 1
+ and 2.
+ </dd>
+
+ </dl>
+
+ <p>
+ For arithmetic operators (<code>= != < > <= >=</code>), when <var>key</var> is
+ specified but a particular record's <var>column</var> does not contain
+ <var>key</var>, the record is always omitted from the results. Thus, the
+ condition <code>other-config:mtu!=1500</code> matches records that have a
+ <code>mtu</code> key whose value is not 1500, but not those that lack an
+ <code>mtu</code> key.
+ </p>
+
+ <p>
+ For the set operators, when <var>key</var> is specified but a particular
+ record's <var>column</var> does not contain <var>key</var>, the comparison is
+ done against an empty set. Thus, the condition
+ <code>other-config:mtu{!=}1500</code> matches records that have a <code>mtu</code>
+ key whose value is not 1500 and those that lack an <code>mtu</code> key.
+ </p>
+
+ <p>
+ Don't forget to escape <code><</code> or <code>></code> from interpretation by the
+ shell.
+ </p>
+
+ <p>
+ If <code>--columns</code> is specified, only the requested columns are
+ listed, in the specified order. Otherwise all columns are listed, in
+ alphabetical order by column name.
+ </p>
+
+ <p>
+ The UUIDs shown for rows created in the same <code>ovn-nbctl</code>
+ invocation will be wrong.
+ </p>
+
+ </dd>
+
+ <dt>[<code>--if-exists</code>] [<code>--id=@</code><var>name</var>] <code>get</code> <var>table record</var> [<var>column</var>[<code>:</code><var>key</var>]]...</dt>
+ <dd>
+ <p>
+ Prints the value of each specified <var>column</var> in the given
+ <var>record</var> in <var>table</var>. For map columns, a <var>key</var> may
+ optionally be specified, in which case the value associated with
+ <var>key</var> in the column is printed, instead of the entire map.
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if <var>record</var> does not
+ exist or <var>key</var> is specified, if <var>key</var> does not exist in
+ <var>record</var>. With <code>--if-exists</code>, a missing <var>record</var>
+ yields no output and a missing <var>key</var> prints a blank line.
+ </p>
+ <p>
+ If <code>@</code><var>name</var> is specified, then the UUID for <var>record</var> may be
+ referred to by that name later in the same <code>ovn-nbctl</code>
+ invocation in contexts where a UUID is expected.
+ </p>
+ <p>
+ Both <code>--id</code> and the <var>column</var> arguments are optional, but
+ usually at least one or the other should be specified. If both are
+ omitted, then <code>get</code> has no effect except to verify that
+ <var>record</var> exists in <var>table</var>.
+ </p>
+ <p>
+ <code>--id</code> and <code>--if-exists</code> cannot be used together.
+ </p>
+ </dd>
+
+ <dt>[<code>--if-exists</code>] <code>set</code> <var>table record column</var>[<code>:</code><var>key</var>]<code>=</code><var>value</var>...</dt>
+ <dd>
+ <p>
+ Sets the value of each specified <var>column</var> in the given
+ <var>record</var> in <var>table</var> to <var>value</var>. For map columns, a
+ <var>key</var> may optionally be specified, in which case the value
+ associated with <var>key</var> in that column is changed (or added, if none
+ exists), instead of the entire map.
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if <var>record</var> does not
+ exist. With <code>--if-exists</code>, this command does nothing if
+ <var>record</var> does not exist.
+ </p>
+ </dd>
+ <dt>[<code>--if-exists</code>] <code>add</code> <var>table record column</var> [<var>key</var><code>=</code>]<var>value</var>...</dt>
+ <dd>
+ <p>
+ Adds the specified value or key-value pair to <var>column</var> in
+ <var>record</var> in <var>table</var>. If <var>column</var> is a map, then <var>key</var>
+ is required, otherwise it is prohibited. If <var>key</var> already exists
+ in a map column, then the current <var>value</var> is not replaced (use the
+ <code>set</code> command to replace an existing value).
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if <var>record</var> does not
+ exist. With <code>--if-exists</code>, this command does nothing if
+ <var>record</var> does not exist.
+ </p>
+ </dd>
+
+ <dt>
+ <p>
+ [<code>--if-exists</code>] <code>remove</code> <var>table record column value</var>...
+ </p>
+ <p>
+ [<code>--if-exists</code>] <code>remove</code> <var>table record column key</var>...
+ </p>
+ <p>
+ [<code>--if-exists</code>] <code>remove</code> <var>table record column key</var><code>=</code><var>value</var>...
+ </p>
+ </dt>
+ <dd>
+ <p>
+ Removes the specified values or key-value pairs from <var>column</var> in
+ <var>record</var> in <var>table</var>. The first form applies to columns that
+ are not maps: each specified <var>value</var> is removed from the column.
+ The second and third forms apply to map columns: if only a <var>key</var>
+ is specified, then any key-value pair with the given <var>key</var> is
+ removed, regardless of its value; if a <var>value</var> is given then a
+ pair is removed only if both key and value match.
+ </p>
+ <p>
+ It is not an error if the column does not contain the specified key or
+ value or pair.
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if <var>record</var> does not
+ exist. With <code>--if-exists</code>, this command does nothing if
+ <var>record</var> does not exist.
+ </p>
+ </dd>
+
+ <dt>[<code>--if-exists</code>] <code>clear</code> <var>table record column</var>...</dt>
+ <dd>
+ <p>
+ Sets each <var>column</var> in <var>record</var> in <var>table</var> to the empty set
+ or empty map, as appropriate. This command applies only to columns
+ that are allowed to be empty.
+ </p>
+ <p>
+ Without <code>--if-exists</code>, it is an error if <var>record</var> does not
+ exist. With <code>--if-exists</code>, this command does nothing if
+ <var>record</var> does not exist.
+ </p>
+ </dd>
+
+ <dt>[<code>--id=@</code><var>name</var>] <code>create</code> <var>table column</var>[<code>:</code><var>key</var>]<code>=</code><var>value</var>...</dt>
+ <dd>
+ <p>
+ Creates a new record in <var>table</var> and sets the initial values of
+ each <var>column</var>. Columns not explicitly set will receive their
+ default values. Outputs the UUID of the new row.
+ </p>
+ <p>
+ If <code>@</code><var>name</var> is specified, then the UUID for the new row may be
+ referred to by that name elsewhere in the same <code>\*(PN</code>
+ invocation in contexts where a UUID is expected. Such references may
+ precede or follow the <code>create</code> command.
+ </p>
+ <dl>
+ <dt>Caution (ovs-vsctl as example)</dt>
+ <dd>
+ Records in the Open vSwitch database are significant only when they
+ can be reached directly or indirectly from the <code>Open_vSwitch</code>
+ table. Except for records in the <code>QoS</code> or <code>Queue</code> tables,
+ records that are not reachable from the <code>Open_vSwitch</code> table are
+ automatically deleted from the database. This deletion happens
+ immediately, without waiting for additional <code>ovs-vsctl</code> commands
+ or other database activity. Thus, a <code>create</code> command must
+ generally be accompanied by additional commands <var>within the same</var>
+ <code>ovs-vsctl</code> <var>invocation</var> to add a chain of references to the
+ newly created record from the top-level <code>Open_vSwitch</code> record.
+ The <code>EXAMPLES</code> section gives some examples that show how to do
+ this.
+ </dd>
+ </dl>
+ </dd>
+
+ <dt>[<code>--if-exists</code>] <code>destroy</code> <var>table record</var>...</dt>
+ <dd>
+ Deletes each specified <var>record</var> from <var>table</var>. Unless
+ <code>--if-exists</code> is specified, each <var>record</var> must exist.
+ </dd>
+
+ <dt><code>--all destroy</code> <var>table</var></dt>
+ <dd>
+ <p>
+ Deletes all records from the <var>table</var>.
+ </p>
+ <dl>
+ <dt>Caution (ovs-vsctl as example)</dt>
+ <dd>
+ The <code>destroy</code> command is only useful for records in the <code>QoS</code>
+ or <code>Queue</code> tables. Records in other tables are automatically
+ deleted from the database when they become unreachable from the
+ <code>Open_vSwitch</code> table. This means that deleting the last reference
+ to a record is sufficient for deleting the record itself. For records
+ in these tables, <code>destroy</code> is silently ignored. See the
+ <code>EXAMPLES</code> section below for more information.
+ </dd>
+ </dl>
+ </dd>
+
+ <dt><code>wait-until</code> <var>table record</var> [<var>column</var>[<code>:</code><var>key</var>]<code>=</code><var>value</var>]...</dt>
+ <dd>
+ <p>
+ Waits until <var>table</var> contains a record named <var>record</var> whose
+ <var>column</var> equals <var>value</var> or, if <var>key</var> is specified, whose
+ <var>column</var> contains a <var>key</var> with the specified <var>value</var>. Any
+ of the operators <code>!=</code>, <code><</code>, <code>></code>, <code><=</code>, or <code>>=</code> may
+ be substituted for <code>=</code> to test for inequality, less than, greater
+ than, less than or equal to, or greater than or equal to,
+ respectively. (Don't forget to escape <code><</code> or <code>></code> from
+ interpretation by the shell.)
+ </p>
+ <p>
+ If no <var>column</var>[<code>:</code><var>key</var>]<code>=</code><var>value</var> arguments are given,
+ this command waits only until <var>record</var> exists. If more than one
+ such argument is given, the command waits until all of them are
+ satisfied.
+ </p>
+ <dl>
+ <dt>Caution (ovs-vsctl as example)</dt>
+ <dd>
+ Usually <code>wait-until</code> should be placed at the beginning of a set
+ of <code>ovs-vsctl</code> commands. For example, <code>wait-until bridge br0
+ -- get bridge br0 datapath_id</code> waits until a bridge named
+ <code>br0</code> is created, then prints its <code>datapath_id</code> column,
+ whereas <code>get bridge br0 datapath_id -- wait-until bridge br0</code>
+ will abort if no bridge named <code>br0</code> exists when <code>ovs-vsctl</code>
+ initially connects to the database.
+ </dd>
+ </dl>
+ <p>
+ Consider specifying <code>--timeout=0</code> along with
+ <code>--wait-until</code>, to prevent <code>ovn-nbctl</code> from terminating
+ after waiting only at most 5 seconds.
+ </p>
+ </dd>
+
+ <dt><code>comment</code> [<var>arg</var>]...</dt>
+ <dd>
+ <p>
+ This command has no effect on behavior, but any database log record
+ created by the command will include the command and its arguments.
+ </p>
+ </dd>
+
+ </dl>
+</p>
return p;
}
-/* Returns a string that describes some of 'b''s metadata plus a hex dump of up
- * to 'maxbytes' from the start of the buffer. */
-char *
-dp_packet_to_string(const struct dp_packet *b, size_t maxbytes)
-{
- struct ds s;
-
- ds_init(&s);
- ds_put_format(&s, "size=%"PRIu32", allocated=%"PRIu32", head=%"PRIuSIZE", tail=%"PRIuSIZE"\n",
- dp_packet_size(b), dp_packet_get_allocated(b),
- dp_packet_headroom(b), dp_packet_tailroom(b));
- ds_put_hex_dump(&s, dp_packet_data(b), MIN(dp_packet_size(b), maxbytes), 0, false);
- return ds_cstr(&s);
-}
-
static inline void
dp_packet_adjust_layer_offset(uint16_t *offset, int increment)
{
void *dp_packet_steal_data(struct dp_packet *);
-char *dp_packet_to_string(const struct dp_packet *, size_t maxbytes);
static inline bool dp_packet_equal(const struct dp_packet *,
const struct dp_packet *);
#define ASSERT_FLOWMAP_NOT_SET(FM, IDX) \
{ \
MINIFLOW_ASSERT(!((FM)->bits[(IDX) / MAP_T_BITS] & \
- (FLOWMAP_MAX << ((IDX) % MAP_T_BITS)))); \
+ (MAP_MAX << ((IDX) % MAP_T_BITS)))); \
for (size_t i = (IDX) / MAP_T_BITS + 1; i < FLOWMAP_UNITS; i++) { \
MINIFLOW_ASSERT(!(FM)->bits[i]); \
} \
flowmap_set(&MF.map, (OFS), 1); \
}
-#define miniflow_assert_in_map(MF, OFS) \
- MINIFLOW_ASSERT(FLOWMAP_IS_SET(MF.map, (OFS))); \
+#define miniflow_assert_in_map(MF, OFS) \
+ MINIFLOW_ASSERT(flowmap_is_set(&MF.map, (OFS))); \
ASSERT_FLOWMAP_NOT_SET(&MF.map, (OFS) + 1)
#define miniflow_push_uint64_(MF, OFS, VALUE) \
send_features_request(struct lswitch *sw)
{
struct ofpbuf *b;
- struct ofp_switch_config *osc;
int ofp_version = rconn_get_version(sw->rconn);
ovs_assert(ofp_version > 0 && ofp_version < 0xff);
queue_tx(sw, b);
/* Send OFPT_SET_CONFIG. */
- b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, ofp_version, sizeof *osc);
- osc = ofpbuf_put_zeros(b, sizeof *osc);
- osc->miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
- queue_tx(sw, b);
+ struct ofputil_switch_config config = {
+ .miss_send_len = OFP_DEFAULT_MISS_SEND_LEN
+ };
+ queue_tx(sw, ofputil_encode_set_config(&config, ofp_version));
}
static void
* - Established (0x02): This is part of an already existing connection.
* - Related (0x04): This is a separate connection that is related to an
* existing connection.
- * - Invalid (0x20): This flow could not be associated with a connection.
+ * - Reply (0x08): This flow is in the reply direction, ie it did not
+ * initiate the connection.
+ * - Invalid (0x10): This flow could not be associated with a connection.
* This could be set for a variety of reasons,
* including (but not limited to):
* - L3/L4 protocol handler is not loaded/unavailable.
* - L3/L4 protocol handler determines that the packet
* is malformed or invalid for the current FSM stage.
* - Packets are unexpected length for protocol.
- * - Reply (0x40): This flow is in the reply direction, ie it did not
- * initiate the connection.
- * - Tracked (0x80): Connection tracking has occurred.
+ * - Tracked (0x20): Connection tracking has occurred.
*
* The "Tracked" bit corresponds to the packet_state as described in the
* description of NXAST_CT action. The remaining bits correspond to
/*
- * Copyright (c) 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
int port_id;
};
-static bool thread_is_pmd(void);
+static bool dpdk_thread_is_pmd(void);
static int netdev_dpdk_construct(struct netdev *);
/* If we are on a non pmd thread we have to use the mempool mutex, because
* every non pmd thread shares the same mempool cache */
- if (!thread_is_pmd()) {
+ if (!dpdk_thread_is_pmd()) {
ovs_mutex_lock(&nonpmd_mempool_mutex);
}
dpdk_queue_flush(dev, qid);
}
- if (!thread_is_pmd()) {
+ if (!dpdk_thread_is_pmd()) {
ovs_mutex_unlock(&nonpmd_mempool_mutex);
}
}
*/
if (!strcmp(argv[1], flag) && (strlen(argv[2]) <= size)) {
changed = 1;
- *new_val = strdup(argv[2]);
+ *new_val = xstrdup(argv[2]);
VLOG_INFO("User-provided %s in use: %s", flag, *new_val);
} else {
VLOG_INFO("No %s provided - defaulting to %s", flag, default_val);
}
#ifdef VHOST_CUSE
- if (process_vhost_flags("-cuse_dev_name", strdup("vhost-net"),
+ if (process_vhost_flags("-cuse_dev_name", xstrdup("vhost-net"),
PATH_MAX, argv, &cuse_dev_name)) {
#else
- if (process_vhost_flags("-vhost_sock_dir", strdup(ovs_rundir()),
+ if (process_vhost_flags("-vhost_sock_dir", xstrdup(ovs_rundir()),
NAME_MAX, argv, &vhost_sock_dir)) {
struct stat s;
int err;
}
static bool
-thread_is_pmd(void)
+dpdk_thread_is_pmd(void)
{
return rte_lcore_id() != NON_PMD_CORE_ID;
}
{
char *save_ptr = NULL;
char *vac_up, *vac_down;
- char *value = strdup(setting);
+ char *value = xstrdup(setting);
+ char *ret_msg;
int vacancy_up, vacancy_down;
strtok_r(value, ":", &save_ptr);
vac_down = strtok_r(NULL, ",", &save_ptr);
if (!vac_down) {
- return xasprintf("Vacancy down value missing");
+ ret_msg = xasprintf("Vacancy down value missing");
+ goto exit;
}
if (!str_to_int(vac_down, 0, &vacancy_down) ||
vacancy_down < 0 || vacancy_down > 100) {
- return xasprintf("Invalid vacancy down value \"%s\"", vac_down);
+ ret_msg = xasprintf("Invalid vacancy down value \"%s\"", vac_down);
+ goto exit;
}
vac_up = strtok_r(NULL, ",", &save_ptr);
if (!vac_up) {
- return xasprintf("Vacancy up value missing");
+ ret_msg = xasprintf("Vacancy up value missing");
+ goto exit;
}
if (!str_to_int(vac_up, 0, &vacancy_up) ||
vacancy_up < 0 || vacancy_up > 100) {
- return xasprintf("Invalid vacancy up value \"%s\"", vac_up);
+ ret_msg = xasprintf("Invalid vacancy up value \"%s\"", vac_up);
+ goto exit;
}
if (vacancy_down > vacancy_up) {
- return xasprintf("Invalid vacancy range, vacancy up should be greater"
- " than vacancy down ""(%s)",
- ofperr_to_string(OFPERR_OFPBPC_BAD_VALUE));
+ ret_msg = xasprintf("Invalid vacancy range, vacancy up should be "
+ "greater than vacancy down (%s)",
+ ofperr_to_string(OFPERR_OFPBPC_BAD_VALUE));
+ goto exit;
}
+
+ free(value);
tm->table_vacancy.vacancy_down = vacancy_down;
tm->table_vacancy.vacancy_up = vacancy_up;
return NULL;
+
+exit:
+ free(value);
+ return ret_msg;
}
/* Convert 'table_id' and 'setting' (as described for the "mod-table" command
error = parse_ofp_flow_mod_str(&(*fms)[*n_fms], ds_cstr(&s), command,
&usable);
if (error) {
+ char *err_msg;
size_t i;
for (i = 0; i < *n_fms; i++) {
fclose(stream);
}
- return xasprintf("%s:%d: %s", file_name, line_number, error);
+ err_msg = xasprintf("%s:%d: %s", file_name, line_number, error);
+ free(error);
+ return err_msg;
}
*usable_protocols &= usable; /* Each line can narrow the set. */
*n_fms += 1;
}
static void
-ofp_print_switch_config(struct ds *string, const struct ofp_switch_config *osc)
+ofp_print_switch_config(struct ds *string,
+ const struct ofputil_switch_config *config)
{
- enum ofp_config_flags flags;
+ ds_put_format(string, " frags=%s",
+ ofputil_frag_handling_to_string(config->frag));
- flags = ntohs(osc->flags);
-
- ds_put_format(string, " frags=%s", ofputil_frag_handling_to_string(flags));
- flags &= ~OFPC_FRAG_MASK;
-
- if (flags & OFPC_INVALID_TTL_TO_CONTROLLER) {
+ if (config->invalid_ttl_to_controller > 0) {
ds_put_format(string, " invalid_ttl_to_controller");
- flags &= ~OFPC_INVALID_TTL_TO_CONTROLLER;
}
- if (flags) {
- ds_put_format(string, " ***unknown flags 0x%04"PRIx16"***", flags);
+ ds_put_format(string, " miss_send_len=%"PRIu16"\n", config->miss_send_len);
+}
+
+static void
+ofp_print_set_config(struct ds *string, const struct ofp_header *oh)
+{
+ struct ofputil_switch_config config;
+ enum ofperr error;
+
+ error = ofputil_decode_set_config(oh, &config);
+ if (error) {
+ ofp_print_error(string, error);
+ return;
}
+ ofp_print_switch_config(string, &config);
+}
- ds_put_format(string, " miss_send_len=%"PRIu16"\n", ntohs(osc->miss_send_len));
+static void
+ofp_print_get_config_reply(struct ds *string, const struct ofp_header *oh)
+{
+ struct ofputil_switch_config config;
+ ofputil_decode_get_config_reply(oh, &config);
+ ofp_print_switch_config(string, &config);
}
static void print_wild(struct ds *string, const char *leader, int is_wild,
break;
case OFPTYPE_GET_CONFIG_REPLY:
+ ofp_print_get_config_reply(string, oh);
+ break;
+
case OFPTYPE_SET_CONFIG:
- ofp_print_switch_config(string, ofpmsg_body(oh));
+ ofp_print_set_config(string, oh);
break;
case OFPTYPE_PACKET_IN:
case OFPRAW_OFPT10_QUEUE_GET_CONFIG_REQUEST:
qgcr10 = b.data;
*port = u16_to_ofp(ntohs(qgcr10->port));
- return 0;
+ break;
case OFPRAW_OFPT11_QUEUE_GET_CONFIG_REQUEST:
qgcr11 = b.data;
- return ofputil_port_from_ofp11(qgcr11->port, port);
+ enum ofperr error = ofputil_port_from_ofp11(qgcr11->port, port);
+ if (error || *port == OFPP_ANY) {
+ return error;
+ }
+ break;
+
+ default:
+ OVS_NOT_REACHED();
}
- OVS_NOT_REACHED();
+ return (ofp_to_u16(*port) < ofp_to_u16(OFPP_MAX)
+ ? 0
+ : OFPERR_OFPQOFC_BAD_PORT);
}
/* Constructs and returns the beginning of a reply to
opq10->queue_id = htonl(oqc->queue_id);
len_ofs = (char *) &opq10->len - (char *) reply->data;
} else {
- struct ofp11_queue_get_config_reply *qgcr11;
struct ofp12_packet_queue *opq12;
- ovs_be32 port;
-
- qgcr11 = reply->msg;
- port = qgcr11->port;
opq12 = ofpbuf_put_zeros(reply, sizeof *opq12);
- opq12->port = port;
+ opq12->port = ofputil_port_to_ofp11(oqc->port);
opq12->queue_id = htonl(oqc->queue_id);
len_ofs = (char *) &opq12->len - (char *) reply->data;
}
ofpmp_postappend(replies, start_ofs);
}
\f
+/* ofputil_switch_config */
+
+/* Decodes 'oh', which must be an OFPT_GET_CONFIG_REPLY or OFPT_SET_CONFIG
+ * message, into 'config'. Returns false if 'oh' contained any flags that
+ * aren't specified in its version of OpenFlow, true otherwise. */
+static bool
+ofputil_decode_switch_config(const struct ofp_header *oh,
+ struct ofputil_switch_config *config)
+{
+ const struct ofp_switch_config *osc;
+ struct ofpbuf b;
+
+ ofpbuf_use_const(&b, oh, ntohs(oh->length));
+ ofpraw_pull_assert(&b);
+ osc = ofpbuf_pull(&b, sizeof *osc);
+
+ config->frag = ntohs(osc->flags) & OFPC_FRAG_MASK;
+ config->miss_send_len = ntohs(osc->miss_send_len);
+
+ ovs_be16 valid_mask = htons(OFPC_FRAG_MASK);
+ if (oh->version < OFP13_VERSION) {
+ const ovs_be16 ttl_bit = htons(OFPC_INVALID_TTL_TO_CONTROLLER);
+ valid_mask |= ttl_bit;
+ config->invalid_ttl_to_controller = (osc->flags & ttl_bit) != 0;
+ } else {
+ config->invalid_ttl_to_controller = -1;
+ }
+
+ return !(osc->flags & ~valid_mask);
+}
+
+void
+ofputil_decode_get_config_reply(const struct ofp_header *oh,
+ struct ofputil_switch_config *config)
+{
+ ofputil_decode_switch_config(oh, config);
+}
+
+enum ofperr
+ofputil_decode_set_config(const struct ofp_header *oh,
+ struct ofputil_switch_config *config)
+{
+ return (ofputil_decode_switch_config(oh, config)
+ ? 0
+ : OFPERR_OFPSCFC_BAD_FLAGS);
+}
+
+static struct ofpbuf *
+ofputil_put_switch_config(const struct ofputil_switch_config *config,
+ struct ofpbuf *b)
+{
+ const struct ofp_header *oh = b->data;
+ struct ofp_switch_config *osc = ofpbuf_put_zeros(b, sizeof *osc);
+ osc->flags = htons(config->frag);
+ if (config->invalid_ttl_to_controller > 0 && oh->version < OFP13_VERSION) {
+ osc->flags |= htons(OFPC_INVALID_TTL_TO_CONTROLLER);
+ }
+ osc->miss_send_len = htons(config->miss_send_len);
+ return b;
+}
+
+struct ofpbuf *
+ofputil_encode_get_config_reply(const struct ofp_header *request,
+ const struct ofputil_switch_config *config)
+{
+ struct ofpbuf *b = ofpraw_alloc_reply(OFPRAW_OFPT_GET_CONFIG_REPLY,
+ request, 0);
+ return ofputil_put_switch_config(config, b);
+}
+
+struct ofpbuf *
+ofputil_encode_set_config(const struct ofputil_switch_config *config,
+ enum ofp_version version)
+{
+ struct ofpbuf *b = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, version, 0);
+ return ofputil_put_switch_config(config, b);
+}
+\f
/* ofputil_switch_features */
#define OFPC_COMMON (OFPC_FLOW_STATS | OFPC_TABLE_STATS | OFPC_PORT_STATS | \
}
const char *
-ofputil_frag_handling_to_string(enum ofp_config_flags flags)
+ofputil_frag_handling_to_string(enum ofputil_frag_handling frag)
{
- switch (flags & OFPC_FRAG_MASK) {
- case OFPC_FRAG_NORMAL: return "normal";
- case OFPC_FRAG_DROP: return "drop";
- case OFPC_FRAG_REASM: return "reassemble";
- case OFPC_FRAG_NX_MATCH: return "nx-match";
+ switch (frag) {
+ case OFPUTIL_FRAG_NORMAL: return "normal";
+ case OFPUTIL_FRAG_DROP: return "drop";
+ case OFPUTIL_FRAG_REASM: return "reassemble";
+ case OFPUTIL_FRAG_NX_MATCH: return "nx-match";
}
OVS_NOT_REACHED();
}
bool
-ofputil_frag_handling_from_string(const char *s, enum ofp_config_flags *flags)
+ofputil_frag_handling_from_string(const char *s,
+ enum ofputil_frag_handling *frag)
{
if (!strcasecmp(s, "normal")) {
- *flags = OFPC_FRAG_NORMAL;
+ *frag = OFPUTIL_FRAG_NORMAL;
} else if (!strcasecmp(s, "drop")) {
- *flags = OFPC_FRAG_DROP;
+ *frag = OFPUTIL_FRAG_DROP;
} else if (!strcasecmp(s, "reassemble")) {
- *flags = OFPC_FRAG_REASM;
+ *frag = OFPUTIL_FRAG_REASM;
} else if (!strcasecmp(s, "nx-match")) {
- *flags = OFPC_FRAG_NX_MATCH;
+ *frag = OFPUTIL_FRAG_NX_MATCH;
} else {
return false;
}
struct ofpbuf *ofputil_encode_packet_out(const struct ofputil_packet_out *,
enum ofputil_protocol protocol);
+enum ofputil_frag_handling {
+ OFPUTIL_FRAG_NORMAL = OFPC_FRAG_NORMAL, /* No special handling. */
+ OFPUTIL_FRAG_DROP = OFPC_FRAG_DROP, /* Drop fragments. */
+ OFPUTIL_FRAG_REASM = OFPC_FRAG_REASM, /* Reassemble (if supported). */
+ OFPUTIL_FRAG_NX_MATCH = OFPC_FRAG_NX_MATCH /* Match on frag bits. */
+};
+
+const char *ofputil_frag_handling_to_string(enum ofputil_frag_handling);
+bool ofputil_frag_handling_from_string(const char *,
+ enum ofputil_frag_handling *);
+
+/* Abstract struct ofp_switch_config. */
+struct ofputil_switch_config {
+ /* Fragment handling. */
+ enum ofputil_frag_handling frag;
+
+ /* 0: Do not send packet to controller when decrementing invalid IP TTL.
+ * 1: Do send packet to controller when decrementing invalid IP TTL.
+ * -1: Unspecified (only OpenFlow 1.1 and 1.2 support this setting). */
+ int invalid_ttl_to_controller;
+
+ /* Maximum bytes of packet to send to controller on miss. */
+ uint16_t miss_send_len;
+};
+
+void ofputil_decode_get_config_reply(const struct ofp_header *,
+ struct ofputil_switch_config *);
+struct ofpbuf *ofputil_encode_get_config_reply(
+ const struct ofp_header *request, const struct ofputil_switch_config *);
+
+enum ofperr ofputil_decode_set_config(const struct ofp_header *,
+ struct ofputil_switch_config *);
+struct ofpbuf *ofputil_encode_set_config(
+ const struct ofputil_switch_config *, enum ofp_version);
+
enum ofputil_port_config {
/* OpenFlow 1.0 and 1.1 share these values for these port config bits. */
OFPUTIL_PC_PORT_DOWN = 1 << 0, /* Port is administratively down. */
/* Queue configuration reply. */
struct ofputil_queue_config {
+ ofp_port_t port;
uint32_t queue_id;
/* Each of these optional values is expressed in tenths of a percent.
struct ofpbuf *make_echo_reply(const struct ofp_header *rq);
struct ofpbuf *ofputil_encode_barrier_request(enum ofp_version);
-
-const char *ofputil_frag_handling_to_string(enum ofp_config_flags);
-bool ofputil_frag_handling_from_string(const char *, enum ofp_config_flags *);
-
\f
/* Actions. */
lchassis->c_cap_enabled = LLDP_CAP_BRIDGE;
lchassis->c_id_subtype = LLDP_CHASSISID_SUBTYPE_LLADDR;
lchassis->c_id_len = ETH_ADDR_LEN;
- lchassis->c_id = xmalloc(ETH_ADDR_LEN);
list_init(&lchassis->c_mgmt);
lchassis->c_ttl = LLDP_CHASSIS_TTL;
/* Auto Attach element tlv */
hw->h_lport.p_element.type = LLDP_TLV_AA_ELEM_TYPE_CLIENT_VIRTUAL_SWITCH;
hw->h_lport.p_element.mgmt_vlan = 0;
- memcpy(&hw->h_lport.p_element.system_id.system_mac,
- lchassis->c_id, lchassis->c_id_len);
hw->h_lport.p_element.system_id.conn_type =
LLDP_TLV_AA_ELEM_CONN_TYPE_SINGLE;
hw->h_lport.p_element.system_id.rsvd = 0;
free(lldp);
}
-/* Unreference a specific LLDP instance.
+/* Reference a specific LLDP instance.
*/
struct lldp *
lldp_ref(const struct lldp *lldp_)
}
return lldp;
}
+
+void
+lldp_destroy_dummy(struct lldp *lldp)
+{
+ struct lldpd_hardware *hw, *hw_next;
+ struct lldpd_chassis *chassis, *chassis_next;
+ struct lldpd *cfg;
+
+ if (!lldp) {
+ return;
+ }
+
+ cfg = lldp->lldpd;
+
+ LIST_FOR_EACH_SAFE (hw, hw_next, h_entries, &cfg->g_hardware) {
+ list_remove(&hw->h_entries);
+ free(hw->h_lport.p_lastframe);
+ free(hw);
+ }
+
+ LIST_FOR_EACH_SAFE (chassis, chassis_next, list, &cfg->g_chassis) {
+ list_remove(&chassis->list);
+ free(chassis);
+ }
+
+ free(lldp->lldpd);
+ free(lldp);
+}
+
/* Used by unit tests */
struct lldp * lldp_create_dummy(void);
+void lldp_destroy_dummy(struct lldp *);
#endif /* OVS_LLDP_H */
return n_cores > 0 ? n_cores : 0;
}
+
+/* Returns 'true' if current thread is PMD thread. */
+bool
+thread_is_pmd(void)
+{
+ const char *name = get_subprogram_name();
+ return !strncmp(name, "pmd", 3);
+}
+
\f
/* ovsthread_key. */
/* Useful functions related to threading. */
int count_cpu_cores(void);
+bool thread_is_pmd(void);
#endif /* ovs-thread.h */
unsigned long int *written; /* Bitmap of columns from "new" to write. */
struct hmap_node txn_node; /* Node in ovsdb_idl_txn's list. */
+ /* Tracking data */
unsigned int change_seqno[OVSDB_IDL_CHANGE_MAX];
- struct ovs_list track_node;
+ struct ovs_list track_node; /* Rows modified/added/deleted by IDL */
+ unsigned long int *updated; /* Bitmap of columns updated by IDL */
};
struct ovsdb_idl_column {
-/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
return NULL;
}
+/* Returns true if a tracked 'column' in 'row' was updated by IDL, false
+ * otherwise. The tracking data is cleared by ovsdb_idl_track_clear().
+ *
+ * Function returns false if 'column' is not tracked (see
+ * ovsdb_idl_track_add_column()).
+ */
+bool
+ovsdb_idl_track_is_updated(const struct ovsdb_idl_row *row,
+ const struct ovsdb_idl_column *column)
+{
+ const struct ovsdb_idl_table_class *class;
+ size_t column_idx;
+
+ class = row->table->class;
+ column_idx = column - class->columns;
+
+ if (row->updated && bitmap_is_set(row->updated, column_idx)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
/* Flushes the tracked rows. Client calls this function after calling
* ovsdb_idl_run() and read all tracked rows with the ovsdb_idl_track_get_*()
* functions. This is usually done at the end of the client's processing
struct ovsdb_idl_row *row, *next;
LIST_FOR_EACH_SAFE(row, next, track_node, &table->track_list) {
+ if (row->updated) {
+ free(row->updated);
+ row->updated = NULL;
+ }
list_remove(&row->track_node);
list_init(&row->track_node);
if (ovsdb_idl_row_is_orphan(row)) {
return true;
}
-#if 0
-ovsdb_idl_row_apply_diff(struct ovsdb_idl_row *row,
- const struct json *diff_json)
-{
- struct ovsdb_idl_table *table = row->table;
- struct shash_node *node;
- bool changed = false;
-
- SHASH_FOR_EACH (node, json_object(diff_json)) {
- const char *column_name = node->name;
- const struct ovsdb_idl_column *column;
- struct ovsdb_datum diff;
- struct ovsdb_error *error;
-
- column = shash_find_data(&table->columns, column_name);
- if (!column) {
- VLOG_WARN_RL(&syntax_rl, "unknown column %s updating row "UUID_FMT,
- column_name, UUID_ARGS(&row->uuid));
- continue;
- }
-
- error = ovsdb_transient_datum_from_json(&diff, &column->type,
- node->data);
- if (!error) {
- unsigned int column_idx = column - table->class->columns;
- struct ovsdb_datum *old = &row->old[column_idx];
- struct ovsdb_datum new;
- struct ovsdb_error *error;
-
- error = ovsdb_datum_apply_diff(&new, old, &diff, &column->type);
- if (error) {
- VLOG_WARN_RL(&syntax_rl, "update2 failed to modify column "
- "%s row "UUID_FMT, column_name,
- UUID_ARGS(&row->uuid));
- ovsdb_error_destroy(error);
- } else {
- ovsdb_datum_swap(old, &new);
- ovsdb_datum_destroy(&new, &column->type);
- if (table->modes[column_idx] & OVSDB_IDL_ALERT) {
- changed = true;
- }
- }
- ovsdb_datum_destroy(&diff, &column->type);
- } else {
- char *s = ovsdb_error_to_string(error);
- VLOG_WARN_RL(&syntax_rl, "error parsing column %s in row "UUID_FMT
- " in table %s: %s", column_name,
- UUID_ARGS(&row->uuid), table->class->name, s);
- free(s);
- ovsdb_error_destroy(error);
- }
- }
- return changed;
-}
-#endif
-
/* Returns true if a column with mode OVSDB_IDL_MODE_RW changed, false
* otherwise.
*
enum ovsdb_idl_change change)
{
struct ovsdb_idl_table *table = row->table;
+ const struct ovsdb_idl_table_class *class = table->class;
struct shash_node *node;
bool changed = false;
bool apply_diff = diff_json != NULL;
list_push_front(&row->table->track_list,
&row->track_node);
}
+ if (!row->updated) {
+ row->updated = bitmap_allocate(class->n_columns);
+ }
+ bitmap_set1(row->updated, column_idx);
}
}
} else {
const struct ovsdb_idl_row *ovsdb_idl_track_get_first(
const struct ovsdb_idl *, const struct ovsdb_idl_table_class *);
const struct ovsdb_idl_row *ovsdb_idl_track_get_next(const struct ovsdb_idl_row *);
+bool ovsdb_idl_track_is_updated(const struct ovsdb_idl_row *row,
+ const struct ovsdb_idl_column *column);
void ovsdb_idl_track_clear(const struct ovsdb_idl *);
\f
cpu_usage = get_cpu_usage();
if (VLOG_IS_DBG_ENABLED()) {
level = VLL_DBG;
- } else if (cpu_usage > 50 && !VLOG_DROP_INFO(&rl)) {
+ } else if (cpu_usage > 50
+ && !thread_is_pmd()
+ && !VLOG_DROP_INFO(&rl)) {
level = VLL_INFO;
} else {
return;
list_remove(&rstp->node);
ovs_mutex_unlock(&rstp_mutex);
+ hmap_destroy(&rstp->ports);
free(rstp->name);
free(rstp);
}
/* SSL_CTX_add_client_CA makes a copy of cert's relevant data. */
SSL_CTX_add_client_CA(ctx, cert);
- /* SSL_CTX_use_certificate() takes ownership of the certificate passed in.
- * 'cert' is owned by sslv->ssl, so we need to duplicate it. */
- cert = X509_dup(cert);
- if (!cert) {
- out_of_memory();
- }
SSL_CTX_set_cert_store(ctx, X509_STORE_new());
if (SSL_CTX_load_verify_locations(ctx, ca_cert.file_name, NULL) != 1) {
VLOG_ERR("SSL_CTX_load_verify_locations: %s",
time_init();
coverage_clear();
coverage_run();
- if (*last_wakeup) {
+ if (*last_wakeup && !thread_is_pmd()) {
log_poll_interval(*last_wakeup);
}
start = time_msec();
fi
AM_CONDITIONAL([HAVE_PYTHON], [test "$HAVE_PYTHON" = yes])])
+dnl Checks for flake8.
+AC_DEFUN([OVS_CHECK_FLAKE8],
+ [AC_CACHE_CHECK(
+ [for flake8],
+ [ovs_cv_flake8],
+ [if flake8 --version >/dev/null 2>&1; then
+ ovs_cv_flake8=yes
+ else
+ ovs_cv_flake8=no
+ fi])
+ AM_CONDITIONAL([HAVE_FLAKE8], [test "$ovs_cv_flake8" = yes])])
+
dnl Checks for dot.
AC_DEFUN([OVS_CHECK_DOT],
[AC_CACHE_CHECK(
# IPFIX enterprise entity definition macros.
EXTRA_DIST += ofproto/ipfix-enterprise-entities.def
+
+FLAKE8_PYFILES += ofproto/ipfix-gen-entities
# notice and this notice are preserved. This file is offered as-is,
# without warranty of any kind.
+from __future__ import print_function
+
import getopt
import re
import sys
import xml.sax
import xml.sax.handler
+
class IpfixEntityHandler(xml.sax.handler.ContentHandler):
RECORD_FIELDS = ['name', 'dataType', 'elementId', 'status']
self.current_record = dict()
def startDocument(self):
- print """\
+ print("""\
/* IPFIX entities. */
#ifndef IPFIX_ENTITY
#define IPFIX_ENTITY(ENUM, ID, SIZE, NAME)
#endif
-"""
+""")
def endDocument(self):
- print """
-#undef IPFIX_ENTITY"""
+ print("""
+#undef IPFIX_ENTITY""")
def startElement(self, name, attrs):
if name in self.RECORD_FIELDS:
self.current_record['dataTypeSize'] = self.DATA_TYPE_SIZE.get(
self.current_record['dataType'], 0)
- print 'IPFIX_ENTITY(%(enumName)s, %(elementId)s, ' \
- '%(dataTypeSize)i, %(name)s)' % self.current_record
+ print('IPFIX_ENTITY(%(enumName)s, %(elementId)s, '
+ '%(dataTypeSize)i, %(name)s)' % self.current_record)
self.current_record.clear()
def characters(self, content):
if self.current_field_name is not None:
self.current_field_value.append(content)
+
def print_ipfix_entity_macros(xml_file):
xml.sax.parse(xml_file, IpfixEntityHandler())
+
def usage(name):
- print """\
+ print("""\
%(name)s: IPFIX entity definition generator
Prints C macros defining IPFIX entities from the standard IANA file at
<http://www.iana.org/assignments/ipfix/ipfix.xml>
The following options are also available:
-h, --help display this help message
-V, --version display version information\
-""" % {'name': name}
+""" % {'name': name})
sys.exit(0)
if __name__ == '__main__':
-# try:
- try:
- options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
- ['help', 'version'])
- except getopt.GetoptError, geo:
- sys.stderr.write('%s: %s\n' % (sys.argv[0], geo.msg))
- sys.exit(1)
-
- for key, value in options:
- if key in ['-h', '--help']:
- usage()
- elif key in ['-V', '--version']:
- print 'ipfix-gen-entities (Open vSwitch)'
- else:
- sys.exit(0)
-
- if len(args) != 1:
- sys.stderr.write('%s: exactly 1 non-option arguments required '
- '(use --help for help)\n' % sys.argv[0])
- sys.exit(1)
-
- print_ipfix_entity_macros(args[0])
-
-# except Exception, e:
-# sys.stderr.write('%s: %s\n' % (sys.argv[0], e))
-# sys.exit(1)
+ try:
+ options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
+ ['help', 'version'])
+ except getopt.GetoptError as geo:
+ sys.stderr.write('%s: %s\n' % (sys.argv[0], geo.msg))
+ sys.exit(1)
+
+ for key, value in options:
+ if key in ['-h', '--help']:
+ usage()
+ elif key in ['-V', '--version']:
+ print('ipfix-gen-entities (Open vSwitch)')
+ else:
+ sys.exit(0)
+
+ if len(args) != 1:
+ sys.stderr.write('%s: exactly 1 non-option arguments required '
+ '(use --help for help)\n' % sys.argv[0])
+ sys.exit(1)
+
+ print_ipfix_entity_macros(args[0])
# Local variables:
# mode: python
/*
- * Copyright (c) 2014, 2015 Nicira, Inc.
+ * Copyright (c) 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#define RECIRC_POOL_STATIC_IDS 1024
+static void recirc_id_node_free(struct recirc_id_node *);
+
void
recirc_init(void)
{
* finished. */
LIST_FOR_EACH_POP (node, exp_node, &expired) {
cmap_remove(&id_map, &node->id_node, node->id);
- ovsrcu_postpone(free, node);
+ ovsrcu_postpone(recirc_id_node_free, node);
}
if (!list_is_empty(&expiring)) {
}
}
+static void
+recirc_state_free(struct recirc_state *state)
+{
+ ofpbuf_delete(state->stack);
+ free(state->ofpacts);
+}
+
/* Allocate a unique recirculation id for the given set of flow metadata.
* The ID space is 2^^32, so there should never be a situation in which all
* the IDs are used up. We loop until we find a free one.
return recirc_alloc_id__(&state, recirc_metadata_hash(&state))->id;
}
+static void
+recirc_id_node_free(struct recirc_id_node *node)
+{
+ recirc_state_free(CONST_CAST(struct recirc_state *, &node->state));
+ free(node);
+}
+
void
recirc_id_node_unref(const struct recirc_id_node *node_)
OVS_EXCLUDED(mutex)
-/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Nicira, Inc.
+/* Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
};
/* In the absence of a multiple-writer multiple-reader datastructure for
- * storing ukeys, we use a large number of cmaps, each with its own lock for
- * writing. */
+ * storing udpif_keys ("ukeys"), we use a large number of cmaps, each with its
+ * own lock for writing. */
#define N_UMAPS 512 /* per udpif. */
struct umap {
struct ovs_mutex mutex; /* Take for writing to the following. */
};
/* A thread that processes datapath flows, updates OpenFlow statistics, and
- * updates or removes them if necessary. */
+ * updates or removes them if necessary.
+ *
+ * Revalidator threads operate in two phases: "dump" and "sweep". In between
+ * each phase, all revalidators sync up so that all revalidator threads are
+ * either in one phase or the other, but not a combination.
+ *
+ * During the dump phase, revalidators fetch flows from the datapath and
+ * attribute the statistics to OpenFlow rules. Each datapath flow has a
+ * corresponding ukey which caches the most recently seen statistics. If
+ * a flow needs to be deleted (for example, because it is unused over a
+ * period of time), revalidator threads may delete the flow during the
+ * dump phase. The datapath is not guaranteed to reliably dump all flows
+ * from the datapath, and there is no mapping between datapath flows to
+ * revalidators, so a particular flow may be handled by zero or more
+ * revalidators during a single dump phase. To avoid duplicate attribution
+ * of statistics, ukeys are never deleted during this phase.
+ *
+ * During the sweep phase, each revalidator takes ownership of a different
+ * slice of umaps and sweeps through all ukeys in those umaps to figure out
+ * whether they need to be deleted. During this phase, revalidators may
+ * fetch individual flows which were not dumped during the dump phase to
+ * validate them and attribute statistics.
+ */
struct revalidator {
struct udpif *udpif; /* Parent udpif. */
pthread_t thread; /* Thread ID. */
&op->dop.u.flow_put.actions_len);
}
+/* Executes datapath operations 'ops' and attributes stats retrieved from the
+ * datapath as part of those operations. */
static void
-push_ukey_ops__(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
+push_dp_ops(struct udpif *udpif, struct ukey_op *ops, size_t n_ops)
{
struct dpif_op *opsp[REVALIDATE_MAX_BATCH];
size_t i;
}
}
+/* Executes datapath operations 'ops', attributes stats retrieved from the
+ * datapath, and deletes ukeys corresponding to deleted flows. */
static void
push_ukey_ops(struct udpif *udpif, struct umap *umap,
struct ukey_op *ops, size_t n_ops)
{
int i;
- push_ukey_ops__(udpif, ops, n_ops);
+ push_dp_ops(udpif, ops, n_ops);
ovs_mutex_lock(&umap->mutex);
for (i = 0; i < n_ops; i++) {
- ukey_delete(umap, ops[i].ukey);
+ if (ops[i].dop.type == DPIF_OP_FLOW_DEL) {
+ ukey_delete(umap, ops[i].ukey);
+ }
}
ovs_mutex_unlock(&umap->mutex);
}
}
if (n_ops) {
- push_ukey_ops__(udpif, ops, n_ops);
+ /* Push datapath ops but defer ukey deletion to 'sweep' phase. */
+ push_dp_ops(udpif, ops, n_ops);
}
ovsrcu_quiesce();
}
size_t n_ops = 0;
CMAP_FOR_EACH(ukey, cmap_node, &umap->cmap) {
- bool flow_exists, seq_mismatch;
- struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
- enum reval_result result;
+ bool flow_exists;
/* Handler threads could be holding a ukey lock while it installs a
* new flow, so don't hang around waiting for access to it. */
continue;
}
flow_exists = ukey->flow_exists;
- seq_mismatch = (ukey->dump_seq != dump_seq
- && ukey->reval_seq != reval_seq);
-
- if (purge) {
- result = UKEY_DELETE;
- } else if (!seq_mismatch) {
- result = UKEY_KEEP;
- } else {
- struct dpif_flow_stats stats;
- COVERAGE_INC(revalidate_missed_dp_flow);
- memset(&stats, 0, sizeof stats);
- result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
- reval_seq, &recircs);
- }
- if (result != UKEY_KEEP) {
- /* Takes ownership of 'recircs'. */
- reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
- &odp_actions);
+ if (flow_exists) {
+ struct recirc_refs recircs = RECIRC_REFS_EMPTY_INITIALIZER;
+ bool seq_mismatch = (ukey->dump_seq != dump_seq
+ && ukey->reval_seq != reval_seq);
+ enum reval_result result;
+
+ if (purge) {
+ result = UKEY_DELETE;
+ } else if (!seq_mismatch) {
+ result = UKEY_KEEP;
+ } else {
+ struct dpif_flow_stats stats;
+ COVERAGE_INC(revalidate_missed_dp_flow);
+ memset(&stats, 0, sizeof stats);
+ result = revalidate_ukey(udpif, ukey, &stats, &odp_actions,
+ reval_seq, &recircs);
+ }
+ if (result != UKEY_KEEP) {
+ /* Clears 'recircs' if filled by revalidate_ukey(). */
+ reval_op_init(&ops[n_ops++], result, udpif, ukey, &recircs,
+ &odp_actions);
+ }
}
ovs_mutex_unlock(&ukey->mutex);
- if (n_ops == REVALIDATE_MAX_BATCH) {
- push_ukey_ops(udpif, umap, ops, n_ops);
- n_ops = 0;
- }
-
if (!flow_exists) {
+ /* The common flow deletion case involves deletion of the flow
+ * during the dump phase and ukey deletion here. */
ovs_mutex_lock(&umap->mutex);
ukey_delete(umap, ukey);
ovs_mutex_unlock(&umap->mutex);
}
+
+ if (n_ops == REVALIDATE_MAX_BATCH) {
+ /* Update/delete missed flows and clean up corresponding ukeys
+ * if necessary. */
+ push_ukey_ops(udpif, umap, ops, n_ops);
+ n_ops = 0;
+ }
}
if (n_ops) {
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
- struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */
+ struct rule_dpif *drop_frags_rule; /* Used in OFPUTIL_FRAG_DROP mode. */
/* Bridging. */
struct netflow *netflow;
/* We always unwildcard nw_frag (for IP), so they
* need not be unwildcarded here. */
if (flow->nw_frag & FLOW_NW_FRAG_ANY
- && ofproto->up.frag_handling != OFPC_FRAG_NX_MATCH) {
- if (ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
+ && ofproto->up.frag_handling != OFPUTIL_FRAG_NX_MATCH) {
+ if (ofproto->up.frag_handling == OFPUTIL_FRAG_NORMAL) {
/* We must pretend that transport ports are unavailable. */
flow->tp_src = htons(0);
flow->tp_dst = htons(0);
} else {
- /* Must be OFPC_FRAG_DROP (we don't have OFPC_FRAG_REASM).
+ /* Must be OFPUTIL_FRAG_DROP (we don't have OFPUTIL_FRAG_REASM).
* Use the drop_frags_rule (which cannot disappear). */
rule = ofproto->drop_frags_rule;
if (stats) {
static bool
set_frag_handling(struct ofproto *ofproto_,
- enum ofp_config_flags frag_handling)
+ enum ofputil_frag_handling frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- if (frag_handling != OFPC_FRAG_REASM) {
+ if (frag_handling != OFPUTIL_FRAG_REASM) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
char *sw_desc; /* Software version (NULL for default). */
char *serial_desc; /* Serial number (NULL for default). */
char *dp_desc; /* Datapath description (NULL for default). */
- enum ofp_config_flags frag_handling; /* One of OFPC_*. */
+ enum ofputil_frag_handling frag_handling;
/* Datapath. */
struct hmap ports; /* Contains "struct ofport"s. */
* which takes one of the following values, with the corresponding
* meanings:
*
- * - OFPC_FRAG_NORMAL: The switch should treat IP fragments the same way
- * as other packets, omitting TCP and UDP port numbers (always setting
- * them to 0).
+ * - OFPUTIL_FRAG_NORMAL: The switch should treat IP fragments the same
+ * way as other packets, omitting TCP and UDP port numbers (always
+ * setting them to 0).
*
- * - OFPC_FRAG_DROP: The switch should drop all IP fragments without
+ * - OFPUTIL_FRAG_DROP: The switch should drop all IP fragments without
* passing them through the flow table.
*
- * - OFPC_FRAG_REASM: The switch should reassemble IP fragments before
+ * - OFPUTIL_FRAG_REASM: The switch should reassemble IP fragments before
* passing packets through the flow table.
*
- * - OFPC_FRAG_NX_MATCH (a Nicira extension): Similar to OFPC_FRAG_NORMAL,
- * except that TCP and UDP port numbers should be included in fragments
- * with offset 0.
+ * - OFPUTIL_FRAG_NX_MATCH (a Nicira extension): Similar to
+ * OFPUTIL_FRAG_NORMAL, except that TCP and UDP port numbers should be
+ * included in fragments with offset 0.
*
* Implementations are not required to support every mode.
- * OFPC_FRAG_NORMAL is the default mode when an ofproto is created.
+ * OFPUTIL_FRAG_NORMAL is the default mode when an ofproto is created.
*
* At the time of the call to ->set_frag_handling(), the current mode is
* available in 'ofproto->frag_handling'. ->set_frag_handling() returns
* reflect the new mode.
*/
bool (*set_frag_handling)(struct ofproto *ofproto,
- enum ofp_config_flags frag_handling);
+ enum ofputil_frag_handling frag_handling);
/* Implements the OpenFlow OFPT_PACKET_OUT command. The datapath should
* execute the 'ofpacts_len' bytes of "struct ofpacts" in 'ofpacts'.
ofproto->sw_desc = NULL;
ofproto->serial_desc = NULL;
ofproto->dp_desc = NULL;
- ofproto->frag_handling = OFPC_FRAG_NORMAL;
+ ofproto->frag_handling = OFPUTIL_FRAG_NORMAL;
hmap_init(&ofproto->ports);
hmap_init(&ofproto->ofport_usage);
shash_init(&ofproto->port_by_name);
static enum ofperr
handle_get_config_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
- struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- struct ofp_switch_config *osc;
- enum ofp_config_flags flags;
- struct ofpbuf *buf;
+ struct ofputil_switch_config config;
+ config.frag = ofconn_get_ofproto(ofconn)->frag_handling;
+ config.invalid_ttl_to_controller
+ = ofconn_get_invalid_ttl_to_controller(ofconn);
+ config.miss_send_len = ofconn_get_miss_send_len(ofconn);
- /* Send reply. */
- buf = ofpraw_alloc_reply(OFPRAW_OFPT_GET_CONFIG_REPLY, oh, 0);
- osc = ofpbuf_put_uninit(buf, sizeof *osc);
- flags = ofproto->frag_handling;
- /* OFPC_INVALID_TTL_TO_CONTROLLER is deprecated in OF 1.3 */
- if (oh->version < OFP13_VERSION
- && ofconn_get_invalid_ttl_to_controller(ofconn)) {
- flags |= OFPC_INVALID_TTL_TO_CONTROLLER;
- }
- osc->flags = htons(flags);
- osc->miss_send_len = htons(ofconn_get_miss_send_len(ofconn));
- ofconn_send_reply(ofconn, buf);
+ ofconn_send_reply(ofconn, ofputil_encode_get_config_reply(oh, &config));
return 0;
}
static enum ofperr
handle_set_config(struct ofconn *ofconn, const struct ofp_header *oh)
{
- const struct ofp_switch_config *osc = ofpmsg_body(oh);
struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
- uint16_t flags = ntohs(osc->flags);
+ struct ofputil_switch_config config;
+ enum ofperr error;
+
+ error = ofputil_decode_set_config(oh, &config);
+ if (error) {
+ return error;
+ }
if (ofconn_get_type(ofconn) != OFCONN_PRIMARY
|| ofconn_get_role(ofconn) != OFPCR12_ROLE_SLAVE) {
- enum ofp_config_flags cur = ofproto->frag_handling;
- enum ofp_config_flags next = flags & OFPC_FRAG_MASK;
+ enum ofputil_frag_handling cur = ofproto->frag_handling;
+ enum ofputil_frag_handling next = config.frag;
- ovs_assert((cur & OFPC_FRAG_MASK) == cur);
if (cur != next) {
if (ofproto->ofproto_class->set_frag_handling(ofproto, next)) {
ofproto->frag_handling = next;
}
}
}
- /* OFPC_INVALID_TTL_TO_CONTROLLER is deprecated in OF 1.3 */
- ofconn_set_invalid_ttl_to_controller(ofconn,
- (oh->version < OFP13_VERSION
- && flags & OFPC_INVALID_TTL_TO_CONTROLLER));
- ofconn_set_miss_send_len(ofconn, ntohs(osc->miss_send_len));
+ if (config.invalid_ttl_to_controller >= 0) {
+ ofconn_set_invalid_ttl_to_controller(ofconn,
+ config.invalid_ttl_to_controller);
+ }
+
+ ofconn_set_miss_send_len(ofconn, config.miss_send_len);
return 0;
}
return 0;
}
-static enum ofperr
-handle_queue_get_config_request(struct ofconn *ofconn,
- const struct ofp_header *oh)
+static void
+put_queue_config(struct ofport *ofport, struct ofpbuf *reply)
{
- struct ofproto *p = ofconn_get_ofproto(ofconn);
struct netdev_queue_dump queue_dump;
- struct ofport *ofport;
unsigned int queue_id;
- struct ofpbuf *reply;
struct smap details;
- ofp_port_t request;
- enum ofperr error;
-
- error = ofputil_decode_queue_get_config_request(oh, &request);
- if (error) {
- return error;
- }
-
- ofport = ofproto_get_port(p, request);
- if (!ofport) {
- return OFPERR_OFPQOFC_BAD_PORT;
- }
-
- reply = ofputil_encode_queue_get_config_reply(oh);
smap_init(&details);
NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump, ofport->netdev) {
/* None of the existing queues have compatible properties, so we
* hard-code omitting min_rate and max_rate. */
+ queue.port = ofport->ofp_port;
queue.queue_id = queue_id;
queue.min_rate = UINT16_MAX;
queue.max_rate = UINT16_MAX;
ofputil_append_queue_get_config_reply(reply, &queue);
}
smap_destroy(&details);
+}
+
+static enum ofperr
+handle_queue_get_config_request(struct ofconn *ofconn,
+ const struct ofp_header *oh)
+{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
+ ofp_port_t port;
+ enum ofperr error;
+ error = ofputil_decode_queue_get_config_request(oh, &port);
+ if (error) {
+ return error;
+ }
+
+ struct ofpbuf *reply = ofputil_encode_queue_get_config_reply(oh);
+ struct ofport *ofport;
+ if (port == OFPP_ANY) {
+ HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) {
+ put_queue_config(ofport, reply);
+ }
+ } else {
+ ofport = ofproto_get_port(ofproto, port);
+ if (!ofport) {
+ ofpbuf_delete(reply);
+ return OFPERR_OFPQOFC_BAD_PORT;
+ }
+ put_queue_config(ofport, reply);
+ }
ofconn_send_reply(ofconn, reply);
return 0;
<ul>
<li>
- Priority-150 flows that matches ARP requests to each known IP address
- <var>A</var> of logical port <var>P</var>, and respond ARP replies
- directly with corresponding Ethernet address <var>E</var>:
+ <p>
+ Priority-150 flows that match ARP requests to each known IP address
+ <var>A</var> of logical port <var>P</var>, and respond with ARP
+ replies directly with corresponding Ethernet address <var>E</var>:
+ </p>
+
<pre>
eth.dst = eth.src;
eth.src = <var>E</var>;
inport = ""; /* Allow sending out inport. */
output;
</pre>
+
+ <p>
+ These flows are omitted for logical ports (other than router ports)
+ that are down.
+ </p>
</li>
<li>
return !lport->enabled || *lport->enabled;
}
+static bool
+lport_is_up(const struct nbrec_logical_port *lport)
+{
+ return !lport->up || *lport->up;
+}
+
static bool
has_stateful_acl(struct ovn_datapath *od)
{
continue;
}
+ /*
+ * Add ARP reply flows only if either:
+ * - the port is up, or
+ * - the port type is "router".
+ */
+ if (!lport_is_up(op->nbs) && strcmp(op->nbs->type, "router")) {
+ continue;
+ }
+
for (size_t i = 0; i < op->nbs->n_addresses; i++) {
struct eth_addr ea;
ovs_be32 ip;
</dl>
+ <h1>Database Commands</h1>
+ <p>These commands query and modify the contents of <code>ovsdb</code> tables.
+ They are a slight abstraction of the <code>ovsdb</code> interface and
+ as such they operate at a lower level than other <code>ovn-nbctl</code> commands.</p>
+ <p><var>Identifying Tables, Records, and Columns</var></p>
+ <p>Each of these commands has a <var>table</var> parameter to identify a table
+ within the database. Many of them also take a <var>record</var> parameter
+ that identifies a particular record within a table. The <var>record</var>
+ parameter may be the UUID for a record, and many tables offer
+ additional ways to identify records. Some commands also take
+ <var>column</var> parameters that identify a particular field within the
+ records in a table.</p>
+ <p>The following tables are currently defined:</p>
+ <dl>
+ <dt><code>Logical_Switch</code></dt>
+ <dd>
+ An L2 logical switch. Records may be identified by name.
+ </dd>
+
+ <dt><code>Logical_Port</code></dt>
+ <dd>
+ A port within an L2 logical switch. Records may be identified by name.
+ </dd>
+
+ <dt><code>ACL</code></dt>
+ <dd>
+ An ACL rule for a logical switch; the switch points to the rule through its <var>acls</var> column.
+ </dd>
+
+ <dt><code>Logical_Router</code></dt>
+ <dd>
+ An L3 logical router. Records may be identified by name.
+ </dd>
+
+ <dt><code>Logical_Router_Port</code></dt>
+ <dd>
+ A port within an L3 logical router. Records may be identified by name.
+ </dd>
+
+ </dl>
+
+ <xi:include href="lib/db-ctl-base.xml" xmlns:xi="http://www.w3.org/2003/XInclude"/>
+
<h1>Options</h1>
<dl>
Set options related to the type of LPORT\n\
lport-get-options LPORT Get the type specific options for LPORT\n\
\n\
+%s\
+\n\
Options:\n\
--db=DATABASE connect to DATABASE\n\
(default: %s)\n\
-t, --timeout=SECS wait at most SECS seconds\n\
--dry-run do not commit changes to database\n\
--oneline print exactly one line of output per command\n",
- program_name, program_name, nbctl_default_db());
+ program_name, program_name, ctl_get_db_cmd_usage(), nbctl_default_db());
vlog_usage();
printf("\
--no-syslog equivalent to --verbose=nbctl:syslog:warn\n");
json_object_put(monitor_requests, table->name, monitor_request_array);
}
+static void
+destroy_monitored_table(struct monitored_table *mts, size_t n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct monitored_table *mt = &mts[i];
+ ovsdb_column_set_destroy(&mt->columns);
+ }
+
+ free(mts);
+}
+
static void
do_monitor__(struct jsonrpc *rpc, const char *database,
enum ovsdb_monitor_version version,
unixctl_server_wait(unixctl);
poll_block();
}
+
+ json_destroy(request_id);
+ unixctl_server_destroy(unixctl);
+ ovsdb_schema_destroy(schema);
+ destroy_monitored_table(mts, n_mts);
}
static void
print "};"
# Column indexes.
- printEnum(["%s_COL_%s" % (structName.upper(), columnName.upper())
+ printEnum("%s_column_id" % structName.lower(), ["%s_COL_%s" % (structName.upper(), columnName.upper())
for columnName in sorted(table.columns)]
+ ["%s_N_COLUMNS" % structName.upper()])
void %(s)s_init(struct %(s)s *);
void %(s)s_delete(const struct %(s)s *);
struct %(s)s *%(s)s_insert(struct ovsdb_idl_txn *);
+bool %(s)s_is_updated(const struct %(s)s *, enum %(s)s_column_id);
''' % {'s': structName, 'S': structName.upper()}
for columnName, column in sorted(table.columns.iteritems()):
print
# Table indexes.
- printEnum(["%sTABLE_%s" % (prefix.upper(), tableName.upper()) for tableName in sorted(schema.tables)] + ["%sN_TABLES" % prefix.upper()])
+ printEnum("%stable_id" % prefix.lower(), ["%sTABLE_%s" % (prefix.upper(), tableName.upper()) for tableName in sorted(schema.tables)] + ["%sN_TABLES" % prefix.upper()])
print
for tableName in schema.tables:
print "#define %(p)stable_%(t)s (%(p)stable_classes[%(P)sTABLE_%(T)s])" % {
print "\nconst char * %sget_db_version(void);" % prefix
print "\n#endif /* %(prefix)sIDL_HEADER */" % {'prefix': prefix.upper()}
-def printEnum(members):
+def printEnum(type, members):
if len(members) == 0:
return
- print "\nenum {";
+ print "\nenum %s {" % type
for member in members[:-1]:
print " %s," % member
print " %s" % members[-1]
%(s)s_insert(struct ovsdb_idl_txn *txn)
{
return %(s)s_cast(ovsdb_idl_txn_insert(txn, &%(p)stable_classes[%(P)sTABLE_%(T)s], NULL));
+}
+
+bool
+%(s)s_is_updated(const struct %(s)s *row, enum %(s)s_column_id column)
+{
+ return ovsdb_idl_track_is_updated(&row->header_, &%(s)s_columns[column]);
}''' % {'s': structName,
'p': prefix,
'P': prefix.upper(),
EXTRA_DIST += $(PYFILES)
PYCOV_CLEAN_FILES += $(PYFILES:.py=.py,cover)
+FLAKE8_PYFILES += \
+ $(filter-out python/ovs/dirs.py,$(PYFILES)) \
+ python/setup.py \
+ python/build/__init__.py \
+ python/build/nroff.py \
+ python/ovs/dirs.py.template
+
if HAVE_PYTHON
nobase_pkgdata_DATA = $(ovs_pyfiles) $(ovstest_pyfiles)
ovs-install-data-local:
# limitations under the License.
import re
+import sys
from ovs.db import error
+
def text_to_nroff(s, font=r'\fR'):
def escape(match):
c = match.group(0)
s = re.sub('(-[0-9]|--|[-"\'\\\\.])', escape, s)
return s
+
def escape_nroff_literal(s, font=r'\fB'):
return font + r'%s\fR' % text_to_nroff(s, font)
+
def inline_xml_to_nroff(node, font, to_upper=False, newline='\n'):
if node.nodeType == node.TEXT_NODE:
if to_upper:
elif node.hasAttribute('db'):
s += node.attributes['db'].nodeValue
else:
- raise error.Error("'ref' lacks required attributes: %s" % node.attributes.keys())
+ raise error.Error("'ref' lacks required attributes: %s"
+ % node.attributes.keys())
return s + font
elif node.tagName in ['var', 'dfn', 'i']:
s = r'\fI'
s += inline_xml_to_nroff(child, r'\fI', to_upper, newline)
return s + font
else:
- raise error.Error("element <%s> unknown or invalid here" % node.tagName)
+ raise error.Error("element <%s> unknown or invalid here"
+ % node.tagName)
elif node.nodeType == node.COMMENT_NODE:
return ''
else:
raise error.Error("unknown node %s in inline xml" % node)
+
def pre_to_nroff(nodes, para, font):
# This puts 'font' at the beginning of each line so that leading and
# trailing whitespace stripping later doesn't removed leading spaces
s += '\n.fi\n'
return s
+
+def fatal(msg):
+ sys.stderr.write('%s\n' % msg)
+ sys.exit(1)
+
+
def diagram_header_to_nroff(header_node):
header_fields = []
i = 0
pic_s = ""
for f in header_fields:
- pic_s += " %s: box \"%s\" width %s" % (f['tag'], f['name'], f['width'])
+ pic_s += " %s: box \"%s\" width %s" % (f['tag'], f['name'],
+ f['width'])
if f['fill'] == 'yes':
pic_s += " fill"
pic_s += '\n'
text_s += "\n"
return pic_s, text_s
+
def diagram_to_nroff(nodes, para):
pic_s = ''
text_s = ''
.RE
\\}"""
+
def block_xml_to_nroff(nodes, para='.PP'):
s = ''
for node in nodes:
pass
elif (li_node.nodeType != node.TEXT_NODE
or not li_node.data.isspace()):
- raise error.Error("<%s> element may only have <li> children" % node.tagName)
+ raise error.Error("<%s> element may only have "
+ "<li> children" % node.tagName)
s += ".RE\n"
elif node.tagName == 'dl':
if s != "":
continue
elif (li_node.nodeType != node.TEXT_NODE
or not li_node.data.isspace()):
- raise error.Error("<dl> element may only have <dt> and <dd> children")
+ raise error.Error("<dl> element may only have "
+ "<dt> and <dd> children")
s += block_xml_to_nroff(li_node.childNodes, ".IP")
s += ".RE\n"
elif node.tagName == 'p':
import ovs.dirs
import ovs.fatal_signal
-#import ovs.lockfile
import ovs.process
import ovs.socket_util
import ovs.timeval
global file_handle
file_handle = open(tmpfile, "w")
- except IOError, e:
+ except IOError as e:
_fatal("%s: create failed (%s)" % (tmpfile, e.strerror))
try:
s = os.fstat(file_handle.fileno())
- except IOError, e:
+ except IOError as e:
_fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))
try:
file_handle.write("%s\n" % pid)
file_handle.flush()
- except OSError, e:
+ except OSError as e:
_fatal("%s: write failed: %s" % (tmpfile, e.strerror))
try:
fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
- except IOError, e:
+ except IOError as e:
_fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))
# Rename or link it to the correct name.
if _overwrite_pidfile:
try:
os.rename(tmpfile, _pidfile)
- except OSError, e:
+ except OSError as e:
_fatal("failed to rename \"%s\" to \"%s\" (%s)"
% (tmpfile, _pidfile, e.strerror))
else:
try:
os.link(tmpfile, _pidfile)
error = 0
- except OSError, e:
+ except OSError as e:
error = e.errno
if error == errno.EEXIST:
_check_already_running()
while True:
try:
return os.waitpid(pid, options)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.EINTR:
pass
return -e.errno, 0
def _fork_and_wait_for_startup():
try:
rfd, wfd = os.pipe()
- except OSError, e:
+ except OSError as e:
sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
sys.exit(1)
try:
pid = os.fork()
- except OSError, e:
+ except OSError as e:
sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
sys.exit(1)
try:
s = os.read(rfd, 1)
error = 0
- except OSError, e:
+ except OSError as e:
s = ""
error = e.errno
if error != errno.EINTR:
# Running in parent process.
os.close(rfd)
ovs.timeval.postfork()
- #ovs.lockfile.postfork()
global _daemonize_fd
_daemonize_fd = wfd
wakeup = last_restart + 10000
if now > wakeup:
break
- print "sleep %f" % ((wakeup - now) / 1000.0)
+ sys.stdout.write("sleep %f\n" % (
+ (wakeup - now) / 1000.0))
time.sleep((wakeup - now) / 1000.0)
last_restart = ovs.timeval.msec()
vlog.info("%s, exiting" % status_msg)
sys.exit(0)
- # Running in new daemon process.
+ # Running in new daemon process.
def _close_standard_fds():
try:
file_handle = open(pidfile, "r+")
- except IOError, e:
+ except IOError as e:
if e.errno == errno.ENOENT and delete_if_stale:
return 0
vlog.warn("%s: open: %s" % (pidfile, e.strerror))
# We won the right to delete the stale pidfile.
try:
os.unlink(pidfile)
- except IOError, e:
+ except IOError as e:
vlog.warn("%s: failed to delete stale pidfile (%s)"
% (pidfile, e.strerror))
return -e.errno
vlog.dbg("%s: deleted stale pidfile" % pidfile)
file_handle.close()
return 0
- except IOError, e:
+ except IOError as e:
if e.errno not in [errno.EACCES, errno.EAGAIN]:
vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
return -e.errno
try:
try:
error = int(file_handle.readline())
- except IOError, e:
+ except IOError as e:
vlog.warn("%s: read: %s" % (pidfile, e.strerror))
error = -e.errno
except ValueError:
if (msg.type == ovs.jsonrpc.Message.T_NOTIFY
and msg.method == "update"
and len(msg.params) == 2
- and msg.params[0] == None):
+ and msg.params[0] is None):
# Database contents changed.
self.__parse_update(msg.params[1])
elif (msg.type == ovs.jsonrpc.Message.T_REPLY
self._monitor_request_id = None
self.__clear()
self.__parse_update(msg.result)
- except error.Error, e:
+ except error.Error as e:
vlog.err("%s: parse error in received schema: %s"
% (self._session.get_name(), e))
self.__error()
and type(params) in (list, tuple)
and params
and params[0] == self.lock_name):
- self.__update_has_lock(self, new_has_lock)
+ self.__update_has_lock(new_has_lock)
if not new_has_lock:
self.is_lock_contended = True
def __parse_update(self, update):
try:
self.__do_parse_update(update)
- except error.Error, e:
+ except error.Error as e:
vlog.err("%s: error parsing update: %s"
% (self._session.get_name(), e))
try:
datum = ovs.db.data.Datum.from_json(column.type, datum_json)
- except error.Error, e:
+ except error.Error as e:
# XXX rate-limit
vlog.warn("error parsing column %s in table %s: %s"
% (column_name, table.name, e))
if ((self._table.name in self._idl.readonly) and
(column_name in self._idl.readonly[self._table.name])):
- vlog.warn("attempting to write to readonly column %s" % column_name)
+ vlog.warn("attempting to write to readonly column %s"
+ % column_name)
return
column = self._table.columns[column_name]
try:
datum = ovs.db.data.Datum.from_python(column.type, value,
_row_to_uuid)
- except error.Error, e:
+ except error.Error as e:
# XXX rate-limit
vlog.err("attempting to write bad value to column %s (%s)"
% (column_name, e))
continue
try:
datum = ovs.db.data.Datum.from_json(column.type, datum_json)
- except error.Error, e:
+ except error.Error as e:
# XXX rate-limit
vlog.warn("error parsing column %s in table %s: %s"
% (column_name, table.name, e))
of Idl.change_seqno. (Transaction.commit_block() calls Idl.run().)"""
# Status values that Transaction.commit() can return.
- UNCOMMITTED = "uncommitted" # Not yet committed or aborted.
- UNCHANGED = "unchanged" # Transaction didn't include any changes.
- INCOMPLETE = "incomplete" # Commit in progress, please wait.
- ABORTED = "aborted" # ovsdb_idl_txn_abort() called.
- SUCCESS = "success" # Commit successful.
- TRY_AGAIN = "try again" # Commit failed because a "verify" operation
- # reported an inconsistency, due to a network
- # problem, or other transient failure. Wait
- # for a change, then try again.
- NOT_LOCKED = "not locked" # Server hasn't given us the lock yet.
- ERROR = "error" # Commit failed due to a hard error.
+
+ # Not yet committed or aborted.
+ UNCOMMITTED = "uncommitted"
+ # Transaction didn't include any changes.
+ UNCHANGED = "unchanged"
+ # Commit in progress, please wait.
+ INCOMPLETE = "incomplete"
+ # ovsdb_idl_txn_abort() called.
+ ABORTED = "aborted"
+ # Commit successful.
+ SUCCESS = "success"
+ # Commit failed because a "verify" operation
+ # reported an inconsistency, due to a network
+ # problem, or other transient failure. Wait
+ # for a change, then try again.
+ TRY_AGAIN = "try again"
+ # Server hasn't given us the lock yet.
+ NOT_LOCKED = "not locked"
+ # Commit failed due to a hard error.
+ ERROR = "error"
@staticmethod
def status_to_string(status):
self._inc_column = column
def _fetch(self, row, column_name):
- self._fetch_requests.append({"row":row, "column_name":column_name})
+ self._fetch_requests.append({"row": row, "column_name": column_name})
def _write(self, row, column, datum):
assert row._changes is not None
# transaction only does writes of existing values, without making any
# real changes, we will drop the whole transaction later in
# ovsdb_idl_txn_commit().)
- if not column.alert and row._data and row._data.get(column.name) == datum:
+ if (not column.alert and row._data and
+ row._data.get(column.name) == datum):
new_value = row._changes.get(column.name)
if new_value is None or new_value == datum:
return
if len(fetched_rows) != 1:
# XXX rate-limit
vlog.warn('"select" reply "rows" has %d elements '
- 'instead of 1' % len(rows))
+ 'instead of 1' % len(fetched_rows))
continue
fetched_row = fetched_rows[0]
if not Transaction.__check_json_type(fetched_row, (dict,),
def json_type_to_string(type_):
- if type_ == None:
+ if type_ is None:
return "null"
elif type_ == bool:
return "boolean"
from ovs.db import error
import ovs.db.parser
-from ovs.db import types
+import ovs.db.types
def _check_id(name, json):
return DbSchema.from_json(self.to_json())
def __follow_ref_table(self, column, base, base_name):
- if not base or base.type != types.UuidType or not base.ref_table_name:
+ if (not base or base.type != ovs.db.types.UuidType
+ or not base.ref_table_name):
return
base.ref_table = self.tables.get(base.ref_table_name)
indexes_json = parser.get_optional("indexes", [list], [])
parser.finish()
- if max_rows == None:
+ if max_rows is None:
max_rows = sys.maxint
elif max_rows <= 0:
raise error.Error("maxRows must be at least 1", json)
parser = ovs.db.parser.Parser(json, "schema for column %s" % name)
mutable = parser.get_optional("mutable", [bool], True)
ephemeral = parser.get_optional("ephemeral", [bool], False)
- type_ = types.Type.from_json(parser.get("type", [dict, str, unicode]))
+ type_ = ovs.db.types.Type.from_json(parser.get("type",
+ [dict, str, unicode]))
parser.finish()
return ColumnSchema(name, mutable, not ephemeral, type_)
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The @variables@ in this file are replaced by default directories for
+# use in python/ovs/dirs.py in the source directory and replaced by the
+# configured directories for use in the installed python/ovs/dirs.py.
+#
import os
+
+# Note that the use of """ is to aid in dealing with paths with quotes in them.
PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """/usr/local/share/openvswitch""")
RUNDIR = os.environ.get("OVS_RUNDIR", """/var/run""")
LOGDIR = os.environ.get("OVS_LOGDIR", """/usr/local/var/log""")
-## The @variables@ in this file are replaced by default directories for
-## use in python/ovs/dirs.py in the source directory and replaced by the
-## configured directories for use in the installed python/ovs/dirs.py.
-##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The @variables@ in this file are replaced by default directories for
+# use in python/ovs/dirs.py in the source directory and replaced by the
+# configured directories for use in the installed python/ovs/dirs.py.
+#
import os
+
+# Note that the use of """ is to aid in dealing with paths with quotes in them.
PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """@pkgdatadir@""")
RUNDIR = os.environ.get("OVS_RUNDIR", """@RUNDIR@""")
LOGDIR = os.environ.get("OVS_LOGDIR", """@LOGDIR@""")
try:
os.unlink(file_)
return 0
- except OSError, e:
+ except OSError as e:
return e.errno
\f
def from_string(s):
try:
s = unicode(s, 'utf-8')
- except UnicodeDecodeError, e:
+ except UnicodeDecodeError as e:
seq = ' '.join(["0x%2x" % ord(c)
for c in e.object[e.start:e.end] if ord(c) >= 0x80])
return ("not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq)
class Parser(object):
- ## Maximum height of parsing stack. ##
+ # Maximum height of parsing stack. #
MAX_HEIGHT = 1000
def __init__(self, check_trailer=False):
elif self.parse_state != Parser.__parse_end:
self.__error("unexpected end of input")
- if self.error == None:
+ if self.error is None:
assert len(self.stack) == 1
return self.stack.pop()
else:
return "\"params\" must be JSON array"
pattern = {Message.T_REQUEST: 0x11001,
- Message.T_NOTIFY: 0x11000,
- Message.T_REPLY: 0x00101,
- Message.T_ERROR: 0x00011}.get(self.type)
+ Message.T_NOTIFY: 0x11000,
+ Message.T_REPLY: 0x00101,
+ Message.T_ERROR: 0x00011}.get(self.type)
if pattern is None:
return "invalid JSON-RPC message type %s" % self.type
request.id = "echo"
self.rpc.send(request)
else:
- assert action == None
+ assert action is None
def wait(self, poller):
if self.rpc is not None:
if not uuidRE.match(s):
raise error.Error("\"%s\" is not a valid UUID" % s, json)
return uuid.UUID(s)
- except error.Error, e:
+ except error.Error as e:
if not symtab:
raise e
try:
POLLHUP = 0x010
POLLNVAL = 0x020
+
# eventlet/gevent doesn't support select.poll. If select.poll is used,
# python interpreter is blocked as a whole instead of switching from the
# current thread that is about to block to other runnable thread.
try:
events = self.poll.poll(self.timeout)
self.__log_wakeup(events)
- except select.error, e:
+ except select.error as e:
# XXX rate-limit
error, msg = e
if error != errno.EINTR:
import os
import os.path
import random
-import select
import socket
import sys
os.symlink(long_dirname, link_name)
ovs.fatal_signal.add_file_to_unlink(link_name)
return os.path.join(link_name, os.path.basename(long_name))
- except OSError, e:
+ except OSError as e:
if e.errno != errno.EEXIST:
break
raise Exception("Failed to create temporary symlink")
try:
sock = socket.socket(socket.AF_UNIX, style)
- except socket.error, e:
+ except socket.error as e:
return get_exception_errno(e), None
try:
# Delete bind_path but ignore ENOENT.
try:
os.unlink(bind_path)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.ENOENT:
return e.errno, None
try:
if sys.hexversion >= 0x02060000:
- os.fchmod(sock.fileno(), 0700)
+ os.fchmod(sock.fileno(), 0o700)
else:
- os.chmod("/dev/fd/%d" % sock.fileno(), 0700)
- except OSError, e:
+ os.chmod("/dev/fd/%d" % sock.fileno(), 0o700)
+ except OSError as e:
pass
if connect_path is not None:
try:
sock.connect(connect_path)
- except socket.error, e:
+ except socket.error as e:
if get_exception_errno(e) != errno.EINPROGRESS:
raise
return 0, sock
- except socket.error, e:
+ except socket.error as e:
sock.close()
if (bind_path is not None and
os.path.exists(bind_path)):
dirname = os.path.dirname(connect_path)
basename = os.path.basename(connect_path)
try:
- connect_dirfd = os.open(dirname, os.O_DIRECTORY | os.O_RDONLY)
- except OSError, err:
+ connect_dirfd = os.open(dirname,
+ os.O_DIRECTORY | os.O_RDONLY)
+ except OSError as err:
return get_exception_errno(err), None
- short_connect_path = "/proc/self/fd/%d/%s" % (connect_dirfd, basename)
+ short_connect_path = "/proc/self/fd/%d/%s" % (connect_dirfd,
+ basename)
if bind_path is not None:
dirname = os.path.dirname(bind_path)
basename = os.path.basename(bind_path)
try:
bind_dirfd = os.open(dirname, os.O_DIRECTORY | os.O_RDONLY)
- except OSError, err:
+ except OSError as err:
return get_exception_errno(err), None
- short_bind_path = "/proc/self/fd/%d/%s" % (bind_dirfd, basename)
+ short_bind_path = "/proc/self/fd/%d/%s" % (bind_dirfd,
+ basename)
try:
- return make_unix_socket(style, nonblock, short_bind_path, short_connect_path)
+ return make_unix_socket(style, nonblock, short_bind_path,
+ short_connect_path)
finally:
if connect_dirfd is not None:
os.close(connect_dirfd)
# XXX rate-limit
vlog.err("poll return POLLERR but send succeeded")
return errno.EPROTO
- except socket.error, e:
+ except socket.error as e:
return get_exception_errno(e)
else:
return 0
else:
sock = socket.socket(socket.AF_INET6, style, 0)
family = socket.AF_INET6
- except socket.error, e:
+ except socket.error as e:
return get_exception_errno(e), None
try:
set_dscp(sock, family, dscp)
try:
sock.connect(address)
- except socket.error, e:
+ except socket.error as e:
if get_exception_errno(e) != errno.EINPROGRESS:
raise
return 0, sock
- except socket.error, e:
+ except socket.error as e:
sock.close()
return get_exception_errno(e), None
if null_fd < 0:
try:
null_fd = os.open("/dev/null", os.O_RDWR)
- except OSError, e:
+ except OSError as e:
vlog.err("could not open /dev/null: %s" % os.strerror(e.errno))
return -e.errno
return null_fd
else:
bytes_written += retval
buf = buf[:retval]
- except OSError, e:
+ except OSError as e:
return e.errno, bytes_written
def set_nonblocking(sock):
try:
sock.setblocking(0)
- except socket.error, e:
+ except socket.error as e:
vlog.err("could not set nonblocking mode on socket: %s"
% os.strerror(get_exception_errno(e)))
val = dscp << 2
if family == socket.AF_INET:
- try:
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, val)
- except socket.error, e:
- raise
+ sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, val)
elif family == socket.AF_INET6:
- try:
- sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, val)
- except socket.error, e:
- raise
+ sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, val)
else:
- raise
+ raise ValueError('Invalid family %d' % family)
raise NotImplementedError("This method must be overrided by subclass")
@staticmethod
- def open_block((error, stream)):
+ def open_block(error_stream):
"""Blocks until a Stream completes its connection attempt, either
succeeding or failing. (error, stream) should be the tuple returned by
Stream.open(). Returns a tuple of the same form.
Typical usage:
error, stream = Stream.open_block(Stream.open("unix:/tmp/socket"))"""
+ # Py3 doesn't support tuple parameter unpacking - PEP 3113
+ error, stream = error_stream
if not error:
while True:
error = stream.connect()
try:
return (0, self.socket.recv(n))
- except socket.error, e:
+ except socket.error as e:
return (ovs.socket_util.get_exception_errno(e), "")
def send(self, buf):
try:
return self.socket.send(buf)
- except socket.error, e:
+ except socket.error as e:
return -ovs.socket_util.get_exception_errno(e)
def run(self):
try:
sock.listen(10)
- except socket.error, e:
+ except socket.error as e:
vlog.err("%s: listen: %s" % (name, os.strerror(e.error)))
sock.close()
return e.error, None
sock, addr = self.socket.accept()
ovs.socket_util.set_nonblocking(sock)
return 0, Stream(sock, "unix:%s" % addr, 0)
- except socket.error, e:
+ except socket.error as e:
error = ovs.socket_util.get_exception_errno(e)
if error != errno.EAGAIN:
# XXX rate-limit
@staticmethod
def _open(suffix, dscp):
connect_path = suffix
- return ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
- True, None, connect_path)
+ return ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
+ True, None, connect_path)
Stream.register_method("unix", UnixStream)
# Librt shared library could not be loaded
librt = None
+
def monotonic():
if not librt:
return time.time()
# Kernel does not support CLOCK_MONOTONIC
return time.time()
+
# Use time.monotonic() if Python version >= 3.3
if not hasattr(time, 'monotonic'):
time.monotonic = monotonic
+
def msec():
""" Returns the system's monotonic time if possible, otherwise returns the
current time as the amount of time since the epoch, in milliseconds, as a
commands[name] = _UnixctlCommand(usage, min_args, max_args, callback,
aux)
+
def socket_name_from_target(target):
assert isinstance(target, strtypes)
# See the License for the specific language governing permissions and
# limitations under the License.
-import copy
-import errno
import os
import types
version = "%s (Open vSwitch) %s" % (ovs.util.PROGRAM_NAME, version)
conn.reply(version)
+
class UnixctlServer(object):
def __init__(self, listener):
assert isinstance(listener, ovs.stream.PassiveStream)
return LEVELS.get(level_str.lower())
-class Vlog:
+class Vlog(object):
__inited = False
__msg_num = 0
__start_time = 0
matches = formatting.match(match)
# Do we need to apply padding?
if not matches.group(1) and replace != "":
- replace = replace.center(len(replace)+2)
+ replace = replace.center(len(replace) + 2)
# Does the field have a minimum width
if matches.group(2):
min_width = int(matches.group(2))
CONTROL_PORT = 15531
DATA_PORT = 15532
+
def ip_address(string):
"""Verifies if string is a valid IP address"""
try:
'OuterIP.')
return parser.parse_args()
+
def l3_initialize_args():
"""
Initialize argument parsing for ovs-l3ping utility.
rpcserver is an XML RPC server that allows RPC client to initiate tests
"""
+from __future__ import print_function
+
import exceptions
import sys
import xmlrpclib
rpc_server = TestArena()
reactor.listenTCP(port, server.Site(rpc_server))
try:
- print "Starting RPC server\n"
+ print("Starting RPC server\n")
sys.stdout.flush()
- # If this server was started from ovs-test client then we must flush
- # STDOUT so that client would know that server is ready to accept
- # XML RPC connections.
+ # If this server was started from ovs-test client then we must flush
+ # STDOUT so that client would know that server is ready to accept
+ # XML RPC connections.
reactor.run()
finally:
rpc_server.cleanup()
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
import math
import time
udpformat = '{0:>15} {1:>15} {2:>15} {3:>15} {4:>15}'
- print ("UDP test from %s:%u to %s:%u with target bandwidth %s" %
+ print("UDP test from %s:%u to %s:%u with target bandwidth %s" %
(sender[0], sender[1], receiver[0], receiver[1],
util.bandwidth_to_string(tbwidth)))
- print udpformat.format("Datagram Size", "Snt Datagrams", "Rcv Datagrams",
- "Datagram Loss", "Bandwidth")
+ print(udpformat.format("Datagram Size", "Snt Datagrams", "Rcv Datagrams",
+ "Datagram Loss", "Bandwidth"))
for size in port_sizes:
listen_handle = NO_HANDLE
listen_handle = server1.create_udp_listener(receiver[3])
if listen_handle == NO_HANDLE:
- print ("Server could not open UDP listening socket on port"
- " %u. Try to restart the server.\n" % receiver[3])
+ print("Server could not open UDP listening socket on port"
+ " %u. Try to restart the server.\n" % receiver[3])
return
send_handle = server2.create_udp_sender(
(util.ip_from_cidr(receiver[2]),
snt_packets) / 100
bwidth = (rcv_packets * size) / duration
- print udpformat.format(size, snt_packets, rcv_packets,
- '%.2f%%' % loss, util.bandwidth_to_string(bwidth))
+ print(udpformat.format(size, snt_packets, rcv_packets,
+ '%.2f%%' % loss, util.bandwidth_to_string(bwidth)))
finally:
if listen_handle != NO_HANDLE:
server1.close_udp_listener(listen_handle)
if send_handle != NO_HANDLE:
server2.close_udp_sender(send_handle)
- print "\n"
+ print("\n")
def do_tcp_tests(receiver, sender, duration):
server2 = util.rpc_client(sender[0], sender[1])
tcpformat = '{0:>15} {1:>15} {2:>15}'
- print "TCP test from %s:%u to %s:%u (full speed)" % (sender[0], sender[1],
- receiver[0], receiver[1])
- print tcpformat.format("Snt Bytes", "Rcv Bytes", "Bandwidth")
+ print("TCP test from %s:%u to %s:%u (full speed)" % (sender[0], sender[1],
+ receiver[0], receiver[1]))
+ print(tcpformat.format("Snt Bytes", "Rcv Bytes", "Bandwidth"))
listen_handle = NO_HANDLE
send_handle = NO_HANDLE
try:
listen_handle = server1.create_tcp_listener(receiver[3])
if listen_handle == NO_HANDLE:
- print ("Server was unable to open TCP listening socket on port"
- " %u. Try to restart the server.\n" % receiver[3])
+ print("Server was unable to open TCP listening socket on port"
+ " %u. Try to restart the server.\n" % receiver[3])
return
send_handle = server2.create_tcp_sender(util.ip_from_cidr(receiver[2]),
receiver[3], duration)
bwidth = rcv_bytes / duration
- print tcpformat.format(snt_bytes, rcv_bytes,
- util.bandwidth_to_string(bwidth))
+ print(tcpformat.format(snt_bytes, rcv_bytes,
+ util.bandwidth_to_string(bwidth)))
finally:
if listen_handle != NO_HANDLE:
server1.close_tcp_listener(listen_handle)
if send_handle != NO_HANDLE:
server2.close_tcp_sender(send_handle)
- print "\n"
+ print("\n")
def do_l3_tests(node1, node2, bandwidth, duration, ps, type):
server.del_bridge(DEFAULT_TEST_BRIDGE)
-
def do_vlan_tests(node1, node2, bandwidth, duration, ps, tag):
"""
Do VLAN tests between node1 and node2. Each node is given
def __init__(self):
self.stats = []
- def datagramReceived(self, data, (_1, _2)):
+ def datagramReceived(self, data, _1_2):
"""This function is called each time datagram is received"""
try:
self.stats.append(struct.unpack_from("Q", data, 0))
def startProtocol(self):
self.looper = LoopingCall(self.sendData)
period = self.duration / float(self.count)
- self.looper.start(period , now = False)
+ self.looper.start(period, now=False)
def stopProtocol(self):
if (self.looper is not None):
self.looper.stop()
self.looper = None
- def datagramReceived(self, data, (host, port)):
+ def datagramReceived(self, data, host_port):
pass
def sendData(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
indata = iface + ('\0' * (32 - len(iface)))
try:
- outdata = fcntl.ioctl(s.fileno(), 0x8921, indata) # socket.SIOCGIFMTU
+ outdata = fcntl.ioctl(s.fileno(), 0x8921, indata) # socket.SIOCGIFMTU
mtu = struct.unpack("16si12x", outdata)[1]
except:
return 0
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
- 0x8912, # SIOCGIFCONF
+ 0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
namestr = names.tostring()
def start_process(args):
try:
p = subprocess.Popen(args,
- stdin = subprocess.PIPE,
- stdout = subprocess.PIPE,
- stderr = subprocess.PIPE)
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
out, err = p.communicate()
return (p.returncode, out, err)
except exceptions.OSError:
p = subprocess.Popen(["ovs-test", "-s", str(port)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
preexec_fn=sigint_intercept)
- fcntl.fcntl( p.stdout.fileno(),fcntl.F_SETFL,
+ fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL,
fcntl.fcntl(p.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
while p.poll() is None:
"""
vswitch module allows its callers to interact with OVS DB.
"""
-import exceptions
-import subprocess
-
import util
ret, _out, _err = util.start_process(["ovs-vsctl", "del-br", bridge])
return ret
+
def ovs_vsctl_del_pbridge(bridge, iface):
"""
This function deletes the OVS bridge and assigns the bridge IP address
try:
# Try to set the version from the generated ovs/version.py
- execfile("ovs/version.py")
+ exec(open("ovs/version.py").read())
except IOError:
print("Ensure version.py is created by running make python/ovs/version.py",
file=sys.stderr)
import re
+
def xapi_local():
return Session()
def __init__(self):
Table.__init__(self, Pool.__records)
+
class VIF(Table):
__records = ({"uuid": "6ab1b260-398e-49ba-827b-c7696108964c",
"other_config":
def __init__(self):
Table.__init__(self, VIF.__records)
+
class VM(Table):
__records = ({"uuid": "fcb8a3f6-dc04-41d2-8b8a-55afd2b755b8",
"other_config":
EXTRA_DIST += $(CHECK_PYFILES)
PYCOV_CLEAN_FILES += $(CHECK_PYFILES:.py=.py,cover) .coverage
+FLAKE8_PYFILES += $(CHECK_PYFILES)
+
if HAVE_OPENSSL
TESTPKI_FILES = \
tests/testpki-cacert.pem \
AT_CHECK([STRIP_XIDS stdout], [0], [dnl
OFPT_QUEUE_GET_CONFIG_REPLY: port=1
])
+AT_CHECK([ovs-ofctl queue-get-config br0], [0], [stdout])
+AT_CHECK([STRIP_XIDS stdout | sort], [0], [dnl
+OFPT_QUEUE_GET_CONFIG_REPLY: port=1
+OFPT_QUEUE_GET_CONFIG_REPLY: port=2
+])
AT_CHECK([ovs-ofctl queue-get-config br0 10], [0],
[OFPT_ERROR (xid=0x2): OFPQOFC_BAD_PORT
OFPT_QUEUE_GET_CONFIG_REQUEST (xid=0x2): port=10
AT_CHECK([STRIP_XIDS stdout], [0], [dnl
OFPT_QUEUE_GET_CONFIG_REPLY (OF1.2): port=1
])
+AT_CHECK([ovs-ofctl -O OpenFlow12 queue-get-config br0 ANY], [0], [stdout])
+AT_CHECK([STRIP_XIDS stdout], [0], [dnl
+OFPT_QUEUE_GET_CONFIG_REPLY (OF1.2): port=ANY
+])
AT_CHECK([ovs-ofctl -O OpenFlow12 queue-get-config br0 10], [0],
[OFPT_ERROR (OF1.2) (xid=0x2): OFPQOFC_BAD_PORT
OFPT_QUEUE_GET_CONFIG_REQUEST (OF1.2) (xid=0x2): port=10
"where": [],
"row": {"b": true}}]']],
[[000: i=1 r=2 b=true s=mystring u=<0> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<1> <2>] uuid=<3>
+000: updated columns: b ba i ia r ra s sa u ua
001: {"error":null,"result":[{"count":2}]}
002: i=0 r=0 b=true s= u=<4> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<5>
002: i=1 r=2 b=true s=mystring u=<0> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<1> <2>] uuid=<3>
+002: updated columns: b
003: done
]])
[[000: empty
001: {"error":null,"result":[{"uuid":["uuid","<0>"]},{"uuid":["uuid","<1>"]}]}
002: i=1 r=2 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<0>
+002: updated columns: b ba i ia r ra s sa u ua
003: {"error":null,"result":[{"count":2}]}
004: i=0 r=0 b=true s= u=<5> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+004: updated columns: b
005: {"error":null,"result":[{"count":2}]}
006: i=0 r=123.5 b=true s= u=<5> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
006: i=1 r=123.5 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<0>
+006: updated columns: r
+006: updated columns: r
007: {"error":null,"result":[{"uuid":["uuid","<6>"]}]}
008: i=-1 r=125 b=false s= u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
+008: updated columns: ba i ia r ra
009: {"error":null,"result":[{"count":2}]}
010: i=-1 r=125 b=false s=newstring u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
010: i=0 r=123.5 b=true s=newstring u=<5> ia=[] ra=[] ba=[] sa=[] ua=[] uuid=<1>
+010: updated columns: s
+010: updated columns: s
011: {"error":null,"result":[{"count":1}]}
012: ##deleted## uuid=<1>
013: reconnect
014: i=-1 r=125 b=false s=newstring u=<5> ia=[1] ra=[1.5] ba=[false] sa=[] ua=[] uuid=<6>
014: i=1 r=123.5 b=true s=mystring u=<2> ia=[1 2 3] ra=[-0.5] ba=[true] sa=[abc def] ua=[<3> <4>] uuid=<0>
+014: updated columns: b ba i ia r ra s sa u ua
+014: updated columns: ba i ia r ra s
015: done
]])
/* Verify auto attach values */
check_received_aa(&hardware.h_lport, nport, map_init);
+ lldpd_chassis_cleanup(nchassis, true);
+ lldpd_port_cleanup(nport, true);
+ free(nport);
+ lldp_destroy_dummy(lldp);
+
return 0;
}
# limitations under the License.
import argparse
-import logging
import signal
import sys
import time
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import codecs
import getopt
import sys
def print_json(json):
if type(json) in [str, unicode]:
- print "error: %s" % json
+ print("error: %s" % json)
return False
else:
ovs.json.to_stream(json, sys.stdout)
try:
options, args = getopt.gnu_getopt(argv[1:], '', ['multiple'])
- except getopt.GetoptError, geo:
+ except getopt.GetoptError as geo:
sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
sys.exit(1)
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import argparse
import errno
import os
if error:
rpc.close()
dead_rpcs.append(rpc)
- rpcs = [rpc for rpc in rpcs if not rpc in dead_rpcs]
+ rpcs = [rpc for rpc in rpcs if rpc not in dead_rpcs]
if done and not rpcs:
break
sys.stderr.write("error waiting for reply: %s\n" % os.strerror(error))
sys.exit(1)
- print ovs.json.to_string(msg.to_json())
+ print(ovs.json.to_string(msg.to_json()))
rpc.close()
command_name = args.command[0]
args = args.command_args
- if not command_name in commands:
+ if command_name not in commands:
sys.stderr.write("%s: unknown command \"%s\" "
"(use --help for help)\n" % (argv[0], command_name))
sys.exit(1)
def main():
SERVERS = {
- 'http': [TCPServer, SimpleHTTPRequestHandler, 80],
+ 'http': [TCPServer, SimpleHTTPRequestHandler, 80],
'http6': [TCPServerV6, SimpleHTTPRequestHandler, 80],
}
unixctl_server_wait(server);
poll_block();
}
+
+ ofpbuf_uninit(&buf);
+ unixctl_server_destroy(server);
}
static void
/* Apply diff to 'old' to create'reincarnation'. */
error = ovsdb_datum_apply_diff(&reincarnation, &old, &diff, &type);
if (error) {
- ovs_fatal(0, "%s", ovsdb_error_to_string(error));
+ char *string = ovsdb_error_to_string(error);
+ ovsdb_error_destroy(error);
+ ovs_fatal(0, "%s", string);
}
/* Test to make sure 'new' equals 'reincarnation'. */
return a->i < b->i ? -1 : a->i > b->i;
}
+static void
+print_idl_row_updated_simple(const struct idltest_simple *s, int step)
+{
+ size_t i;
+ bool updated = false;
+
+ for (i = 0; i < IDLTEST_SIMPLE_N_COLUMNS; i++) {
+ if (idltest_simple_is_updated(s, i)) {
+ if (!updated) {
+ printf("%03d: updated columns:", step);
+ updated = true;
+ }
+ printf(" %s", idltest_simple_columns[i].name);
+ }
+ }
+ if (updated) {
+ printf("\n");
+ }
+}
+
+static void
+print_idl_row_updated_link1(const struct idltest_link1 *l1, int step)
+{
+ size_t i;
+ bool updated = false;
+
+ for (i = 0; i < IDLTEST_LINK1_N_COLUMNS; i++) {
+ if (idltest_link1_is_updated(l1, i)) {
+ if (!updated) {
+ printf("%03d: updated columns:", step);
+ updated = true;
+ }
+ printf(" %s", idltest_link1_columns[i].name);
+ }
+ }
+ if (updated) {
+ printf("\n");
+ }
+}
+
+static void
+print_idl_row_updated_link2(const struct idltest_link2 *l2, int step)
+{
+ size_t i;
+ bool updated = false;
+
+ for (i = 0; i < IDLTEST_LINK2_N_COLUMNS; i++) {
+ if (idltest_link2_is_updated(l2, i)) {
+ if (!updated) {
+ printf("%03d: updated columns:", step);
+ updated = true;
+ }
+ printf(" %s", idltest_link2_columns[i].name);
+ }
+ }
+ if (updated) {
+ printf("\n");
+ }
+}
+
static void
print_idl_row_simple(const struct idltest_simple *s, int step)
{
printf("%s"UUID_FMT, i ? " " : "", UUID_ARGS(&s->ua[i]));
}
printf("] uuid="UUID_FMT"\n", UUID_ARGS(&s->header_.uuid));
+ print_idl_row_updated_simple(s, step);
}
static void
printf("%"PRId64, l1->l2->i);
}
printf(" uuid="UUID_FMT"\n", UUID_ARGS(&l1->header_.uuid));
+ print_idl_row_updated_link1(l1, step);
}
static void
printf("%"PRId64, l2->l1->i);
}
printf(" uuid="UUID_FMT"\n", UUID_ARGS(&l2->header_.uuid));
+ print_idl_row_updated_link2(l2, step);
}
static void
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import getopt
import re
import os
import ovs.db.idl
import ovs.db.schema
from ovs.db import data
-from ovs.db import types
+import ovs.db.types
import ovs.ovsuuid
import ovs.poller
import ovs.util
def do_default_atoms():
- for type_ in types.ATOMIC_TYPES:
- if type_ == types.VoidType:
+ for type_ in ovs.db.types.ATOMIC_TYPES:
+ if type_ == ovs.db.types.VoidType:
continue
sys.stdout.write("%s: " % type_.to_string())
def do_default_data():
any_errors = False
for n_min in 0, 1:
- for key in types.ATOMIC_TYPES:
- if key == types.VoidType:
+ for key in ovs.db.types.ATOMIC_TYPES:
+ if key == ovs.db.types.VoidType:
continue
- for value in types.ATOMIC_TYPES:
- if value == types.VoidType:
+ for value in ovs.db.types.ATOMIC_TYPES:
+ if value == ovs.db.types.VoidType:
valueBase = None
else:
- valueBase = types.BaseType(value)
- type_ = types.Type(types.BaseType(key), valueBase, n_min, 1)
+ valueBase = ovs.db.types.BaseType(value)
+ type_ = ovs.db.types.Type(ovs.db.types.BaseType(key),
+ valueBase, n_min, 1)
assert type_.is_valid()
sys.stdout.write("key %s, value %s, n_min %d: "
def do_parse_atomic_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
- atomic_type = types.AtomicType.from_json(type_json)
- print ovs.json.to_string(atomic_type.to_json(), sort_keys=True)
+ atomic_type = ovs.db.types.AtomicType.from_json(type_json)
+ print(ovs.json.to_string(atomic_type.to_json(), sort_keys=True))
def do_parse_base_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
- base_type = types.BaseType.from_json(type_json)
- print ovs.json.to_string(base_type.to_json(), sort_keys=True)
+ base_type = ovs.db.types.BaseType.from_json(type_json)
+ print(ovs.json.to_string(base_type.to_json(), sort_keys=True))
def do_parse_type(type_string):
type_json = unbox_json(ovs.json.from_string(type_string))
- type_ = types.Type.from_json(type_json)
- print ovs.json.to_string(type_.to_json(), sort_keys=True)
+ type_ = ovs.db.types.Type.from_json(type_json)
+ print(ovs.json.to_string(type_.to_json(), sort_keys=True))
def do_parse_atoms(type_string, *atom_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
- base = types.BaseType.from_json(type_json)
+ base = ovs.db.types.BaseType.from_json(type_json)
for atom_string in atom_strings:
atom_json = unbox_json(ovs.json.from_string(atom_string))
try:
atom = data.Atom.from_json(base, atom_json)
- print ovs.json.to_string(atom.to_json())
- except error.Error, e:
- print e.args[0].encode("utf8")
+ print(ovs.json.to_string(atom.to_json()))
+ except error.Error as e:
+ print(e.args[0].encode("utf8"))
def do_parse_data(type_string, *data_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
- type_ = types.Type.from_json(type_json)
+ type_ = ovs.db.types.Type.from_json(type_json)
for datum_string in data_strings:
datum_json = unbox_json(ovs.json.from_string(datum_string))
datum = data.Datum.from_json(type_, datum_json)
- print ovs.json.to_string(datum.to_json())
+ print(ovs.json.to_string(datum.to_json()))
def do_sort_atoms(type_string, atom_strings):
type_json = unbox_json(ovs.json.from_string(type_string))
- base = types.BaseType.from_json(type_json)
+ base = ovs.db.types.BaseType.from_json(type_json)
atoms = [data.Atom.from_json(base, atom_json)
for atom_json in unbox_json(ovs.json.from_string(atom_strings))]
- print ovs.json.to_string([data.Atom.to_json(atom)
- for atom in sorted(atoms)])
+ print(ovs.json.to_string([data.Atom.to_json(atom)
+ for atom in sorted(atoms)]))
def do_parse_column(name, column_string):
column_json = unbox_json(ovs.json.from_string(column_string))
column = ovs.db.schema.ColumnSchema.from_json(column_json, name)
- print ovs.json.to_string(column.to_json(), sort_keys=True)
+ print(ovs.json.to_string(column.to_json(), sort_keys=True))
def do_parse_table(name, table_string, default_is_root_string='false'):
default_is_root = default_is_root_string == 'true'
table_json = unbox_json(ovs.json.from_string(table_string))
table = ovs.db.schema.TableSchema.from_json(table_json, name)
- print ovs.json.to_string(table.to_json(default_is_root), sort_keys=True)
+ print(ovs.json.to_string(table.to_json(default_is_root), sort_keys=True))
def do_parse_schema(schema_string):
schema_json = unbox_json(ovs.json.from_string(schema_string))
schema = ovs.db.schema.DbSchema.from_json(schema_json)
- print ovs.json.to_string(schema.to_json(), sort_keys=True)
+ print(ovs.json.to_string(schema.to_json(), sort_keys=True))
def print_idl(idl, step):
txn.abort()
break
elif name == "destroy":
- print "%03d: destroy" % step
+ print("%03d: destroy" % step)
sys.stdout.flush()
txn.abort()
return
def do_idl(schema_file, remote, *commands):
schema_helper = ovs.db.idl.SchemaHelper(schema_file)
if commands and commands[0].startswith("?"):
- monitor = {}
readonly = {}
for x in commands[0][1:].split("?"):
readonly = []
def usage():
- print """\
+ print("""\
%(program_name)s: test utility for Open vSwitch database Python bindings
usage: %(program_name)s [OPTIONS] COMMAND ARG...
The following options are also available:
-t, --timeout=SECS give up after SECS seconds
-h, --help display this help message\
-""" % {'program_name': ovs.util.PROGRAM_NAME}
+""" % {'program_name': ovs.util.PROGRAM_NAME})
sys.exit(0)
options, args = getopt.gnu_getopt(argv[1:], 't:h',
['timeout',
'help'])
- except getopt.GetoptError, geo:
+ except getopt.GetoptError as geo:
sys.stderr.write("%s: %s\n" % (ovs.util.PROGRAM_NAME, geo.msg))
sys.exit(1)
command_name = args[0]
args = args[1:]
- if not command_name in commands:
+ if command_name not in commands:
sys.stderr.write("%s: unknown command \"%s\" "
"(use --help for help)\n" % (ovs.util.PROGRAM_NAME,
command_name))
if __name__ == '__main__':
try:
main(sys.argv)
- except error.Error, e:
+ except error.Error as e:
sys.stderr.write("%s\n" % e)
sys.exit(1)
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import errno
import sys
if action is None:
pass
elif action == ovs.reconnect.CONNECT:
- print " should connect"
+ print(" should connect")
elif action == ovs.reconnect.DISCONNECT:
- print " should disconnect"
+ print(" should disconnect")
elif action == ovs.reconnect.PROBE:
- print " should send probe"
+ print(" should send probe")
else:
assert False
global now
timeout = r.timeout(now)
if timeout >= 0:
- print " advance %d ms" % timeout
+ print(" advance %d ms" % timeout)
now += timeout
else:
- print " no timeout"
+ print(" no timeout")
def do_set_max_tries(arg):
print(" %sconnected" % negate)
if (old.last_connected != new.last_connected or
- (new.msec_since_connect != None and
+ (new.msec_since_connect is not None and
old.msec_since_connect != new.msec_since_connect - delta) or
(old.total_connected_duration != new.total_connected_duration - delta
and not (old.total_connected_duration == 0 and
% (new.msec_since_connect, new.total_connected_duration))
if (old.last_disconnected != new.last_disconnected or
- (new.msec_since_disconnect != None and
+ (new.msec_since_disconnect is not None and
old.msec_since_disconnect != new.msec_since_disconnect - delta)):
print(" disconnected at %d ms (%d ms ago)"
% (new.last_disconnected, new.msec_since_disconnect))
r = ovs.reconnect.Reconnect(now)
r.set_name("remote")
prev = r.get_stats(now)
- print "### t=%d ###" % now
+ print("### t=%d ###" % now)
old_time = now
old_max_tries = r.get_max_tries()
while True:
if line == "":
break
- print line[:-1]
+ print(line[:-1])
if line[0] == "#":
continue
commands[command](op)
if old_time != now:
- print
- print "### t=%d ###" % now
+ print()
+ print("### t=%d ###" % now)
cur = r.get_stats(now)
diff_stats(prev, cur, now - old_time)
prev = cur
if r.get_max_tries() != old_max_tries:
old_max_tries = r.get_max_tries()
- print " %d tries left" % old_max_tries
+ print(" %d tries left" % old_max_tries)
old_time = now
import ovs.socket_util
+
def main(argv):
if len(argv) not in (2, 3):
sys.stderr.write("usage: %s SOCKETNAME1 [SOCKETNAME2]", argv[0])
os.strerror(error)))
sys.exit(1)
+
if __name__ == '__main__':
main(sys.argv)
vlog = ovs.vlog.Vlog("test-unixctl")
exiting = False
+
def unixctl_exit(conn, unused_argv, aux):
assert aux == "aux_exit"
global exiting
def unixctl_echo_error(conn, argv, aux):
- assert aux == "aux_echo_error"
+ assert aux == "aux_echo_error"
conn.reply_error(str(argv))
utilities_ovs_benchmark_SOURCES = utilities/ovs-benchmark.c
utilities_ovs_benchmark_LDADD = lib/libopenvswitch.la
+FLAKE8_PYFILES += utilities/ovs-pcap.in
+
include utilities/bugtool/automake.mk
\fIport\fR is omitted, then statistics are printed for \fIqueue\fR on
every port where it exists.
.
+.IP "\fBqueue\-get\-config \fIswitch \fR[\fIport\fR]"
+Prints to the console information about all of the queues configured
+on \fIport\fR within \fIswitch\fR. If \fIport\fR is \fBANY\fR or if
+it is omitted, prints information about queues on every port. The
+OpenFlow specification says that only physical ports have queues; in
+particular, \fBLOCAL\fR is not valid for \fIport\fR.
+.IP
+This command has limited usefulness, because ports often have no
+configured queues and because the OpenFlow protocol provides only very
+limited information about the configuration of a queue.
+.
.SS "OpenFlow 1.1+ Group Table Commands"
.
The following commands work only with switches that support OpenFlow
configuration'' message at connection setup time that requests
\fBINVALID_TTL_TO_CONTROLLER\fR, so that \fBovs\-ofctl monitor\fR can
receive ``packet-in'' messages when TTL reaches zero on \fBdec_ttl\fR action.
+Only OpenFlow 1.1 and 1.2 support \fBinvalid_ttl\fR; Open vSwitch also
+implements it for OpenFlow 1.0 as an extension.
.IP
\fBwatch:\fR[\fB\fIspec\fR...] causes \fBovs\-ofctl\fR to send a
``monitor request'' Nicira extension message to the switch at
.IP
The following flags describe the state of the tracking:
.RS
-.IP "\fB0x80: trk\fR"
-This packet is tracked, meaning that it has previously traversed the connection
-tracker. If this flag is not set, then no other flags will be set. If this flag
-is set, then the packet is tracked and other flags may also be set.
-.IP "\fB0x40: rpl\fR"
+.IP "\fB0x01: new\fR"
+This is the beginning of a new connection. This flag may only be present for
+uncommitted connections.
+.IP "\fB0x02: est\fR"
+This is part of an already existing connection. This flag may only be present
+for committed connections.
+.IP "\fB0x04: rel\fR"
+This is a connection that is related to an existing connection, for
+instance ICMP "destination unreachable" messages or FTP data connections. This
+flag may only be present for committed connections.
+.IP "\fB0x08: rpl\fR"
The flow is in the reply direction, meaning it did not initiate the
connection. This flag may only be present for committed connections.
-.IP "\fB0x20: inv\fR"
+.IP "\fB0x10: inv\fR"
The state is invalid, meaning that the connection tracker couldn't identify the
connection. This flag is a catch-all for any problems that the connection
tracker may have, for example:
.PP
- Packets are unexpected length for protocol.
.RE
-.IP "\fB0x01: new\fR"
-This is the beginning of a new connection. This flag may only be present for
-uncommitted connections.
-.IP "\fB0x02: est\fR"
-This is part of an already existing connection. This flag may only be present
-for committed connections.
-.IP "\fB0x04: rel\fR"
-This is a connection that is related to an existing connection, for
-instance ICMP "destination unreachable" messages or FTP data connections. This
-flag may only be present for committed connections.
+.IP "\fB0x20: trk\fR"
+This packet is tracked, meaning that it has previously traversed the connection
+tracker. If this flag is not set, then no other flags will be set. If this flag
+is set, then the packet is tracked and other flags may also be set.
.PP
This field was introduced in Open vSwitch 2.5.
.RE
Currently, connection tracking is only available on Linux kernels with the
nf_conntrack module loaded.
.
-.RE
-.
.IP \fBdec_ttl\fR
.IQ \fBdec_ttl(\fIid1\fR[\fB,\fIid2\fR]...\fB)\fR
Decrement TTL of IPv4 packet or hop limit of IPv6 packet. If the
" dump-group-features SWITCH print group features\n"
" dump-groups SWITCH [GROUP] print group description\n"
" dump-group-stats SWITCH [GROUP] print group statistics\n"
- " queue-get-config SWITCH PORT print queue information for port\n"
+ " queue-get-config SWITCH [PORT] print queue config for PORT\n"
" add-meter SWITCH METER add meter described by METER\n"
" mod-meter SWITCH METER modify specific METER\n"
" del-meter SWITCH METER delete METER\n"
}
static void
-fetch_switch_config(struct vconn *vconn, struct ofp_switch_config *config_)
+fetch_switch_config(struct vconn *vconn, struct ofputil_switch_config *config)
{
- struct ofp_switch_config *config;
struct ofpbuf *request;
struct ofpbuf *reply;
enum ofptype type;
run(vconn_transact(vconn, request, &reply),
"talking to %s", vconn_get_name(vconn));
- if (ofptype_pull(&type, reply) || type != OFPTYPE_GET_CONFIG_REPLY) {
+ if (ofptype_decode(&type, reply->data)
+ || type != OFPTYPE_GET_CONFIG_REPLY) {
ovs_fatal(0, "%s: bad reply to config request", vconn_get_name(vconn));
}
-
- config = ofpbuf_pull(reply, sizeof *config);
- *config_ = *config;
-
+ ofputil_decode_get_config_reply(reply->data, config);
ofpbuf_delete(reply);
}
static void
-set_switch_config(struct vconn *vconn, const struct ofp_switch_config *config)
+set_switch_config(struct vconn *vconn,
+ const struct ofputil_switch_config *config)
{
- struct ofpbuf *request;
-
- request = ofpraw_alloc(OFPRAW_OFPT_SET_CONFIG, vconn_get_version(vconn), 0);
- ofpbuf_put(request, config, sizeof *config);
-
- transact_noreply(vconn, request);
+ enum ofp_version version = vconn_get_version(vconn);
+ transact_noreply(vconn, ofputil_encode_set_config(config, version));
}
static void
}
-static bool fetch_port_by_stats(struct vconn *,
- const char *port_name, ofp_port_t port_no,
- struct ofputil_phy_port *);
-
-/* Uses OFPT_FEATURES_REQUEST to attempt to fetch information about the port
- * named 'port_name' or numbered 'port_no' into '*pp'. Returns true if
- * successful, false on failure.
- *
- * This is only appropriate for OpenFlow 1.0, 1.1, and 1.2, which include a
- * list of ports in OFPT_FEATURES_REPLY. */
static bool
-fetch_port_by_features(struct vconn *vconn,
- const char *port_name, ofp_port_t port_no,
- struct ofputil_phy_port *pp)
+str_to_ofp(const char *s, ofp_port_t *ofp_port)
{
- struct ofputil_switch_features features;
- const struct ofp_header *oh;
- struct ofpbuf *request, *reply;
- enum ofperr error;
- enum ofptype type;
- struct ofpbuf b;
- bool found = false;
+ bool ret;
+ uint32_t port_;
+
+ ret = str_to_uint(s, 10, &port_);
+ *ofp_port = u16_to_ofp(port_);
+ return ret;
+}
+
+struct port_iterator {
+ struct vconn *vconn;
+
+ enum { PI_FEATURES, PI_PORT_DESC } variant;
+ struct ofpbuf *reply;
+ ovs_be32 send_xid;
+ bool more;
+};
+
+static void
+port_iterator_fetch_port_desc(struct port_iterator *pi)
+{
+ pi->variant = PI_PORT_DESC;
+ pi->more = true;
+
+ struct ofpbuf *rq = ofputil_encode_port_desc_stats_request(
+ vconn_get_version(pi->vconn), OFPP_ANY);
+ pi->send_xid = ((struct ofp_header *) rq->data)->xid;
+ send_openflow_buffer(pi->vconn, rq);
+}
+
+static void
+port_iterator_fetch_features(struct port_iterator *pi)
+{
+ pi->variant = PI_FEATURES;
/* Fetch the switch's ofp_switch_features. */
- request = ofpraw_alloc(OFPRAW_OFPT_FEATURES_REQUEST,
- vconn_get_version(vconn), 0);
- run(vconn_transact(vconn, request, &reply),
- "talking to %s", vconn_get_name(vconn));
+ enum ofp_version version = vconn_get_version(pi->vconn);
+ struct ofpbuf *rq = ofpraw_alloc(OFPRAW_OFPT_FEATURES_REQUEST, version, 0);
+ run(vconn_transact(pi->vconn, rq, &pi->reply),
+ "talking to %s", vconn_get_name(pi->vconn));
- oh = reply->data;
- if (ofptype_decode(&type, reply->data)
+ const struct ofp_header *oh = pi->reply->data;
+ enum ofptype type;
+ if (ofptype_decode(&type, pi->reply->data)
|| type != OFPTYPE_FEATURES_REPLY) {
- ovs_fatal(0, "%s: received bad features reply", vconn_get_name(vconn));
+ ovs_fatal(0, "%s: received bad features reply",
+ vconn_get_name(pi->vconn));
}
- if (!ofputil_switch_features_has_ports(reply)) {
+ if (!ofputil_switch_features_has_ports(pi->reply)) {
/* The switch features reply does not contain a complete list of ports.
* Probably, there are more ports than will fit into a single 64 kB
* OpenFlow message. Use OFPST_PORT_DESC to get a complete list of
* ports. */
- ofpbuf_delete(reply);
- return fetch_port_by_stats(vconn, port_name, port_no, pp);
+ ofpbuf_delete(pi->reply);
+ pi->reply = NULL;
+ port_iterator_fetch_port_desc(pi);
+ return;
}
- error = ofputil_decode_switch_features(oh, &features, &b);
+ struct ofputil_switch_features features;
+ enum ofperr error = ofputil_decode_switch_features(oh, &features,
+ pi->reply);
if (error) {
ovs_fatal(0, "%s: failed to decode features reply (%s)",
- vconn_get_name(vconn), ofperr_to_string(error));
+ vconn_get_name(pi->vconn), ofperr_to_string(error));
}
+}
- while (!ofputil_pull_phy_port(oh->version, &b, pp)) {
- if (port_no != OFPP_NONE
- ? port_no == pp->port_no
- : !strcmp(pp->name, port_name)) {
- found = true;
- break;
- }
+/* Initializes 'pi' to prepare for iterating through all of the ports on the
+ * OpenFlow switch to which 'vconn' is connected.
+ *
+ * During iteration, the client should not make other use of 'vconn', because
+ * that can cause other messages to be interleaved with the replies used by the
+ * iterator and thus some ports may be missed or a hang can occur. */
+static void
+port_iterator_init(struct port_iterator *pi, struct vconn *vconn)
+{
+ memset(pi, 0, sizeof *pi);
+ pi->vconn = vconn;
+ if (vconn_get_version(vconn) < OFP13_VERSION) {
+ port_iterator_fetch_features(pi);
+ } else {
+ port_iterator_fetch_port_desc(pi);
}
- ofpbuf_delete(reply);
- return found;
}
-/* Uses a OFPST_PORT_DESC request to attempt to fetch information about the
- * port named 'port_name' or numbered 'port_no' into '*pp'. Returns true if
- * successful, false on failure.
- *
- * This is most appropriate for OpenFlow 1.3 and later. Open vSwitch 1.7 and
- * later also implements OFPST_PORT_DESC, as an extension, for OpenFlow 1.0,
- * 1.1, and 1.2, so this can be used as a fallback in those versions when there
- * are too many ports than fit in an OFPT_FEATURES_REPLY. */
+/* Obtains the next port from 'pi'. On success, initializes '*pp' with the
+ * port's details and returns true, otherwise (if all the ports have already
+ * been seen), returns false. */
static bool
-fetch_port_by_stats(struct vconn *vconn,
- const char *port_name, ofp_port_t port_no,
- struct ofputil_phy_port *pp)
+port_iterator_next(struct port_iterator *pi, struct ofputil_phy_port *pp)
{
- struct ofpbuf *request;
- ovs_be32 send_xid;
- bool done = false;
- bool found = false;
-
- request = ofputil_encode_port_desc_stats_request(vconn_get_version(vconn),
- port_no);
- send_xid = ((struct ofp_header *) request->data)->xid;
-
- send_openflow_buffer(vconn, request);
- while (!done) {
- ovs_be32 recv_xid;
- struct ofpbuf *reply;
-
- run(vconn_recv_block(vconn, &reply), "OpenFlow packet receive failed");
- recv_xid = ((struct ofp_header *) reply->data)->xid;
- if (send_xid == recv_xid) {
- struct ofp_header *oh = reply->data;
- enum ofptype type;
- struct ofpbuf b;
- uint16_t flags;
-
- ofpbuf_use_const(&b, oh, ntohs(oh->length));
- if (ofptype_pull(&type, &b)
- || type != OFPTYPE_PORT_DESC_STATS_REPLY) {
+ for (;;) {
+ if (pi->reply) {
+ int retval = ofputil_pull_phy_port(vconn_get_version(pi->vconn),
+ pi->reply, pp);
+ if (!retval) {
+ return true;
+ } else if (retval != EOF) {
ovs_fatal(0, "received bad reply: %s",
- ofp_to_string(reply->data, reply->size,
+ ofp_to_string(pi->reply->data, pi->reply->size,
verbosity + 1));
}
+ }
- flags = ofpmp_flags(oh);
- done = !(flags & OFPSF_REPLY_MORE);
-
- if (found) {
- /* We've already found the port, but we need to drain
- * the queue of any other replies for this request. */
- continue;
- }
+ if (pi->variant == PI_FEATURES || !pi->more) {
+ return false;
+ }
- while (!ofputil_pull_phy_port(oh->version, &b, pp)) {
- if (port_no != OFPP_NONE ? port_no == pp->port_no
- : !strcmp(pp->name, port_name)) {
- found = true;
- break;
- }
- }
- } else {
- VLOG_DBG("received reply with xid %08"PRIx32" "
- "!= expected %08"PRIx32, recv_xid, send_xid);
+ ovs_be32 recv_xid;
+ do {
+ ofpbuf_delete(pi->reply);
+ run(vconn_recv_block(pi->vconn, &pi->reply),
+ "OpenFlow receive failed");
+ recv_xid = ((struct ofp_header *) pi->reply->data)->xid;
+ } while (pi->send_xid != recv_xid);
+
+ struct ofp_header *oh = pi->reply->data;
+ enum ofptype type;
+ if (ofptype_pull(&type, pi->reply)
+ || type != OFPTYPE_PORT_DESC_STATS_REPLY) {
+ ovs_fatal(0, "received bad reply: %s",
+ ofp_to_string(pi->reply->data, pi->reply->size,
+ verbosity + 1));
}
- ofpbuf_delete(reply);
- }
- return found;
+ pi->more = (ofpmp_flags(oh) & OFPSF_REPLY_MORE) != 0;
+ }
}
-static bool
-str_to_ofp(const char *s, ofp_port_t *ofp_port)
+/* Destroys iterator 'pi'. */
+static void
+port_iterator_destroy(struct port_iterator *pi)
{
- bool ret;
- uint32_t port_;
+ if (pi) {
+ while (pi->variant == PI_PORT_DESC && pi->more) {
+ /* Drain vconn's queue of any other replies for this request. */
+ struct ofputil_phy_port pp;
+ port_iterator_next(pi, &pp);
+ }
- ret = str_to_uint(s, 10, &port_);
- *ofp_port = u16_to_ofp(port_);
- return ret;
+ ofpbuf_delete(pi->reply);
+ }
}
/* Opens a connection to 'vconn_name', fetches the port structure for
{
struct vconn *vconn;
ofp_port_t port_no;
- bool found;
+ bool found = false;
/* Try to interpret the argument as a port number. */
if (!str_to_ofp(port_name, &port_no)) {
* OFPT_FEATURES_REPLY message. OpenFlow 1.3 and later versions put it
* into the OFPST_PORT_DESC reply. Try it the correct way. */
open_vconn(vconn_name, &vconn);
- found = (vconn_get_version(vconn) < OFP13_VERSION
- ? fetch_port_by_features(vconn, port_name, port_no, pp)
- : fetch_port_by_stats(vconn, port_name, port_no, pp));
+ struct port_iterator pi;
+ for (port_iterator_init(&pi, vconn); port_iterator_next(&pi, pp); ) {
+ if (port_no != OFPP_NONE
+ ? port_no == pp->port_no
+ : !strcmp(pp->name, port_name)) {
+ found = true;
+ break;
+ }
+ }
+ port_iterator_destroy(&pi);
vconn_close(vconn);
if (!found) {
ofctl_queue_get_config(struct ovs_cmdl_context *ctx)
{
const char *vconn_name = ctx->argv[1];
- const char *port_name = ctx->argv[2];
- enum ofputil_protocol protocol;
- enum ofp_version version;
- struct ofpbuf *request;
- struct vconn *vconn;
- ofp_port_t port;
-
- port = str_to_port_no(vconn_name, port_name);
+ const char *port_name = ctx->argc >= 3 ? ctx->argv[2] : NULL;
+ ofp_port_t port = (port_name
+ ? str_to_port_no(vconn_name, port_name)
+ : OFPP_ANY);
- protocol = open_vconn(vconn_name, &vconn);
- version = ofputil_protocol_to_ofp_version(protocol);
- request = ofputil_encode_queue_get_config_request(version, port);
- dump_transaction(vconn, request);
+ struct vconn *vconn;
+ enum ofputil_protocol protocol = open_vconn(vconn_name, &vconn);
+ enum ofp_version version = ofputil_protocol_to_ofp_version(protocol);
+ if (port == OFPP_ANY && version == OFP10_VERSION) {
+ /* The user requested all queues on all ports. OpenFlow 1.0 only
+ * supports getting queues for an individual port, so to implement the
+ * user's request we have to get a list of all the ports.
+ *
+ * We use a second vconn to avoid having to accumulate a list of all of
+ * the ports. */
+ struct vconn *vconn2;
+ enum ofputil_protocol protocol2 = open_vconn(vconn_name, &vconn2);
+ enum ofp_version version2 = ofputil_protocol_to_ofp_version(protocol2);
+
+ struct port_iterator pi;
+ struct ofputil_phy_port pp;
+ for (port_iterator_init(&pi, vconn); port_iterator_next(&pi, &pp); ) {
+ if (ofp_to_u16(pp.port_no) < ofp_to_u16(OFPP_MAX)) {
+ dump_transaction(vconn2,
+ ofputil_encode_queue_get_config_request(
+ version2, pp.port_no));
+ }
+ }
+ port_iterator_destroy(&pi);
+ vconn_close(vconn2);
+ } else {
+ dump_transaction(vconn, ofputil_encode_queue_get_config_request(
+ version, port));
+ }
vconn_close(vconn);
}
static int
monitor_set_invalid_ttl_to_controller(struct vconn *vconn)
{
- struct ofp_switch_config config;
- enum ofp_config_flags flags;
+ struct ofputil_switch_config config;
fetch_switch_config(vconn, &config);
- flags = ntohs(config.flags);
- if (!(flags & OFPC_INVALID_TTL_TO_CONTROLLER)) {
- /* Set the invalid ttl config. */
- flags |= OFPC_INVALID_TTL_TO_CONTROLLER;
-
- config.flags = htons(flags);
+ if (!config.invalid_ttl_to_controller) {
+ config.invalid_ttl_to_controller = 1;
set_switch_config(vconn, &config);
/* Then retrieve the configuration to see if it really took. OpenFlow
- * doesn't define error reporting for bad modes, so this is all we can
- * do. */
+ * has ill-defined error reporting for bad flags, so this is about the
+ * best we can do. */
fetch_switch_config(vconn, &config);
- flags = ntohs(config.flags);
- if (!(flags & OFPC_INVALID_TTL_TO_CONTROLLER)) {
+ if (!config.invalid_ttl_to_controller) {
ovs_fatal(0, "setting invalid_ttl_to_controller failed (this "
- "switch probably doesn't support mode)");
- return -EOPNOTSUPP;
+ "switch probably doesn't support this flag)");
}
}
return 0;
int i;
enum ofputil_protocol usable_protocols;
+ /* If the user wants the invalid_ttl_to_controller feature, limit the
+ * OpenFlow versions to those that support that feature. (Support in
+ * OpenFlow 1.0 is an Open vSwitch extension.) */
+ for (i = 2; i < ctx->argc; i++) {
+ if (!strcmp(ctx->argv[i], "invalid_ttl")) {
+ uint32_t usable_versions = ((1u << OFP10_VERSION) |
+ (1u << OFP11_VERSION) |
+ (1u << OFP12_VERSION));
+ uint32_t allowed_versions = get_allowed_ofp_versions();
+ if (!(allowed_versions & usable_versions)) {
+ struct ds versions = DS_EMPTY_INITIALIZER;
+ ofputil_format_version_bitmap_names(&versions,
+ usable_versions);
+ ovs_fatal(0, "invalid_ttl requires one of the OpenFlow "
+ "versions %s but none is enabled (use -O)",
+ ds_cstr(&versions));
+ }
+ mask_allowed_ofp_versions(usable_versions);
+ break;
+ }
+ }
+
open_vconn(ctx->argv[1], &vconn);
for (i = 2; i < ctx->argc; i++) {
const char *arg = ctx->argv[i];
if (isdigit((unsigned char) *arg)) {
- struct ofp_switch_config config;
+ struct ofputil_switch_config config;
fetch_switch_config(vconn, &config);
- config.miss_send_len = htons(atoi(arg));
+ config.miss_send_len = atoi(arg);
set_switch_config(vconn, &config);
} else if (!strcmp(arg, "invalid_ttl")) {
monitor_set_invalid_ttl_to_controller(vconn);
static void
ofctl_get_frags(struct ovs_cmdl_context *ctx)
{
- struct ofp_switch_config config;
+ struct ofputil_switch_config config;
struct vconn *vconn;
open_vconn(ctx->argv[1], &vconn);
fetch_switch_config(vconn, &config);
- puts(ofputil_frag_handling_to_string(ntohs(config.flags)));
+ puts(ofputil_frag_handling_to_string(config.frag));
vconn_close(vconn);
}
static void
ofctl_set_frags(struct ovs_cmdl_context *ctx)
{
- struct ofp_switch_config config;
- enum ofp_config_flags mode;
+ struct ofputil_switch_config config;
+ enum ofputil_frag_handling frag;
struct vconn *vconn;
- ovs_be16 flags;
- if (!ofputil_frag_handling_from_string(ctx->argv[2], &mode)) {
+ if (!ofputil_frag_handling_from_string(ctx->argv[2], &frag)) {
ovs_fatal(0, "%s: unknown fragment handling mode", ctx->argv[2]);
}
open_vconn(ctx->argv[1], &vconn);
fetch_switch_config(vconn, &config);
- flags = htons(mode) | (config.flags & htons(~OFPC_FRAG_MASK));
- if (flags != config.flags) {
+ if (frag != config.frag) {
/* Set the configuration. */
- config.flags = flags;
+ config.frag = frag;
set_switch_config(vconn, &config);
/* Then retrieve the configuration to see if it really took. OpenFlow
- * doesn't define error reporting for bad modes, so this is all we can
- * do. */
+ * has ill-defined error reporting for bad flags, so this is about the
+ * best we can do. */
fetch_switch_config(vconn, &config);
- if (flags != config.flags) {
+ if (frag != config.frag) {
ovs_fatal(0, "%s: setting fragment handling mode failed (this "
"switch probably doesn't support mode \"%s\")",
- ctx->argv[1], ofputil_frag_handling_to_string(mode));
+ ctx->argv[1], ofputil_frag_handling_to_string(frag));
}
}
vconn_close(vconn);
1, 2, ofctl_dump_aggregate },
{ "queue-stats", "switch [port [queue]]",
1, 3, ofctl_queue_stats },
- { "queue-get-config", "switch port",
- 2, 2, ofctl_queue_get_config },
+ { "queue-get-config", "switch [port]",
+ 1, 2, ofctl_queue_get_config },
{ "add-flow", "switch flow",
2, 2, ofctl_add_flow },
{ "add-flows", "switch file",
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+
import binascii
import getopt
import struct
import sys
+
class PcapException(Exception):
pass
+
class PcapReader(object):
def __init__(self, file_name):
self.file = open(file_name, "rb")
return packet
argv0 = sys.argv[0]
+
def usage():
- print """\
+ print("""\
%(argv0)s: print pcap file packet data as hex
usage: %(argv0)s FILE
where FILE is a PCAP file.
The following options are also available:
-h, --help display this help message
-V, --version display version information\
-""" % {'argv0': argv0}
+""" % {'argv0': argv0})
sys.exit(0)
if __name__ == "__main__":
try:
options, args = getopt.gnu_getopt(sys.argv[1:], 'hV',
['help', 'version'])
- except getopt.GetoptException, geo:
+ except getopt.GetoptException as geo:
sys.stderr.write("%s: %s\n" % (argv0, geo.msg))
sys.exit(1)
if key in ['-h', '--help']:
usage()
elif key in ['-V', '--version']:
- print "ovs-pcap (Open vSwitch) @VERSION@"
+ print("ovs-pcap (Open vSwitch) @VERSION@")
else:
sys.exit(0)
if packet is None:
break
- print binascii.hexlify(packet)
+ print(binascii.hexlify(packet))
- except PcapException, e:
+ except PcapException as e:
sys.stderr.write("%s: %s\n" % (argv0, e))
sys.exit(1)
docs += vtep/README.ovs-vtep.md
EXTRA_DIST += vtep/ovs-vtep
+FLAKE8_PYFILES += vtep/ovs-vtep
+
# VTEP schema and IDL
EXTRA_DIST += vtep/vtep.ovsschema
pkgdata_DATA += vtep/vtep.ovsschema
bfd_bridge = "vtep_bfd"
bfd_ref = {}
+
def call_prog(prog, args_list):
cmd = [prog, "-vconsole:off"] + args_list
output = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
- if len(output) == 0 or output[0] == None:
+ if len(output) == 0 or output[0] is None:
output = ""
else:
output = output[0].strip()
return output
+
def ovs_vsctl(args):
return call_prog("ovs-vsctl", shlex.split(args))
+
def ovs_ofctl(args):
return call_prog("ovs-ofctl", shlex.split(args))
+
def vtep_ctl(args):
return call_prog("vtep-ctl", shlex.split(args))
column = vtep_ctl("--columns=tunnel_key find logical_switch "
"name=%s" % self.name)
tunnel_key = column.partition(":")[2].strip()
- if (tunnel_key and type(eval(tunnel_key)) == types.IntType):
+ if tunnel_key and isinstance(eval(tunnel_key), types.IntType):
self.tunnel_key = tunnel_key
vlog.info("using tunnel key %s in %s"
% (self.tunnel_key, self.name))
def del_lbinding(self, lbinding):
vlog.info("removing %s binding from %s" % (lbinding, self.name))
port_no = self.ports[lbinding]
- ovs_ofctl("del-flows %s in_port=%s" % (self.short_name, port_no));
+ ovs_ofctl("del-flows %s in_port=%s" % (self.short_name, port_no))
del self.ports[lbinding]
self.update_flood()
self.local_macs = macs
def add_remote_mac(self, mac, tunnel):
- port_no = self.tunnels.get(tunnel, (0,""))[0]
+ port_no = self.tunnels.get(tunnel, (0, ""))[0]
if not port_no:
return
self.add_remote_mac(mac, remote_macs[mac])
for mac in self.remote_macs.keys():
- if not remote_macs.has_key(mac):
+ if mac not in remote_macs:
self.del_remote_mac(mac)
self.remote_macs = remote_macs
# vtep's logical_binding_stats. Since we are using the 'interface' from
# the logical switch to collect stats, packets transmitted from it
# is received in the physical switch and vice versa.
- stats_map = {'tx_packets':'packets_to_local',
- 'tx_bytes':'bytes_to_local',
- 'rx_packets':'packets_from_local',
- 'rx_bytes':'bytes_from_local'}
+ stats_map = {'tx_packets': 'packets_to_local',
+ 'tx_bytes': 'bytes_to_local',
+ 'rx_packets': 'packets_from_local',
+ 'rx_bytes': 'bytes_from_local'}
# Go through all the logical switch's interfaces that end with "-l"
# and copy the statistics to logical_binding_stats.
self.update_remote_macs()
self.update_stats()
+
def get_vtep_tunnel(remote_ip):
# Get the physical_locator record for the local tunnel end point.
column = vtep_ctl("--columns=_uuid find physical_locator "
return (local, remote, tunnel)
+
def create_vtep_tunnel(remote_ip):
local, remote, tunnel = get_vtep_tunnel(remote_ip)
if not local or not remote:
% remote_ip)
tunnel = vtep_ctl("add physical_switch %s tunnels @tun -- "
"--id=@tun create Tunnel local=%s remote=%s"
- %(ps_name, local, remote))
+ % (ps_name, local, remote))
return tunnel
+
def destroy_vtep_tunnel(remote_ip):
local, remote, tunnel = get_vtep_tunnel(remote_ip)
if tunnel:
"-- --if-exists destroy tunnel %s"
% (ps_name, tunnel, tunnel))
+
def add_bfd(remote_ip):
# The VTEP emulator creates one OVS bridge for every logical switch.
# Multiple logical switches can have multiple OVS tunnels to the
# conditions, pass the responsibility of creating a 'tunnel' record
# to run_bfd() which runs more often.
+
def del_bfd(remote_ip):
if remote_ip in bfd_ref:
if bfd_ref[remote_ip] == 1:
else:
bfd_ref[remote_ip] -= 1
+
def run_bfd():
bfd_ports = ovs_vsctl("list-ports %s" % bfd_bridge).split()
for port in bfd_ports:
if not tunnel:
continue
- bfd_params_default = {'bfd_params:enable' : 'false',
- 'bfd_params:min_rx' : 1000,
- 'bfd_params:min_tx' : 100,
- 'bfd_params:decay_min_rx' : 0,
- 'bfd_params:cpath_down' : 'false',
- 'bfd_params:check_tnl_key' : 'false'}
+ bfd_params_default = {'bfd_params:enable': 'false',
+ 'bfd_params:min_rx': 1000,
+ 'bfd_params:min_tx': 100,
+ 'bfd_params:decay_min_rx': 0,
+ 'bfd_params:cpath_down': 'false',
+ 'bfd_params:check_tnl_key': 'false'}
bfd_params_values = {}
for key, default in bfd_params_default.iteritems():
bfd_params_values[key] = column
for key, value in bfd_params_values.iteritems():
- new_key = key.replace('_params','')
+ new_key = key.replace('_params', '')
ovs_vsctl("set interface %s %s=%s" % (port, new_key, value))
bfd_status = ['bfd_status:state', 'bfd_status:forwarding',
for key in bfd_status:
value = ovs_vsctl("--if-exists get interface %s %s" % (port, key))
if value:
- vtep_ctl("set tunnel %s %s=%s" %(tunnel, key, value))
+ vtep_ctl("set tunnel %s %s=%s" % (tunnel, key, value))
else:
new_key = key.replace('bfd_status:', '')
vtep_ctl("remove tunnel %s bfd_status %s" % (tunnel, new_key))
% (tunnel, bfd_params_values['bfd_params:enable']))
# Add the defaults as described in VTEP schema to make it explicit.
- bfd_lconf_default = {'bfd_config_local:bfd_dst_ip' : '169.254.1.0',
- 'bfd_config_local:bfd_dst_mac' :
+ bfd_lconf_default = {'bfd_config_local:bfd_dst_ip': '169.254.1.0',
+ 'bfd_config_local:bfd_dst_mac':
'00:23:20:00:00:01'}
for key, value in bfd_lconf_default.iteritems():
- vtep_ctl("set tunnel %s %s=%s" %(tunnel, key, value))
+ vtep_ctl("set tunnel %s %s=%s" % (tunnel, key, value))
# bfd_config_remote options from VTEP DB should be populated to
# corresponding OVS DB values.
bfd_lconf_default['bfd_config_local:bfd_dst_mac'],
bfd_dst_mac))
+
def add_binding(binding, ls):
vlog.info("adding binding %s" % binding)
vlan, pp_name = binding.split("-", 1)
- pbinding = binding+"-p"
- lbinding = binding+"-l"
+ pbinding = binding + "-p"
+ lbinding = binding + "-l"
# Create a patch port that connects the VLAN+port to the lswitch.
# Do them as two separate calls so if one side already exists, the
# Create a logical_bindings_stats record.
if not vlan_:
vlan_ = "0"
- vtep_ctl("set physical_port %s vlan_stats:%s=@stats --\
- --id=@stats create logical_binding_stats packets_from_local=0"\
- % (pp_name, vlan_))
+ vtep_ctl("set physical_port %s vlan_stats:%s=@stats -- "
+ "--id=@stats create logical_binding_stats packets_from_local=0"
+ % (pp_name, vlan_))
ls.add_lbinding(lbinding)
Bindings[binding] = ls.name
+
def del_binding(binding, ls):
vlog.info("removing binding %s" % binding)
vlan, pp_name = binding.split("-", 1)
- pbinding = binding+"-p"
- lbinding = binding+"-l"
+ pbinding = binding + "-p"
+ lbinding = binding + "-l"
port_no = ovs_vsctl("get Interface %s ofport" % pp_name)
patch_no = ovs_vsctl("get Interface %s ofport" % pbinding)
del Bindings[binding]
+
def handle_physical():
# Gather physical ports except the patch ports we created
ovs_ports = ovs_vsctl("list-ports %s" % ps_name).split()
ls = Lswitches[ls_name]
new_bindings.add(binding)
- if Bindings.has_key(binding):
+ if binding in Bindings:
if Bindings[binding] == ls_name:
continue
else:
add_binding(binding, ls)
-
dead_bindings = set(Bindings.keys()).difference(new_bindings)
for binding in dead_bindings:
ls_name = Bindings[binding]
vtep_ctl("clear-local-macs %s" % Lswitches[ls_name].name)
del Lswitches[ls_name]
+
def setup():
br_list = ovs_vsctl("list-br").split()
if (ps_name not in br_list):
for port in bfd_ports:
remote_ip = ovs_vsctl("get interface %s options:remote_ip"
% port)
- tunnel = destroy_vtep_tunnel(remote_ip)
+ destroy_vtep_tunnel(remote_ip)
ovs_vsctl("del-br %s" % br)
xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync \
xenserver/usr_share_openvswitch_scripts_sysconfig.template
+FLAKE8_PYFILES += \
+ xenserver/usr_share_openvswitch_scripts_ovs-xapi-sync
+
$(srcdir)/xenserver/openvswitch-xen.spec: xenserver/openvswitch-xen.spec.in $(top_builddir)/config.status
$(AM_V_GEN)($(ro_shell) && sed -e 's,[@]VERSION[@],$(VERSION),g') \
< $(srcdir)/xenserver/$(@F).in > $(@F).tmp || exit 1; \
import XenAPI
import ovs.dirs
-from ovs.db import error
-from ovs.db import types
import ovs.daemon
import ovs.db.idl
import ovs.unixctl
try:
session = XenAPI.xapi_local()
session.xenapi.login_with_password("", "")
- except XenAPI.Failure, e:
+ except XenAPI.Failure as e:
session = None
vlog.warn("Couldn't login to XAPI (%s)" % e)
return False
" XAPI session could not be initialized" % br_name)
return None
- recs = session.xenapi.network.get_all_records_where('field "bridge"="%s"' % br_name)
+ recs = session.xenapi.network.get_all_records_where(
+ 'field "bridge"="%s"' % br_name)
if len(recs) > 0:
return recs.values()[0]
return None
+
# There are possibilities when multiple xs-network-uuids are set for a bridge.
# In cases like that, we should choose the bridge-id associated with the bridge
# name.
xapi_down = True
return default
+
# By default, the "bridge-id" external id in the Bridge table is the
# same as "xs-network-uuids". This may be overridden by defining a
# "nicira-bridge-id" key in the "other_config" field of the network
while True:
unixctl_server.run()
if exiting:
- break;
+ break
idl.run()
if not xapi_down and not flush_cache and seqno == idl.change_seqno: