[ovs-dev] [PATCH v2 ovn 5/5] Remove python directory
nusiddiq at redhat.com
Mon Aug 19 18:15:49 UTC 2019
From: Numan Siddique <nusiddiq at redhat.com>
The python/ directory belongs to the Open vSwitch repo.
This patch uses the Python utilities required for building OVN from
the configured OVS source directory and deletes the python/ directory.
Signed-off-by: Numan Siddique <nusiddiq at redhat.com>
---
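Note (not part of the commit message): after this change, run_python
resolves the "ovs" package from the configured OVS source tree rather
than a local copy. A minimal sketch of the equivalent lookup in Python,
assuming OVS_SRCDIR points at an Open vSwitch source tree (the variable
OVN's build configures):

    # Hedged sketch: emulate what run_python's PYTHONPATH change does.
    # OVS_SRCDIR here is an assumption standing in for the build's
    # configured OVS source directory.
    import os
    import sys

    ovs_srcdir = os.environ.get("OVS_SRCDIR", "../ovs")
    sys.path.insert(0, os.path.join(ovs_srcdir, "python"))

    import ovs.json  # now imported from the OVS tree, not ovn/python/
    print(ovs.json.to_string({"ovn": True}))
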
Makefile.am | 5 +-
python/.gitignore | 2 -
python/README.rst | 1 -
python/automake.mk | 123 -
python/build/__init__.py | 0
python/build/nroff.py | 398 ---
python/build/soutil.py | 56 -
python/ovs/.gitignore | 1 -
python/ovs/__init__.py | 1 -
python/ovs/_json.c | 269 --
python/ovs/compat/__init__.py | 0
python/ovs/compat/sortedcontainers/LICENSE | 13 -
.../ovs/compat/sortedcontainers/__init__.py | 52 -
.../ovs/compat/sortedcontainers/sorteddict.py | 741 -----
.../ovs/compat/sortedcontainers/sortedlist.py | 2508 -----------------
.../ovs/compat/sortedcontainers/sortedset.py | 327 ---
python/ovs/daemon.py | 652 -----
python/ovs/db/__init__.py | 1 -
python/ovs/db/custom_index.py | 154 -
python/ovs/db/data.py | 585 ----
python/ovs/db/error.py | 34 -
python/ovs/db/idl.py | 2030 -------------
python/ovs/db/parser.py | 118 -
python/ovs/db/schema.py | 304 --
python/ovs/db/types.py | 647 -----
python/ovs/dirs.py | 31 -
python/ovs/dirs.py.template | 31 -
python/ovs/fatal_signal.py | 183 --
python/ovs/fcntl_win.py | 46 -
python/ovs/json.py | 531 ----
python/ovs/jsonrpc.py | 616 ----
python/ovs/ovsuuid.py | 70 -
python/ovs/poller.py | 290 --
python/ovs/process.py | 41 -
python/ovs/reconnect.py | 608 ----
python/ovs/socket_util.py | 335 ---
python/ovs/stream.py | 831 ------
python/ovs/timeval.py | 81 -
python/ovs/unixctl/__init__.py | 91 -
python/ovs/unixctl/client.py | 68 -
python/ovs/unixctl/server.py | 260 --
python/ovs/util.py | 95 -
python/ovs/vlog.py | 475 ----
python/ovs/winutils.py | 266 --
python/ovstest/__init__.py | 1 -
python/ovstest/args.py | 283 --
python/ovstest/rpcserver.py | 383 ---
python/ovstest/tcp.py | 120 -
python/ovstest/tests.py | 250 --
python/ovstest/udp.py | 85 -
python/ovstest/util.py | 253 --
python/ovstest/vswitch.py | 107 -
python/setup.py | 102 -
tests/ovn-controller-vtep.at | 2 +
54 files changed, 4 insertions(+), 15553 deletions(-)
delete mode 100644 python/.gitignore
delete mode 100644 python/README.rst
delete mode 100644 python/automake.mk
delete mode 100644 python/build/__init__.py
delete mode 100644 python/build/nroff.py
delete mode 100755 python/build/soutil.py
delete mode 100644 python/ovs/.gitignore
delete mode 100644 python/ovs/__init__.py
delete mode 100644 python/ovs/_json.c
delete mode 100644 python/ovs/compat/__init__.py
delete mode 100644 python/ovs/compat/sortedcontainers/LICENSE
delete mode 100644 python/ovs/compat/sortedcontainers/__init__.py
delete mode 100644 python/ovs/compat/sortedcontainers/sorteddict.py
delete mode 100644 python/ovs/compat/sortedcontainers/sortedlist.py
delete mode 100644 python/ovs/compat/sortedcontainers/sortedset.py
delete mode 100644 python/ovs/daemon.py
delete mode 100644 python/ovs/db/__init__.py
delete mode 100644 python/ovs/db/custom_index.py
delete mode 100644 python/ovs/db/data.py
delete mode 100644 python/ovs/db/error.py
delete mode 100644 python/ovs/db/idl.py
delete mode 100644 python/ovs/db/parser.py
delete mode 100644 python/ovs/db/schema.py
delete mode 100644 python/ovs/db/types.py
delete mode 100644 python/ovs/dirs.py
delete mode 100644 python/ovs/dirs.py.template
delete mode 100644 python/ovs/fatal_signal.py
delete mode 100644 python/ovs/fcntl_win.py
delete mode 100644 python/ovs/json.py
delete mode 100644 python/ovs/jsonrpc.py
delete mode 100644 python/ovs/ovsuuid.py
delete mode 100644 python/ovs/poller.py
delete mode 100644 python/ovs/process.py
delete mode 100644 python/ovs/reconnect.py
delete mode 100644 python/ovs/socket_util.py
delete mode 100644 python/ovs/stream.py
delete mode 100644 python/ovs/timeval.py
delete mode 100644 python/ovs/unixctl/__init__.py
delete mode 100644 python/ovs/unixctl/client.py
delete mode 100644 python/ovs/unixctl/server.py
delete mode 100644 python/ovs/util.py
delete mode 100644 python/ovs/vlog.py
delete mode 100644 python/ovs/winutils.py
delete mode 100644 python/ovstest/__init__.py
delete mode 100644 python/ovstest/args.py
delete mode 100644 python/ovstest/rpcserver.py
delete mode 100644 python/ovstest/tcp.py
delete mode 100644 python/ovstest/tests.py
delete mode 100644 python/ovstest/udp.py
delete mode 100644 python/ovstest/util.py
delete mode 100644 python/ovstest/vswitch.py
delete mode 100644 python/setup.py
diff --git a/Makefile.am b/Makefile.am
index f3df733a1..559cb1689 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -77,7 +77,7 @@ endif
# foo/__init__.py into an (older) version with plain foo.py, since
# foo/__init__.pyc will cause Python to ignore foo.py.
run_python = \
- PYTHONPATH=$(top_srcdir)/python$(psep)$$PYTHONPATH \
+ PYTHONPATH=$(OVS_SRCDIR)/python$(psep)$$PYTHONPATH \
PYTHONDONTWRITEBYTECODE=yes $(PYTHON)
ALL_LOCAL =
@@ -430,7 +430,7 @@ endif
CLEANFILES += flake8-check
include $(srcdir)/manpages.mk
-$(srcdir)/manpages.mk: $(MAN_ROOTS) build-aux/sodepends.py python/build/soutil.py
+$(srcdir)/manpages.mk: $(MAN_ROOTS) build-aux/sodepends.py $(OVS_SRCDIR)/python/build/soutil.py
@PYTHONPATH=$$PYTHONPATH$(psep)$(srcdir)/python $(PYTHON) $(srcdir)/build-aux/sodepends.py -I. -I$(srcdir) -I$(OVS_MANDIR) $(MAN_ROOTS) >$(@F).tmp
@if cmp -s $(@F).tmp $@; then \
touch $@; \
@@ -516,7 +516,6 @@ include lib/ovsdb_automake.mk
include ipsec/automake.mk
include rhel/automake.mk
include xenserver/automake.mk
-include python/automake.mk
include tutorial/automake.mk
include selinux/automake.mk
include controller/automake.mk
diff --git a/python/.gitignore b/python/.gitignore
deleted file mode 100644
index 60ace6f05..000000000
--- a/python/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-dist/
-*.egg-info
diff --git a/python/README.rst b/python/README.rst
deleted file mode 100644
index 4f4742c53..000000000
--- a/python/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-Python library for working with Open vSwitch
diff --git a/python/automake.mk b/python/automake.mk
deleted file mode 100644
index 5a1e1da8a..000000000
--- a/python/automake.mk
+++ /dev/null
@@ -1,123 +0,0 @@
-ovstest_pyfiles = \
- python/ovstest/__init__.py \
- python/ovstest/args.py \
- python/ovstest/rpcserver.py \
- python/ovstest/tcp.py \
- python/ovstest/tests.py \
- python/ovstest/udp.py \
- python/ovstest/util.py \
- python/ovstest/vswitch.py
-
-ovs_pyfiles = \
- python/ovs/__init__.py \
- python/ovs/compat/__init__.py \
- python/ovs/compat/sortedcontainers/__init__.py \
- python/ovs/compat/sortedcontainers/sortedlist.py \
- python/ovs/compat/sortedcontainers/sorteddict.py \
- python/ovs/compat/sortedcontainers/sortedset.py \
- python/ovs/daemon.py \
- python/ovs/fcntl_win.py \
- python/ovs/db/__init__.py \
- python/ovs/db/custom_index.py \
- python/ovs/db/data.py \
- python/ovs/db/error.py \
- python/ovs/db/idl.py \
- python/ovs/db/parser.py \
- python/ovs/db/schema.py \
- python/ovs/db/types.py \
- python/ovs/fatal_signal.py \
- python/ovs/json.py \
- python/ovs/jsonrpc.py \
- python/ovs/ovsuuid.py \
- python/ovs/poller.py \
- python/ovs/process.py \
- python/ovs/reconnect.py \
- python/ovs/socket_util.py \
- python/ovs/stream.py \
- python/ovs/timeval.py \
- python/ovs/unixctl/__init__.py \
- python/ovs/unixctl/client.py \
- python/ovs/unixctl/server.py \
- python/ovs/util.py \
- python/ovs/version.py \
- python/ovs/vlog.py \
- python/ovs/winutils.py
-# These python files are used at build time but not runtime,
-# so they are not installed.
-EXTRA_DIST += \
- python/build/__init__.py \
- python/build/nroff.py \
- python/build/soutil.py
-
-# PyPI support.
-EXTRA_DIST += \
- python/ovs/compat/sortedcontainers/LICENSE \
- python/README.rst \
- python/setup.py
-
-# C extension support.
-EXTRA_DIST += python/ovs/_json.c
-
-PYFILES = $(ovs_pyfiles) python/ovs/dirs.py $(ovstest_pyfiles)
-EXTRA_DIST += $(PYFILES)
-PYCOV_CLEAN_FILES += $(PYFILES:.py=.py,cover)
-
-FLAKE8_PYFILES += \
- $(filter-out python/ovs/compat/% python/ovs/dirs.py,$(PYFILES)) \
- python/setup.py \
- python/build/__init__.py \
- python/build/nroff.py \
- python/ovs/dirs.py.template
-
-if HAVE_PYTHON
-nobase_pkgdata_DATA = $(ovs_pyfiles) $(ovstest_pyfiles)
-ovs-install-data-local:
- $(MKDIR_P) python/ovs
- sed \
- -e '/^##/d' \
- -e 's,[@]pkgdatadir[@],$(pkgdatadir),g' \
- -e 's,[@]RUNDIR[@],$(RUNDIR),g' \
- -e 's,[@]LOGDIR[@],$(LOGDIR),g' \
- -e 's,[@]bindir[@],$(bindir),g' \
- -e 's,[@]sysconfdir[@],$(sysconfdir),g' \
- -e 's,[@]DBDIR[@],$(DBDIR),g' \
- < $(srcdir)/python/ovs/dirs.py.template \
- > python/ovs/dirs.py.tmp
- $(MKDIR_P) $(DESTDIR)$(pkgdatadir)/python/ovs
- $(INSTALL_DATA) python/ovs/dirs.py.tmp $(DESTDIR)$(pkgdatadir)/python/ovs/dirs.py
- rm python/ovs/dirs.py.tmp
-
-python-sdist: $(srcdir)/python/ovs/version.py $(ovs_pyfiles) python/ovs/dirs.py
- (cd python/ && $(PYTHON) setup.py sdist)
-
-pypi-upload: $(srcdir)/python/ovs/version.py $(ovs_pyfiles) python/ovs/dirs.py
- (cd python/ && $(PYTHON) setup.py sdist upload)
-else
-ovs-install-data-local:
- @:
-endif
-install-data-local: ovs-install-data-local
-
-UNINSTALL_LOCAL += ovs-uninstall-local
-ovs-uninstall-local:
- rm -f $(DESTDIR)$(pkgdatadir)/python/ovs/dirs.py
-
-ALL_LOCAL += $(srcdir)/python/ovs/version.py
-$(srcdir)/python/ovs/version.py: config.status
- $(AM_V_GEN)$(ro_shell) > $(@F).tmp && \
- echo 'VERSION = "$(VERSION)"' >> $(@F).tmp && \
- if cmp -s $(@F).tmp $@; then touch $@; rm $(@F).tmp; else mv $(@F).tmp $@; fi
-
-ALL_LOCAL += $(srcdir)/python/ovs/dirs.py
-$(srcdir)/python/ovs/dirs.py: python/ovs/dirs.py.template
- $(AM_V_GEN)sed \
- -e '/^##/d' \
- -e 's,[@]pkgdatadir[@],/usr/local/share/openvswitch,g' \
- -e 's,[@]RUNDIR[@],/var/run,g' \
- -e 's,[@]LOGDIR[@],/usr/local/var/log,g' \
- -e 's,[@]bindir[@],/usr/local/bin,g' \
- -e 's,[@]sysconfdir[@],/usr/local/etc,g' \
- -e 's,[@]DBDIR[@],/usr/local/etc/openvswitch,g' \
- < $? > $@.tmp && \
- mv $@.tmp $@
-EXTRA_DIST += python/ovs/dirs.py.template
diff --git a/python/build/__init__.py b/python/build/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/python/build/nroff.py b/python/build/nroff.py
deleted file mode 100644
index a94907757..000000000
--- a/python/build/nroff.py
+++ /dev/null
@@ -1,398 +0,0 @@
-# Copyright (c) 2010, 2011, 2012, 2015, 2016, 2017 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import sys
-
-from ovs.db import error
-
-
-def text_to_nroff(s, font=r'\fR', escape_dot=True):
- def escape(match):
- c = match.group(0)
-
- # In Roman type, let -- in XML be \- in nroff. That gives us a way to
- # write minus signs, which is important in some places in manpages.
- #
- # Bold in nroff usually represents literal text, where there's no
- # distinction between hyphens and minus sign. The convention in nroff
- # appears to be to use a minus sign in such cases, so we follow that
- # convention.
- #
- # Finally, we always output - as a minus sign when it is followed by a
- # digit.
- if c.startswith('-'):
- if c == '--' and font == r'\fR':
- return r'\-'
- if c != '-' or font in (r'\fB', r'\fL'):
- return c.replace('-', r'\-')
- else:
- return '-'
-
- if c == '\\':
- return r'\e'
- elif c == '"':
- return r'\(dq'
- elif c == "'":
- return r'\(cq'
- elif c == ".":
- if escape_dot:
- # groff(7) says that . can be escaped by \. but in practice
- # groff still gives an error with \. at the beginning of a
- # line.
- return r'\[char46]'
- else:
- return '.'
- else:
- raise error.Error("bad escape")
-
- # Escape - \ " ' . as needed by nroff.
- s = re.sub('(-[0-9]|--|[-"\'\\\\.])', escape, s)
- return s
-
-
-def escape_nroff_literal(s, font=r'\fB'):
- return font + r'%s\fR' % text_to_nroff(s, font)
-
-
-def inline_xml_to_nroff(node, font, to_upper=False, newline='\n'):
- if node.nodeType == node.TEXT_NODE:
- if to_upper:
- s = text_to_nroff(node.data.upper(), font)
- else:
- s = text_to_nroff(node.data, font)
- return s.replace('\n', newline)
- elif node.nodeType == node.ELEMENT_NODE:
- if node.tagName in ['code', 'em', 'option', 'env', 'b']:
- s = r'\fB'
- for child in node.childNodes:
- s += inline_xml_to_nroff(child, r'\fB', to_upper, newline)
- return s + font
- elif node.tagName == 'ref':
- if node.hasAttribute('column'):
- s = node.attributes['column'].nodeValue
- if node.hasAttribute('key'):
- s += ':' + node.attributes['key'].nodeValue
- elif node.hasAttribute('table'):
- s = node.attributes['table'].nodeValue
- elif node.hasAttribute('group'):
- s = node.attributes['group'].nodeValue
- elif node.hasAttribute('db'):
- s = node.attributes['db'].nodeValue
- elif node.hasAttribute('field'):
- s = node.attributes['field'].nodeValue
- elif node.hasAttribute('section'):
- s = node.attributes['section'].nodeValue
- else:
- raise error.Error("'ref' lacks required attributes: %s"
- % list(node.attributes.keys()))
- return r'\fB' + re.sub(r'\s+', ' ', s) + font
- elif node.tagName in ['var', 'dfn', 'i', 'cite']:
- s = r'\fI'
- for child in node.childNodes:
- s += inline_xml_to_nroff(child, r'\fI', to_upper, newline)
- return s + font
- elif node.tagName in ['literal']:
- s = r'\fL'
- for child in node.childNodes:
- s += inline_xml_to_nroff(child, r'\fL')
- return s + font
- elif node.tagName == 'url':
- return ('\n.URL "'
- + text_to_nroff(node.attributes['href'].nodeValue,
- escape_dot=False)
- + '"\n')
- else:
- raise error.Error("element <%s> unknown or invalid here"
- % node.tagName)
- elif node.nodeType == node.COMMENT_NODE:
- return ''
- else:
- raise error.Error("unknown node %s in inline xml" % node)
-
-
-def pre_to_nroff(nodes, para, font):
- # This puts 'font' at the beginning of each line so that leading and
- # trailing whitespace stripping later doesn't removed leading spaces
- # from preformatted text.
- s = para + '\n.nf\n' + font
- for node in nodes:
- s += inline_xml_to_nroff(node, font, False, '\n.br\n' + font) + '\\fR'
- s += '\n.fi\n'
- return s
-
-
-def tbl_to_nroff(nodes, para):
- s = para + '\n.TS\n'
- for node in nodes:
- if node.nodeType != node.TEXT_NODE:
- fatal("<tbl> element may only have text children")
- s += node.data + '\n'
- s += '.TE\n'
- return s
-
-
-def fatal(msg):
- sys.stderr.write('%s\n' % msg)
- sys.exit(1)
-
-
-def put_text(text, x, y, s):
- x = int(x)
- y = int(y)
- extend = x + len(s) - len(text[y])
- if extend > 0:
- text[y] += ' ' * extend
- text[y] = text[y][:x] + s + text[y][x + len(s):]
-
-
-def put_centered(text, x, width, y, s):
- put_text(text, x + (width - len(s)) / 2, y, s)
-
-
-def diagram_header_to_nroff(header_node, text, x):
- # Parse header.
- header_fields = []
- i = 0
- for node in header_node.childNodes:
- if node.nodeType == node.ELEMENT_NODE and node.tagName == 'bits':
- name = node.attributes['name'].nodeValue
- width = node.attributes['width'].nodeValue
- above = node.getAttribute('above')
- below = node.getAttribute('below')
- fill = node.getAttribute('fill')
- header_fields += [{"name": name,
- "tag": "B%d" % i,
- "width": width,
- "above": above,
- "below": below,
- "fill": fill}]
- i += 1
- elif node.nodeType == node.COMMENT_NODE:
- pass
- elif node.nodeType == node.TEXT_NODE and node.data.isspace():
- pass
- else:
- fatal("unknown node %s in diagram <header> element" % node)
-
- # Format pic version.
- pic_s = ""
- for f in header_fields:
- name = f['name'].replace('...', '. . .')
- pic_s += " %s: box \"%s\" width %s" % (f['tag'], name, f['width'])
- if f['fill'] == 'yes':
- pic_s += " fill"
- pic_s += '\n'
- for f in header_fields:
- pic_s += " \"%s\" at %s.n above\n" % (f['above'], f['tag'])
- pic_s += " \"%s\" at %s.s below\n" % (f['below'], f['tag'])
- name = header_node.getAttribute('name')
- if name == "":
- visible = " invis"
- else:
- visible = ""
- pic_s += "line <->%s \"%s\" above " % (visible, name)
- pic_s += "from %s.nw + (0,textht) " % header_fields[0]['tag']
- pic_s += "to %s.ne + (0,textht)\n" % header_fields[-1]['tag']
-
- # Format text version.
- header_width = 1
- for f in header_fields:
- field_width = max(len(f['above']), len(f['below']), len(f['name']))
- f['width'] = field_width
- header_width += field_width + 1
- min_header_width = 2 + len(name)
- while min_header_width > header_width:
- for f in header_fields:
- f['width'] += 1
- header_width += 1
- if header_width >= min_header_width:
- break
-
- if name != "":
- put_centered(text, x, header_width, 0, name)
- if header_width >= 4:
- arrow = '<' + '-' * (header_width - 4) + '>'
- put_text(text, x + 1, 1, arrow)
- for f in header_fields:
- box1 = '+' + '-' * f['width'] + '+'
- box2 = '|' + ' ' * f['width'] + '|'
- put_text(text, x, 3, box1)
- put_text(text, x, 4, box2)
- put_text(text, x, 5, box1)
-
- put_centered(text, x + 1, f['width'], 2, f['above'])
- put_centered(text, x + 1, f['width'], 4, f['name'])
- put_centered(text, x + 1, f['width'], 6, f['below'])
-
- x += f['width'] + 1
-
- return pic_s, x + 1
-
-
-def diagram_to_nroff(nodes, para):
- pic_s = ''
- text = [''] * 7
- x = 0
- move = False
- for node in nodes:
- if node.nodeType == node.ELEMENT_NODE and node.tagName == 'header':
- if move:
- pic_s += "move .1\n"
- x += 1
- elif x > 0:
- x -= 1
- pic_header, x = diagram_header_to_nroff(node, text, x)
- pic_s += "[\n" + pic_header + "]\n"
- move = True
- elif node.nodeType == node.ELEMENT_NODE and node.tagName == 'nospace':
- move = False
- elif node.nodeType == node.ELEMENT_NODE and node.tagName == 'dots':
- pic_s += "move .1\n"
- pic_s += '". . ." ljust\n'
-
- put_text(text, x, 4, " ... ")
- x += 5
- elif node.nodeType == node.COMMENT_NODE:
- pass
- elif node.nodeType == node.TEXT_NODE and node.data.isspace():
- pass
- else:
- fatal("unknown node %s in diagram <header> element" % node)
-
- text_s = '.br\n'.join(["\\fL%s\n" % s for s in text if s != ""])
- return para + """
-.\\" check if in troff mode (TTY)
-.if t \\{
-.PS
-boxht = .2
-textht = 1/6
-fillval = .2
-""" + pic_s + """\
-.PE
-\\}
-.\\" check if in nroff mode:
-.if n \\{
-.nf
-""" + text_s + """\
-.fi
-\\}"""
-
-
-def block_xml_to_nroff(nodes, para='.PP'):
- HEADER_TAGS = ('h1', 'h2', 'h3', 'h4')
- s = ''
- prev = ''
- for node in nodes:
- if node.nodeType == node.TEXT_NODE:
- if s == '' and para != '.IP':
- s = para + '\n'
- text = re.sub(r'\s+', ' ', node.data)
- if s.endswith(' '):
- text = text.lstrip()
- s += text_to_nroff(text)
- s = s.lstrip()
- elif node.nodeType == node.ELEMENT_NODE:
- if node.tagName in ['ul', 'ol']:
- if s != "":
- s += "\n"
- s += ".RS\n"
- i = 0
- for li_node in node.childNodes:
- if (li_node.nodeType == node.ELEMENT_NODE
- and li_node.tagName == 'li'):
- i += 1
- if node.tagName == 'ul':
- s += ".IP \\(bu\n"
- else:
- s += ".IP %d. .4in\n" % i
- s += block_xml_to_nroff(li_node.childNodes, ".IP")
- elif li_node.nodeType == node.COMMENT_NODE:
- pass
- elif (li_node.nodeType != node.TEXT_NODE
- or not li_node.data.isspace()):
- raise error.Error("<%s> element may only have "
- "<li> children" % node.tagName)
- s += ".RE\n"
- elif node.tagName == 'dl':
- indent = True
- if prev in HEADER_TAGS:
- indent = False
- if s != "":
- s += "\n"
- if indent:
- s += ".RS\n"
- prev = "dd"
- for li_node in node.childNodes:
- if (li_node.nodeType == node.ELEMENT_NODE
- and li_node.tagName == 'dt'):
- if prev == 'dd':
- s += '.TP\n'
- else:
- s += '.TQ .5in\n'
- prev = 'dt'
- elif (li_node.nodeType == node.ELEMENT_NODE
- and li_node.tagName == 'dd'):
- if prev == 'dd':
- s += '.IP\n'
- prev = 'dd'
- elif li_node.nodeType == node.COMMENT_NODE:
- continue
- elif (li_node.nodeType != node.TEXT_NODE
- or not li_node.data.isspace()):
- raise error.Error("<dl> element may only have "
- "<dt> and <dd> children")
- s += block_xml_to_nroff(li_node.childNodes, ".IP")
- if indent:
- s += ".RE\n"
- elif node.tagName == 'p':
- if s != "":
- if not s.endswith("\n"):
- s += "\n"
- s += para + "\n"
- s += block_xml_to_nroff(node.childNodes, para)
- elif node.tagName in HEADER_TAGS:
- if s != "":
- if not s.endswith("\n"):
- s += "\n"
- nroffTag, font = {'h1': ('SH', r'\fR'),
- 'h2': ('SS', r'\fB'),
- 'h3': ('ST', r'\fI'),
- 'h4': ('SU', r'\fI')}[node.tagName]
- to_upper = node.tagName == 'h1'
- s += ".%s \"" % nroffTag
- for child_node in node.childNodes:
- s += inline_xml_to_nroff(child_node, font, to_upper)
- s += "\"\n"
- elif node.tagName == 'pre':
- fixed = node.getAttribute('fixed')
- if fixed == 'yes':
- font = r'\fL'
- else:
- font = r'\fB'
- s += pre_to_nroff(node.childNodes, para, font)
- elif node.tagName == 'tbl':
- s += tbl_to_nroff(node.childNodes, para)
- elif node.tagName == 'diagram':
- s += diagram_to_nroff(node.childNodes, para)
- else:
- s += inline_xml_to_nroff(node, r'\fR')
- prev = node.tagName
- elif node.nodeType == node.COMMENT_NODE:
- pass
- else:
- raise error.Error("unknown node %s in block xml" % node)
- if s != "" and not s.endswith('\n'):
- s += '\n'
- return s
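
For reference, the escaping behavior of the removed nroff helpers (now
consumed from the OVS tree) can be exercised directly; a small sketch,
assuming the OVS source tree's python/ and python/build/ directories
are on sys.path:

    # text_to_nroff() escapes -, \, ", ' and . for nroff output; in
    # Roman type, "--" becomes the minus sign "\-" (see comments above).
    from build import nroff

    print(nroff.text_to_nroff('a--b'))        # a\-b
    print(nroff.text_to_nroff('50%'))         # 50%  (nothing to escape)
    print(nroff.escape_nroff_literal('x-y'))  # \fBx\-y\fR
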
diff --git a/python/build/soutil.py b/python/build/soutil.py
deleted file mode 100755
index b8027af86..000000000
--- a/python/build/soutil.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#! /usr/bin/env python
-
-# Copyright (c) 2008, 2017 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import getopt
-import os
-import re
-import sys
-
-
-def parse_include_dirs():
- include_dirs = []
- options, args = getopt.gnu_getopt(sys.argv[1:], 'I:', ['include='])
- for key, value in options:
- if key in ['-I', '--include']:
- include_dirs.append(value)
- else:
- assert False
-
- include_dirs.append('.')
- return include_dirs, args
-
-
-def find_file(include_dirs, name):
- for dir in include_dirs:
- file = "%s/%s" % (dir, name)
- try:
- os.stat(file)
- return file
- except OSError:
- pass
- sys.stderr.write("%s not found in: %s\n" % (name, ' '.join(include_dirs)))
- return None
-
-
-so_re = re.compile(r'^\.so (\S+)$')
-
-
-def extract_include_directive(line):
- m = so_re.match(line)
- if m:
- return m.group(1)
- else:
- return None
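
The soutil helpers above back the sodepends.py run in Makefile.am; a
hedged illustration of how they resolve ".so" include directives,
assuming python/build from the OVS tree is importable:

    # extract_include_directive() matches lines of the form ".so FILE";
    # find_file() then probes each -I include directory in order.
    import soutil

    line = '.so lib/common.man'
    name = soutil.extract_include_directive(line)    # 'lib/common.man'
    if name is not None:
        path = soutil.find_file(['.', 'lib'], name)  # first hit or None
        print(path)
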
diff --git a/python/ovs/.gitignore b/python/ovs/.gitignore
deleted file mode 100644
index 985278646..000000000
--- a/python/ovs/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-version.py
diff --git a/python/ovs/__init__.py b/python/ovs/__init__.py
deleted file mode 100644
index 218d8921e..000000000
--- a/python/ovs/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file intentionally left blank.
diff --git a/python/ovs/_json.c b/python/ovs/_json.c
deleted file mode 100644
index ef7bb4b8e..000000000
--- a/python/ovs/_json.c
+++ /dev/null
@@ -1,269 +0,0 @@
-#include "Python.h"
-#include <openvswitch/json.h>
-#include "structmember.h"
-
-#if PY_MAJOR_VERSION >= 3
-#define IS_PY3K
-#endif
-
-typedef struct {
- PyObject_HEAD
- struct json_parser *_parser;
-} json_ParserObject;
-
-static void
-Parser_dealloc(json_ParserObject * p)
-{
- json_parser_abort(p->_parser);
- Py_TYPE(p)->tp_free(p);
-}
-
-static PyObject *
-Parser_new(PyTypeObject * type, PyObject * args, PyObject * kwargs)
-{
- json_ParserObject *self;
- static char *kwlist[] = { "check_trailer", NULL };
- PyObject *check_trailer = NULL;
- int ct_int = 0;
-
- if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O", kwlist,
- &check_trailer)) {
- return NULL;
- }
-
- if (check_trailer != NULL) {
- ct_int = PyObject_IsTrue(check_trailer);
- if (ct_int < 0) {
- return NULL;
- } else if (ct_int) {
- ct_int = JSPF_TRAILER;
- }
- }
-
- self = (json_ParserObject *) type->tp_alloc(type, 0);
- if (self != NULL) {
- self->_parser = json_parser_create(ct_int);
- }
-
- return (PyObject *) self;
-}
-
-static PyObject *
-Parser_feed(json_ParserObject * self, PyObject * args)
-{
- Py_ssize_t input_sz;
- PyObject *input;
- size_t rd;
- char *input_str;
-
- if (self->_parser == NULL) {
- return NULL;
- }
-
- if (!PyArg_UnpackTuple(args, "input", 1, 1, &input)) {
- return NULL;
- }
-#ifdef IS_PY3K
- if ((input_str = PyUnicode_AsUTF8AndSize(input, &input_sz)) == NULL) {
-#else
- if (PyString_AsStringAndSize(input, &input_str, &input_sz) < 0) {
-#endif
- return NULL;
- }
-
- rd = json_parser_feed(self->_parser, input_str, (size_t) input_sz);
-
-#ifdef IS_PY3K
- return PyLong_FromSize_t(rd);
-#else
- return PyInt_FromSize_t(rd);
-#endif
-}
-
-static PyObject *
-Parser_is_done(json_ParserObject * self)
-{
- if (self->_parser == NULL) {
- return NULL;
- }
- return PyBool_FromLong(json_parser_is_done(self->_parser));
-}
-
-static PyObject *
-json_to_python(struct json *json)
-{
- switch (json->type) {
- case JSON_NULL:
- Py_RETURN_NONE;
- case JSON_FALSE:
- Py_RETURN_FALSE;
- case JSON_TRUE:
- Py_RETURN_TRUE;
- case JSON_OBJECT:{
- struct shash_node *node;
- PyObject *dict = PyDict_New();
-
- if (dict == NULL) {
- return PyErr_NoMemory();
- }
- SHASH_FOR_EACH (node, json->object) {
- PyObject *key = PyUnicode_FromString(node->name);
- PyObject *val = json_to_python(node->data);
-
- if (!(key && val) || PyDict_SetItem(dict, key, val)) {
- Py_XDECREF(key);
- Py_XDECREF(val);
- Py_XDECREF(dict);
- return NULL;
- }
-
- Py_XDECREF(key);
- Py_XDECREF(val);
- }
- return dict;
- }
- case JSON_ARRAY:{
- int i;
- PyObject *arr = PyList_New(json->array.n);
-
- if (arr == NULL) {
- return PyErr_NoMemory();
- }
- for (i = 0; i < json->array.n; i++) {
- PyObject *item = json_to_python(json->array.elems[i]);
-
- if (!item || PyList_SetItem(arr, i, item)) {
- Py_XDECREF(arr);
- return NULL;
- }
- }
- return arr;
- }
- case JSON_REAL:
- if (json->real != 0) {
- return PyFloat_FromDouble(json->real);
- } /* fall through to treat 0 as int */
- case JSON_INTEGER:
-#ifdef IS_PY3K
- return PyLong_FromLong((long) json->integer);
-#else
- return PyInt_FromLong((long) json->integer);
-#endif
-
- case JSON_STRING:
- return PyUnicode_FromString(json->string);
- default:
- return NULL;
- }
-}
-
-static PyObject *
-Parser_finish(json_ParserObject * self)
-{
- struct json *json;
- PyObject *obj;
-
- if (self->_parser == NULL) {
- return NULL;
- }
-
- json = json_parser_finish(self->_parser);
- self->_parser = NULL;
- obj = json_to_python(json);
- json_destroy(json);
- return obj;
-}
-
-static PyMethodDef Parser_methods[] = {
- {"feed", (PyCFunction) Parser_feed, METH_VARARGS,
- "Feed data to the parser and return the index of the last object."},
- {"is_done", (PyCFunction) Parser_is_done, METH_NOARGS,
- "Whether the parser has finished decoding an object."},
- {"finish", (PyCFunction) Parser_finish, METH_NOARGS,
- "Finish parsing and return Python object parsed."},
- {NULL},
-};
-
-static PyTypeObject json_ParserType = {
- PyVarObject_HEAD_INIT(NULL, 0)
- "ovs._json.Parser", /* tp_name */
- sizeof (json_ParserObject), /* tp_basicsize */
- 0, /* tp_itemsize */
- (destructor) Parser_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
- "Parser objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- Parser_methods, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- Parser_new, /* tp_new */
-};
-
-#ifdef IS_PY3K
-static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "ovs._json", /* m_name */
- "OVS JSON Parser module", /* m_doc */
- 0, /* m_size */
- 0, /* m_methods */
- 0, /* m_slots */
- 0, /* m_traverse */
- 0, /* m_clear */
- 0, /* m_free */
-};
-
-#define INITERROR return NULL
-#else /* !IS_PY3K */
-#define INITERROR return
-#endif
-
-PyMODINIT_FUNC
-#ifdef IS_PY3K
-PyInit__json(void)
-#else
-init_json(void)
-#endif
-{
- PyObject *m;
-
- if (PyType_Ready(&json_ParserType) < 0) {
- INITERROR;
- }
-#ifdef IS_PY3K
- m = PyModule_Create(&moduledef);
-#else
- m = Py_InitModule3("ovs._json", NULL, "OVS JSON Parser module");
-#endif
-
- Py_INCREF(&json_ParserType);
- PyModule_AddObject(m, "Parser", (PyObject *) & json_ParserType);
-#ifdef IS_PY3K
- return m;
-#endif
-}
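
For reference, the removed C extension (still available from the OVS
tree) exposes the streaming interface defined above; a short usage
sketch, assuming a built and importable ovs._json module:

    # Parser.feed() returns the number of bytes consumed; finish()
    # converts the parsed JSON into native Python objects.
    import ovs._json

    p = ovs._json.Parser(check_trailer=True)
    p.feed('{"a": [1, 2.5, "x"]}')
    if p.is_done():
        print(p.finish())   # {'a': [1, 2.5, 'x']}
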
diff --git a/python/ovs/compat/__init__.py b/python/ovs/compat/__init__.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/python/ovs/compat/sortedcontainers/LICENSE b/python/ovs/compat/sortedcontainers/LICENSE
deleted file mode 100644
index 8794014e0..000000000
--- a/python/ovs/compat/sortedcontainers/LICENSE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2014-2016 Grant Jenks
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/python/ovs/compat/sortedcontainers/__init__.py b/python/ovs/compat/sortedcontainers/__init__.py
deleted file mode 100644
index 392adfad6..000000000
--- a/python/ovs/compat/sortedcontainers/__init__.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""Sorted Container Types: SortedList, SortedDict, SortedSet
-
-SortedContainers is an Apache2 licensed containers library, written in
-pure-Python, and fast as C-extensions.
-
-
-Python's standard library is great until you need a sorted collections
-type. Many will attest that you can get really far without one, but the moment
-you **really need** a sorted list, dict, or set, you're faced with a dozen
-different implementations, most using C-extensions without great documentation
-and benchmarking.
-
-In Python, we can do better. And we can do it in pure-Python!
-
-::
-
- >>> from sortedcontainers import SortedList, SortedDict, SortedSet
- >>> sl = SortedList(xrange(10000000))
- >>> 1234567 in sl
- True
- >>> sl[7654321]
- 7654321
- >>> sl.add(1234567)
- >>> sl.count(1234567)
- 2
- >>> sl *= 3
- >>> len(sl)
- 30000003
-
-SortedContainers takes all of the work out of Python sorted types - making your
-deployment and use of Python easy. There's no need to install a C compiler or
-pre-build and distribute custom extensions. Performance is a feature and
-testing has 100% coverage with unit tests and hours of stress.
-
-:copyright: (c) 2016 by Grant Jenks.
-:license: Apache 2.0, see LICENSE for more details.
-
-"""
-
-
-from .sortedlist import SortedList, SortedListWithKey
-from .sortedset import SortedSet
-from .sorteddict import SortedDict
-
-__all__ = ['SortedList', 'SortedSet', 'SortedDict', 'SortedListWithKey']
-
-__title__ = 'sortedcontainers'
-__version__ = '1.5.9'
-__build__ = 0x010509
-__author__ = 'Grant Jenks'
-__license__ = 'Apache 2.0'
-__copyright__ = 'Copyright 2016 Grant Jenks'
diff --git a/python/ovs/compat/sortedcontainers/sorteddict.py b/python/ovs/compat/sortedcontainers/sorteddict.py
deleted file mode 100644
index 5d425fee6..000000000
--- a/python/ovs/compat/sortedcontainers/sorteddict.py
+++ /dev/null
@@ -1,741 +0,0 @@
-"""Sorted dictionary implementation.
-
-"""
-
-from collections import Set, Sequence
-from collections import KeysView as AbstractKeysView
-from collections import ValuesView as AbstractValuesView
-from collections import ItemsView as AbstractItemsView
-from sys import hexversion
-
-from .sortedlist import SortedList, recursive_repr, SortedListWithKey
-from .sortedset import SortedSet
-
-NONE = object()
-
-
-class _IlocWrapper(object):
- "Positional indexing support for sorted dictionary objects."
- # pylint: disable=protected-access, too-few-public-methods
- def __init__(self, _dict):
- self._dict = _dict
- def __len__(self):
- return len(self._dict)
- def __getitem__(self, index):
- """
- Very efficiently return the key at index *index* in iteration. Supports
- negative indices and slice notation. Raises IndexError on invalid
- *index*.
- """
- return self._dict._list[index]
- def __delitem__(self, index):
- """
- Remove the ``sdict[sdict.iloc[index]]`` from *sdict*. Supports negative
- indices and slice notation. Raises IndexError on invalid *index*.
- """
- _dict = self._dict
- _list = _dict._list
- _delitem = _dict._delitem
-
- if isinstance(index, slice):
- keys = _list[index]
- del _list[index]
- for key in keys:
- _delitem(key)
- else:
- key = _list[index]
- del _list[index]
- _delitem(key)
-
-
-class SortedDict(dict):
- """SortedDict provides the same methods as a dict. Additionally, SortedDict
- efficiently maintains its keys in sorted order. Consequently, the keys
- method will return the keys in sorted order, the popitem method will remove
- the item with the highest key, etc.
-
- """
- def __init__(self, *args, **kwargs):
- """SortedDict provides the same methods as a dict. Additionally, SortedDict
- efficiently maintains its keys in sorted order. Consequently, the keys
- method will return the keys in sorted order, the popitem method will
- remove the item with the highest key, etc.
-
- An optional *key* argument defines a callable that, like the `key`
- argument to Python's `sorted` function, extracts a comparison key from
- each dict key. If no function is specified, the default compares the
- dict keys directly. The `key` argument must be provided as a positional
- argument and must come before all other arguments.
-
- An optional *iterable* argument provides an initial series of items to
- populate the SortedDict. Each item in the series must itself contain
- two items. The first is used as a key in the new dictionary, and the
- second as the key's value. If a given key is seen more than once, the
- last value associated with it is retained in the new dictionary.
-
- If keyword arguments are given, the keywords themselves with their
- associated values are added as items to the dictionary. If a key is
- specified both in the positional argument and as a keyword argument, the
- value associated with the keyword is retained in the dictionary. For
- example, these all return a dictionary equal to ``{"one": 2, "two":
- 3}``:
-
- * ``SortedDict(one=2, two=3)``
- * ``SortedDict({'one': 2, 'two': 3})``
- * ``SortedDict(zip(('one', 'two'), (2, 3)))``
- * ``SortedDict([['two', 3], ['one', 2]])``
-
- The first example only works for keys that are valid Python
- identifiers; the others work with any valid keys.
-
- """
- # pylint: disable=super-init-not-called
- if args and (args[0] is None or callable(args[0])):
- self._key = args[0]
- args = args[1:]
- else:
- self._key = None
-
- if self._key is None:
- self._list = SortedList()
- else:
- self._list = SortedListWithKey(key=self._key)
-
- # Cache function pointers to dict methods.
-
- _dict = super(SortedDict, self)
- self._dict = _dict
- self._clear = _dict.clear
- self._delitem = _dict.__delitem__
- self._iter = _dict.__iter__
- self._pop = _dict.pop
- self._setdefault = _dict.setdefault
- self._setitem = _dict.__setitem__
- self._dict_update = _dict.update
-
- # Cache function pointers to SortedList methods.
-
- _list = self._list
- self._list_add = _list.add
- self.bisect_left = _list.bisect_left
- self.bisect = _list.bisect_right
- self.bisect_right = _list.bisect_right
- self._list_clear = _list.clear
- self.index = _list.index
- self._list_pop = _list.pop
- self._list_remove = _list.remove
- self._list_update = _list.update
- self.irange = _list.irange
- self.islice = _list.islice
- self._reset = _list._reset # pylint: disable=protected-access
-
- if self._key is not None:
- self.bisect_key_left = _list.bisect_key_left
- self.bisect_key_right = _list.bisect_key_right
- self.bisect_key = _list.bisect_key
- self.irange_key = _list.irange_key
-
- self.iloc = _IlocWrapper(self)
-
- self._update(*args, **kwargs)
-
- @property
- def key(self):
- """Key function used to extract comparison key for sorting."""
- return self._key
-
- def clear(self):
- """Remove all elements from the dictionary."""
- self._clear()
- self._list_clear()
-
- def __delitem__(self, key):
- """
- Remove ``d[key]`` from *d*. Raises a KeyError if *key* is not in the
- dictionary.
- """
- self._delitem(key)
- self._list_remove(key)
-
- def __iter__(self):
- """
- Return an iterator over the sorted keys of the dictionary.
-
- Iterating the Mapping while adding or deleting keys may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return iter(self._list)
-
- def __reversed__(self):
- """
- Return a reversed iterator over the sorted keys of the dictionary.
-
- Iterating the Mapping while adding or deleting keys may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return reversed(self._list)
-
- def __setitem__(self, key, value):
- """Set `d[key]` to *value*."""
- if key not in self:
- self._list_add(key)
- self._setitem(key, value)
-
- def copy(self):
- """Return a shallow copy of the sorted dictionary."""
- return self.__class__(self._key, self._iteritems())
-
- __copy__ = copy
-
- @classmethod
- def fromkeys(cls, seq, value=None):
- """
- Create a new dictionary with keys from *seq* and values set to *value*.
- """
- return cls((key, value) for key in seq)
-
- if hexversion < 0x03000000:
- def items(self):
- """
- Return a list of the dictionary's items (``(key, value)`` pairs).
- """
- return list(self._iteritems())
- else:
- def items(self):
- """
- Return a new ItemsView of the dictionary's items. In addition to
- the methods provided by the built-in `view` the ItemsView is
- indexable (e.g. ``d.items()[5]``).
- """
- return ItemsView(self)
-
- def iteritems(self):
- """
- Return an iterator over the items (``(key, value)`` pairs).
-
- Iterating the Mapping while adding or deleting keys may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return iter((key, self[key]) for key in self._list)
-
- _iteritems = iteritems
-
- if hexversion < 0x03000000:
- def keys(self):
- """Return a SortedSet of the dictionary's keys."""
- return SortedSet(self._list, key=self._key)
- else:
- def keys(self):
- """
- Return a new KeysView of the dictionary's keys. In addition to the
- methods provided by the built-in `view` the KeysView is indexable
- (e.g. ``d.keys()[5]``).
- """
- return KeysView(self)
-
- def iterkeys(self):
- """
- Return an iterator over the sorted keys of the Mapping.
-
- Iterating the Mapping while adding or deleting keys may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return iter(self._list)
-
- if hexversion < 0x03000000:
- def values(self):
- """Return a list of the dictionary's values."""
- return list(self._itervalues())
- else:
- def values(self):
- """
- Return a new :class:`ValuesView` of the dictionary's values.
- In addition to the methods provided by the built-in `view` the
- ValuesView is indexable (e.g., ``d.values()[5]``).
- """
- return ValuesView(self)
-
- def itervalues(self):
- """
- Return an iterator over the values of the Mapping.
-
- Iterating the Mapping while adding or deleting keys may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return iter(self[key] for key in self._list)
-
- _itervalues = itervalues
-
- def pop(self, key, default=NONE):
- """
- If *key* is in the dictionary, remove it and return its value,
- else return *default*. If *default* is not given and *key* is not in
- the dictionary, a KeyError is raised.
- """
- if key in self:
- self._list_remove(key)
- return self._pop(key)
- else:
- if default is NONE:
- raise KeyError(key)
- else:
- return default
-
- def popitem(self, last=True):
- """
- Remove and return a ``(key, value)`` pair from the dictionary. If
- last=True (default) then remove the *greatest* `key` from the
- dictionary. Else, remove the *least* key from the dictionary.
-
- If the dictionary is empty, calling `popitem` raises a
- `KeyError`.
- """
- if not self:
- raise KeyError('popitem(): dictionary is empty')
-
- key = self._list_pop(-1 if last else 0)
- value = self._pop(key)
-
- return (key, value)
-
- def peekitem(self, index=-1):
- """Return (key, value) item pair at index.
-
- Unlike ``popitem``, the sorted dictionary is not modified. Index
- defaults to -1, the last/greatest key in the dictionary. Specify
- ``index=0`` to look up the first/least key in the dictionary.
-
- If index is out of range, raise IndexError.
-
- """
- key = self._list[index]
- return key, self[key]
-
- def setdefault(self, key, default=None):
- """
- If *key* is in the dictionary, return its value. If not, insert *key*
- with a value of *default* and return *default*. *default* defaults to
- ``None``.
- """
- if key in self:
- return self[key]
-
- self._setitem(key, default)
- self._list_add(key)
- return default
-
- def update(self, *args, **kwargs):
- """
- Update the dictionary with the key/value pairs from *other*, overwriting
- existing keys.
-
- *update* accepts either another dictionary object or an iterable of
- key/value pairs (as a tuple or other iterable of length two). If
- keyword arguments are specified, the dictionary is then updated with
- those key/value pairs: ``d.update(red=1, blue=2)``.
- """
- if not self:
- self._dict_update(*args, **kwargs)
- self._list_update(self._iter())
- return
-
- if not kwargs and len(args) == 1 and isinstance(args[0], dict):
- pairs = args[0]
- else:
- pairs = dict(*args, **kwargs)
-
- if (10 * len(pairs)) > len(self):
- self._dict_update(pairs)
- self._list_clear()
- self._list_update(self._iter())
- else:
- for key in pairs:
- self[key] = pairs[key]
-
- _update = update
-
- if hexversion >= 0x02070000:
- def viewkeys(self):
- "Return ``KeysView`` of dictionary keys."
- return KeysView(self)
-
- def viewvalues(self):
- "Return ``ValuesView`` of dictionary values."
- return ValuesView(self)
-
- def viewitems(self):
- "Return ``ItemsView`` of dictionary (key, value) item pairs."
- return ItemsView(self)
-
- def __reduce__(self):
- return (self.__class__, (self._key, list(self._iteritems())))
-
- @recursive_repr
- def __repr__(self):
- _key = self._key
- name = type(self).__name__
- key = '' if _key is None else '{0!r}, '.format(_key)
- func = '{0!r}: {1!r}'.format
- items = ', '.join(func(key, self[key]) for key in self._list)
- return '{0}({1}{{{2}}})'.format(name, key, items)
-
- def _check(self):
- # pylint: disable=protected-access
- self._list._check()
- assert len(self) == len(self._list)
- assert all(key in self for key in self._list)
-
-
-class KeysView(AbstractKeysView, Set, Sequence):
- """
- A KeysView object is a dynamic view of the dictionary's keys, which
- means that when the dictionary's keys change, the view reflects
- those changes.
-
- The KeysView class implements the Set and Sequence Abstract Base Classes.
- """
- # pylint: disable=too-many-ancestors
- if hexversion < 0x03000000:
- def __init__(self, sorted_dict):
- """
- Initialize a KeysView from a SortedDict container as *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.viewkeys()
- else:
- def __init__(self, sorted_dict):
- """
- Initialize a KeysView from a SortedDict container as *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.keys()
- def __len__(self):
- """Return the number of entries in the dictionary."""
- return len(self._view)
- def __contains__(self, key):
- """
- Return True if and only if *key* is one of the underlying dictionary's
- keys.
- """
- return key in self._view
- def __iter__(self):
- """
- Return an iterable over the keys in the dictionary. Keys are iterated
- over in their sorted order.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a `RuntimeError` or fail to iterate over all entries.
- """
- return iter(self._list)
- def __getitem__(self, index):
- """Return the key at position *index*."""
- return self._list[index]
- def __reversed__(self):
- """
- Return a reversed iterable over the keys in the dictionary. Keys are
- iterated over in their reverse sort order.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a RuntimeError or fail to iterate over all entries.
- """
- return reversed(self._list)
- def index(self, value, start=None, stop=None):
- """
- Return the smallest *k* such that `keysview[k] == value` and `start <= k
- < end`. Raises `KeyError` if *value* is not present. *stop* defaults
- to the end of the set. *start* defaults to the beginning. Negative
- indexes are supported, as for slice indices.
- """
- # pylint: disable=arguments-differ
- return self._list.index(value, start, stop)
- def count(self, value):
- """Return the number of occurrences of *value* in the set."""
- return 1 if value in self._view else 0
- def __eq__(self, that):
- """Test set-like equality with *that*."""
- return self._view == that
- def __ne__(self, that):
- """Test set-like inequality with *that*."""
- return self._view != that
- def __lt__(self, that):
- """Test whether self is a proper subset of *that*."""
- return self._view < that
- def __gt__(self, that):
- """Test whether self is a proper superset of *that*."""
- return self._view > that
- def __le__(self, that):
- """Test whether self is contained within *that*."""
- return self._view <= that
- def __ge__(self, that):
- """Test whether *that* is contained within self."""
- return self._view >= that
- def __and__(self, that):
- """Return a SortedSet of the intersection of self and *that*."""
- return SortedSet(self._view & that)
- def __or__(self, that):
- """Return a SortedSet of the union of self and *that*."""
- return SortedSet(self._view | that)
- def __sub__(self, that):
- """Return a SortedSet of the difference of self and *that*."""
- return SortedSet(self._view - that)
- def __xor__(self, that):
- """Return a SortedSet of the symmetric difference of self and *that*."""
- return SortedSet(self._view ^ that)
- if hexversion < 0x03000000:
- def isdisjoint(self, that):
- """Return True if and only if *that* is disjoint with self."""
- # pylint: disable=arguments-differ
- return not any(key in self._list for key in that)
- else:
- def isdisjoint(self, that):
- """Return True if and only if *that* is disjoint with self."""
- # pylint: disable=arguments-differ
- return self._view.isdisjoint(that)
- @recursive_repr
- def __repr__(self):
- return 'SortedDict_keys({0!r})'.format(list(self))
-
-
-class ValuesView(AbstractValuesView, Sequence):
- """
- A ValuesView object is a dynamic view of the dictionary's values, which
- means that when the dictionary's values change, the view reflects those
- changes.
-
- The ValuesView class implements the Sequence Abstract Base Class.
- """
- # pylint: disable=too-many-ancestors
- if hexversion < 0x03000000:
- def __init__(self, sorted_dict):
- """
- Initialize a ValuesView from a SortedDict container as
- *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._dict = sorted_dict
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.viewvalues()
- else:
- def __init__(self, sorted_dict):
- """
- Initialize a ValuesView from a SortedDict container as
- *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._dict = sorted_dict
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.values()
- def __len__(self):
- """Return the number of entries in the dictionary."""
- return len(self._dict)
- def __contains__(self, value):
- """
- Return True if and only if *value* is in the underlying Mapping's
- values.
- """
- return value in self._view
- def __iter__(self):
- """
- Return an iterator over the values in the dictionary. Values are
- iterated over in sorted order of the keys.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a `RuntimeError` or fail to iterate over all entries.
- """
- _dict = self._dict
- return iter(_dict[key] for key in self._list)
- def __getitem__(self, index):
- """
- Efficiently return value at *index* in iteration.
-
- Supports slice notation and negative indexes.
- """
- _dict, _list = self._dict, self._list
- if isinstance(index, slice):
- return [_dict[key] for key in _list[index]]
- return _dict[_list[index]]
- def __reversed__(self):
- """
- Return a reverse iterator over the values in the dictionary. Values are
- iterated over in reverse sort order of the keys.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a `RuntimeError` or fail to iterate over all entries.
- """
- _dict = self._dict
- return iter(_dict[key] for key in reversed(self._list))
- def index(self, value):
- """
- Return index of *value* in self.
-
- Raises ValueError if *value* is not found.
- """
- # pylint: disable=arguments-differ
- for idx, val in enumerate(self):
- if value == val:
- return idx
- raise ValueError('{0!r} is not in dict'.format(value))
- if hexversion < 0x03000000:
- def count(self, value):
- """Return the number of occurrences of *value* in self."""
- return sum(1 for val in self._dict.itervalues() if val == value)
- else:
- def count(self, value):
- """Return the number of occurrences of *value* in self."""
- return sum(1 for val in self._dict.values() if val == value)
- def __lt__(self, that):
- raise TypeError
- def __gt__(self, that):
- raise TypeError
- def __le__(self, that):
- raise TypeError
- def __ge__(self, that):
- raise TypeError
- def __and__(self, that):
- raise TypeError
- def __or__(self, that):
- raise TypeError
- def __sub__(self, that):
- raise TypeError
- def __xor__(self, that):
- raise TypeError
- @recursive_repr
- def __repr__(self):
- return 'SortedDict_values({0!r})'.format(list(self))
-
-
-class ItemsView(AbstractItemsView, Set, Sequence):
- """
- An ItemsView object is a dynamic view of the dictionary's ``(key,
- value)`` pairs, which means that when the dictionary changes, the
- view reflects those changes.
-
- The ItemsView class implements the Set and Sequence Abstract Base Classes.
- However, the set-like operations (``&``, ``|``, ``-``, ``^``) will only
- operate correctly if all of the dictionary's values are hashable.
- """
- # pylint: disable=too-many-ancestors
- if hexversion < 0x03000000:
- def __init__(self, sorted_dict):
- """
- Initialize an ItemsView from a SortedDict container as
- *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._dict = sorted_dict
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.viewitems()
- else:
- def __init__(self, sorted_dict):
- """
- Initialize an ItemsView from a SortedDict container as
- *sorted_dict*.
- """
- # pylint: disable=super-init-not-called, protected-access
- self._dict = sorted_dict
- self._list = sorted_dict._list
- self._view = sorted_dict._dict.items()
- def __len__(self):
- """Return the number of entries in the dictionary."""
- return len(self._view)
- def __contains__(self, key):
- """
- Return True if and only if *key* is one of the underlying dictionary's
- items.
- """
- return key in self._view
- def __iter__(self):
- """
- Return an iterable over the items in the dictionary. Items are iterated
- over in their sorted order.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a `RuntimeError` or fail to iterate over all entries.
- """
- _dict = self._dict
- return iter((key, _dict[key]) for key in self._list)
- def __getitem__(self, index):
- """Return the item as position *index*."""
- _dict, _list = self._dict, self._list
- if isinstance(index, slice):
- return [(key, _dict[key]) for key in _list[index]]
- key = _list[index]
- return (key, _dict[key])
- def __reversed__(self):
- """
- Return a reversed iterable over the items in the dictionary. Items are
- iterated over in their reverse sort order.
-
- Iterating views while adding or deleting entries in the dictionary may
- raise a RuntimeError or fail to iterate over all entries.
- """
- _dict = self._dict
- return iter((key, _dict[key]) for key in reversed(self._list))
- def index(self, key, start=None, stop=None):
- """
- Return the smallest *k* such that `itemsview[k] == key` and `start <= k
- < end`. Raises `KeyError` if *key* is not present. *stop* defaults
- to the end of the set. *start* defaults to the beginning. Negative
- indexes are supported, as for slice indices.
- """
- # pylint: disable=arguments-differ
- temp, value = key
- pos = self._list.index(temp, start, stop)
- if value == self._dict[temp]:
- return pos
- else:
- raise ValueError('{0!r} is not in dict'.format(key))
- def count(self, item):
- """Return the number of occurrences of *item* in the set."""
- # pylint: disable=arguments-differ
- key, value = item
- return 1 if key in self._dict and self._dict[key] == value else 0
- def __eq__(self, that):
- """Test set-like equality with *that*."""
- return self._view == that
- def __ne__(self, that):
- """Test set-like inequality with *that*."""
- return self._view != that
- def __lt__(self, that):
- """Test whether self is a proper subset of *that*."""
- return self._view < that
- def __gt__(self, that):
- """Test whether self is a proper superset of *that*."""
- return self._view > that
- def __le__(self, that):
- """Test whether self is contained within *that*."""
- return self._view <= that
- def __ge__(self, that):
- """Test whether *that* is contained within self."""
- return self._view >= that
- def __and__(self, that):
- """Return a SortedSet of the intersection of self and *that*."""
- return SortedSet(self._view & that)
- def __or__(self, that):
- """Return a SortedSet of the union of self and *that*."""
- return SortedSet(self._view | that)
- def __sub__(self, that):
- """Return a SortedSet of the difference of self and *that*."""
- return SortedSet(self._view - that)
- def __xor__(self, that):
- """Return a SortedSet of the symmetric difference of self and *that*."""
- return SortedSet(self._view ^ that)
- if hexversion < 0x03000000:
- def isdisjoint(self, that):
- """Return True if and only if *that* is disjoint with self."""
- # pylint: disable=arguments-differ
- _dict = self._dict
- for key, value in that:
- if key in _dict and _dict[key] == value:
- return False
- return True
- else:
- def isdisjoint(self, that):
- """Return True if and only if *that* is disjoint with self."""
- # pylint: disable=arguments-differ
- return self._view.isdisjoint(that)
- @recursive_repr
- def __repr__(self):
- return 'SortedDict_items({0!r})'.format(list(self))
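
The SortedDict removed above remains importable from the OVS copy of
sortedcontainers; a brief sketch of the behavior its docstrings
describe:

    # Keys iterate in sorted order; popitem() takes the greatest key by
    # default, and peekitem() reads an item without removing it.
    from ovs.compat.sortedcontainers import SortedDict

    sd = SortedDict({'b': 2, 'c': 3, 'a': 1})
    print(list(sd))        # ['a', 'b', 'c']
    print(sd.popitem())    # ('c', 3)
    print(sd.peekitem(0))  # ('a', 1)
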
diff --git a/python/ovs/compat/sortedcontainers/sortedlist.py b/python/ovs/compat/sortedcontainers/sortedlist.py
deleted file mode 100644
index 8aec6bbac..000000000
--- a/python/ovs/compat/sortedcontainers/sortedlist.py
+++ /dev/null
@@ -1,2508 +0,0 @@
-"""Sorted list implementation.
-
-"""
-# pylint: disable=redefined-builtin, ungrouped-imports
-
-from __future__ import print_function
-
-from bisect import bisect_left, bisect_right, insort
-from collections import Sequence, MutableSequence
-from functools import wraps
-from itertools import chain, repeat, starmap
-from math import log as log_e
-import operator as op
-from operator import iadd, add
-from sys import hexversion
-
-if hexversion < 0x03000000:
- from itertools import izip as zip # pylint: disable=no-name-in-module
- from itertools import imap as map # pylint: disable=no-name-in-module
- try:
- from thread import get_ident
- except ImportError:
- from dummy_thread import get_ident
-else:
- from functools import reduce
- try:
- from _thread import get_ident
- except ImportError:
- from _dummy_thread import get_ident # pylint: disable=import-error
-
-LOAD = 1000
-
-def recursive_repr(func):
- """Decorator to prevent infinite repr recursion."""
- repr_running = set()
-
- @wraps(func)
- def wrapper(self):
- "Return ellipsis on recursive re-entry to function."
- key = id(self), get_ident()
-
- if key in repr_running:
- return '...'
-
- repr_running.add(key)
-
- try:
- return func(self)
- finally:
- repr_running.discard(key)
-
- return wrapper
-
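A minimal sketch of the failure mode this decorator prevents, using a
hypothetical Box class that ends up containing itself:

    # Hypothetical class; recursive_repr comes from the file above.
    from ovs.compat.sortedcontainers.sortedlist import recursive_repr

    class Box(object):
        def __init__(self):
            self.items = []

        @recursive_repr
        def __repr__(self):
            return 'Box({0!r})'.format(self.items)

    box = Box()
    box.items.append(box)    # the container now contains itself
    assert repr(box) == 'Box([...])'
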
-class SortedList(MutableSequence):
- """
- SortedList provides most of the same methods as a list but keeps the items
- in sorted order.
- """
- # pylint: disable=too-many-ancestors
- def __init__(self, iterable=None):
- """
- SortedList provides most of the same methods as a list but keeps the
- items in sorted order.
-
- An optional *iterable* provides an initial series of items to populate
- the SortedList.
- """
- self._len = 0
- self._lists = []
- self._maxes = []
- self._index = []
- self._load = LOAD
- self._half = LOAD >> 1
- self._dual = LOAD << 1
- self._offset = 0
-
- if iterable is not None:
- self._update(iterable)
-
- def __new__(cls, iterable=None, key=None):
- """
- SortedList provides most of the same methods as a list but keeps the
- items in sorted order.
-
- An optional *iterable* provides an initial series of items to populate
- the SortedList.
-
- An optional *key* argument will return an instance of subtype
- SortedListWithKey.
- """
- # pylint: disable=unused-argument
- if key is None:
- return object.__new__(cls)
- else:
- if cls is SortedList:
- return object.__new__(SortedListWithKey)
- else:
- raise TypeError('inherit SortedListWithKey for key argument')
-
- @property
- def key(self):
- """Key function used to extract comparison key for sorting."""
- return None
-
- def _reset(self, load):
- """
- Reset sorted list load.
-
- The *load* specifies the load-factor of the list. The default load
- factor of '1000' works well for lists from tens to tens of millions of
- elements. Good practice is to use a value that is the cube root of the
- list size. With billions of elements, the best load factor depends on
- your usage. It's best to leave the load factor at the default until
- you start benchmarking.
- """
- values = reduce(iadd, self._lists, [])
- self._clear()
- self._load = load
- self._half = load >> 1
- self._dual = load << 1
- self._update(values)
-
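Applying the cube-root guidance above with hypothetical sizes (_reset is an
internal API; the import path is the pre-patch one):

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    n = 1000 * 1000
    sl = SortedList(range(n))
    sl._reset(int(round(n ** (1.0 / 3.0))))   # cube root of 10**6 is ~100
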
- def clear(self):
- """Remove all the elements from the list."""
- self._len = 0
- del self._lists[:]
- del self._maxes[:]
- del self._index[:]
-
- _clear = clear
-
- def add(self, val):
- """Add the element *val* to the list."""
- _lists = self._lists
- _maxes = self._maxes
-
- if _maxes:
- pos = bisect_right(_maxes, val)
-
- if pos == len(_maxes):
- pos -= 1
- _lists[pos].append(val)
- _maxes[pos] = val
- else:
- insort(_lists[pos], val)
-
- self._expand(pos)
- else:
- _lists.append([val])
- _maxes.append(val)
-
- self._len += 1
-
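Basic usage, with hypothetical values and the pre-patch import path:

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    sl = SortedList([5, 1, 3])
    sl.add(2)
    assert list(sl) == [1, 2, 3, 5]
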
- def _expand(self, pos):
- """Splits sublists that are more than double the load level.
-
- Updates the index when the sublist length is less than double the load
- level. This requires incrementing the nodes in a traversal from the
- leaf node to the root. For an example traversal see self._loc.
-
- """
- _lists = self._lists
- _index = self._index
-
- if len(_lists[pos]) > self._dual:
- _maxes = self._maxes
- _load = self._load
-
- _lists_pos = _lists[pos]
- half = _lists_pos[_load:]
- del _lists_pos[_load:]
- _maxes[pos] = _lists_pos[-1]
-
- _lists.insert(pos + 1, half)
- _maxes.insert(pos + 1, half[-1])
-
- del _index[:]
- else:
- if _index:
- child = self._offset + pos
- while child:
- _index[child] += 1
- child = (child - 1) >> 1
- _index[0] += 1
-
- def update(self, iterable):
- """Update the list by adding all elements from *iterable*."""
- _lists = self._lists
- _maxes = self._maxes
- values = sorted(iterable)
-
- if _maxes:
- if len(values) * 4 >= self._len:
- values.extend(chain.from_iterable(_lists))
- values.sort()
- self._clear()
- else:
- _add = self.add
- for val in values:
- _add(val)
- return
-
- _load = self._load
- _lists.extend(values[pos:(pos + _load)]
- for pos in range(0, len(values), _load))
- _maxes.extend(sublist[-1] for sublist in _lists)
- self._len = len(values)
- del self._index[:]
-
- _update = update
-
- def __contains__(self, val):
- """Return True if and only if *val* is an element in the list."""
- _maxes = self._maxes
-
- if not _maxes:
- return False
-
- pos = bisect_left(_maxes, val)
-
- if pos == len(_maxes):
- return False
-
- _lists = self._lists
- idx = bisect_left(_lists[pos], val)
-
- return _lists[pos][idx] == val
-
- def discard(self, val):
- """
- Remove the first occurrence of *val*.
-
- If *val* is not a member, does nothing.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return
-
- pos = bisect_left(_maxes, val)
-
- if pos == len(_maxes):
- return
-
- _lists = self._lists
- idx = bisect_left(_lists[pos], val)
-
- if _lists[pos][idx] == val:
- self._delete(pos, idx)
-
- def remove(self, val):
- """
- Remove first occurrence of *val*.
-
- Raises ValueError if *val* is not present.
- """
- # pylint: disable=arguments-differ
- _maxes = self._maxes
-
- if not _maxes:
- raise ValueError('{0!r} not in list'.format(val))
-
- pos = bisect_left(_maxes, val)
-
- if pos == len(_maxes):
- raise ValueError('{0!r} not in list'.format(val))
-
- _lists = self._lists
- idx = bisect_left(_lists[pos], val)
-
- if _lists[pos][idx] == val:
- self._delete(pos, idx)
- else:
- raise ValueError('{0!r} not in list'.format(val))
-
- def _delete(self, pos, idx):
- """Delete the item at the given (pos, idx).
-
- Combines lists that are less than half the load level.
-
- Updates the index when the sublist length is more than half the load
- level. This requires decrementing the nodes in a traversal from the leaf
- node to the root. For an example traversal see self._loc.
- """
- _lists = self._lists
- _maxes = self._maxes
- _index = self._index
-
- _lists_pos = _lists[pos]
-
- del _lists_pos[idx]
- self._len -= 1
-
- len_lists_pos = len(_lists_pos)
-
- if len_lists_pos > self._half:
-
- _maxes[pos] = _lists_pos[-1]
-
- if _index:
- child = self._offset + pos
- while child > 0:
- _index[child] -= 1
- child = (child - 1) >> 1
- _index[0] -= 1
-
- elif len(_lists) > 1:
-
- if not pos:
- pos += 1
-
- prev = pos - 1
- _lists[prev].extend(_lists[pos])
- _maxes[prev] = _lists[prev][-1]
-
- del _lists[pos]
- del _maxes[pos]
- del _index[:]
-
- self._expand(prev)
-
- elif len_lists_pos:
-
- _maxes[pos] = _lists_pos[-1]
-
- else:
-
- del _lists[pos]
- del _maxes[pos]
- del _index[:]
-
- def _loc(self, pos, idx):
- """Convert an index pair (alpha, beta) into a single index that corresponds to
- the position of the value in the sorted list.
-
- Most queries require the index be built. Details of the index are
- described in self._build_index.
-
- Indexing requires traversing the tree from a leaf node to the root. The
- parent of each node is easily computable at (pos - 1) // 2.
-
- Left-child nodes are always at odd indices and right-child nodes are
- always at even indices.
-
- When traversing up from a right-child node, increment the total by the
- left-child node.
-
- The final index is the sum from traversal and the index in the sublist.
-
- For example, using the index from self._build_index:
-
- _index = 14 5 9 3 2 4 5
- _offset = 3
-
- Tree:
-
- 14
- 5 9
- 3 2 4 5
-
- Converting index pair (2, 3) into a single index involves iterating like
- so:
-
- 1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify
- the node as a left-child node. At such nodes, we simply traverse to
- the parent.
-
- 2. At node 9, position 2, we recognize the node as a right-child node
- and accumulate the left-child in our total. Total is now 5 and we
- traverse to the parent at position 0.
-
- 3. Iteration ends at the root.
-
- Computing the index is the sum of the total and beta: 5 + 3 = 8.
- """
- if not pos:
- return idx
-
- _index = self._index
-
- if not _index:
- self._build_index()
-
- total = 0
-
- # Increment pos to point in the index to len(self._lists[pos]).
-
- pos += self._offset
-
- # Iterate until reaching the root of the index tree at pos = 0.
-
- while pos:
-
-            # Right-child nodes are at even indices. At such indices
-            # account the total below the left child node.
-
- if not pos & 1:
- total += _index[pos - 1]
-
- # Advance pos to the parent node.
-
- pos = (pos - 1) >> 1
-
- return total + idx
-
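The (2, 3) -> 8 walk-through can be checked with a standalone restatement of
this loop over the example index:

    def loc(index, offset, pos, idx):
        """Standalone restatement of the traversal in _loc."""
        total = 0
        pos += offset
        while pos:
            if not pos & 1:               # right child: add the left sibling
                total += index[pos - 1]
            pos = (pos - 1) >> 1          # step up to the parent
        return total + idx

    assert loc([14, 5, 9, 3, 2, 4, 5], 3, 2, 3) == 8
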
- def _pos(self, idx):
- """Convert an index into a pair (alpha, beta) that can be used to access
- the corresponding _lists[alpha][beta] position.
-
- Most queries require the index be built. Details of the index are
- described in self._build_index.
-
- Indexing requires traversing the tree to a leaf node. Each node has
- two children which are easily computable. Given an index, pos, the
- left-child is at pos * 2 + 1 and the right-child is at pos * 2 + 2.
-
- When the index is less than the left-child, traversal moves to the
- left sub-tree. Otherwise, the index is decremented by the left-child
- and traversal moves to the right sub-tree.
-
- At a child node, the indexing pair is computed from the relative
- position of the child node as compared with the offset and the remaining
- index.
-
- For example, using the index from self._build_index:
-
- _index = 14 5 9 3 2 4 5
- _offset = 3
-
- Tree:
-
- 14
- 5 9
- 3 2 4 5
-
- Indexing position 8 involves iterating like so:
-
-        1. Starting at the root, position 0, 8 is compared with the left-child
-           node (5). Since 8 is not less than 5, the index is decremented by
-           the left-child value (8 - 5 = 3) and the position moves to the
-           right-child node.
-
-        2. At node 9 with index 3, we again compare the index to the left-child
-           node with value 4. Because the index is less than the left-child
-           node, we simply traverse to the left.
-
- 3. At node 4 with index 3, we recognize that we are at a leaf node and
- stop iterating.
-
- 4. To compute the sublist index, we subtract the offset from the index
- of the leaf node: 5 - 3 = 2. To compute the index in the sublist, we
- simply use the index remaining from iteration. In this case, 3.
-
- The final index pair from our example is (2, 3) which corresponds to
- index 8 in the sorted list.
- """
- if idx < 0:
- last_len = len(self._lists[-1])
-
- if (-idx) <= last_len:
- return len(self._lists) - 1, last_len + idx
-
- idx += self._len
-
- if idx < 0:
- raise IndexError('list index out of range')
- elif idx >= self._len:
- raise IndexError('list index out of range')
-
- if idx < len(self._lists[0]):
- return 0, idx
-
- _index = self._index
-
- if not _index:
- self._build_index()
-
- pos = 0
- child = 1
- len_index = len(_index)
-
- while child < len_index:
- index_child = _index[child]
-
- if idx < index_child:
- pos = child
- else:
- idx -= index_child
- pos = child + 1
-
- child = (pos << 1) + 1
-
- return (pos - self._offset, idx)
-
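And the inverse descent from the docstring, again restated over the example
index:

    def pos(index, offset, idx):
        """Standalone restatement of the descent in _pos."""
        node, child = 0, 1
        while child < len(index):
            if idx < index[child]:        # descend into the left subtree
                node = child
            else:                         # discount the left subtree, go right
                idx -= index[child]
                node = child + 1
            child = (node << 1) + 1
        return node - offset, idx

    assert pos([14, 5, 9, 3, 2, 4, 5], 3, 8) == (2, 3)
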
- def _build_index(self):
- """Build an index for indexing the sorted list.
-
- Indexes are represented as binary trees in a dense array notation
- similar to a binary heap.
-
- For example, given a _lists representation storing integers:
-
- [0]: 1 2 3
- [1]: 4 5
- [2]: 6 7 8 9
- [3]: 10 11 12 13 14
-
- The first transformation maps the sub-lists by their length. The
- first row of the index is the length of the sub-lists.
-
- [0]: 3 2 4 5
-
- Each row after that is the sum of consecutive pairs of the previous row:
-
- [1]: 5 9
- [2]: 14
-
- Finally, the index is built by concatenating these lists together:
-
- _index = 14 5 9 3 2 4 5
-
- An offset storing the start of the first row is also stored:
-
- _offset = 3
-
- When built, the index can be used for efficient indexing into the list.
- See the comment and notes on self._pos for details.
- """
- row0 = list(map(len, self._lists))
-
- if len(row0) == 1:
- self._index[:] = row0
- self._offset = 0
- return
-
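-        # head and tail alias a single iterator object, so zip(head, tail)
-        # pairs consecutive, non-overlapping items of row0.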
- head = iter(row0)
- tail = iter(head)
- row1 = list(starmap(add, zip(head, tail)))
-
- if len(row0) & 1:
- row1.append(row0[-1])
-
- if len(row1) == 1:
- self._index[:] = row1 + row0
- self._offset = 1
- return
-
- size = 2 ** (int(log_e(len(row1) - 1, 2)) + 1)
- row1.extend(repeat(0, size - len(row1)))
- tree = [row0, row1]
-
- while len(tree[-1]) > 1:
- head = iter(tree[-1])
- tail = iter(head)
- row = list(starmap(add, zip(head, tail)))
- tree.append(row)
-
- reduce(iadd, reversed(tree), self._index)
- self._offset = size * 2 - 1
-
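Rebuilding the docstring's example bottom-up:

    row0 = [3, 2, 4, 5]                # sublist lengths
    row1 = [3 + 2, 4 + 5]              # pairwise sums -> [5, 9]
    index = [5 + 9] + row1 + row0      # [14, 5, 9, 3, 2, 4, 5]
    offset = 1 + len(row1)             # row0 starts at index[3]
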
- def __delitem__(self, idx):
- """Remove the element at *idx*. Supports slicing."""
- if isinstance(idx, slice):
- start, stop, step = idx.indices(self._len)
-
- if step == 1 and start < stop:
- if start == 0 and stop == self._len:
- return self._clear()
- elif self._len <= 8 * (stop - start):
- values = self._getitem(slice(None, start))
- if stop < self._len:
- values += self._getitem(slice(stop, None))
- self._clear()
- return self._update(values)
-
- indices = range(start, stop, step)
-
- # Delete items from greatest index to least so
- # that the indices remain valid throughout iteration.
-
- if step > 0:
- indices = reversed(indices)
-
- _pos, _delete = self._pos, self._delete
-
- for index in indices:
- pos, idx = _pos(index)
- _delete(pos, idx)
- else:
- pos, idx = self._pos(idx)
- self._delete(pos, idx)
-
- _delitem = __delitem__
-
- def __getitem__(self, idx):
- """Return the element at *idx*. Supports slicing."""
- _lists = self._lists
-
- if isinstance(idx, slice):
- start, stop, step = idx.indices(self._len)
-
- if step == 1 and start < stop:
- if start == 0 and stop == self._len:
- return reduce(iadd, self._lists, [])
-
- start_pos, start_idx = self._pos(start)
-
- if stop == self._len:
- stop_pos = len(_lists) - 1
- stop_idx = len(_lists[stop_pos])
- else:
- stop_pos, stop_idx = self._pos(stop)
-
- if start_pos == stop_pos:
- return _lists[start_pos][start_idx:stop_idx]
-
- prefix = _lists[start_pos][start_idx:]
- middle = _lists[(start_pos + 1):stop_pos]
- result = reduce(iadd, middle, prefix)
- result += _lists[stop_pos][:stop_idx]
-
- return result
-
- if step == -1 and start > stop:
- result = self._getitem(slice(stop + 1, start + 1))
- result.reverse()
- return result
-
- # Return a list because a negative step could
- # reverse the order of the items and this could
- # be the desired behavior.
-
- indices = range(start, stop, step)
- return list(self._getitem(index) for index in indices)
- else:
- if self._len:
- if idx == 0:
- return _lists[0][0]
- elif idx == -1:
- return _lists[-1][-1]
- else:
- raise IndexError('list index out of range')
-
- if 0 <= idx < len(_lists[0]):
- return _lists[0][idx]
-
- len_last = len(_lists[-1])
-
- if -len_last < idx < 0:
- return _lists[-1][len_last + idx]
-
- pos, idx = self._pos(idx)
- return _lists[pos][idx]
-
- _getitem = __getitem__
-
- def _check_order(self, idx, val):
- _len = self._len
- _lists = self._lists
-
- pos, loc = self._pos(idx)
-
- if idx < 0:
- idx += _len
-
- # Check that the inserted value is not less than the
- # previous value.
-
- if idx > 0:
- idx_prev = loc - 1
- pos_prev = pos
-
- if idx_prev < 0:
- pos_prev -= 1
- idx_prev = len(_lists[pos_prev]) - 1
-
- if _lists[pos_prev][idx_prev] > val:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
-        # Check that the inserted value is not greater than
-        # the next value.
-
- if idx < (_len - 1):
- idx_next = loc + 1
- pos_next = pos
-
- if idx_next == len(_lists[pos_next]):
- pos_next += 1
- idx_next = 0
-
- if _lists[pos_next][idx_next] < val:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
- def __setitem__(self, index, value):
- """Replace item at position *index* with *value*.
-
- Supports slice notation. Raises :exc:`ValueError` if the sort order
- would be violated. When used with a slice and iterable, the
- :exc:`ValueError` is raised before the list is mutated if the sort
- order would be violated by the operation.
-
- """
- _lists = self._lists
- _maxes = self._maxes
- _check_order = self._check_order
- _pos = self._pos
-
- if isinstance(index, slice):
- _len = self._len
- start, stop, step = index.indices(_len)
- indices = range(start, stop, step)
-
- # Copy value to avoid aliasing issues with self and cases where an
- # iterator is given.
-
- values = tuple(value)
-
- if step != 1:
- if len(values) != len(indices):
- raise ValueError(
- 'attempt to assign sequence of size %s'
- ' to extended slice of size %s'
- % (len(values), len(indices)))
-
- # Keep a log of values that are set so that we can
- # roll back changes if ordering is violated.
-
- log = []
- _append = log.append
-
- for idx, val in zip(indices, values):
- pos, loc = _pos(idx)
- _append((idx, _lists[pos][loc], val))
- _lists[pos][loc] = val
- if len(_lists[pos]) == (loc + 1):
- _maxes[pos] = val
-
- try:
- # Validate ordering of new values.
-
- for idx, _, newval in log:
- _check_order(idx, newval)
-
- except ValueError:
-
- # Roll back changes from log.
-
- for idx, oldval, _ in log:
- pos, loc = _pos(idx)
- _lists[pos][loc] = oldval
- if len(_lists[pos]) == (loc + 1):
- _maxes[pos] = oldval
-
- raise
- else:
- if start == 0 and stop == _len:
- self._clear()
- return self._update(values)
-
- if stop < start:
- # When calculating indices, stop may be less than start.
- # For example: ...[5:3:1] results in slice(5, 3, 1) which
- # is a valid but not useful stop index.
- stop = start
-
- if values:
-
- # Check that given values are ordered properly.
-
- alphas = iter(values)
- betas = iter(values)
- next(betas)
- pairs = zip(alphas, betas)
-
- if not all(alpha <= beta for alpha, beta in pairs):
- raise ValueError('given values not in sort order')
-
- # Check ordering in context of sorted list.
-
- if start and self._getitem(start - 1) > values[0]:
- message = '{0!r} not in sort order at index {1}'.format(
- values[0], start)
- raise ValueError(message)
-
- if stop != _len and self._getitem(stop) < values[-1]:
- message = '{0!r} not in sort order at index {1}'.format(
- values[-1], stop)
- raise ValueError(message)
-
- # Delete the existing values.
-
- self._delitem(index)
-
- # Insert the new values.
-
- _insert = self.insert
- for idx, val in enumerate(values):
- _insert(start + idx, val)
- else:
- pos, loc = _pos(index)
- _check_order(index, value)
- _lists[pos][loc] = value
- if len(_lists[pos]) == (loc + 1):
- _maxes[pos] = value
-
- def __iter__(self):
- """
- Return an iterator over the Sequence.
-
- Iterating the Sequence while adding or deleting values may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return chain.from_iterable(self._lists)
-
- def __reversed__(self):
- """
- Return an iterator to traverse the Sequence in reverse.
-
- Iterating the Sequence while adding or deleting values may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return chain.from_iterable(map(reversed, reversed(self._lists)))
-
- def reverse(self):
- """Raise NotImplementedError
-
- SortedList maintains values in ascending sort order. Values may not be
- reversed in-place.
-
- Use ``reversed(sorted_list)`` for a reverse iterator over values in
- descending sort order.
-
- Implemented to override MutableSequence.reverse which provides an
- erroneous default implementation.
-
- """
- raise NotImplementedError('.reverse() not defined')
-
-    def islice(self, start=None, stop=None, reverse=False):
-        """
- Returns an iterator that slices `self` from `start` to `stop` index,
- inclusive and exclusive respectively.
-
- When `reverse` is `True`, values are yielded from the iterator in
- reverse order.
-
- Both `start` and `stop` default to `None` which is automatically
- inclusive of the beginning and end.
- """
- _len = self._len
-
- if not _len:
- return iter(())
-
- start, stop, _ = slice(start, stop).indices(self._len)
-
- if start >= stop:
- return iter(())
-
- _pos = self._pos
-
- min_pos, min_idx = _pos(start)
-
- if stop == _len:
- max_pos = len(self._lists) - 1
- max_idx = len(self._lists[-1])
- else:
- max_pos, max_idx = _pos(stop)
-
- return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
-
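A sketch with hypothetical values:

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    sl = SortedList('abcdefghij')
    assert list(sl.islice(2, 6)) == ['c', 'd', 'e', 'f']
    assert list(sl.islice(2, 6, reverse=True)) == ['f', 'e', 'd', 'c']
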
- def _islice(self, min_pos, min_idx, max_pos, max_idx, reverse):
- """
- Returns an iterator that slices `self` using two index pairs,
- `(min_pos, min_idx)` and `(max_pos, max_idx)`; the first inclusive
- and the latter exclusive. See `_pos` for details on how an index
- is converted to an index pair.
-
- When `reverse` is `True`, values are yielded from the iterator in
- reverse order.
- """
- _lists = self._lists
-
- if min_pos > max_pos:
- return iter(())
- elif min_pos == max_pos and not reverse:
- return iter(_lists[min_pos][min_idx:max_idx])
- elif min_pos == max_pos and reverse:
- return reversed(_lists[min_pos][min_idx:max_idx])
- elif min_pos + 1 == max_pos and not reverse:
- return chain(_lists[min_pos][min_idx:], _lists[max_pos][:max_idx])
- elif min_pos + 1 == max_pos and reverse:
- return chain(
- reversed(_lists[max_pos][:max_idx]),
- reversed(_lists[min_pos][min_idx:]),
- )
- elif not reverse:
- return chain(
- _lists[min_pos][min_idx:],
- chain.from_iterable(_lists[(min_pos + 1):max_pos]),
- _lists[max_pos][:max_idx],
- )
-
- temp = map(reversed, reversed(_lists[(min_pos + 1):max_pos]))
- return chain(
- reversed(_lists[max_pos][:max_idx]),
- chain.from_iterable(temp),
- reversed(_lists[min_pos][min_idx:]),
- )
-
- def irange(self, minimum=None, maximum=None, inclusive=(True, True),
- reverse=False):
- """
- Create an iterator of values between `minimum` and `maximum`.
-
- `inclusive` is a pair of booleans that indicates whether the minimum
- and maximum ought to be included in the range, respectively. The
- default is (True, True) such that the range is inclusive of both
- minimum and maximum.
-
- Both `minimum` and `maximum` default to `None` which is automatically
- inclusive of the start and end of the list, respectively.
-
- When `reverse` is `True` the values are yielded from the iterator in
- reverse order; `reverse` defaults to `False`.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return iter(())
-
- _lists = self._lists
-
- # Calculate the minimum (pos, idx) pair. By default this location
- # will be inclusive in our calculation.
-
- if minimum is None:
- min_pos = 0
- min_idx = 0
- else:
- if inclusive[0]:
- min_pos = bisect_left(_maxes, minimum)
-
- if min_pos == len(_maxes):
- return iter(())
-
- min_idx = bisect_left(_lists[min_pos], minimum)
- else:
- min_pos = bisect_right(_maxes, minimum)
-
- if min_pos == len(_maxes):
- return iter(())
-
- min_idx = bisect_right(_lists[min_pos], minimum)
-
- # Calculate the maximum (pos, idx) pair. By default this location
- # will be exclusive in our calculation.
-
- if maximum is None:
- max_pos = len(_maxes) - 1
- max_idx = len(_lists[max_pos])
- else:
- if inclusive[1]:
- max_pos = bisect_right(_maxes, maximum)
-
- if max_pos == len(_maxes):
- max_pos -= 1
- max_idx = len(_lists[max_pos])
- else:
- max_idx = bisect_right(_lists[max_pos], maximum)
- else:
- max_pos = bisect_left(_maxes, maximum)
-
- if max_pos == len(_maxes):
- max_pos -= 1
- max_idx = len(_lists[max_pos])
- else:
- max_idx = bisect_left(_lists[max_pos], maximum)
-
- return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
-
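A sketch with hypothetical values, exercising the inclusive bounds:

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    sl = SortedList([1, 2, 3, 4, 5])
    assert list(sl.irange(2, 4)) == [2, 3, 4]
    assert list(sl.irange(2, 4, inclusive=(False, True))) == [3, 4]
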
- def __len__(self):
- """Return the number of elements in the list."""
- return self._len
-
- def bisect_left(self, val):
- """
- Similar to the *bisect* module in the standard library, this returns an
- appropriate index to insert *val*. If *val* is already present, the
- insertion point will be before (to the left of) any existing entries.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- pos = bisect_left(_maxes, val)
-
- if pos == len(_maxes):
- return self._len
-
- idx = bisect_left(self._lists[pos], val)
-
- return self._loc(pos, idx)
-
- def bisect_right(self, val):
- """
- Same as *bisect_left*, but if *val* is already present, the insertion
- point will be after (to the right of) any existing entries.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- pos = bisect_right(_maxes, val)
-
- if pos == len(_maxes):
- return self._len
-
- idx = bisect_right(self._lists[pos], val)
-
- return self._loc(pos, idx)
-
- bisect = bisect_right
- _bisect_right = bisect_right
-
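A sketch of the two insertion points, with hypothetical values:

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    sl = SortedList([1, 2, 2, 2, 3])
    assert sl.bisect_left(2) == 1
    assert sl.bisect_right(2) == 4    # sl.bisect(2) is an alias for this
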
- def count(self, val):
- """Return the number of occurrences of *val* in the list."""
- # pylint: disable=arguments-differ
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- pos_left = bisect_left(_maxes, val)
-
- if pos_left == len(_maxes):
- return 0
-
- _lists = self._lists
- idx_left = bisect_left(_lists[pos_left], val)
- pos_right = bisect_right(_maxes, val)
-
- if pos_right == len(_maxes):
- return self._len - self._loc(pos_left, idx_left)
-
- idx_right = bisect_right(_lists[pos_right], val)
-
- if pos_left == pos_right:
- return idx_right - idx_left
-
- right = self._loc(pos_right, idx_right)
- left = self._loc(pos_left, idx_left)
-
- return right - left
-
- def copy(self):
- """Return a shallow copy of the sorted list."""
- return self.__class__(self)
-
- __copy__ = copy
-
- def append(self, val):
- """
- Append the element *val* to the list. Raises a ValueError if the *val*
- would violate the sort order.
- """
- # pylint: disable=arguments-differ
- _lists = self._lists
- _maxes = self._maxes
-
- if not _maxes:
- _maxes.append(val)
- _lists.append([val])
- self._len = 1
- return
-
- pos = len(_lists) - 1
-
- if val < _lists[pos][-1]:
- msg = '{0!r} not in sort order at index {1}'.format(val, self._len)
- raise ValueError(msg)
-
- _maxes[pos] = val
- _lists[pos].append(val)
- self._len += 1
- self._expand(pos)
-
- def extend(self, values):
- """
- Extend the list by appending all elements from the *values*. Raises a
- ValueError if the sort order would be violated.
- """
- _lists = self._lists
- _maxes = self._maxes
- _load = self._load
-
- if not isinstance(values, list):
- values = list(values)
-
- if not values:
- return
-
- if any(values[pos - 1] > values[pos]
- for pos in range(1, len(values))):
- raise ValueError('given sequence not in sort order')
-
- offset = 0
-
- if _maxes:
- if values[0] < _lists[-1][-1]:
- msg = '{0!r} not in sort order at index {1}'.format(values[0], self._len)
- raise ValueError(msg)
-
- if len(_lists[-1]) < self._half:
- _lists[-1].extend(values[:_load])
- _maxes[-1] = _lists[-1][-1]
- offset = _load
-
- len_lists = len(_lists)
-
- for idx in range(offset, len(values), _load):
- _lists.append(values[idx:(idx + _load)])
- _maxes.append(_lists[-1][-1])
-
- _index = self._index
-
- if len_lists == len(_lists):
- len_index = len(_index)
- if len_index > 0:
- len_values = len(values)
- child = len_index - 1
- while child:
- _index[child] += len_values
- child = (child - 1) >> 1
- _index[0] += len_values
- else:
- del _index[:]
-
- self._len += len(values)
-
- def insert(self, idx, val):
- """
- Insert the element *val* into the list at *idx*. Raises a ValueError if
- the *val* at *idx* would violate the sort order.
- """
- # pylint: disable=arguments-differ
- _len = self._len
- _lists = self._lists
- _maxes = self._maxes
-
- if idx < 0:
- idx += _len
- if idx < 0:
- idx = 0
- if idx > _len:
- idx = _len
-
- if not _maxes:
- # The idx must be zero by the inequalities above.
- _maxes.append(val)
- _lists.append([val])
- self._len = 1
- return
-
- if not idx:
- if val > _lists[0][0]:
- msg = '{0!r} not in sort order at index {1}'.format(val, 0)
- raise ValueError(msg)
- else:
- _lists[0].insert(0, val)
- self._expand(0)
- self._len += 1
- return
-
- if idx == _len:
- pos = len(_lists) - 1
- if _lists[pos][-1] > val:
- msg = '{0!r} not in sort order at index {1}'.format(val, _len)
- raise ValueError(msg)
- else:
- _lists[pos].append(val)
- _maxes[pos] = _lists[pos][-1]
- self._expand(pos)
- self._len += 1
- return
-
- pos, idx = self._pos(idx)
- idx_before = idx - 1
- if idx_before < 0:
- pos_before = pos - 1
- idx_before = len(_lists[pos_before]) - 1
- else:
- pos_before = pos
-
- before = _lists[pos_before][idx_before]
- if before <= val <= _lists[pos][idx]:
- _lists[pos].insert(idx, val)
- self._expand(pos)
- self._len += 1
- else:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
- def pop(self, idx=-1):
- """
- Remove and return item at *idx* (default last). Raises IndexError if
- list is empty or index is out of range. Negative indices are supported,
- as for slice indices.
- """
- # pylint: disable=arguments-differ
- if not self._len:
- raise IndexError('pop index out of range')
-
- _lists = self._lists
-
- if idx == 0:
- val = _lists[0][0]
- self._delete(0, 0)
- return val
-
- if idx == -1:
- pos = len(_lists) - 1
- loc = len(_lists[pos]) - 1
- val = _lists[pos][loc]
- self._delete(pos, loc)
- return val
-
- if 0 <= idx < len(_lists[0]):
- val = _lists[0][idx]
- self._delete(0, idx)
- return val
-
- len_last = len(_lists[-1])
-
- if -len_last < idx < 0:
- pos = len(_lists) - 1
- loc = len_last + idx
- val = _lists[pos][loc]
- self._delete(pos, loc)
- return val
-
- pos, idx = self._pos(idx)
- val = _lists[pos][idx]
- self._delete(pos, idx)
-
- return val
-
- def index(self, val, start=None, stop=None):
- """
-        Return the smallest *k* such that `L[k] == val` and `start <= k <
-        stop`. Raises ValueError if *val* is not present. *stop* defaults to
-        the end of the list. *start* defaults to the beginning. Negative
-        indices are supported, as for slice indices.
- """
- # pylint: disable=arguments-differ
- _len = self._len
-
- if not _len:
- raise ValueError('{0!r} is not in list'.format(val))
-
- if start is None:
- start = 0
- if start < 0:
- start += _len
- if start < 0:
- start = 0
-
- if stop is None:
- stop = _len
- if stop < 0:
- stop += _len
- if stop > _len:
- stop = _len
-
- if stop <= start:
- raise ValueError('{0!r} is not in list'.format(val))
-
- _maxes = self._maxes
- pos_left = bisect_left(_maxes, val)
-
- if pos_left == len(_maxes):
- raise ValueError('{0!r} is not in list'.format(val))
-
- _lists = self._lists
- idx_left = bisect_left(_lists[pos_left], val)
-
- if _lists[pos_left][idx_left] != val:
- raise ValueError('{0!r} is not in list'.format(val))
-
- stop -= 1
- left = self._loc(pos_left, idx_left)
-
- if start <= left:
- if left <= stop:
- return left
- else:
- right = self._bisect_right(val) - 1
-
- if start <= right:
- return start
-
- raise ValueError('{0!r} is not in list'.format(val))
-
- def __add__(self, that):
- """
- Return a new sorted list containing all the elements in *self* and
- *that*. Elements in *that* do not need to be properly ordered with
- respect to *self*.
- """
- values = reduce(iadd, self._lists, [])
- values.extend(that)
- return self.__class__(values)
-
- def __iadd__(self, that):
- """
- Update *self* to include all values in *that*. Elements in *that* do not
- need to be properly ordered with respect to *self*.
- """
- self._update(that)
- return self
-
- def __mul__(self, that):
- """
- Return a new sorted list containing *that* shallow copies of each item
- in SortedList.
- """
- values = reduce(iadd, self._lists, []) * that
- return self.__class__(values)
-
- def __imul__(self, that):
- """
- Increase the length of the list by appending *that* shallow copies of
- each item.
- """
- values = reduce(iadd, self._lists, []) * that
- self._clear()
- self._update(values)
- return self
-
- def _make_cmp(self, seq_op, doc):
- "Make comparator method."
- def comparer(self, that):
- "Compare method for sorted list and sequence."
- # pylint: disable=protected-access
- if not isinstance(that, Sequence):
- return NotImplemented
-
- self_len = self._len
- len_that = len(that)
-
- if self_len != len_that:
- if seq_op is op.eq:
- return False
- if seq_op is op.ne:
- return True
-
- for alpha, beta in zip(self, that):
- if alpha != beta:
- return seq_op(alpha, beta)
-
- return seq_op(self_len, len_that)
-
- comparer.__name__ = '__{0}__'.format(seq_op.__name__)
- doc_str = 'Return `True` if and only if Sequence is {0} `that`.'
- comparer.__doc__ = doc_str.format(doc)
-
- return comparer
-
- __eq__ = _make_cmp(None, op.eq, 'equal to')
- __ne__ = _make_cmp(None, op.ne, 'not equal to')
- __lt__ = _make_cmp(None, op.lt, 'less than')
- __gt__ = _make_cmp(None, op.gt, 'greater than')
- __le__ = _make_cmp(None, op.le, 'less than or equal to')
- __ge__ = _make_cmp(None, op.ge, 'greater than or equal to')
-
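The generated comparators are lexicographic against any Sequence; hypothetical
checks:

    from ovs.compat.sortedcontainers.sortedlist import SortedList

    assert SortedList([1, 2, 3]) == [1, 2, 3]
    assert SortedList([1, 2]) < [1, 2, 3]    # equal prefix: shorter is less
    assert SortedList([1, 4]) > [1, 2, 3]    # first differing element decides
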
- @recursive_repr
- def __repr__(self):
- """Return string representation of sequence."""
- return '{0}({1!r})'.format(type(self).__name__, list(self))
-
- def _check(self):
- try:
- # Check load parameters.
-
- assert self._load >= 4
- assert self._half == (self._load >> 1)
- assert self._dual == (self._load << 1)
-
- # Check empty sorted list case.
-
- if self._maxes == []:
- assert self._lists == []
- return
-
- assert self._maxes and self._lists
-
- # Check all sublists are sorted.
-
- assert all(sublist[pos - 1] <= sublist[pos]
- for sublist in self._lists
- for pos in range(1, len(sublist)))
-
- # Check beginning/end of sublists are sorted.
-
- for pos in range(1, len(self._lists)):
- assert self._lists[pos - 1][-1] <= self._lists[pos][0]
-
- # Check length of _maxes and _lists match.
-
- assert len(self._maxes) == len(self._lists)
-
- # Check _maxes is a map of _lists.
-
- assert all(self._maxes[pos] == self._lists[pos][-1]
- for pos in range(len(self._maxes)))
-
- # Check load level is less than _dual.
-
- assert all(len(sublist) <= self._dual for sublist in self._lists)
-
- # Check load level is greater than _half for all
- # but the last sublist.
-
- assert all(len(self._lists[pos]) >= self._half
- for pos in range(0, len(self._lists) - 1))
-
- # Check length.
-
- assert self._len == sum(len(sublist) for sublist in self._lists)
-
- # Check index.
-
- if self._index:
- assert len(self._index) == self._offset + len(self._lists)
- assert self._len == self._index[0]
-
- def test_offset_pos(pos):
- "Test positional indexing offset."
- from_index = self._index[self._offset + pos]
- return from_index == len(self._lists[pos])
-
- assert all(test_offset_pos(pos)
- for pos in range(len(self._lists)))
-
- for pos in range(self._offset):
- child = (pos << 1) + 1
- if child >= len(self._index):
- assert self._index[pos] == 0
- elif child + 1 == len(self._index):
- assert self._index[pos] == self._index[child]
- else:
- child_sum = self._index[child] + self._index[child + 1]
- assert self._index[pos] == child_sum
-
- except:
- import sys
- import traceback
-
- traceback.print_exc(file=sys.stdout)
-
- print('len', self._len)
- print('load', self._load, self._half, self._dual)
- print('offset', self._offset)
- print('len_index', len(self._index))
- print('index', self._index)
- print('len_maxes', len(self._maxes))
- print('maxes', self._maxes)
- print('len_lists', len(self._lists))
- print('lists', self._lists)
-
- raise
-
-def identity(value):
- "Identity function."
- return value
-
-class SortedListWithKey(SortedList):
- """
- SortedListWithKey provides most of the same methods as a list but keeps
- the items in sorted order.
- """
- # pylint: disable=too-many-ancestors,abstract-method
- def __init__(self, iterable=None, key=identity):
- """SortedListWithKey provides most of the same methods as list but keeps the
- items in sorted order.
-
- An optional *iterable* provides an initial series of items to populate
- the SortedListWithKey.
-
- An optional *key* argument defines a callable that, like the `key`
- argument to Python's `sorted` function, extracts a comparison key from
- each element. The default is the identity function.
- """
- # pylint: disable=super-init-not-called
- self._len = 0
- self._lists = []
- self._keys = []
- self._maxes = []
- self._index = []
- self._key = key
- self._load = LOAD
- self._half = LOAD >> 1
- self._dual = LOAD << 1
- self._offset = 0
-
- if iterable is not None:
- self._update(iterable)
-
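A sketch with a hypothetical key function:

    from ovs.compat.sortedcontainers.sortedlist import SortedListWithKey

    swk = SortedListWithKey([3, -4, 1, -2], key=abs)
    assert list(swk) == [1, -2, 3, -4]    # ordered by abs(value)
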
- def __new__(cls, iterable=None, key=identity):
- return object.__new__(cls)
-
- @property
- def key(self):
- """Key function used to extract comparison key for sorting."""
- return self._key
-
- def clear(self):
- """Remove all the elements from the list."""
- self._len = 0
- del self._lists[:]
- del self._keys[:]
- del self._maxes[:]
- del self._index[:]
-
- _clear = clear
-
- def add(self, val):
- """Add the element *val* to the list."""
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
-
- key = self._key(val)
-
- if _maxes:
- pos = bisect_right(_maxes, key)
-
- if pos == len(_maxes):
- pos -= 1
- _lists[pos].append(val)
- _keys[pos].append(key)
- _maxes[pos] = key
- else:
- idx = bisect_right(_keys[pos], key)
- _lists[pos].insert(idx, val)
- _keys[pos].insert(idx, key)
-
- self._expand(pos)
- else:
- _lists.append([val])
- _keys.append([key])
- _maxes.append(key)
-
- self._len += 1
-
- def _expand(self, pos):
- """Splits sublists that are more than double the load level.
-
- Updates the index when the sublist length is less than double the load
- level. This requires incrementing the nodes in a traversal from the
- leaf node to the root. For an example traversal see self._loc.
-
- """
- _lists = self._lists
- _keys = self._keys
- _index = self._index
-
- if len(_keys[pos]) > self._dual:
- _maxes = self._maxes
- _load = self._load
-
- _lists_pos = _lists[pos]
- _keys_pos = _keys[pos]
- half = _lists_pos[_load:]
- half_keys = _keys_pos[_load:]
- del _lists_pos[_load:]
- del _keys_pos[_load:]
- _maxes[pos] = _keys_pos[-1]
-
- _lists.insert(pos + 1, half)
- _keys.insert(pos + 1, half_keys)
- _maxes.insert(pos + 1, half_keys[-1])
-
- del _index[:]
- else:
- if _index:
- child = self._offset + pos
- while child:
- _index[child] += 1
- child = (child - 1) >> 1
- _index[0] += 1
-
- def update(self, iterable):
- """Update the list by adding all elements from *iterable*."""
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
- values = sorted(iterable, key=self._key)
-
- if _maxes:
- if len(values) * 4 >= self._len:
- values.extend(chain.from_iterable(_lists))
- values.sort(key=self._key)
- self._clear()
- else:
- _add = self.add
- for val in values:
- _add(val)
- return
-
- _load = self._load
- _lists.extend(values[pos:(pos + _load)]
- for pos in range(0, len(values), _load))
- _keys.extend(list(map(self._key, _list)) for _list in _lists)
- _maxes.extend(sublist[-1] for sublist in _keys)
- self._len = len(values)
- del self._index[:]
-
- _update = update
-
- def __contains__(self, val):
- """Return True if and only if *val* is an element in the list."""
- _maxes = self._maxes
-
- if not _maxes:
- return False
-
- key = self._key(val)
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- return False
-
- _lists = self._lists
- _keys = self._keys
-
- idx = bisect_left(_keys[pos], key)
-
- len_keys = len(_keys)
- len_sublist = len(_keys[pos])
-
- while True:
- if _keys[pos][idx] != key:
- return False
- if _lists[pos][idx] == val:
- return True
- idx += 1
- if idx == len_sublist:
- pos += 1
- if pos == len_keys:
- return False
- len_sublist = len(_keys[pos])
- idx = 0
-
- def discard(self, val):
- """
- Remove the first occurrence of *val*.
-
- If *val* is not a member, does nothing.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return
-
- key = self._key(val)
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- return
-
- _lists = self._lists
- _keys = self._keys
- idx = bisect_left(_keys[pos], key)
- len_keys = len(_keys)
- len_sublist = len(_keys[pos])
-
- while True:
- if _keys[pos][idx] != key:
- return
- if _lists[pos][idx] == val:
- self._delete(pos, idx)
- return
- idx += 1
- if idx == len_sublist:
- pos += 1
- if pos == len_keys:
- return
- len_sublist = len(_keys[pos])
- idx = 0
-
- def remove(self, val):
- """
- Remove first occurrence of *val*.
-
- Raises ValueError if *val* is not present.
- """
- _maxes = self._maxes
-
- if not _maxes:
- raise ValueError('{0!r} not in list'.format(val))
-
- key = self._key(val)
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- raise ValueError('{0!r} not in list'.format(val))
-
- _lists = self._lists
- _keys = self._keys
- idx = bisect_left(_keys[pos], key)
- len_keys = len(_keys)
- len_sublist = len(_keys[pos])
-
- while True:
- if _keys[pos][idx] != key:
- raise ValueError('{0!r} not in list'.format(val))
- if _lists[pos][idx] == val:
- self._delete(pos, idx)
- return
- idx += 1
- if idx == len_sublist:
- pos += 1
- if pos == len_keys:
- raise ValueError('{0!r} not in list'.format(val))
- len_sublist = len(_keys[pos])
- idx = 0
-
- def _delete(self, pos, idx):
- """
- Delete the item at the given (pos, idx).
-
- Combines lists that are less than half the load level.
-
- Updates the index when the sublist length is more than half the load
- level. This requires decrementing the nodes in a traversal from the leaf
- node to the root. For an example traversal see self._loc.
- """
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
- _index = self._index
- keys_pos = _keys[pos]
- lists_pos = _lists[pos]
-
- del keys_pos[idx]
- del lists_pos[idx]
- self._len -= 1
-
- len_keys_pos = len(keys_pos)
-
- if len_keys_pos > self._half:
-
- _maxes[pos] = keys_pos[-1]
-
- if _index:
- child = self._offset + pos
- while child > 0:
- _index[child] -= 1
- child = (child - 1) >> 1
- _index[0] -= 1
-
- elif len(_keys) > 1:
-
- if not pos:
- pos += 1
-
- prev = pos - 1
- _keys[prev].extend(_keys[pos])
- _lists[prev].extend(_lists[pos])
- _maxes[prev] = _keys[prev][-1]
-
- del _lists[pos]
- del _keys[pos]
- del _maxes[pos]
- del _index[:]
-
- self._expand(prev)
-
- elif len_keys_pos:
-
- _maxes[pos] = keys_pos[-1]
-
- else:
-
- del _lists[pos]
- del _keys[pos]
- del _maxes[pos]
- del _index[:]
-
- def _check_order(self, idx, key, val):
- # pylint: disable=arguments-differ
- _len = self._len
- _keys = self._keys
-
- pos, loc = self._pos(idx)
-
- if idx < 0:
- idx += _len
-
- # Check that the inserted value is not less than the
- # previous value.
-
- if idx > 0:
- idx_prev = loc - 1
- pos_prev = pos
-
- if idx_prev < 0:
- pos_prev -= 1
- idx_prev = len(_keys[pos_prev]) - 1
-
- if _keys[pos_prev][idx_prev] > key:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
-        # Check that the inserted value is not greater than
-        # the next value.
-
- if idx < (_len - 1):
- idx_next = loc + 1
- pos_next = pos
-
- if idx_next == len(_keys[pos_next]):
- pos_next += 1
- idx_next = 0
-
- if _keys[pos_next][idx_next] < key:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
- def __setitem__(self, index, value):
- """Replace the item at position *index* with *value*.
-
- Supports slice notation. Raises a :exc:`ValueError` if the sort order
- would be violated. When used with a slice and iterable, the
- :exc:`ValueError` is raised before the list is mutated if the sort
- order would be violated by the operation.
-
- """
- # pylint: disable=too-many-locals
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
- _check_order = self._check_order
- _pos = self._pos
-
- if isinstance(index, slice):
- _len = self._len
- start, stop, step = index.indices(_len)
- indices = range(start, stop, step)
-
- # Copy value to avoid aliasing issues with self and cases where an
- # iterator is given.
-
- values = tuple(value)
-
- if step != 1:
- if len(values) != len(indices):
- raise ValueError(
- 'attempt to assign sequence of size %s'
- ' to extended slice of size %s'
- % (len(values), len(indices)))
-
- # Keep a log of values that are set so that we can
- # roll back changes if ordering is violated.
-
- log = []
- _append = log.append
-
- for idx, val in zip(indices, values):
- pos, loc = _pos(idx)
- key = self._key(val)
- _append((idx, _keys[pos][loc], key, _lists[pos][loc], val))
- _keys[pos][loc] = key
- _lists[pos][loc] = val
- if len(_keys[pos]) == (loc + 1):
- _maxes[pos] = key
-
- try:
- # Validate ordering of new values.
-
- for idx, oldkey, newkey, oldval, newval in log:
- _check_order(idx, newkey, newval)
-
- except ValueError:
-
- # Roll back changes from log.
-
- for idx, oldkey, newkey, oldval, newval in log:
- pos, loc = _pos(idx)
- _keys[pos][loc] = oldkey
- _lists[pos][loc] = oldval
- if len(_keys[pos]) == (loc + 1):
- _maxes[pos] = oldkey
-
- raise
- else:
- if start == 0 and stop == self._len:
- self._clear()
- return self._update(values)
-
- if stop < start:
- # When calculating indices, stop may be less than start.
- # For example: ...[5:3:1] results in slice(5, 3, 1) which
- # is a valid but not useful stop index.
- stop = start
-
- if values:
-
- # Check that given values are ordered properly.
-
- keys = tuple(map(self._key, values))
- alphas = iter(keys)
- betas = iter(keys)
- next(betas)
- pairs = zip(alphas, betas)
-
- if not all(alpha <= beta for alpha, beta in pairs):
- raise ValueError('given values not in sort order')
-
- # Check ordering in context of sorted list.
-
- if start:
- pos, loc = _pos(start - 1)
- if _keys[pos][loc] > keys[0]:
- msg = '{0!r} not in sort order at index {1}'.format(
- values[0], start)
- raise ValueError(msg)
-
- if stop != _len:
- pos, loc = _pos(stop)
- if _keys[pos][loc] < keys[-1]:
- msg = '{0!r} not in sort order at index {1}'.format(
- values[-1], stop)
- raise ValueError(msg)
-
- # Delete the existing values.
-
- self._delitem(index)
-
- # Insert the new values.
-
- _insert = self.insert
- for idx, val in enumerate(values):
- _insert(start + idx, val)
- else:
- pos, loc = _pos(index)
- key = self._key(value)
- _check_order(index, key, value)
- _lists[pos][loc] = value
- _keys[pos][loc] = key
- if len(_lists[pos]) == (loc + 1):
- _maxes[pos] = key
-
- def irange(self, minimum=None, maximum=None, inclusive=(True, True),
- reverse=False):
- """
- Create an iterator of values between `minimum` and `maximum`.
-
- `inclusive` is a pair of booleans that indicates whether the minimum
- and maximum ought to be included in the range, respectively. The
- default is (True, True) such that the range is inclusive of both
- minimum and maximum.
-
- Both `minimum` and `maximum` default to `None` which is automatically
- inclusive of the start and end of the list, respectively.
-
- When `reverse` is `True` the values are yielded from the iterator in
- reverse order; `reverse` defaults to `False`.
- """
- minimum = self._key(minimum) if minimum is not None else None
- maximum = self._key(maximum) if maximum is not None else None
- return self._irange_key(
- min_key=minimum, max_key=maximum,
- inclusive=inclusive, reverse=reverse,
- )
-
- def irange_key(self, min_key=None, max_key=None, inclusive=(True, True),
- reverse=False):
- """
- Create an iterator of values between `min_key` and `max_key`.
-
- `inclusive` is a pair of booleans that indicates whether the min_key
- and max_key ought to be included in the range, respectively. The
- default is (True, True) such that the range is inclusive of both
- `min_key` and `max_key`.
-
- Both `min_key` and `max_key` default to `None` which is automatically
- inclusive of the start and end of the list, respectively.
-
- When `reverse` is `True` the values are yielded from the iterator in
- reverse order; `reverse` defaults to `False`.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return iter(())
-
- _keys = self._keys
-
- # Calculate the minimum (pos, idx) pair. By default this location
- # will be inclusive in our calculation.
-
- if min_key is None:
- min_pos = 0
- min_idx = 0
- else:
- if inclusive[0]:
- min_pos = bisect_left(_maxes, min_key)
-
- if min_pos == len(_maxes):
- return iter(())
-
- min_idx = bisect_left(_keys[min_pos], min_key)
- else:
- min_pos = bisect_right(_maxes, min_key)
-
- if min_pos == len(_maxes):
- return iter(())
-
- min_idx = bisect_right(_keys[min_pos], min_key)
-
- # Calculate the maximum (pos, idx) pair. By default this location
- # will be exclusive in our calculation.
-
- if max_key is None:
- max_pos = len(_maxes) - 1
- max_idx = len(_keys[max_pos])
- else:
- if inclusive[1]:
- max_pos = bisect_right(_maxes, max_key)
-
- if max_pos == len(_maxes):
- max_pos -= 1
- max_idx = len(_keys[max_pos])
- else:
- max_idx = bisect_right(_keys[max_pos], max_key)
- else:
- max_pos = bisect_left(_maxes, max_key)
-
- if max_pos == len(_maxes):
- max_pos -= 1
- max_idx = len(_keys[max_pos])
- else:
- max_idx = bisect_left(_keys[max_pos], max_key)
-
- return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
-
- _irange_key = irange_key
-
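A sketch with hypothetical values (list ordered by abs):

    from ovs.compat.sortedcontainers.sortedlist import SortedListWithKey

    swk = SortedListWithKey([3, -4, 1, -2], key=abs)
    assert list(swk.irange_key(2, 3)) == [-2, 3]    # 2 <= abs(v) <= 3
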
- def bisect_left(self, val):
- """
- Similar to the *bisect* module in the standard library, this returns an
- appropriate index to insert *val*. If *val* is already present, the
- insertion point will be before (to the left of) any existing entries.
- """
- return self._bisect_key_left(self._key(val))
-
- def bisect_right(self, val):
- """
- Same as *bisect_left*, but if *val* is already present, the insertion
- point will be after (to the right of) any existing entries.
- """
- return self._bisect_key_right(self._key(val))
-
- bisect = bisect_right
-
- def bisect_key_left(self, key):
- """
- Similar to the *bisect* module in the standard library, this returns an
- appropriate index to insert a value with a given *key*. If values with
- *key* are already present, the insertion point will be before (to the
- left of) any existing entries.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- return self._len
-
- idx = bisect_left(self._keys[pos], key)
-
- return self._loc(pos, idx)
-
- _bisect_key_left = bisect_key_left
-
- def bisect_key_right(self, key):
- """
- Same as *bisect_key_left*, but if *key* is already present, the insertion
- point will be after (to the right of) any existing entries.
- """
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- pos = bisect_right(_maxes, key)
-
- if pos == len(_maxes):
- return self._len
-
- idx = bisect_right(self._keys[pos], key)
-
- return self._loc(pos, idx)
-
- bisect_key = bisect_key_right
- _bisect_key_right = bisect_key_right
-
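A sketch of bisecting by key rather than by value (hypothetical values):

    from ovs.compat.sortedcontainers.sortedlist import SortedListWithKey

    swk = SortedListWithKey([3, -4, 1, -2], key=abs)
    assert swk.bisect_key_left(2) == 1
    assert swk.bisect_key_right(2) == 2
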
- def count(self, val):
- """Return the number of occurrences of *val* in the list."""
- _maxes = self._maxes
-
- if not _maxes:
- return 0
-
- key = self._key(val)
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- return 0
-
- _lists = self._lists
- _keys = self._keys
- idx = bisect_left(_keys[pos], key)
- total = 0
- len_keys = len(_keys)
- len_sublist = len(_keys[pos])
-
- while True:
- if _keys[pos][idx] != key:
- return total
- if _lists[pos][idx] == val:
- total += 1
- idx += 1
- if idx == len_sublist:
- pos += 1
- if pos == len_keys:
- return total
- len_sublist = len(_keys[pos])
- idx = 0
-
- def copy(self):
- """Return a shallow copy of the sorted list."""
- return self.__class__(self, key=self._key)
-
- __copy__ = copy
-
- def append(self, val):
- """
- Append the element *val* to the list. Raises a ValueError if the *val*
- would violate the sort order.
- """
- # pylint: disable=arguments-differ
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
- key = self._key(val)
-
- if not _maxes:
- _maxes.append(key)
- _keys.append([key])
- _lists.append([val])
- self._len = 1
- return
-
- pos = len(_keys) - 1
-
- if key < _keys[pos][-1]:
- msg = '{0!r} not in sort order at index {1}'.format(val, self._len)
- raise ValueError(msg)
-
- _lists[pos].append(val)
- _keys[pos].append(key)
- _maxes[pos] = key
- self._len += 1
- self._expand(pos)
-
- def extend(self, values):
- """
- Extend the list by appending all elements from the *values*. Raises a
- ValueError if the sort order would be violated.
- """
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
- _load = self._load
-
- if not isinstance(values, list):
-            values = list(values)
-
-        # Guard against empty input, as SortedList.extend does; keys[0]
-        # below assumes at least one value.
-        if not values:
-            return
-
-        keys = list(map(self._key, values))
-
- if any(keys[pos - 1] > keys[pos]
- for pos in range(1, len(keys))):
- raise ValueError('given sequence not in sort order')
-
- offset = 0
-
- if _maxes:
- if keys[0] < _keys[-1][-1]:
- msg = '{0!r} not in sort order at index {1}'.format(values[0], self._len)
- raise ValueError(msg)
-
- if len(_keys[-1]) < self._half:
- _lists[-1].extend(values[:_load])
- _keys[-1].extend(keys[:_load])
- _maxes[-1] = _keys[-1][-1]
- offset = _load
-
- len_keys = len(_keys)
-
- for idx in range(offset, len(keys), _load):
- _lists.append(values[idx:(idx + _load)])
- _keys.append(keys[idx:(idx + _load)])
- _maxes.append(_keys[-1][-1])
-
- _index = self._index
-
- if len_keys == len(_keys):
- len_index = len(_index)
- if len_index > 0:
- len_values = len(values)
- child = len_index - 1
- while child:
- _index[child] += len_values
- child = (child - 1) >> 1
- _index[0] += len_values
- else:
- del _index[:]
-
- self._len += len(values)
-
- def insert(self, idx, val):
- """
- Insert the element *val* into the list at *idx*. Raises a ValueError if
- the *val* at *idx* would violate the sort order.
- """
- _len = self._len
- _lists = self._lists
- _keys = self._keys
- _maxes = self._maxes
-
- if idx < 0:
- idx += _len
- if idx < 0:
- idx = 0
- if idx > _len:
- idx = _len
-
- key = self._key(val)
-
- if not _maxes:
- self._len = 1
- _lists.append([val])
- _keys.append([key])
- _maxes.append(key)
- return
-
- if not idx:
- if key > _keys[0][0]:
- msg = '{0!r} not in sort order at index {1}'.format(val, 0)
- raise ValueError(msg)
- else:
- self._len += 1
- _lists[0].insert(0, val)
- _keys[0].insert(0, key)
- self._expand(0)
- return
-
- if idx == _len:
- pos = len(_keys) - 1
- if _keys[pos][-1] > key:
- msg = '{0!r} not in sort order at index {1}'.format(val, _len)
- raise ValueError(msg)
- else:
- self._len += 1
- _lists[pos].append(val)
- _keys[pos].append(key)
- _maxes[pos] = _keys[pos][-1]
- self._expand(pos)
- return
-
- pos, idx = self._pos(idx)
- idx_before = idx - 1
- if idx_before < 0:
- pos_before = pos - 1
- idx_before = len(_keys[pos_before]) - 1
- else:
- pos_before = pos
-
- before = _keys[pos_before][idx_before]
- if before <= key <= _keys[pos][idx]:
- self._len += 1
- _lists[pos].insert(idx, val)
- _keys[pos].insert(idx, key)
- self._expand(pos)
- else:
- msg = '{0!r} not in sort order at index {1}'.format(val, idx)
- raise ValueError(msg)
-
- def index(self, val, start=None, stop=None):
- """
-        Return the smallest *k* such that `L[k] == val` and `start <= k <
-        stop`. Raises ValueError if *val* is not present. *stop* defaults to
-        the end of the list. *start* defaults to the beginning. Negative
-        indices are supported, as for slice indices.
- """
- _len = self._len
-
- if not _len:
- raise ValueError('{0!r} is not in list'.format(val))
-
- if start is None:
- start = 0
- if start < 0:
- start += _len
- if start < 0:
- start = 0
-
- if stop is None:
- stop = _len
- if stop < 0:
- stop += _len
- if stop > _len:
- stop = _len
-
- if stop <= start:
- raise ValueError('{0!r} is not in list'.format(val))
-
- _maxes = self._maxes
- key = self._key(val)
- pos = bisect_left(_maxes, key)
-
- if pos == len(_maxes):
- raise ValueError('{0!r} is not in list'.format(val))
-
- stop -= 1
- _lists = self._lists
- _keys = self._keys
- idx = bisect_left(_keys[pos], key)
- len_keys = len(_keys)
- len_sublist = len(_keys[pos])
-
- while True:
- if _keys[pos][idx] != key:
- raise ValueError('{0!r} is not in list'.format(val))
- if _lists[pos][idx] == val:
- loc = self._loc(pos, idx)
- if start <= loc <= stop:
- return loc
- elif loc > stop:
- break
- idx += 1
- if idx == len_sublist:
- pos += 1
- if pos == len_keys:
- raise ValueError('{0!r} is not in list'.format(val))
- len_sublist = len(_keys[pos])
- idx = 0
-
- raise ValueError('{0!r} is not in list'.format(val))
-
- def __add__(self, that):
- """
- Return a new sorted list containing all the elements in *self* and
- *that*. Elements in *that* do not need to be properly ordered with
- respect to *self*.
- """
- values = reduce(iadd, self._lists, [])
- values.extend(that)
- return self.__class__(values, key=self._key)
-
- def __mul__(self, that):
- """
- Return a new sorted list containing *that* shallow copies of each item
- in SortedListWithKey.
- """
- values = reduce(iadd, self._lists, []) * that
- return self.__class__(values, key=self._key)
-
- def __imul__(self, that):
- """
- Increase the length of the list by appending *that* shallow copies of
- each item.
- """
- values = reduce(iadd, self._lists, []) * that
- self._clear()
- self._update(values)
- return self
-
- @recursive_repr
- def __repr__(self):
- """Return string representation of sequence."""
- name = type(self).__name__
- values = list(self)
- _key = self._key
- return '{0}({1!r}, key={2!r})'.format(name, values, _key)
-
- def _check(self):
- try:
- # Check load parameters.
-
- assert self._load >= 4
- assert self._half == (self._load >> 1)
- assert self._dual == (self._load << 1)
-
- # Check empty sorted list case.
-
- if self._maxes == []:
- assert self._keys == []
- assert self._lists == []
- return
-
- assert self._maxes and self._keys and self._lists
-
- # Check all sublists are sorted.
-
- assert all(sublist[pos - 1] <= sublist[pos]
- for sublist in self._keys
- for pos in range(1, len(sublist)))
-
- # Check beginning/end of sublists are sorted.
-
- for pos in range(1, len(self._keys)):
- assert self._keys[pos - 1][-1] <= self._keys[pos][0]
-
- # Check length of _maxes and _lists match.
-
- assert len(self._maxes) == len(self._lists) == len(self._keys)
-
- # Check _keys matches _key mapped to _lists.
-
- assert all(len(val_list) == len(key_list)
- for val_list, key_list in zip(self._lists, self._keys))
- assert all(self._key(val) == key for val, key in
- zip((_val for _val_list in self._lists for _val in _val_list),
- (_key for _key_list in self._keys for _key in _key_list)))
-
- # Check _maxes is a map of _keys.
-
- assert all(self._maxes[pos] == self._keys[pos][-1]
- for pos in range(len(self._maxes)))
-
- # Check load level is less than _dual.
-
- assert all(len(sublist) <= self._dual for sublist in self._lists)
-
- # Check load level is greater than _half for all
- # but the last sublist.
-
- assert all(len(self._lists[pos]) >= self._half
- for pos in range(0, len(self._lists) - 1))
-
- # Check length.
-
- assert self._len == sum(len(sublist) for sublist in self._lists)
-
- # Check index.
-
- if self._index:
- assert len(self._index) == self._offset + len(self._lists)
- assert self._len == self._index[0]
-
- def test_offset_pos(pos):
- "Test positional indexing offset."
- from_index = self._index[self._offset + pos]
- return from_index == len(self._lists[pos])
-
- assert all(test_offset_pos(pos)
- for pos in range(len(self._lists)))
-
- for pos in range(self._offset):
- child = (pos << 1) + 1
- if self._index[pos] == 0:
- assert child >= len(self._index)
- elif child + 1 == len(self._index):
- assert self._index[pos] == self._index[child]
- else:
- child_sum = self._index[child] + self._index[child + 1]
- assert self._index[pos] == child_sum
-
- except:
- import sys
- import traceback
-
- traceback.print_exc(file=sys.stdout)
-
- print('len', self._len)
- print('load', self._load, self._half, self._dual)
- print('offset', self._offset)
- print('len_index', len(self._index))
- print('index', self._index)
- print('len_maxes', len(self._maxes))
- print('maxes', self._maxes)
- print('len_keys', len(self._keys))
- print('keys', self._keys)
- print('len_lists', len(self._lists))
- print('lists', self._lists)
-
- raise
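
For reference, the SortedListWithKey methods removed above behave like their
upstream sortedcontainers counterparts, for which this directory was only a
fallback. A minimal sketch of index(), __add__ and __mul__, assuming the
upstream sortedcontainers package is installed:

    from sortedcontainers import SortedListWithKey

    # Sort descending by negating the comparison key.
    sl = SortedListWithKey([3, 1, 2], key=lambda x: -x)
    assert list(sl) == [3, 2, 1]

    # index() returns the smallest k with sl[k] == val.
    assert sl.index(2) == 1

    # __add__ accepts an unsorted iterable and re-sorts the result.
    assert list(sl + [5, 4]) == [5, 4, 3, 2, 1]

    # __mul__ keeps the given number of shallow copies of each item.
    assert list(sl * 2) == [3, 3, 2, 2, 1, 1]
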
diff --git a/python/ovs/compat/sortedcontainers/sortedset.py b/python/ovs/compat/sortedcontainers/sortedset.py
deleted file mode 100644
index 6d82b387b..000000000
--- a/python/ovs/compat/sortedcontainers/sortedset.py
+++ /dev/null
@@ -1,327 +0,0 @@
-"""Sorted set implementation.
-
-"""
-
-try:
-    from collections.abc import Set, MutableSet, Sequence
-except ImportError:  # Python 2
-    from collections import Set, MutableSet, Sequence
-from itertools import chain
-import operator as op
-
-from .sortedlist import SortedList, recursive_repr, SortedListWithKey
-
-class SortedSet(MutableSet, Sequence):
- """
- A `SortedSet` provides the same methods as a `set`. Additionally, a
- `SortedSet` maintains its items in sorted order, allowing the `SortedSet` to
- be indexed.
-
- Unlike a `set`, a `SortedSet` requires items be hashable and comparable.
- """
- # pylint: disable=too-many-ancestors
- def __init__(self, iterable=None, key=None):
- """
- A `SortedSet` provides the same methods as a `set`. Additionally, a
- `SortedSet` maintains its items in sorted order, allowing the
- `SortedSet` to be indexed.
-
- An optional *iterable* provides an initial series of items to populate
- the `SortedSet`.
-
- An optional *key* argument defines a callable that, like the `key`
- argument to Python's `sorted` function, extracts a comparison key from
- each set item. If no function is specified, the default compares the
- set items directly.
- """
- self._key = key
-
- if not hasattr(self, '_set'):
- self._set = set()
-
- _set = self._set
- self.isdisjoint = _set.isdisjoint
- self.issubset = _set.issubset
- self.issuperset = _set.issuperset
-
- if key is None:
- self._list = SortedList(self._set)
- else:
- self._list = SortedListWithKey(self._set, key=key)
-
- _list = self._list
- self.bisect_left = _list.bisect_left
- self.bisect = _list.bisect
- self.bisect_right = _list.bisect_right
- self.index = _list.index
- self.irange = _list.irange
- self.islice = _list.islice
- self._reset = _list._reset # pylint: disable=protected-access
-
- if key is not None:
- self.bisect_key_left = _list.bisect_key_left
- self.bisect_key_right = _list.bisect_key_right
- self.bisect_key = _list.bisect_key
- self.irange_key = _list.irange_key
-
- if iterable is not None:
- self._update(iterable)
-
- @property
- def key(self):
- """Key function used to extract comparison key for sorting."""
- return self._key
-
- @classmethod
- def _fromset(cls, values, key=None):
- """Initialize sorted set from existing set."""
- sorted_set = object.__new__(cls)
- sorted_set._set = values # pylint: disable=protected-access
- sorted_set.__init__(key=key)
- return sorted_set
-
- def __contains__(self, value):
- """Return True if and only if *value* is an element in the set."""
- return value in self._set
-
- def __getitem__(self, index):
- """
- Return the element at position *index*.
-
- Supports slice notation and negative indexes.
- """
- return self._list[index]
-
- def __delitem__(self, index):
- """
- Remove the element at position *index*.
-
- Supports slice notation and negative indexes.
- """
- _set = self._set
- _list = self._list
- if isinstance(index, slice):
- values = _list[index]
- _set.difference_update(values)
- else:
- value = _list[index]
- _set.remove(value)
- del _list[index]
-
- def _make_cmp(self, set_op, doc):
- "Make comparator method."
- def comparer(self, that):
- "Compare method for sorted set and set-like object."
- # pylint: disable=protected-access
- if isinstance(that, SortedSet):
- return set_op(self._set, that._set)
- elif isinstance(that, Set):
- return set_op(self._set, that)
- return NotImplemented
-
- comparer.__name__ = '__{0}__'.format(set_op.__name__)
- doc_str = 'Return True if and only if Set is {0} `that`.'
- comparer.__doc__ = doc_str.format(doc)
-
- return comparer
-
- __eq__ = _make_cmp(None, op.eq, 'equal to')
- __ne__ = _make_cmp(None, op.ne, 'not equal to')
- __lt__ = _make_cmp(None, op.lt, 'a proper subset of')
- __gt__ = _make_cmp(None, op.gt, 'a proper superset of')
- __le__ = _make_cmp(None, op.le, 'a subset of')
- __ge__ = _make_cmp(None, op.ge, 'a superset of')
-
- def __len__(self):
- """Return the number of elements in the set."""
- return len(self._set)
-
- def __iter__(self):
- """
- Return an iterator over the Set. Elements are iterated in their sorted
- order.
-
- Iterating the Set while adding or deleting values may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return iter(self._list)
-
- def __reversed__(self):
- """
- Return an iterator over the Set. Elements are iterated in their reverse
- sorted order.
-
- Iterating the Set while adding or deleting values may raise a
- `RuntimeError` or fail to iterate over all entries.
- """
- return reversed(self._list)
-
- def add(self, value):
- """Add the element *value* to the set."""
- _set = self._set
- if value not in _set:
- _set.add(value)
- self._list.add(value)
-
- def clear(self):
- """Remove all elements from the set."""
- self._set.clear()
- self._list.clear()
-
- def copy(self):
- """Create a shallow copy of the sorted set."""
- return self._fromset(set(self._set), key=self._key)
-
- __copy__ = copy
-
- def count(self, value):
- """Return the number of occurrences of *value* in the set."""
- return 1 if value in self._set else 0
-
- def discard(self, value):
- """
- Remove the first occurrence of *value*. If *value* is not a member,
- does nothing.
- """
- _set = self._set
- if value in _set:
- _set.remove(value)
- self._list.discard(value)
-
- def pop(self, index=-1):
- """
- Remove and return item at *index* (default last). Raises IndexError if
- set is empty or index is out of range. Negative indexes are supported,
- as for slice indices.
- """
- # pylint: disable=arguments-differ
- value = self._list.pop(index)
- self._set.remove(value)
- return value
-
- def remove(self, value):
- """
- Remove first occurrence of *value*. Raises ValueError if
- *value* is not present.
- """
- self._set.remove(value)
- self._list.remove(value)
-
- def difference(self, *iterables):
- """
- Return a new set with elements in the set that are not in the
- *iterables*.
- """
- diff = self._set.difference(*iterables)
- return self._fromset(diff, key=self._key)
-
- __sub__ = difference
- __rsub__ = __sub__
-
- def difference_update(self, *iterables):
- """
-        Update the set, removing elements found in any of the *iterables*.
- """
- _set = self._set
- values = set(chain(*iterables))
- if (4 * len(values)) > len(_set):
- _list = self._list
- _set.difference_update(values)
- _list.clear()
- _list.update(_set)
- else:
- _discard = self.discard
- for value in values:
- _discard(value)
- return self
-
- __isub__ = difference_update
-
- def intersection(self, *iterables):
- """
- Return a new set with elements common to the set and all *iterables*.
- """
- comb = self._set.intersection(*iterables)
- return self._fromset(comb, key=self._key)
-
- __and__ = intersection
- __rand__ = __and__
-
- def intersection_update(self, *iterables):
- """
- Update the set, keeping only elements found in it and all *iterables*.
- """
- _set = self._set
- _list = self._list
- _set.intersection_update(*iterables)
- _list.clear()
- _list.update(_set)
- return self
-
- __iand__ = intersection_update
-
- def symmetric_difference(self, that):
- """
- Return a new set with elements in either *self* or *that* but not both.
- """
- diff = self._set.symmetric_difference(that)
- return self._fromset(diff, key=self._key)
-
- __xor__ = symmetric_difference
- __rxor__ = __xor__
-
- def symmetric_difference_update(self, that):
- """
- Update the set, keeping only elements found in either *self* or *that*,
- but not in both.
- """
- _set = self._set
- _list = self._list
- _set.symmetric_difference_update(that)
- _list.clear()
- _list.update(_set)
- return self
-
- __ixor__ = symmetric_difference_update
-
- def union(self, *iterables):
- """
- Return a new SortedSet with elements from the set and all *iterables*.
- """
- return self.__class__(chain(iter(self), *iterables), key=self._key)
-
- __or__ = union
- __ror__ = __or__
-
- def update(self, *iterables):
- """Update the set, adding elements from all *iterables*."""
- _set = self._set
- values = set(chain(*iterables))
- if (4 * len(values)) > len(_set):
- _list = self._list
- _set.update(values)
- _list.clear()
- _list.update(_set)
- else:
- _add = self.add
- for value in values:
- _add(value)
- return self
-
- __ior__ = update
- _update = update
-
- def __reduce__(self):
- return (type(self), (self._set, self._key))
-
- @recursive_repr
- def __repr__(self):
- _key = self._key
- key = '' if _key is None else ', key={0!r}'.format(_key)
- name = type(self).__name__
- return '{0}({1!r}{2})'.format(name, list(self), key)
-
- def _check(self):
- # pylint: disable=protected-access
- self._list._check()
- assert len(self._set) == len(self._list)
- _set = self._set
- assert all(val in _set for val in self._list)
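
The SortedSet deleted here likewise mirrors the upstream sortedcontainers
API: set semantics plus positional indexing. A short illustration, again
assuming the upstream package:

    from sortedcontainers import SortedSet

    s = SortedSet([3, 1, 2])
    assert s[0] == 1 and s[-1] == 3      # indexable, unlike a plain set
    s.add(0)
    assert list(s) == [0, 1, 2, 3]
    assert list(s | {4, 5}) == [0, 1, 2, 3, 4, 5]  # operators stay sorted
    assert list(s - {0, 1}) == [2, 3]
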
diff --git a/python/ovs/daemon.py b/python/ovs/daemon.py
deleted file mode 100644
index 06ef92b78..000000000
--- a/python/ovs/daemon.py
+++ /dev/null
@@ -1,652 +0,0 @@
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-import os
-import signal
-import sys
-import time
-
-import ovs.dirs
-import ovs.fatal_signal
-import ovs.process
-import ovs.socket_util
-import ovs.timeval
-import ovs.util
-import ovs.vlog
-
-if sys.platform != 'win32':
- import fcntl
- import resource
-else:
- import ovs.winutils as winutils
- import ovs.fcntl_win as fcntl
- import pywintypes
- import subprocess
- import win32process
-
-vlog = ovs.vlog.Vlog("daemon")
-
-# --detach: Should we run in the background?
-_detach = False
-
-# Running as the child process - Windows only.
-_detached = False
-
-# --pidfile: Name of pidfile (null if none).
-_pidfile = None
-
-# Our pidfile's inode and device, if we have created one.
-_pidfile_dev = None
-_pidfile_ino = None
-
-# --overwrite-pidfile: Create pidfile even if one already exists and is locked?
-_overwrite_pidfile = False
-
-# --no-chdir: Should we chdir to "/"?
-_chdir = True
-
-# --monitor: Should a supervisory process monitor the daemon and restart it if
-# it dies due to an error signal?
-_monitor = False
-
-# File descriptor used by daemonize_start() and daemonize_complete().
-_daemonize_fd = None
-
-RESTART_EXIT_CODE = 5
-
-
-def make_pidfile_name(name):
- """Returns the file name that would be used for a pidfile if 'name' were
- provided to set_pidfile()."""
- if name is None or name == "":
- return "%s/%s.pid" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME)
- else:
- return ovs.util.abs_file_name(ovs.dirs.RUNDIR, name)
-
-
-def set_pidfile(name):
- """Sets up a following call to daemonize() to create a pidfile named
- 'name'. If 'name' begins with '/', then it is treated as an absolute path.
-    Otherwise, it is taken relative to ovs.dirs.RUNDIR, which is
- $(prefix)/var/run by default.
-
- If 'name' is null, then ovs.util.PROGRAM_NAME followed by ".pid" is
- used."""
- global _pidfile
- _pidfile = make_pidfile_name(name)
-
-
-def set_no_chdir():
- """Sets that we do not chdir to "/"."""
- global _chdir
- _chdir = False
-
-
-def ignore_existing_pidfile():
- """Normally, daemonize() or daemonize_start() will terminate the program
- with a message if a locked pidfile already exists. If this function is
- called, an existing pidfile will be replaced, with a warning."""
- global _overwrite_pidfile
- _overwrite_pidfile = True
-
-
-def set_detach():
- """Sets up a following call to daemonize() to detach from the foreground
- session, running this process in the background."""
- global _detach
- _detach = True
-
-
-def set_detached(wp):
-    """Marks this process as the detached child and records 'wp', the write
-    handle of the startup pipe inherited from the parent process. Used on
-    Windows only."""
- global _detached
- global _daemonize_fd
- _detached = True
- _daemonize_fd = int(wp)
-
-
-def get_detach():
- """Will daemonize() really detach?"""
- return _detach
-
-
-def set_monitor():
- """Sets up a following call to daemonize() to fork a supervisory process to
- monitor the daemon and restart it if it dies due to an error signal."""
- global _monitor
- _monitor = True
-
-
-def _fatal(msg):
- vlog.err(msg)
- sys.stderr.write("%s\n" % msg)
- sys.exit(1)
-
-
-def _make_pidfile():
- """If a pidfile has been configured, creates it and stores the running
- process's pid in it. Ensures that the pidfile will be deleted when the
- process exits."""
- pid = os.getpid()
-
- # Create a temporary pidfile.
- if sys.platform != 'win32':
- tmpfile = "%s.tmp%d" % (_pidfile, pid)
- ovs.fatal_signal.add_file_to_unlink(tmpfile)
- else:
- tmpfile = "%s" % _pidfile
-
- try:
- # This is global to keep Python from garbage-collecting and
- # therefore closing our file after this function exits. That would
- # unlock the lock for us, and we don't want that.
- global file_handle
-
- file_handle = open(tmpfile, "w")
- except IOError as e:
- _fatal("%s: create failed (%s)" % (tmpfile, e.strerror))
-
- try:
- s = os.fstat(file_handle.fileno())
- except IOError as e:
- _fatal("%s: fstat failed (%s)" % (tmpfile, e.strerror))
-
- try:
- file_handle.write("%s\n" % pid)
- file_handle.flush()
- except OSError as e:
- _fatal("%s: write failed: %s" % (tmpfile, e.strerror))
-
- try:
- if sys.platform != 'win32':
- fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
- else:
- fcntl.lockf(file_handle, fcntl.LOCK_SH | fcntl.LOCK_NB)
- except IOError as e:
- _fatal("%s: fcntl failed: %s" % (tmpfile, e.strerror))
-
- if sys.platform == 'win32':
-        # Ensure that the pidfile will get closed and deleted on exit.
- ovs.fatal_signal.add_file_to_close_and_unlink(_pidfile, file_handle)
- else:
- # Rename or link it to the correct name.
- if _overwrite_pidfile:
- try:
- os.rename(tmpfile, _pidfile)
- except OSError as e:
- _fatal("failed to rename \"%s\" to \"%s\" (%s)"
- % (tmpfile, _pidfile, e.strerror))
- else:
- while True:
- try:
- os.link(tmpfile, _pidfile)
- error = 0
- except OSError as e:
- error = e.errno
- if error == errno.EEXIST:
- _check_already_running()
- elif error != errno.EINTR:
- break
- if error:
- _fatal("failed to link \"%s\" as \"%s\" (%s)"
- % (tmpfile, _pidfile, os.strerror(error)))
-
- # Ensure that the pidfile will get deleted on exit.
- ovs.fatal_signal.add_file_to_unlink(_pidfile)
-
- # Delete the temporary pidfile if it still exists.
- if not _overwrite_pidfile:
- error = ovs.fatal_signal.unlink_file_now(tmpfile)
- if error:
- _fatal("%s: unlink failed (%s)" % (
- tmpfile, os.strerror(error)))
-
- global _pidfile_dev
- global _pidfile_ino
- _pidfile_dev = s.st_dev
- _pidfile_ino = s.st_ino
-
-
-def daemonize():
- """If configured with set_pidfile() or set_detach(), creates the pid file
- and detaches from the foreground session."""
- daemonize_start()
- daemonize_complete()
-
-
-def _waitpid(pid, options):
- while True:
- try:
- return os.waitpid(pid, options)
- except OSError as e:
- if e.errno == errno.EINTR:
- pass
- return -e.errno, 0
-
-
-def _fork_and_wait_for_startup():
- if sys.platform == 'win32':
- return _fork_and_wait_for_startup_windows()
-
- try:
- rfd, wfd = os.pipe()
- except OSError as e:
- sys.stderr.write("pipe failed: %s\n" % os.strerror(e.errno))
- sys.exit(1)
-
- try:
- pid = os.fork()
- except OSError as e:
- sys.stderr.write("could not fork: %s\n" % os.strerror(e.errno))
- sys.exit(1)
-
- if pid > 0:
- # Running in parent process.
- os.close(wfd)
- ovs.fatal_signal.fork()
- while True:
- try:
- s = os.read(rfd, 1)
- error = 0
- except OSError as e:
- s = ""
- error = e.errno
- if error != errno.EINTR:
- break
- if len(s) != 1:
- retval, status = _waitpid(pid, 0)
- if retval == pid:
- if os.WIFEXITED(status) and os.WEXITSTATUS(status):
- # Child exited with an error. Convey the same error to
- # our parent process as a courtesy.
- sys.exit(os.WEXITSTATUS(status))
- else:
- sys.stderr.write("fork child failed to signal "
- "startup (%s)\n"
- % ovs.process.status_msg(status))
- else:
- assert retval < 0
- sys.stderr.write("waitpid failed (%s)\n"
- % os.strerror(-retval))
- sys.exit(1)
-
- os.close(rfd)
- else:
-        # Running in child process.
- os.close(rfd)
- ovs.timeval.postfork()
-
- global _daemonize_fd
- _daemonize_fd = wfd
- return pid
-
-
-def _fork_and_wait_for_startup_windows():
- global _detached
- if _detached:
- # Running in child process
- ovs.timeval.postfork()
- return 0
-
-    # Close the log file; on Windows it cannot be moved while the parent
-    # holds a reference to it.
- vlog.close_log_file()
-
- try:
- (rfd, wfd) = winutils.windows_create_pipe()
- except pywintypes.error as e:
- sys.stderr.write("pipe failed to create: %s\n" % e.strerror)
- sys.exit(1)
-
- try:
- creationFlags = win32process.DETACHED_PROCESS
- args = ("%s %s --pipe-handle=%ld" % (
- sys.executable, " ".join(sys.argv), int(wfd)))
- proc = subprocess.Popen(
- args=args,
- close_fds=False,
- shell=False,
- creationflags=creationFlags,
- stdout=sys.stdout,
- stderr=sys.stderr)
- pid = proc.pid
- except OSError as e:
- sys.stderr.write("CreateProcess failed (%s)\n" % os.strerror(e.errno))
- sys.exit(1)
-
- # Running in parent process.
- winutils.win32file.CloseHandle(wfd)
- ovs.fatal_signal.fork()
-
- error, s = winutils.windows_read_pipe(rfd, 1)
- if error:
- s = ""
-
- if len(s) != 1:
- retval = proc.wait()
- if retval == 0:
- sys.stderr.write("fork child failed to signal startup\n")
- else:
- # Child exited with an error. Convey the same error to
- # our parent process as a courtesy.
- sys.exit(retval)
- winutils.win32file.CloseHandle(rfd)
-
- return pid
-
-
-def _fork_notify_startup(fd):
- if sys.platform == 'win32':
- _fork_notify_startup_windows(fd)
- return
-
- if fd is not None:
- error, bytes_written = ovs.socket_util.write_fully(fd, "0")
- if error:
- sys.stderr.write("could not write to pipe\n")
- sys.exit(1)
- os.close(fd)
-
-
-def _fork_notify_startup_windows(fd):
- if fd is not None:
- try:
- # Python 2 requires a string as second parameter, while
- # Python 3 requires a bytes-like object. b"0" fits for both
- # python versions.
- winutils.win32file.WriteFile(fd, b"0", None)
- except winutils.pywintypes.error as e:
- sys.stderr.write("could not write to pipe: %s\n" %
- os.strerror(e.winerror))
- sys.exit(1)
-
-
-def _should_restart(status):
- global RESTART_EXIT_CODE
-
- if sys.platform == 'win32':
- # The exit status is encoded in the high byte of the
- # 16-bit number 'status'.
- exit_status = status >> 8
-
- if exit_status == RESTART_EXIT_CODE:
- return True
- return False
-
- if os.WIFEXITED(status) and os.WEXITSTATUS(status) == RESTART_EXIT_CODE:
- return True
-
- if os.WIFSIGNALED(status):
- for signame in ("SIGABRT", "SIGALRM", "SIGBUS", "SIGFPE", "SIGILL",
- "SIGPIPE", "SIGSEGV", "SIGXCPU", "SIGXFSZ"):
- if os.WTERMSIG(status) == getattr(signal, signame, None):
- return True
- return False
-
-
-def _monitor_daemon(daemon_pid):
- # XXX should log daemon's stderr output at startup time
- # XXX should use setproctitle module if available
- last_restart = None
- while True:
- retval, status = _waitpid(daemon_pid, 0)
- if retval < 0:
- sys.stderr.write("waitpid failed\n")
- sys.exit(1)
- elif retval == daemon_pid:
- status_msg = ("pid %d died, %s"
- % (daemon_pid, ovs.process.status_msg(status)))
-
- if _should_restart(status):
- if sys.platform != 'win32' and os.WCOREDUMP(status):
- # Disable further core dumps to save disk space.
- try:
- resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
- except resource.error:
- vlog.warn("failed to disable core dumps")
-
- # Throttle restarts to no more than once every 10 seconds.
- if (last_restart is not None and
- ovs.timeval.msec() < last_restart + 10000):
- vlog.warn("%s, waiting until 10 seconds since last "
- "restart" % status_msg)
- while True:
- now = ovs.timeval.msec()
- wakeup = last_restart + 10000
- if now > wakeup:
- break
- sys.stdout.write("sleep %f\n" % (
- (wakeup - now) / 1000.0))
- time.sleep((wakeup - now) / 1000.0)
- last_restart = ovs.timeval.msec()
-
- vlog.err("%s, restarting" % status_msg)
- daemon_pid = _fork_and_wait_for_startup()
- if not daemon_pid:
- break
- else:
- vlog.info("%s, exiting" % status_msg)
- sys.exit(0)
-
- # Running in new daemon process.
-
-
-def _close_standard_fds():
- """Close stdin, stdout, stderr. If we're started from e.g. an SSH session,
- then this keeps us from holding that session open artificially."""
- null_fd = ovs.socket_util.get_null_fd()
- if null_fd >= 0:
- os.dup2(null_fd, 0)
- os.dup2(null_fd, 1)
- os.dup2(null_fd, 2)
-
-
-def daemonize_start():
- """If daemonization is configured, then starts daemonization, by forking
- and returning in the child process. The parent process hangs around until
- the child lets it know either that it completed startup successfully (by
-    calling daemonize_complete()) or that it failed to start up (by exiting
-    with a nonzero exit code)."""
-
- if _detach:
- if _fork_and_wait_for_startup() > 0:
- # Running in parent process.
- sys.exit(0)
-
- if sys.platform != 'win32':
- # Running in daemon or monitor process.
- os.setsid()
-
- if _monitor:
- saved_daemonize_fd = _daemonize_fd
- daemon_pid = _fork_and_wait_for_startup()
- if daemon_pid > 0:
- # Running in monitor process.
- _fork_notify_startup(saved_daemonize_fd)
- if sys.platform != 'win32':
- _close_standard_fds()
- _monitor_daemon(daemon_pid)
- # Running in daemon process
-
- if _pidfile:
- _make_pidfile()
-
-
-def daemonize_complete():
- """If daemonization is configured, then this function notifies the parent
- process that the child process has completed startup successfully."""
- _fork_notify_startup(_daemonize_fd)
-
- if _detach:
- if _chdir:
- os.chdir("/")
- _close_standard_fds()
-
-
-def usage():
- sys.stdout.write("""
-Daemon options:
- --detach run in background as daemon
- --no-chdir do not chdir to '/'
- --pidfile[=FILE] create pidfile (default: %s/%s.pid)
- --overwrite-pidfile with --pidfile, start even if already running
-""" % (ovs.dirs.RUNDIR, ovs.util.PROGRAM_NAME))
-
-
-def __read_pidfile(pidfile, delete_if_stale):
- if _pidfile_dev is not None:
- try:
- s = os.stat(pidfile)
- if s.st_ino == _pidfile_ino and s.st_dev == _pidfile_dev:
- # It's our own pidfile. We can't afford to open it,
- # because closing *any* fd for a file that a process
- # has locked also releases all the locks on that file.
- #
- # Fortunately, we know the associated pid anyhow.
- return os.getpid()
- except OSError:
- pass
-
- try:
- file_handle = open(pidfile, "r+")
- except IOError as e:
- if e.errno == errno.ENOENT and delete_if_stale:
- return 0
- vlog.warn("%s: open: %s" % (pidfile, e.strerror))
- return -e.errno
-
- # Python fcntl doesn't directly support F_GETLK so we have to just try
- # to lock it.
- try:
- fcntl.lockf(file_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)
-
- # pidfile exists but wasn't locked by anyone. Now we have the lock.
- if not delete_if_stale:
- file_handle.close()
- vlog.warn("%s: pid file is stale" % pidfile)
- return -errno.ESRCH
-
- # Is the file we have locked still named 'pidfile'?
- try:
- raced = False
- s = os.stat(pidfile)
- s2 = os.fstat(file_handle.fileno())
- if s.st_ino != s2.st_ino or s.st_dev != s2.st_dev:
- raced = True
- except IOError:
- raced = True
- if raced:
- vlog.warn("%s: lost race to delete pidfile" % pidfile)
- return -errno.EALREADY
-
- # We won the right to delete the stale pidfile.
- try:
- os.unlink(pidfile)
- except IOError as e:
- vlog.warn("%s: failed to delete stale pidfile (%s)"
- % (pidfile, e.strerror))
- return -e.errno
- else:
- vlog.dbg("%s: deleted stale pidfile" % pidfile)
- file_handle.close()
- return 0
- except IOError as e:
- if e.errno not in [errno.EACCES, errno.EAGAIN]:
- vlog.warn("%s: fcntl: %s" % (pidfile, e.strerror))
- return -e.errno
-
- # Someone else has the pidfile locked.
- try:
- try:
- error = int(file_handle.readline())
- except IOError as e:
- vlog.warn("%s: read: %s" % (pidfile, e.strerror))
- error = -e.errno
- except ValueError:
- vlog.warn("%s does not contain a pid" % pidfile)
- error = -errno.EINVAL
-
- return error
- finally:
- try:
- file_handle.close()
- except IOError:
- pass
-
-
-def read_pidfile(pidfile):
- """Opens and reads a PID from 'pidfile'. Returns the positive PID if
- successful, otherwise a negative errno value."""
- return __read_pidfile(pidfile, False)
-
-
-def _check_already_running():
- pid = __read_pidfile(_pidfile, True)
- if pid > 0:
- _fatal("%s: already running as pid %d, aborting" % (_pidfile, pid))
- elif pid < 0:
- _fatal("%s: pidfile check failed (%s), aborting"
- % (_pidfile, os.strerror(pid)))
-
-
-def add_args(parser):
- """Populates 'parser', an ArgumentParser allocated using the argparse
- module, with the command line arguments required by the daemon module."""
-
- pidfile = make_pidfile_name(None)
-
- group = parser.add_argument_group(title="Daemon Options")
- group.add_argument("--detach", action="store_true",
- help="Run in background as a daemon.")
- group.add_argument("--no-chdir", action="store_true",
- help="Do not chdir to '/'.")
- group.add_argument("--monitor", action="store_true",
- help="Monitor %s process." % ovs.util.PROGRAM_NAME)
- group.add_argument("--pidfile", nargs="?", const=pidfile,
- help="Create pidfile (default %s)." % pidfile)
- group.add_argument("--overwrite-pidfile", action="store_true",
- help="With --pidfile, start even if already running.")
- if sys.platform == 'win32':
- group.add_argument("--pipe-handle",
-                           help=("Internal (Windows only): write handle of "
-                                 "the pipe used to signal startup to the "
-                                 "parent process."))
-
-
-def handle_args(args):
- """Handles daemon module settings in 'args'. 'args' is an object
- containing values parsed by the parse_args() method of ArgumentParser. The
- parent ArgumentParser should have been prepared by add_args() before
- calling parse_args()."""
-
- if sys.platform == 'win32':
- if args.pipe_handle:
- set_detached(args.pipe_handle)
-
- if args.detach:
- set_detach()
-
- if args.no_chdir:
- set_no_chdir()
-
- if args.pidfile:
- set_pidfile(args.pidfile)
-
- if args.overwrite_pidfile:
- ignore_existing_pidfile()
-
- if args.monitor:
- set_monitor()
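
The daemon module removed above is driven in three steps: register its
options on an ArgumentParser, apply the parsed values, then daemonize around
any slow initialization. A minimal sketch (the initialization step is a
placeholder):

    import argparse

    import ovs.daemon

    parser = argparse.ArgumentParser()
    ovs.daemon.add_args(parser)      # adds --detach, --monitor, --pidfile, ...
    args = parser.parse_args()
    ovs.daemon.handle_args(args)

    ovs.daemon.daemonize_start()     # fork; the parent waits for startup
    # ... open sockets, connect to the database, and so on ...
    ovs.daemon.daemonize_complete()  # signal success to the parent and detach
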
diff --git a/python/ovs/db/__init__.py b/python/ovs/db/__init__.py
deleted file mode 100644
index 218d8921e..000000000
--- a/python/ovs/db/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file intentionally left blank.
diff --git a/python/ovs/db/custom_index.py b/python/ovs/db/custom_index.py
deleted file mode 100644
index 587caf5e3..000000000
--- a/python/ovs/db/custom_index.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import collections
-import functools
-import operator
-try:
- from UserDict import IterableUserDict as DictBase
-except ImportError:
- from collections import UserDict as DictBase
-
-try:
- import sortedcontainers
-except ImportError:
- from ovs.compat import sortedcontainers
-
-from ovs.db import data
-
-OVSDB_INDEX_ASC = "ASC"
-OVSDB_INDEX_DESC = "DESC"
-ColumnIndex = collections.namedtuple('ColumnIndex',
- ['column', 'direction', 'key'])
-
-
-class MultiColumnIndex(object):
- def __init__(self, name):
- self.name = name
- self.columns = []
- self.clear()
-
- def __repr__(self):
- return "{}(name={})".format(self.__class__.__name__, self.name)
-
- def __str__(self):
- return repr(self) + " columns={} values={}".format(
- self.columns, [str(v) for v in self.values])
-
- def add_column(self, column, direction=OVSDB_INDEX_ASC, key=None):
- self.columns.append(ColumnIndex(column, direction,
- key or operator.attrgetter(column)))
-
- def add_columns(self, *columns):
- self.columns.extend(ColumnIndex(col, OVSDB_INDEX_ASC,
- operator.attrgetter(col))
- for col in columns)
-
- def _cmp(self, a, b):
- for col, direction, key in self.columns:
- aval, bval = key(a), key(b)
- if aval == bval:
- continue
- result = (aval > bval) - (aval < bval)
- return result if direction == OVSDB_INDEX_ASC else -result
- return 0
-
- def index_entry_from_row(self, row):
- return row._table.rows.IndexEntry(
- uuid=row.uuid,
- **{c.column: getattr(row, c.column) for c in self.columns})
-
- def add(self, row):
- if not all(hasattr(row, col.column) for col in self.columns):
- # This is a new row, but it hasn't had the necessary columns set
- # We'll add it later
- return
- self.values.add(self.index_entry_from_row(row))
-
- def remove(self, row):
- self.values.remove(self.index_entry_from_row(row))
-
- def clear(self):
- self.values = sortedcontainers.SortedListWithKey(
- key=functools.cmp_to_key(self._cmp))
-
- def irange(self, start, end):
- return iter(r._table.rows[r.uuid]
- for r in self.values.irange(start, end))
-
- def __iter__(self):
- return iter(r._table.rows[r.uuid] for r in self.values)
-
-
-class IndexedRows(DictBase, object):
- def __init__(self, table, *args, **kwargs):
- super(IndexedRows, self).__init__(*args, **kwargs)
- self.table = table
- self.indexes = {}
- self.IndexEntry = IndexEntryClass(table)
-
- def index_create(self, name):
- if name in self.indexes:
- raise ValueError("An index named {} already exists".format(name))
- index = self.indexes[name] = MultiColumnIndex(name)
- return index
-
- def __setitem__(self, key, item):
- self.data[key] = item
- for index in self.indexes.values():
- index.add(item)
-
- def __delitem__(self, key):
- val = self.data[key]
- del self.data[key]
- for index in self.indexes.values():
- index.remove(val)
-
- def clear(self):
- self.data.clear()
- for index in self.indexes.values():
- index.clear()
-
- # Nothing uses the methods below, though they'd be easy to implement
- def update(self, dict=None, **kwargs):
- raise NotImplementedError()
-
- def setdefault(self, key, failobj=None):
- raise NotImplementedError()
-
- def pop(self, key, *args):
- raise NotImplementedError()
-
- def popitem(self):
- raise NotImplementedError()
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- raise NotImplementedError()
-
-
-def IndexEntryClass(table):
-    """Create a class used to represent Rows in indexes
-
- ovs.db.idl.Row, being inherently tied to transaction processing and being
- initialized with dicts of Datums, is not really useable as an object to
- pass to and store in indexes. This method will create a class named after
- the table's name that is initialized with that Table Row's default values.
- For example:
-
- Port = IndexEntryClass(idl.tables['Port'])
-
- will create a Port class. This class can then be used to search custom
- indexes. For example:
-
-        for port in idx.irange(Port(name="test1"), Port(name="test9")):
- ...
- """
-
- def defaults_uuid_to_row(atom, base):
- return atom.value
-
- columns = ['uuid'] + list(table.columns.keys())
- cls = collections.namedtuple(table.name, columns)
- cls._table = table
- cls.__new__.__defaults__ = (None,) + tuple(
- data.Datum.default(c.type).to_python(defaults_uuid_to_row)
- for c in table.columns.values())
- return cls
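
Tying custom_index.py together: an index is created on an Idl table, columns
are attached, and irange() is queried with IndexEntry objects, as the
IndexEntryClass() docstring describes. A sketch that assumes 'idl' is a
connected ovs.db.idl.Idl whose schema has a 'Port' table with a 'name'
column:

    from ovs.db.custom_index import IndexEntryClass, OVSDB_INDEX_ASC

    index = idl.index_create('Port', 'by-name')
    index.add_column('name', direction=OVSDB_INDEX_ASC)

    Port = IndexEntryClass(idl.tables['Port'])
    for row in index.irange(Port(name="test1"), Port(name="test9")):
        print(row.name)
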
diff --git a/python/ovs/db/data.py b/python/ovs/db/data.py
deleted file mode 100644
index 9e57595f7..000000000
--- a/python/ovs/db/data.py
+++ /dev/null
@@ -1,585 +0,0 @@
-# Copyright (c) 2009, 2010, 2011, 2014, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import re
-import uuid
-
-import ovs.db.parser
-import ovs.db.types
-import ovs.json
-import ovs.jsonrpc
-import ovs.ovsuuid
-import ovs.poller
-import ovs.socket_util
-from ovs.db import error
-
-import six
-
-
-class ConstraintViolation(error.Error):
- def __init__(self, msg, json=None):
- error.Error.__init__(self, msg, json, tag="constraint violation")
-
-
-def escapeCString(src):
- dst = []
- for c in src:
- if c in "\\\"":
- dst.append("\\" + c)
- elif ord(c) < 32:
- if c == '\n':
- dst.append('\\n')
- elif c == '\r':
- dst.append('\\r')
- elif c == '\a':
- dst.append('\\a')
- elif c == '\b':
- dst.append('\\b')
- elif c == '\f':
- dst.append('\\f')
- elif c == '\t':
- dst.append('\\t')
- elif c == '\v':
- dst.append('\\v')
- else:
- dst.append('\\%03o' % ord(c))
- else:
- dst.append(c)
- return ''.join(dst)
-
-
-def returnUnchanged(x):
- return x
-
-
-@functools.total_ordering
-class Atom(object):
- def __init__(self, type_, value=None):
- self.type = type_
- if value is not None:
- self.value = value
- else:
- self.value = type_.default_atom()
-
- def __eq__(self, other):
- if not isinstance(other, Atom) or self.type != other.type:
- return NotImplemented
- return True if self.value == other.value else False
-
- def __lt__(self, other):
- if not isinstance(other, Atom) or self.type != other.type:
- return NotImplemented
- return True if self.value < other.value else False
-
- def __cmp__(self, other):
- if not isinstance(other, Atom) or self.type != other.type:
- return NotImplemented
- elif self.value < other.value:
- return -1
- elif self.value > other.value:
- return 1
- else:
- return 0
-
- def __hash__(self):
- return hash(self.value)
-
- @staticmethod
- def default(type_):
- """Returns the default value for the given type_, which must be an
- instance of ovs.db.types.AtomicType.
-
-        The default value for each atomic type is:
-
- - 0, for integer or real atoms.
-
- - False, for a boolean atom.
-
- - "", for a string atom.
-
- - The all-zeros UUID, for a UUID atom."""
- return Atom(type_)
-
- def is_default(self):
- return self == self.default(self.type)
-
- @staticmethod
- def from_json(base, json, symtab=None):
- type_ = base.type
- json = ovs.db.parser.float_to_int(json)
- real_types = list(six.integer_types)
- real_types.extend([float])
- real_types = tuple(real_types)
- if ((type_ == ovs.db.types.IntegerType
- and isinstance(json, six.integer_types))
- or (type_ == ovs.db.types.RealType
- and isinstance(json, real_types))
- or (type_ == ovs.db.types.BooleanType and isinstance(json, bool))
- or (type_ == ovs.db.types.StringType
- and isinstance(json, six.string_types))):
- atom = Atom(type_, json)
- elif type_ == ovs.db.types.UuidType:
- atom = Atom(type_, ovs.ovsuuid.from_json(json, symtab))
- else:
- raise error.Error("expected %s" % type_.to_string(), json)
- atom.check_constraints(base)
- return atom
-
- @staticmethod
- def from_python(base, value):
- value = ovs.db.parser.float_to_int(value)
- if isinstance(value, base.type.python_types):
- atom = Atom(base.type, value)
- else:
- raise error.Error("expected %s, got %s" % (base.type, type(value)))
- atom.check_constraints(base)
- return atom
-
- def check_constraints(self, base):
- """Checks whether 'atom' meets the constraints (if any) defined in
- 'base' and raises an ovs.db.error.Error if any constraint is violated.
-
- 'base' and 'atom' must have the same type.
- Checking UUID constraints is deferred to transaction commit time, so
- this function does nothing for UUID constraints."""
- assert base.type == self.type
- if base.enum is not None and self not in base.enum:
- raise ConstraintViolation(
- "%s is not one of the allowed values (%s)"
- % (self.to_string(), base.enum.to_string()))
- elif base.type in [ovs.db.types.IntegerType, ovs.db.types.RealType]:
- if ((base.min is None or self.value >= base.min) and
- (base.max is None or self.value <= base.max)):
- pass
- elif base.min is not None and base.max is not None:
- raise ConstraintViolation(
- "%s is not in the valid range %.15g to %.15g (inclusive)"
- % (self.to_string(), base.min, base.max))
- elif base.min is not None:
- raise ConstraintViolation(
- "%s is less than minimum allowed value %.15g"
- % (self.to_string(), base.min))
- else:
- raise ConstraintViolation(
- "%s is greater than maximum allowed value %.15g"
- % (self.to_string(), base.max))
- elif base.type == ovs.db.types.StringType:
- # XXX The C version validates that the string is valid UTF-8 here.
- # Do we need to do that in Python too?
- s = self.value
- length = len(s)
- if length < base.min_length:
- raise ConstraintViolation(
- '"%s" length %d is less than minimum allowed length %d'
- % (s, length, base.min_length))
- elif length > base.max_length:
- raise ConstraintViolation(
- '"%s" length %d is greater than maximum allowed '
- 'length %d' % (s, length, base.max_length))
-
- def to_json(self):
- if self.type == ovs.db.types.UuidType:
- return ovs.ovsuuid.to_json(self.value)
- else:
- return self.value
-
- def cInitAtom(self, var):
- if self.type == ovs.db.types.IntegerType:
- return '.integer = %d' % self.value
- elif self.type == ovs.db.types.RealType:
- return '.real = %.15g' % self.value
- elif self.type == ovs.db.types.BooleanType:
- if self.value:
- return '.boolean = true'
- else:
- return '.boolean = false'
- elif self.type == ovs.db.types.StringType:
- return '.string = "%s"' % escapeCString(self.value)
- elif self.type == ovs.db.types.UuidType:
- return '.uuid = %s' % ovs.ovsuuid.to_c_assignment(self.value)
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- if self.type == ovs.db.types.IntegerType:
- return '%d' % self.value
- elif self.type == ovs.db.types.RealType:
- return '%.15g' % self.value
- elif self.type == ovs.db.types.BooleanType:
- if self.value:
- return 'true'
- else:
- return 'false'
- elif self.type == ovs.db.types.StringType:
- return escapeLiteral(self.value)
- elif self.type == ovs.db.types.UuidType:
- return self.value.value
-
- __need_quotes_re = re.compile("$|true|false|[^_a-zA-Z]|.*[^-._a-zA-Z]")
-
- @staticmethod
- def __string_needs_quotes(s):
- return Atom.__need_quotes_re.match(s)
-
- def to_string(self):
- if self.type == ovs.db.types.IntegerType:
- return '%d' % self.value
- elif self.type == ovs.db.types.RealType:
- return '%.15g' % self.value
- elif self.type == ovs.db.types.BooleanType:
- if self.value:
- return 'true'
- else:
- return 'false'
- elif self.type == ovs.db.types.StringType:
- if Atom.__string_needs_quotes(self.value):
- return ovs.json.to_string(self.value)
- else:
- return self.value
- elif self.type == ovs.db.types.UuidType:
- return str(self.value)
-
- @staticmethod
- def new(x):
- if isinstance(x, six.integer_types):
- t = ovs.db.types.IntegerType
- elif isinstance(x, float):
- t = ovs.db.types.RealType
- elif isinstance(x, bool):
- t = ovs.db.types.BooleanType
- elif isinstance(x, six.string_types):
- t = ovs.db.types.StringType
- elif isinstance(x, uuid):
- t = ovs.db.types.UuidType
- else:
- raise TypeError
- return Atom(t, x)
-
-
-@functools.total_ordering
-class Datum(object):
-    def __init__(self, type_, values=None):
-        self.type = type_
-        self.values = values if values is not None else {}
-
- def __eq__(self, other):
- if not isinstance(other, Datum):
- return NotImplemented
- return True if self.values == other.values else False
-
- def __lt__(self, other):
- if not isinstance(other, Datum):
- return NotImplemented
- return True if self.values < other.values else False
-
- def __cmp__(self, other):
- if not isinstance(other, Datum):
- return NotImplemented
- elif self.values < other.values:
- return -1
- elif self.values > other.values:
- return 1
- else:
- return 0
-
- __hash__ = None
-
- def __contains__(self, item):
- return item in self.values
-
- def copy(self):
- return Datum(self.type, dict(self.values))
-
- @staticmethod
- def default(type_):
- if type_.n_min == 0:
- values = {}
- elif type_.is_map():
- values = {type_.key.default(): type_.value.default()}
- else:
- values = {type_.key.default(): None}
- return Datum(type_, values)
-
- def is_default(self):
- return self == Datum.default(self.type)
-
- def check_constraints(self):
-        """Checks that each of the atoms in 'datum' conforms to the
-        constraints specified by its 'type' and raises an ovs.db.error.Error
-        if any constraint is violated.
-
- This function is not commonly useful because the most ordinary way to
- obtain a datum is ultimately via Datum.from_json() or Atom.from_json(),
- which check constraints themselves."""
- for keyAtom, valueAtom in six.iteritems(self.values):
- keyAtom.check_constraints(self.type.key)
- if valueAtom is not None:
- valueAtom.check_constraints(self.type.value)
-
- @staticmethod
- def from_json(type_, json, symtab=None):
- """Parses 'json' as a datum of the type described by 'type'. If
- successful, returns a new datum. On failure, raises an
- ovs.db.error.Error.
-
- Violations of constraints expressed by 'type' are treated as errors.
-
- If 'symtab' is nonnull, then named UUIDs in 'symtab' are accepted.
- Refer to RFC 7047 for information about this, and for the syntax
- that this function accepts."""
- is_map = type_.is_map()
- if (is_map or
- (isinstance(json, list) and len(json) > 0 and json[0] == "set")):
- if is_map:
- class_ = "map"
- else:
- class_ = "set"
-
- inner = ovs.db.parser.unwrap_json(json, class_, [list, tuple],
- "array")
- n = len(inner)
- if n < type_.n_min or n > type_.n_max:
- raise error.Error("%s must have %d to %d members but %d are "
- "present" % (class_, type_.n_min,
- type_.n_max, n),
- json)
-
- values = {}
- for element in inner:
- if is_map:
- key, value = ovs.db.parser.parse_json_pair(element)
- keyAtom = Atom.from_json(type_.key, key, symtab)
- valueAtom = Atom.from_json(type_.value, value, symtab)
- else:
- keyAtom = Atom.from_json(type_.key, element, symtab)
- valueAtom = None
-
- if keyAtom in values:
- if is_map:
- raise error.Error("map contains duplicate key")
- else:
- raise error.Error("set contains duplicate")
-
- values[keyAtom] = valueAtom
-
- return Datum(type_, values)
- else:
- keyAtom = Atom.from_json(type_.key, json, symtab)
- return Datum(type_, {keyAtom: None})
-
- def to_json(self):
- if self.type.is_map():
- return ["map", [[k.to_json(), v.to_json()]
- for k, v in sorted(self.values.items())]]
- elif len(self.values) == 1:
- key = next(six.iterkeys(self.values))
- return key.to_json()
- else:
- return ["set", [k.to_json() for k in sorted(self.values.keys())]]
-
- def to_string(self):
- head = tail = None
- if self.type.n_max > 1 or len(self.values) == 0:
- if self.type.is_map():
- head = "{"
- tail = "}"
- else:
- head = "["
- tail = "]"
-
- s = []
- if head:
- s.append(head)
-
- for i, key in enumerate(sorted(self.values)):
- if i:
- s.append(", ")
-
- s.append(key.to_string())
- if self.type.is_map():
- s.append("=")
- s.append(self.values[key].to_string())
-
- if tail:
- s.append(tail)
- return ''.join(s)
-
- def diff(self, datum):
- if self.type.n_max > 1 or len(self.values) == 0:
- for k, v in six.iteritems(datum.values):
- if k in self.values and v == self.values[k]:
- del self.values[k]
- else:
- self.values[k] = v
- else:
- return datum
-
- return self
-
- def as_list(self):
- if self.type.is_map():
- return [[k.value, v.value] for k, v in six.iteritems(self.values)]
- else:
- return [k.value for k in six.iterkeys(self.values)]
-
- def as_dict(self):
- return dict(self.values)
-
- def as_scalar(self):
- if len(self.values) == 1:
- if self.type.is_map():
- k, v = next(six.iteritems(self.values))
- return [k.value, v.value]
- else:
- return next(six.iterkeys(self.values)).value
- else:
- return None
-
- def to_python(self, uuid_to_row):
- """Returns this datum's value converted into a natural Python
- representation of this datum's type, according to the following
- rules:
-
- - If the type has exactly one value and it is not a map (that is,
- self.type.is_scalar() returns True), then the value is:
-
- * An int or long, for an integer column.
-
- * An int or long or float, for a real column.
-
- * A bool, for a boolean column.
-
- * A str or unicode object, for a string column.
-
- * A uuid.UUID object, for a UUID column without a ref_table.
-
-          * An object representing the referenced row, for a UUID column with
- a ref_table. (For the Idl, this object will be an ovs.db.idl.Row
- object.)
-
- If some error occurs (e.g. the database server's idea of the column
- is different from the IDL's idea), then the default value for the
- scalar type is used (see Atom.default()).
-
- - Otherwise, if the type is not a map, then the value is a Python list
- whose elements have the types described above.
-
- - Otherwise, the type is a map, and the value is a Python dict that
- maps from key to value, with key and value types determined as
- described above.
-
- 'uuid_to_row' must be a function that takes a value and an
- ovs.db.types.BaseType and translates UUIDs into row objects."""
- if self.type.is_scalar():
- value = uuid_to_row(self.as_scalar(), self.type.key)
- if value is None:
- return self.type.key.default()
- else:
- return value
- elif self.type.is_map():
- value = {}
- for k, v in six.iteritems(self.values):
- dk = uuid_to_row(k.value, self.type.key)
- dv = uuid_to_row(v.value, self.type.value)
- if dk is not None and dv is not None:
- value[dk] = dv
- return value
- else:
- s = set()
- for k in self.values:
- dk = uuid_to_row(k.value, self.type.key)
- if dk is not None:
- s.add(dk)
- return sorted(s)
-
- @staticmethod
- def from_python(type_, value, row_to_uuid):
- """Returns a new Datum with the given ovs.db.types.Type 'type_'. The
- new datum's value is taken from 'value', which must take the form
- described as a valid return value from Datum.to_python() for 'type'.
-
- Each scalar value within 'value' is initially passed through
- 'row_to_uuid', which should convert objects that represent rows (if
- any) into uuid.UUID objects and return other data unchanged.
-
- Raises ovs.db.error.Error if 'value' is not in an appropriate form for
- 'type_'."""
- d = {}
- if isinstance(value, dict):
- for k, v in six.iteritems(value):
- ka = Atom.from_python(type_.key, row_to_uuid(k))
- va = Atom.from_python(type_.value, row_to_uuid(v))
- d[ka] = va
- elif isinstance(value, (list, set, tuple)):
- for k in value:
- ka = Atom.from_python(type_.key, row_to_uuid(k))
- d[ka] = None
- else:
- ka = Atom.from_python(type_.key, row_to_uuid(value))
- d[ka] = None
-
- datum = Datum(type_, d)
- datum.check_constraints()
- if not datum.conforms_to_type():
- raise error.Error("%d values when type requires between %d and %d"
- % (len(d), type_.n_min, type_.n_max))
-
- return datum
-
- def __getitem__(self, key):
- if not isinstance(key, Atom):
- key = Atom.new(key)
- if not self.type.is_map():
- raise IndexError
- elif key not in self.values:
- raise KeyError
- else:
- return self.values[key].value
-
- def get(self, key, default=None):
- if not isinstance(key, Atom):
- key = Atom.new(key)
- if key in self.values:
- return self.values[key].value
- else:
- return default
-
- def __str__(self):
- return self.to_string()
-
- def conforms_to_type(self):
- n = len(self.values)
- return self.type.n_min <= n <= self.type.n_max
-
- def cDeclareDatum(self, name):
- n = len(self.values)
- if n == 0:
-            return ["static struct ovsdb_datum %s = { .n = 0 };" % name]
-
- s = ["static union ovsdb_atom %s_keys[%d] = {" % (name, n)]
- for key in sorted(self.values):
- s += [" { %s }," % key.cInitAtom(key)]
- s += ["};"]
-
- if self.type.value:
-            s += ["static union ovsdb_atom %s_values[%d] = {" % (name, n)]
- for k, v in sorted(self.values.items()):
- s += [" { %s }," % v.cInitAtom(v)]
- s += ["};"]
-
- s += ["static struct ovsdb_datum %s = {" % name]
- s += [" .n = %d," % n]
- s += [" .keys = %s_keys," % name]
- if self.type.value:
- s += [" .values = %s_values," % name]
- s += ["};"]
- return s
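
To make the Atom/Datum machinery concrete, a small round trip between OVSDB
JSON and Python values for a scalar string column; a sketch only, since Datum
construction normally happens inside the IDL:

    import ovs.db.types as types
    from ovs.db.data import Datum

    string_type = types.Type(types.BaseType(types.StringType))
    datum = Datum.from_json(string_type, "hello")
    assert datum.to_json() == "hello"
    assert datum.to_python(lambda value, base: value) == "hello"
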
diff --git a/python/ovs/db/error.py b/python/ovs/db/error.py
deleted file mode 100644
index 4d192839b..000000000
--- a/python/ovs/db/error.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2009, 2010, 2011 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import ovs.json
-
-
-class Error(Exception):
- def __init__(self, msg, json=None, tag=None):
- self.msg = msg
- self.json = json
- if tag is None:
- if json is None:
- self.tag = "ovsdb error"
- else:
- self.tag = "syntax error"
- else:
- self.tag = tag
-
- # Compose message.
- syntax = ""
- if self.json is not None:
- syntax = 'syntax "%s": ' % ovs.json.to_string(self.json)
- Exception.__init__(self, "%s%s: %s" % (syntax, self.tag, self.msg))
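
The Error class composes its message from the optional JSON context and the
tag, for example:

    from ovs.db.error import Error

    try:
        raise Error("unknown table", json={"table": "Bogus"})
    except Error as e:
        print(e)  # syntax "{"table":"Bogus"}": syntax error: unknown table
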
diff --git a/python/ovs/db/idl.py b/python/ovs/db/idl.py
deleted file mode 100644
index 84af978a4..000000000
--- a/python/ovs/db/idl.py
+++ /dev/null
@@ -1,2030 +0,0 @@
-# Copyright (c) 2009, 2010, 2011, 2012, 2013, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import functools
-import uuid
-
-import ovs.db.data as data
-import ovs.db.parser
-import ovs.db.schema
-import ovs.jsonrpc
-import ovs.ovsuuid
-import ovs.poller
-import ovs.vlog
-from ovs.db import custom_index
-from ovs.db import error
-
-import six
-
-vlog = ovs.vlog.Vlog("idl")
-
-__pychecker__ = 'no-classattr no-objattrs'
-
-ROW_CREATE = "create"
-ROW_UPDATE = "update"
-ROW_DELETE = "delete"
-
-OVSDB_UPDATE = 0
-OVSDB_UPDATE2 = 1
-
-CLUSTERED = "clustered"
-
-
-class Idl(object):
- """Open vSwitch Database Interface Definition Language (OVSDB IDL).
-
- The OVSDB IDL maintains an in-memory replica of a database. It issues RPC
- requests to an OVSDB database server and parses the responses, converting
- raw JSON into data structures that are easier for clients to digest.
-
- The IDL also assists with issuing database transactions. The client
- creates a transaction, manipulates the IDL data structures, and commits or
- aborts the transaction. The IDL then composes and issues the necessary
- JSON-RPC requests and reports to the client whether the transaction
- completed successfully.
-
- The client is allowed to access the following attributes directly, in a
- read-only fashion:
-
- - 'tables': This is the 'tables' map in the ovs.db.schema.DbSchema provided
- to the Idl constructor. Each ovs.db.schema.TableSchema in the map is
- annotated with a new attribute 'rows', which is a dict from a uuid.UUID
- to a Row object.
-
- The client may directly read and write the Row objects referenced by the
- 'rows' map values. Refer to Row for more details.
-
- - 'change_seqno': A number that represents the IDL's state. When the IDL
- is updated (by Idl.run()), its value changes. The sequence number can
- occasionally change even if the database does not. This happens if the
- connection to the database drops and reconnects, which causes the
- database contents to be reloaded even if they didn't change. (It could
- also happen if the database server sends out a "change" that reflects
- what the IDL already thought was in the database. The database server is
- not supposed to do that, but bugs could in theory cause it to do so.)
-
- - 'lock_name': The name of the lock configured with Idl.set_lock(), or None
- if no lock is configured.
-
- - 'has_lock': True, if the IDL is configured to obtain a lock and owns that
- lock, and False otherwise.
-
- Locking and unlocking happens asynchronously from the database client's
- point of view, so the information is only useful for optimization
- (e.g. if the client doesn't have the lock then there's no point in trying
- to write to the database).
-
- - 'is_lock_contended': True, if the IDL is configured to obtain a lock but
- the database server has indicated that some other client already owns the
- requested lock, and False otherwise.
-
- - 'txn': The ovs.db.idl.Transaction object for the database transaction
- currently being constructed, if there is one, or None otherwise.
-"""
-
- IDL_S_INITIAL = 0
- IDL_S_SERVER_SCHEMA_REQUESTED = 1
- IDL_S_SERVER_MONITOR_REQUESTED = 2
- IDL_S_DATA_MONITOR_REQUESTED = 3
- IDL_S_DATA_MONITOR_COND_REQUESTED = 4
-
- def __init__(self, remote, schema_helper, probe_interval=None,
- leader_only=True):
-        """Creates and returns a connection to the database on 'remote',
-        which should be in a form acceptable to ovs.jsonrpc.session.open().
-        The connection will maintain an in-memory replica of the remote
-        database.
-
-        'remote' may be a comma-separated list of remotes, each in a form
-        acceptable to ovs.jsonrpc.session.open().
-
- 'schema_helper' should be an instance of the SchemaHelper class which
- generates schema for the remote database. The caller may have cut it
- down by removing tables or columns that are not of interest. The IDL
- will only replicate the tables and columns that remain. The caller may
- also add an attribute named 'alert' to selected remaining columns,
- setting its value to False; if so, then changes to those columns will
- not be considered changes to the database for the purpose of the return
- value of Idl.run() and Idl.change_seqno. This is useful for columns
- that the IDL's client will write but not read.
-
-        The IDL uses and modifies the schema generated by 'schema_helper'
-        directly.
-
- If 'leader_only' is set to True (default value) the IDL will only
- monitor and transact with the leader of the cluster.
-
- If "probe_interval" is zero it disables the connection keepalive
- feature. If non-zero the value will be forced to at least 1000
- milliseconds. If None it will just use the default value in OVS.
- """
-
- assert isinstance(schema_helper, SchemaHelper)
- schema = schema_helper.get_idl_schema()
-
- self.tables = schema.tables
- self.readonly = schema.readonly
- self._db = schema
- remotes = self._parse_remotes(remote)
- self._session = ovs.jsonrpc.Session.open_multiple(remotes,
- probe_interval=probe_interval)
- self._monitor_request_id = None
- self._last_seqno = None
- self.change_seqno = 0
- self.uuid = uuid.uuid1()
-
- # Server monitor.
- self._server_schema_request_id = None
- self._server_monitor_request_id = None
- self._db_change_aware_request_id = None
- self._server_db_name = '_Server'
- self._server_db_table = 'Database'
- self.server_tables = None
- self._server_db = None
- self.server_monitor_uuid = uuid.uuid1()
- self.leader_only = leader_only
- self.cluster_id = None
- self._min_index = 0
-
- self.state = self.IDL_S_INITIAL
-
- # Database locking.
- self.lock_name = None # Name of lock we need, None if none.
- self.has_lock = False # Has db server said we have the lock?
- self.is_lock_contended = False # Has db server said we can't get lock?
- self._lock_request_id = None # JSON-RPC ID of in-flight lock request.
-
- # Transaction support.
- self.txn = None
- self._outstanding_txns = {}
-
- for table in six.itervalues(schema.tables):
- for column in six.itervalues(table.columns):
- if not hasattr(column, 'alert'):
- column.alert = True
- table.need_table = False
- table.rows = custom_index.IndexedRows(table)
- table.idl = self
- table.condition = [True]
- table.cond_changed = False
-
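- # Example sketch: constructing an IDL against multiple remotes; the
- # schema path and remotes are illustrative assumptions.
- #
- #     helper = SchemaHelper("/usr/share/openvswitch/vswitch.ovsschema")
- #     helper.register_all()
- #     idl = Idl("tcp:10.0.0.1:6641,tcp:10.0.0.2:6641", helper,
- #               probe_interval=0, leader_only=True)
-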
- def _parse_remotes(self, remote):
- # If 'remote' is
- # "tcp:10.0.0.1:6641,unix:/tmp/db.sock,t,s,tcp:10.0.0.2:6642"
- # then this function returns
- # ["tcp:10.0.0.1:6641", "unix:/tmp/db.sock,t,s", "tcp:10.0.0.2:6642"]
- remotes = []
- for r in remote.split(','):
- if remotes and r.find(":") == -1:
- remotes[-1] += "," + r
- else:
- remotes.append(r)
- return remotes
-
- def set_cluster_id(self, cluster_id):
- """Set the id of the cluster that this idl must connect to."""
- self.cluster_id = cluster_id
- if self.state != self.IDL_S_INITIAL:
- self.force_reconnect()
-
- def index_create(self, table, name):
- """Create a named multi-column index on a table"""
- return self.tables[table].rows.index_create(name)
-
- def index_irange(self, table, name, start, end):
- """Return items in a named index between start/end inclusive"""
- return self.tables[table].rows.indexes[name].irange(start, end)
-
- def index_equal(self, table, name, value):
- """Return items in a named index matching a value"""
- return self.tables[table].rows.indexes[name].irange(value, value)
-
- def close(self):
- """Closes the connection to the database. The IDL will no longer
- update."""
- self._session.close()
-
- def run(self):
- """Processes a batch of messages from the database server. Returns
- True if the database as seen through the IDL changed, False if it did
- not change. The initial fetch of the entire contents of the remote
- database is considered to be one kind of change. If the IDL has been
- configured to acquire a database lock (with Idl.set_lock()), then
- successfully acquiring the lock is also considered to be a change.
-
- This function can return occasional false positives, that is, report
- that the database changed even though it didn't. This happens if the
- connection to the database drops and reconnects, which causes the
- database contents to be reloaded even if they didn't change. (It could
- also happen if the database server sends out a "change" that reflects
- what we already thought was in the database, but the database server is
- not supposed to do that.)
-
- As an alternative to checking the return value, the client may check
- for changes in self.change_seqno."""
- assert not self.txn
- initial_change_seqno = self.change_seqno
-
- self.send_cond_change()
- self._session.run()
- for _ in range(50):
- if not self._session.is_connected():
- break
-
- seqno = self._session.get_seqno()
- if seqno != self._last_seqno:
- self._last_seqno = seqno
- self.__txn_abort_all()
- self.__send_server_schema_request()
- if self.lock_name:
- self.__send_lock_request()
- break
-
- msg = self._session.recv()
- if msg is None:
- break
-
- if (msg.type == ovs.jsonrpc.Message.T_NOTIFY
- and msg.method == "update2"
- and len(msg.params) == 2):
- # Database contents changed.
- self.__parse_update(msg.params[1], OVSDB_UPDATE2)
- elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
- and msg.method == "update"
- and len(msg.params) == 2):
- # Database contents changed.
- if msg.params[0] == str(self.server_monitor_uuid):
- self.__parse_update(msg.params[1], OVSDB_UPDATE,
- tables=self.server_tables)
- self.change_seqno = initial_change_seqno
- if not self.__check_server_db():
- self.force_reconnect()
- break
- else:
- self.__parse_update(msg.params[1], OVSDB_UPDATE)
- elif (msg.type == ovs.jsonrpc.Message.T_REPLY
- and self._monitor_request_id is not None
- and self._monitor_request_id == msg.id):
- # Reply to our "monitor" request.
- try:
- self.change_seqno += 1
- self._monitor_request_id = None
- self.__clear()
- if self.state == self.IDL_S_DATA_MONITOR_COND_REQUESTED:
- self.__parse_update(msg.result, OVSDB_UPDATE2)
- else:
- assert self.state == self.IDL_S_DATA_MONITOR_REQUESTED
- self.__parse_update(msg.result, OVSDB_UPDATE)
-
- except error.Error as e:
- vlog.err("%s: parse error in received schema: %s"
- % (self._session.get_name(), e))
- self.__error()
- elif (msg.type == ovs.jsonrpc.Message.T_REPLY
- and self._server_schema_request_id is not None
- and self._server_schema_request_id == msg.id):
- # Reply to our "get_schema" of _Server request.
- try:
- self._server_schema_request_id = None
- sh = SchemaHelper(None, msg.result)
- sh.register_table(self._server_db_table)
- schema = sh.get_idl_schema()
- self._server_db = schema
- self.server_tables = schema.tables
- self.__send_server_monitor_request()
- except error.Error as e:
- vlog.err("%s: error receiving server schema: %s"
- % (self._session.get_name(), e))
- if self.cluster_id:
- self.__error()
- break
- else:
- self.change_seqno = initial_change_seqno
- self.__send_monitor_request()
- elif (msg.type == ovs.jsonrpc.Message.T_REPLY
- and self._server_monitor_request_id is not None
- and self._server_monitor_request_id == msg.id):
- # Reply to our "monitor" of _Server request.
- try:
- self._server_monitor_request_id = None
- self.__parse_update(msg.result, OVSDB_UPDATE,
- tables=self.server_tables)
- self.change_seqno = initial_change_seqno
- if self.__check_server_db():
- self.__send_monitor_request()
- self.__send_db_change_aware()
- else:
- self.force_reconnect()
- break
- except error.Error as e:
- vlog.err("%s: parse error in received schema: %s"
- % (self._session.get_name(), e))
- if self.cluster_id:
- self.__error()
- break
- else:
- self.change_seqno = initial_change_seqno
- self.__send_monitor_request()
- elif (msg.type == ovs.jsonrpc.Message.T_REPLY
- and self._db_change_aware_request_id is not None
- and self._db_change_aware_request_id == msg.id):
- # Reply to us notifying the server of our change awareness.
- self._db_change_aware_request_id = None
- elif (msg.type == ovs.jsonrpc.Message.T_REPLY
- and self._lock_request_id is not None
- and self._lock_request_id == msg.id):
- # Reply to our "lock" request.
- self.__parse_lock_reply(msg.result)
- elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
- and msg.method == "locked"):
- # We got our lock.
- self.__parse_lock_notify(msg.params, True)
- elif (msg.type == ovs.jsonrpc.Message.T_NOTIFY
- and msg.method == "stolen"):
- # Someone else stole our lock.
- self.__parse_lock_notify(msg.params, False)
- elif msg.type == ovs.jsonrpc.Message.T_NOTIFY and msg.id == "echo":
- # Reply to our echo request. Ignore it.
- pass
- elif (msg.type == ovs.jsonrpc.Message.T_ERROR and
- self.state == self.IDL_S_DATA_MONITOR_COND_REQUESTED and
- self._monitor_request_id == msg.id):
- if msg.error == "unknown method":
- self.__send_monitor_request()
- elif (msg.type == ovs.jsonrpc.Message.T_ERROR and
- self._server_schema_request_id is not None and
- self._server_schema_request_id == msg.id):
- self._server_schema_request_id = None
- if self.cluster_id:
- self.force_reconnect()
- break
- else:
- self.change_seqno = initial_change_seqno
- self.__send_monitor_request()
- elif (msg.type in (ovs.jsonrpc.Message.T_ERROR,
- ovs.jsonrpc.Message.T_REPLY)
- and self.__txn_process_reply(msg)):
- # __txn_process_reply() did everything needed.
- pass
- else:
- # This can happen if a transaction is destroyed before we
- # receive the reply, so keep the log level low.
- vlog.dbg("%s: received unexpected %s message"
- % (self._session.get_name(),
- ovs.jsonrpc.Message.type_to_string(msg.type)))
-
- return initial_change_seqno != self.change_seqno
-
- def send_cond_change(self):
- if not self._session.is_connected():
- return
-
- for table in six.itervalues(self.tables):
- if table.cond_changed:
- self.__send_cond_change(table, table.condition)
- table.cond_changed = False
-
- def cond_change(self, table_name, cond):
- """Sets the condition for 'table_name' to 'cond', which should be a
- conditional expression suitable for use directly in the OVSDB
- protocol, with the exception that the empty condition []
- matches no rows (instead of matching every row). That is, []
- is equivalent to [False], not to [True].
- """
-
- table = self.tables.get(table_name)
- if not table:
- raise error.Error('Unknown table "%s"' % table_name)
-
- if cond == []:
- cond = [False]
- if table.condition != cond:
- table.condition = cond
- table.cond_changed = True
-
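- # Example sketch: restrict monitoring of a hypothetical "Logical_Flow"
- # table to rows matching a condition, then widen it again; 'dp_uuid'
- # is an assumed UUID string.
- #
- #     idl.cond_change("Logical_Flow",
- #                     [["logical_datapath", "==", ["uuid", dp_uuid]]])
- #     idl.cond_change("Logical_Flow", [True])    # back to all rows
-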
- def wait(self, poller):
- """Arranges for poller.block() to wake up when self.run() has something
- to do or when activity occurs on a transaction on 'self'."""
- self._session.wait(poller)
- self._session.recv_wait(poller)
-
- def has_ever_connected(self):
- """Returns True, if the IDL successfully connected to the remote
- database and retrieved its contents (even if the connection
- subsequently dropped and is in the process of reconnecting). If so,
- then the IDL contains an atomic snapshot of the database's contents
- (but it might be arbitrarily old if the connection dropped).
-
- Returns False if the IDL has never connected or retrieved the
- database's contents. If so, the IDL is empty."""
- return self.change_seqno != 0
-
- def force_reconnect(self):
- """Forces the IDL to drop its connection to the database and reconnect.
- In the meantime, the contents of the IDL will not change."""
- self._session.force_reconnect()
-
- def session_name(self):
- return self._session.get_name()
-
- def set_lock(self, lock_name):
- """If 'lock_name' is not None, configures the IDL to obtain the named
- lock from the database server and to avoid modifying the database when
- the lock cannot be acquired (that is, when another client has the same
- lock).
-
- If 'lock_name' is None, drops the locking requirement and releases the
- lock."""
- assert not self.txn
- assert not self._outstanding_txns
-
- if self.lock_name and (not lock_name or lock_name != self.lock_name):
- # Release previous lock.
- self.__send_unlock_request()
- self.lock_name = None
- self.is_lock_contended = False
-
- if lock_name and not self.lock_name:
- # Acquire new lock.
- self.lock_name = lock_name
- self.__send_lock_request()
-
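- # Example sketch: cooperating with other clients through a named lock;
- # the lock name is an illustrative assumption.
- #
- #     idl.set_lock("ovn_northd")
- #     idl.run()
- #     if idl.has_lock:
- #         pass        # safe to write to the database
- #     elif idl.is_lock_contended:
- #         pass        # another client owns the lock
-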
- def notify(self, event, row, updates=None):
- """Hook for implementing create/update/delete notifications
-
- :param event: The event that was triggered
- :type event: ROW_CREATE, ROW_UPDATE, or ROW_DELETE
- :param row: The row as it is after the operation has occured
- :type row: Row
- :param updates: For updates, row with only old values of the changed
- columns
- :type updates: Row
- """
-
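- # Example sketch: a subclass can override notify() to react to row
- # events; the class name is an illustrative assumption.
- #
- #     class LoggingIdl(Idl):
- #         def notify(self, event, row, updates=None):
- #             if event == ROW_CREATE:
- #                 vlog.info("row %s created" % row.uuid)
-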
- def __send_cond_change(self, table, cond):
- monitor_cond_change = {table.name: [{"where": cond}]}
- old_uuid = str(self.uuid)
- self.uuid = uuid.uuid1()
- params = [old_uuid, str(self.uuid), monitor_cond_change]
- msg = ovs.jsonrpc.Message.create_request("monitor_cond_change", params)
- self._session.send(msg)
-
- def __clear(self):
- changed = False
-
- for table in six.itervalues(self.tables):
- if table.rows:
- changed = True
- table.rows = custom_index.IndexedRows(table)
-
- if changed:
- self.change_seqno += 1
-
- def __update_has_lock(self, new_has_lock):
- if new_has_lock and not self.has_lock:
- if self._monitor_request_id is None:
- self.change_seqno += 1
- else:
- # We're waiting for a monitor reply, so don't signal that the
- # database changed. The monitor reply will increment
- # change_seqno anyhow.
- pass
- self.is_lock_contended = False
- self.has_lock = new_has_lock
-
- def __do_send_lock_request(self, method):
- self.__update_has_lock(False)
- self._lock_request_id = None
- if self._session.is_connected():
- msg = ovs.jsonrpc.Message.create_request(method, [self.lock_name])
- msg_id = msg.id
- self._session.send(msg)
- else:
- msg_id = None
- return msg_id
-
- def __send_lock_request(self):
- self._lock_request_id = self.__do_send_lock_request("lock")
-
- def __send_unlock_request(self):
- self.__do_send_lock_request("unlock")
-
- def __parse_lock_reply(self, result):
- self._lock_request_id = None
- got_lock = isinstance(result, dict) and result.get("locked") is True
- self.__update_has_lock(got_lock)
- if not got_lock:
- self.is_lock_contended = True
-
- def __parse_lock_notify(self, params, new_has_lock):
- if (self.lock_name is not None
- and isinstance(params, (list, tuple))
- and params
- and params[0] == self.lock_name):
- self.__update_has_lock(new_has_lock)
- if not new_has_lock:
- self.is_lock_contended = True
-
- def __send_db_change_aware(self):
- msg = ovs.jsonrpc.Message.create_request("set_db_change_aware",
- [True])
- self._db_change_aware_request_id = msg.id
- self._session.send(msg)
-
- def __send_monitor_request(self):
- if (self.state in [self.IDL_S_SERVER_MONITOR_REQUESTED,
- self.IDL_S_INITIAL]):
- self.state = self.IDL_S_DATA_MONITOR_COND_REQUESTED
- method = "monitor_cond"
- else:
- self.state = self.IDL_S_DATA_MONITOR_REQUESTED
- method = "monitor"
-
- monitor_requests = {}
- for table in six.itervalues(self.tables):
- columns = []
- for column in six.iterkeys(table.columns):
- if (table.name not in self.readonly or
- column not in self.readonly[table.name]):
- columns.append(column)
- monitor_request = {"columns": columns}
- if method == "monitor_cond" and table.condition != [True]:
- monitor_request["where"] = table.condition
- table.cond_changed = False
- monitor_requests[table.name] = [monitor_request]
-
- msg = ovs.jsonrpc.Message.create_request(
- method, [self._db.name, str(self.uuid), monitor_requests])
- self._monitor_request_id = msg.id
- self._session.send(msg)
-
- def __send_server_schema_request(self):
- self.state = self.IDL_S_SERVER_SCHEMA_REQUESTED
- msg = ovs.jsonrpc.Message.create_request(
- "get_schema", [self._server_db_name, str(self.uuid)])
- self._server_schema_request_id = msg.id
- self._session.send(msg)
-
- def __send_server_monitor_request(self):
- self.state = self.IDL_S_SERVER_MONITOR_REQUESTED
- monitor_requests = {}
- table = self.server_tables[self._server_db_table]
- columns = list(six.iterkeys(table.columns))
- for column in six.itervalues(table.columns):
- if not hasattr(column, 'alert'):
- column.alert = True
- table.rows = custom_index.IndexedRows(table)
- table.need_table = False
- table.idl = self
- monitor_request = {"columns": columns}
- monitor_requests[table.name] = [monitor_request]
- msg = ovs.jsonrpc.Message.create_request(
- 'monitor', [self._server_db.name,
- str(self.server_monitor_uuid),
- monitor_requests])
- self._server_monitor_request_id = msg.id
- self._session.send(msg)
-
- def __parse_update(self, update, version, tables=None):
- try:
- if not tables:
- self.__do_parse_update(update, version, self.tables)
- else:
- self.__do_parse_update(update, version, tables)
- except error.Error as e:
- vlog.err("%s: error parsing update: %s"
- % (self._session.get_name(), e))
-
- def __do_parse_update(self, table_updates, version, tables):
- if not isinstance(table_updates, dict):
- raise error.Error("<table-updates> is not an object",
- table_updates)
-
- for table_name, table_update in six.iteritems(table_updates):
- table = tables.get(table_name)
- if not table:
- raise error.Error('<table-updates> includes unknown '
- 'table "%s"' % table_name)
-
- if not isinstance(table_update, dict):
- raise error.Error('<table-update> for table "%s" is not '
- 'an object' % table_name, table_update)
-
- for uuid_string, row_update in six.iteritems(table_update):
- if not ovs.ovsuuid.is_valid_string(uuid_string):
- raise error.Error('<table-update> for table "%s" '
- 'contains bad UUID "%s" as member '
- 'name' % (table_name, uuid_string),
- table_update)
- uuid = ovs.ovsuuid.from_string(uuid_string)
-
- if not isinstance(row_update, dict):
- raise error.Error('<table-update> for table "%s" '
- 'contains <row-update> for %s that '
- 'is not an object'
- % (table_name, uuid_string))
-
- if version == OVSDB_UPDATE2:
- if self.__process_update2(table, uuid, row_update):
- self.change_seqno += 1
- continue
-
- parser = ovs.db.parser.Parser(row_update, "row-update")
- old = parser.get_optional("old", [dict])
- new = parser.get_optional("new", [dict])
- parser.finish()
-
- if not old and not new:
- raise error.Error('<row-update> missing "old" and '
- '"new" members', row_update)
-
- if self.__process_update(table, uuid, old, new):
- self.change_seqno += 1
-
- def __process_update2(self, table, uuid, row_update):
- row = table.rows.get(uuid)
- changed = False
- if "delete" in row_update:
- if row:
- del table.rows[uuid]
- self.notify(ROW_DELETE, row)
- changed = True
- else:
- # XXX rate-limit
- vlog.warn("cannot delete missing row %s from table"
- "%s" % (uuid, table.name))
- elif "insert" in row_update or "initial" in row_update:
- if row:
- vlog.warn("cannot add existing row %s from table"
- " %s" % (uuid, table.name))
- del table.rows[uuid]
- row = self.__create_row(table, uuid)
- if "insert" in row_update:
- row_update = row_update['insert']
- else:
- row_update = row_update['initial']
- self.__add_default(table, row_update)
- changed = self.__row_update(table, row, row_update)
- table.rows[uuid] = row
- if changed:
- self.notify(ROW_CREATE, row)
- elif "modify" in row_update:
- if not row:
- raise error.Error('Modify non-existing row')
-
- old_row = self.__apply_diff(table, row, row_update['modify'])
- self.notify(ROW_UPDATE, row, Row(self, table, uuid, old_row))
- changed = True
- else:
- raise error.Error('<row-update> unknown operation',
- row_update)
- return changed
-
- def __process_update(self, table, uuid, old, new):
- """Returns True if a column changed, False otherwise."""
- row = table.rows.get(uuid)
- changed = False
- if not new:
- # Delete row.
- if row:
- del table.rows[uuid]
- changed = True
- self.notify(ROW_DELETE, row)
- else:
- # XXX rate-limit
- vlog.warn("cannot delete missing row %s from table %s"
- % (uuid, table.name))
- elif not old:
- # Insert row.
- op = ROW_CREATE
- if not row:
- row = self.__create_row(table, uuid)
- changed = True
- else:
- # XXX rate-limit
- op = ROW_UPDATE
- vlog.warn("cannot add existing row %s to table %s"
- % (uuid, table.name))
- changed |= self.__row_update(table, row, new)
- if op == ROW_CREATE:
- table.rows[uuid] = row
- if changed:
- self.notify(ROW_CREATE, row)
- else:
- op = ROW_UPDATE
- if not row:
- row = self.__create_row(table, uuid)
- changed = True
- op = ROW_CREATE
- # XXX rate-limit
- vlog.warn("cannot modify missing row %s in table %s"
- % (uuid, table.name))
- changed |= self.__row_update(table, row, new)
- if op == ROW_CREATE:
- table.rows[uuid] = row
- if changed:
- self.notify(op, row, Row.from_json(self, table, uuid, old))
- return changed
-
- def __check_server_db(self):
- """Returns True if this is a valid server database, False otherwise."""
- session_name = self.session_name()
-
- if self._server_db_table not in self.server_tables:
- vlog.info("%s: server does not have %s table in its %s database"
- % (session_name, self._server_db_table,
- self._server_db_name))
- return False
-
- rows = self.server_tables[self._server_db_table].rows
-
- database = None
- for row in six.itervalues(rows):
- if self.cluster_id:
- if self.cluster_id in (str(c)[:4] for c in row.cid):
- database = row
- break
- elif row.name == self._db.name:
- database = row
- break
-
- if not database:
- vlog.info("%s: server does not have %s database"
- % (session_name, self._db.name))
- return False
-
- if (database.model == CLUSTERED and
- self._session.get_num_of_remotes() > 1):
- if not database.schema:
- vlog.info('%s: clustered database server has not yet joined '
- 'cluster; trying another server' % session_name)
- return False
- if not database.connected:
- vlog.info('%s: clustered database server is disconnected '
- 'from cluster; trying another server' % session_name)
- return False
- if (self.leader_only and
- not database.leader):
- vlog.info('%s: clustered database server is not cluster '
- 'leader; trying another server' % session_name)
- return False
- if database.index:
- if database.index[0] < self._min_index:
- vlog.warn('%s: clustered database server has stale data; '
- 'trying another server' % session_name)
- return False
- self._min_index = database.index[0]
-
- return True
-
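- # Returns the default value for 'column' (not its name), converting
- # UUID defaults to JSON form.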
- def __column_name(self, column):
- if column.type.key.type == ovs.db.types.UuidType:
- return ovs.ovsuuid.to_json(column.type.key.type.default)
- else:
- return column.type.key.type.default
-
- def __add_default(self, table, row_update):
- for column in six.itervalues(table.columns):
- if column.name not in row_update:
- if (table.name not in self.readonly or
- column.name not in self.readonly[table.name]):
- if column.type.n_min != 0 and not column.type.is_map():
- row_update[column.name] = self.__column_name(column)
-
- def __apply_diff(self, table, row, row_diff):
- old_row = {}
- for column_name, datum_diff_json in six.iteritems(row_diff):
- column = table.columns.get(column_name)
- if not column:
- # XXX rate-limit
- vlog.warn("unknown column %s updating table %s"
- % (column_name, table.name))
- continue
-
- try:
- datum_diff = data.Datum.from_json(column.type, datum_diff_json)
- except error.Error as e:
- # XXX rate-limit
- vlog.warn("error parsing column %s in table %s: %s"
- % (column_name, table.name, e))
- continue
-
- old_row[column_name] = row._data[column_name].copy()
- datum = row._data[column_name].diff(datum_diff)
- if datum != row._data[column_name]:
- row._data[column_name] = datum
-
- return old_row
-
- def __row_update(self, table, row, row_json):
- changed = False
- for column_name, datum_json in six.iteritems(row_json):
- column = table.columns.get(column_name)
- if not column:
- # XXX rate-limit
- vlog.warn("unknown column %s updating table %s"
- % (column_name, table.name))
- continue
-
- try:
- datum = data.Datum.from_json(column.type, datum_json)
- except error.Error as e:
- # XXX rate-limit
- vlog.warn("error parsing column %s in table %s: %s"
- % (column_name, table.name, e))
- continue
-
- if datum != row._data[column_name]:
- row._data[column_name] = datum
- if column.alert:
- changed = True
- else:
- # Didn't really change but the OVSDB monitor protocol always
- # includes every value in a row.
- pass
- return changed
-
- def __create_row(self, table, uuid):
- data = {}
- for column in six.itervalues(table.columns):
- data[column.name] = ovs.db.data.Datum.default(column.type)
- return Row(self, table, uuid, data)
-
- def __error(self):
- self._session.force_reconnect()
-
- def __txn_abort_all(self):
- while self._outstanding_txns:
- txn = self._outstanding_txns.popitem()[1]
- txn._status = Transaction.TRY_AGAIN
-
- def __txn_process_reply(self, msg):
- txn = self._outstanding_txns.pop(msg.id, None)
- if txn:
- txn._process_reply(msg)
- return True
-
-
-def _uuid_to_row(atom, base):
- if base.ref_table:
- return base.ref_table.rows.get(atom)
- else:
- return atom
-
-
-def _row_to_uuid(value):
- if isinstance(value, Row):
- return value.uuid
- else:
- return value
-
-
-@functools.total_ordering
-class Row(object):
- """A row within an IDL.
-
- The client may access the following attributes directly:
-
- - 'uuid': a uuid.UUID object whose value is the row's database UUID.
-
- - An attribute for each column in the Row's table, named for the column,
- whose values are as returned by Datum.to_python() for the column's type.
-
- If some error occurs (e.g. the database server's idea of the column is
- different from the IDL's idea), then the attribute's value is the
- "default" value returned by Datum.default() for the column's type. (It
- is important to know this because the default value may violate
- constraints for the column's type, e.g. the default integer value is 0
- even if column constraints require the column's value to be positive.)
-
- When a transaction is active, column attributes may also be assigned new
- values. Committing the transaction will then cause the new value to be
- stored into the database.
-
- *NOTE*: In the current implementation, the value of a column is a *copy*
- of the value in the database. This means that modifying its value
- directly will have no useful effect. For example, the following:
- row.mycolumn["a"] = "b" # don't do this
- will not change anything in the database, even after commit. To modify
- the column, instead assign the modified column value back to the column:
- d = row.mycolumn
- d["a"] = "b"
- row.mycolumn = d
-"""
- def __init__(self, idl, table, uuid, data):
- # All of the explicit references to self.__dict__ below are required
- # to set real attributes without invoking self.__setattr__().
- self.__dict__["uuid"] = uuid
-
- self.__dict__["_idl"] = idl
- self.__dict__["_table"] = table
-
- # _data is the committed data. It takes the following values:
- #
- # - A dictionary that maps every column name to a Datum, if the row
- # exists in the committed form of the database.
- #
- # - None, if this row is newly inserted within the active transaction
- # and thus has no committed form.
- self.__dict__["_data"] = data
-
- # _changes describes changes to this row within the active transaction.
- # It takes the following values:
- #
- # - {}, the empty dictionary, if no transaction is active or if the
- # row has not yet been changed within this transaction.
- #
- # - A dictionary that maps a column name to its new Datum, if an
- # active transaction changes those columns' values.
- #
- # - A dictionary that maps every column name to a Datum, if the row
- # is newly inserted within the active transaction.
- #
- # - None, if this transaction deletes this row.
- self.__dict__["_changes"] = {}
-
- # _mutations describes changes to this row to be handled via a
- # mutate operation on the wire. It takes the following values:
- #
- # - {}, the empty dictionary, if no transaction is active or if the
- # row has not yet been mutated within this transaction.
- #
- # - A dictionary that contains two keys:
- #
- # - "_inserts" contains a dictionary that maps column names to
- # new keys/key-value pairs that should be inserted into the
- # column
- # - "_removes" contains a dictionary that maps column names to
- # the keys/key-value pairs that should be removed from the
- # column
- #
- # - None, if this transaction deletes this row.
- self.__dict__["_mutations"] = {}
-
- # A dictionary whose keys are the names of columns that must be
- # verified as prerequisites when the transaction commits. The values
- # in the dictionary are all None.
- self.__dict__["_prereqs"] = {}
-
- def __lt__(self, other):
- if not isinstance(other, Row):
- return NotImplemented
- return bool(self.__dict__['uuid'] < other.__dict__['uuid'])
-
- def __eq__(self, other):
- if not isinstance(other, Row):
- return NotImplemented
- return bool(self.__dict__['uuid'] == other.__dict__['uuid'])
-
- def __hash__(self):
- return int(self.__dict__['uuid'])
-
- def __getattr__(self, column_name):
- assert self._changes is not None
- assert self._mutations is not None
-
- try:
- column = self._table.columns[column_name]
- except KeyError:
- raise AttributeError("%s instance has no attribute '%s'" %
- (self.__class__.__name__, column_name))
- datum = self._changes.get(column_name)
- inserts = None
- if '_inserts' in self._mutations:
- inserts = self._mutations['_inserts'].get(column_name)
- removes = None
- if '_removes' in self._mutations:
- removes = self._mutations['_removes'].get(column_name)
- if datum is None:
- if self._data is None:
- if inserts is None:
- raise AttributeError("%s instance has no attribute '%s'" %
- (self.__class__.__name__,
- column_name))
- else:
- datum = data.Datum.from_python(column.type,
- inserts,
- _row_to_uuid)
- elif column_name in self._data:
- datum = self._data[column_name]
- if column.type.is_set():
- dlist = datum.as_list()
- if inserts is not None:
- dlist.extend(list(inserts))
- if removes is not None:
- removes_datum = data.Datum.from_python(column.type,
- removes,
- _row_to_uuid)
- removes_list = removes_datum.as_list()
- dlist = [x for x in dlist if x not in removes_list]
- datum = data.Datum.from_python(column.type, dlist,
- _row_to_uuid)
- elif column.type.is_map():
- dmap = datum.to_python(_uuid_to_row)
- if inserts is not None:
- dmap.update(inserts)
- if removes is not None:
- for key in removes:
- if key not in (inserts or {}):
- dmap.pop(key, None)
- datum = data.Datum.from_python(column.type, dmap,
- _row_to_uuid)
- else:
- if inserts is None:
- raise AttributeError("%s instance has no attribute '%s'" %
- (self.__class__.__name__,
- column_name))
- else:
- datum = inserts
-
- return datum.to_python(_uuid_to_row)
-
- def __setattr__(self, column_name, value):
- assert self._changes is not None
- assert self._idl.txn
-
- if ((self._table.name in self._idl.readonly) and
- (column_name in self._idl.readonly[self._table.name])):
- vlog.warn("attempting to write to readonly column %s"
- % column_name)
- return
-
- column = self._table.columns[column_name]
- try:
- datum = data.Datum.from_python(column.type, value, _row_to_uuid)
- except error.Error as e:
- # XXX rate-limit
- vlog.err("attempting to write bad value to column %s (%s)"
- % (column_name, e))
- return
- # Remove prior version of the Row from the index if it has the indexed
- # column set, and the column changing is an indexed column
- if hasattr(self, column_name):
- for idx in self._table.rows.indexes.values():
- if column_name in (c.column for c in idx.columns):
- idx.remove(self)
- self._idl.txn._write(self, column, datum)
- for idx in self._table.rows.indexes.values():
- # Only update the index if indexed columns change
- if column_name in (c.column for c in idx.columns):
- idx.add(self)
-
- def addvalue(self, column_name, key):
- self._idl.txn._txn_rows[self.uuid] = self
- column = self._table.columns[column_name]
- try:
- data.Datum.from_python(column.type, key, _row_to_uuid)
- except error.Error as e:
- # XXX rate-limit
- vlog.err("attempting to write bad value to column %s (%s)"
- % (column_name, e))
- return
- inserts = self._mutations.setdefault('_inserts', {})
- column_value = inserts.setdefault(column_name, set())
- column_value.add(key)
-
- def delvalue(self, column_name, key):
- self._idl.txn._txn_rows[self.uuid] = self
- column = self._table.columns[column_name]
- try:
- data.Datum.from_python(column.type, key, _row_to_uuid)
- except error.Error as e:
- # XXX rate-limit
- vlog.err("attempting to delete bad value from column %s (%s)"
- % (column_name, e))
- return
- removes = self._mutations.setdefault('_removes', {})
- column_value = removes.setdefault(column_name, set())
- column_value.add(key)
-
- def setkey(self, column_name, key, value):
- self._idl.txn._txn_rows[self.uuid] = self
- column = self._table.columns[column_name]
- try:
- data.Datum.from_python(column.type, {key: value}, _row_to_uuid)
- except error.Error as e:
- # XXX rate-limit
- vlog.err("attempting to write bad value to column %s (%s)"
- % (column_name, e))
- return
- if self._data and column_name in self._data:
- # Remove existing key/value before updating.
- removes = self._mutations.setdefault('_removes', {})
- column_value = removes.setdefault(column_name, set())
- column_value.add(key)
- inserts = self._mutations.setdefault('_inserts', {})
- column_value = inserts.setdefault(column_name, {})
- column_value[key] = value
-
- def delkey(self, column_name, key, value=None):
- self._idl.txn._txn_rows[self.uuid] = self
- if value:
- try:
- old_value = data.Datum.to_python(self._data[column_name],
- _uuid_to_row)
- except error.Error:
- return
- if key not in old_value:
- return
- if old_value[key] != value:
- return
- removes = self._mutations.setdefault('_removes', {})
- column_value = removes.setdefault(column_name, set())
- column_value.add(key)
- return
-
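- # Example sketch: partial updates with the mutate helpers above, which
- # avoid rewriting a whole set or map column; the column names are
- # illustrative assumptions.
- #
- #     row.addvalue("ports", port_row)        # set column: add member
- #     row.delvalue("ports", old_port_row)    # set column: drop member
- #     row.setkey("external_ids", "k", "v")   # map column: set one key
- #     row.delkey("external_ids", "k")        # map column: drop one key
-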
- @classmethod
- def from_json(cls, idl, table, uuid, row_json):
- data = {}
- for column_name, datum_json in six.iteritems(row_json):
- column = table.columns.get(column_name)
- if not column:
- # XXX rate-limit
- vlog.warn("unknown column %s in table %s"
- % (column_name, table.name))
- continue
- try:
- datum = ovs.db.data.Datum.from_json(column.type, datum_json)
- except error.Error as e:
- # XXX rate-limit
- vlog.warn("error parsing column %s in table %s: %s"
- % (column_name, table.name, e))
- continue
- data[column_name] = datum
- return cls(idl, table, uuid, data)
-
- def verify(self, column_name):
- """Causes the original contents of column 'column_name' in this row to
- be verified as a prerequisite to completing the transaction. That is,
- if 'column_name' changed in this row (or if this row was deleted)
- between the time that the IDL originally read its contents and the time
- that the transaction commits, then the transaction aborts and
- Transaction.commit() returns Transaction.TRY_AGAIN.
-
- The intention is that, to ensure that no transaction commits based on
- dirty reads, an application should call Row.verify() on each data item
- read as part of a read-modify-write operation.
-
- In some cases Row.verify() reduces to a no-op, because the current
- value of the column is already known:
-
- - If this row is a row created by the current transaction (returned
- by Transaction.insert()).
-
- - If the column has already been modified within the current
- transaction.
-
- Because of the latter property, always call Row.verify() *before*
- modifying the column, for a given read-modify-write.
-
- A transaction must be in progress."""
- assert self._idl.txn
- assert self._changes is not None
- if not self._data or column_name in self._changes:
- return
-
- self._prereqs[column_name] = None
-
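- # Example sketch: a read-modify-write guarded by verify(); the column
- # name is an illustrative assumption.
- #
- #     row.verify("external_ids")
- #     ids = row.external_ids
- #     ids["owner"] = "example"
- #     row.external_ids = ids    # commit() returns TRY_AGAIN if the
- #                               # column changed under us
-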
- def delete(self):
- """Deletes this row from its table.
-
- A transaction must be in progress."""
- assert self._idl.txn
- assert self._changes is not None
- if self._data is None:
- del self._idl.txn._txn_rows[self.uuid]
- else:
- self._idl.txn._txn_rows[self.uuid] = self
- del self._table.rows[self.uuid]
- self.__dict__["_changes"] = None
-
- def fetch(self, column_name):
- self._idl.txn._fetch(self, column_name)
-
- def increment(self, column_name):
- """Causes the transaction, when committed, to increment the value of
- 'column_name' within this row by 1. 'column_name' must have an integer
- type. After the transaction commits successfully, the client may
- retrieve the final (incremented) value of 'column_name' with
- Transaction.get_increment_new_value().
-
- The client could accomplish something similar by reading and writing
- and verify()ing columns. However, increment() will never (by itself)
- cause a transaction to fail because of a verify error.
-
- The intended use is for incrementing the "next_cfg" column in
- the Open_vSwitch table."""
- self._idl.txn._increment(self, column_name)
-
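- # Example sketch: bump the "next_cfg" column and read back the
- # incremented value after a successful commit.
- #
- #     row.increment("next_cfg")
- #     if txn.commit_block() == Transaction.SUCCESS:
- #         new_value = txn.get_increment_new_value()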
-
-def _uuid_name_from_uuid(uuid):
- return "row%s" % str(uuid).replace("-", "_")
-
-
-def _where_uuid_equals(uuid):
- return [["_uuid", "==", ["uuid", str(uuid)]]]
-
-
-class _InsertedRow(object):
- def __init__(self, op_index):
- self.op_index = op_index
- self.real = None
-
-
-class Transaction(object):
- """A transaction may modify the contents of a database by modifying the
- values of columns, deleting rows, inserting rows, or adding checks that
- columns in the database have not changed ("verify" operations), through
- Row methods.
-
- Reading and writing columns and inserting and deleting rows are all
- straightforward. The reasons to verify columns are less obvious.
- Verification is the key to maintaining transactional integrity. Because
- OVSDB handles multiple clients, it can happen that between the time that
- OVSDB client A reads a column and writes a new value, OVSDB client B has
- written that column. Client A's write should not ordinarily overwrite
- client B's, especially if the column in question is a "map" column that
- contains several more or less independent data items. If client A adds a
- "verify" operation before it writes the column, then the transaction fails
- in case client B modifies it first. Client A will then see the new value
- of the column and compose a new transaction based on the new contents
- written by client B.
-
- When a transaction is complete, which must be before the next call to
- Idl.run(), call Transaction.commit() or Transaction.abort().
-
- The life-cycle of a transaction looks like this:
-
- 1. Create the transaction and record the initial sequence number:
-
- seqno = idl.change_seqno
- txn = Transaction(idl)
-
- 2. Modify the database with Row and Transaction methods.
-
- 3. Commit the transaction by calling Transaction.commit(). The first call
- to this function probably returns Transaction.INCOMPLETE. The client
- must keep calling again as long as this remains true, calling Idl.run() in
- between to let the IDL do protocol processing. (If the client doesn't
- have anything else to do in the meantime, it can use
- Transaction.commit_block() to avoid having to loop itself.)
-
- 4. If the final status is Transaction.TRY_AGAIN, wait for Idl.change_seqno
- to change from the saved 'seqno' (it's possible that it's already
- changed, in which case the client should not wait at all), then start
- over from step 1. Only a call to Idl.run() will change the return value
- of Idl.change_seqno. (Transaction.commit_block() calls Idl.run().)"""
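-
- # Example sketch of the life-cycle above (illustrative; assumes an
- # 'idl' built elsewhere):
- #
- #     while True:
- #         seqno = idl.change_seqno
- #         txn = Transaction(idl)
- #         ...                        # modify rows here
- #         if txn.commit_block() != Transaction.TRY_AGAIN:
- #             break
- #         while idl.change_seqno == seqno:
- #             idl.run()
- #             poller = ovs.poller.Poller()
- #             idl.wait(poller)
- #             poller.block()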
-
- # Status values that Transaction.commit() can return.
-
- # Not yet committed or aborted.
- UNCOMMITTED = "uncommitted"
- # Transaction didn't include any changes.
- UNCHANGED = "unchanged"
- # Commit in progress, please wait.
- INCOMPLETE = "incomplete"
- # ovsdb_idl_txn_abort() called.
- ABORTED = "aborted"
- # Commit successful.
- SUCCESS = "success"
- # Commit failed because a "verify" operation
- # reported an inconsistency, due to a network
- # problem, or other transient failure. Wait
- # for a change, then try again.
- TRY_AGAIN = "try again"
- # Server hasn't given us the lock yet.
- NOT_LOCKED = "not locked"
- # Commit failed due to a hard error.
- ERROR = "error"
-
- @staticmethod
- def status_to_string(status):
- """Converts one of the status values that Transaction.commit() can
- return into a human-readable string.
-
- (The status values are in fact such strings already, so
- there's nothing to do.)"""
- return status
-
- def __init__(self, idl):
- """Starts a new transaction on 'idl' (an instance of ovs.db.idl.Idl).
- A given Idl may only have a single active transaction at a time.
-
- A Transaction may modify the contents of a database by assigning new
- values to columns (attributes of Row), deleting rows (with
- Row.delete()), or inserting rows (with Transaction.insert()). It may
- also check that columns in the database have not changed with
- Row.verify().
-
- When a transaction is complete (which must be before the next call to
- Idl.run()), call Transaction.commit() or Transaction.abort()."""
- assert idl.txn is None
-
- idl.txn = self
- self._request_id = None
- self.idl = idl
- self.dry_run = False
- self._txn_rows = {}
- self._status = Transaction.UNCOMMITTED
- self._error = None
- self._comments = []
-
- self._inc_row = None
- self._inc_column = None
-
- self._fetch_requests = []
-
- self._inserted_rows = {} # Map from UUID to _InsertedRow
-
- def add_comment(self, comment):
- """Appends 'comment' to the comments that will be passed to the OVSDB
- server when this transaction is committed. (The comment will be
- committed to the OVSDB log, which "ovsdb-tool show-log" can print in a
- relatively human-readable form.)"""
- self._comments.append(comment)
-
- def wait(self, poller):
- """Causes poll_block() to wake up if this transaction has completed
- committing."""
- if self._status not in (Transaction.UNCOMMITTED,
- Transaction.INCOMPLETE):
- poller.immediate_wake()
-
- def _substitute_uuids(self, json):
- if isinstance(json, (list, tuple)):
- if (len(json) == 2
- and json[0] == 'uuid'
- and ovs.ovsuuid.is_valid_string(json[1])):
- uuid = ovs.ovsuuid.from_string(json[1])
- row = self._txn_rows.get(uuid, None)
- if row and row._data is None:
- return ["named-uuid", _uuid_name_from_uuid(uuid)]
- else:
- return [self._substitute_uuids(elem) for elem in json]
- return json
-
- def __disassemble(self):
- self.idl.txn = None
-
- for row in six.itervalues(self._txn_rows):
- if row._changes is None:
- # If we added the deleted row back to 'rows' with _changes ==
- # None, __getattr__ would not work for the indexes, so reset
- # _changes and _mutations first.
- row.__dict__["_changes"] = {}
- row.__dict__["_mutations"] = {}
- row._table.rows[row.uuid] = row
- elif row._data is None:
- del row._table.rows[row.uuid]
- row.__dict__["_changes"] = {}
- row.__dict__["_mutations"] = {}
- row.__dict__["_prereqs"] = {}
- self._txn_rows = {}
-
- def commit(self):
- """Attempts to commit 'txn'. Returns the status of the commit
- operation, one of the following constants:
-
- Transaction.INCOMPLETE:
-
- The transaction is in progress, but not yet complete. The caller
- should call again later, after calling Idl.run() to let the
- IDL do OVSDB protocol processing.
-
- Transaction.UNCHANGED:
-
- The transaction is complete. (It didn't actually change the
- database, so the IDL didn't send any request to the database
- server.)
-
- Transaction.ABORTED:
-
- The caller previously called Transaction.abort().
-
- Transaction.SUCCESS:
-
- The transaction was successful. The update made by the
- transaction (and possibly other changes made by other database
- clients) should already be visible in the IDL.
-
- Transaction.TRY_AGAIN:
-
- The transaction failed for some transient reason, e.g. because a
- "verify" operation reported an inconsistency or due to a network
- problem. The caller should wait for a change to the database,
- then compose a new transaction, and commit the new transaction.
-
- Use Idl.change_seqno to wait for a change in the database. It is
- important to use its value *before* the initial call to
- Transaction.commit() as the baseline for this purpose, because
- the change that one should wait for can happen after the initial
- call but before the call that returns Transaction.TRY_AGAIN, and
- using some other baseline value in that situation could cause an
- indefinite wait if the database rarely changes.
-
- Transaction.NOT_LOCKED:
-
- The transaction failed because the IDL has been configured to
- require a database lock (with Idl.set_lock()) but didn't
- get it yet or has already lost it.
-
- Committing a transaction rolls back all of the changes that it made to
- the IDL's copy of the database. If the transaction commits
- successfully, then the database server will send an update and, thus,
- the IDL will be updated with the committed changes."""
- # The status can only change if we're the active transaction.
- # (Otherwise, our status will change only in Idl.run().)
- if self != self.idl.txn:
- return self._status
-
- # If we need a lock but don't have it, give up quickly.
- if self.idl.lock_name and not self.idl.has_lock:
- self._status = Transaction.NOT_LOCKED
- self.__disassemble()
- return self._status
-
- operations = [self.idl._db.name]
-
- # Assert that we have the required lock (avoiding a race).
- if self.idl.lock_name:
- operations.append({"op": "assert",
- "lock": self.idl.lock_name})
-
- # Add prerequisites and declarations of new rows.
- for row in six.itervalues(self._txn_rows):
- if row._prereqs:
- rows = {}
- columns = []
- for column_name in row._prereqs:
- columns.append(column_name)
- rows[column_name] = row._data[column_name].to_json()
- operations.append({"op": "wait",
- "table": row._table.name,
- "timeout": 0,
- "where": _where_uuid_equals(row.uuid),
- "until": "==",
- "columns": columns,
- "rows": [rows]})
-
- # Add updates.
- any_updates = False
- for row in six.itervalues(self._txn_rows):
- if row._changes is None:
- if row._table.is_root:
- operations.append({"op": "delete",
- "table": row._table.name,
- "where": _where_uuid_equals(row.uuid)})
- any_updates = True
- else:
- # Let ovsdb-server decide whether to really delete it.
- pass
- elif row._changes:
- op = {"table": row._table.name}
- if row._data is None:
- op["op"] = "insert"
- op["uuid-name"] = _uuid_name_from_uuid(row.uuid)
- any_updates = True
-
- op_index = len(operations) - 1
- self._inserted_rows[row.uuid] = _InsertedRow(op_index)
- else:
- op["op"] = "update"
- op["where"] = _where_uuid_equals(row.uuid)
-
- row_json = {}
- op["row"] = row_json
-
- for column_name, datum in six.iteritems(row._changes):
- if row._data is not None or not datum.is_default():
- row_json[column_name] = (
- self._substitute_uuids(datum.to_json()))
-
- # If anything really changed, consider it an update.
- # We can't suppress not-really-changed values earlier
- # or transactions would become nonatomic (see the big
- # comment inside Transaction._write()).
- if (not any_updates and row._data is not None and
- row._data[column_name] != datum):
- any_updates = True
-
- if row._data is None or row_json:
- operations.append(op)
- if row._mutations:
- addop = False
- op = {"table": row._table.name}
- op["op"] = "mutate"
- if row._data is None:
- # New row
- op["where"] = self._substitute_uuids(
- _where_uuid_equals(row.uuid))
- else:
- # Existing row
- op["where"] = _where_uuid_equals(row.uuid)
- op["mutations"] = []
- if '_removes' in row._mutations:
- for col, dat in six.iteritems(row._mutations['_removes']):
- column = row._table.columns[col]
- if column.type.is_map():
- opdat = ["set"]
- opdat.append(list(dat))
- else:
- opdat = ["set"]
- inner_opdat = []
- for ele in dat:
- try:
- datum = data.Datum.from_python(column.type,
- ele, _row_to_uuid)
- except error.Error:
- return
- inner_opdat.append(
- self._substitute_uuids(datum.to_json()))
- opdat.append(inner_opdat)
- mutation = [col, "delete", opdat]
- op["mutations"].append(mutation)
- addop = True
- if '_inserts' in row._mutations:
- for col, val in six.iteritems(row._mutations['_inserts']):
- column = row._table.columns[col]
- if column.type.is_map():
- opdat = ["map"]
- datum = data.Datum.from_python(column.type, val,
- _row_to_uuid)
- opdat.append(datum.as_list())
- else:
- opdat = ["set"]
- inner_opdat = []
- for ele in val:
- try:
- datum = data.Datum.from_python(column.type,
- ele, _row_to_uuid)
- except error.Error:
- return
- inner_opdat.append(
- self._substitute_uuids(datum.to_json()))
- opdat.append(inner_opdat)
- mutation = [col, "insert", opdat]
- op["mutations"].append(mutation)
- addop = True
- if addop:
- operations.append(op)
- any_updates = True
-
- if self._fetch_requests:
- for fetch in self._fetch_requests:
- fetch["index"] = len(operations) - 1
- operations.append({"op": "select",
- "table": fetch["row"]._table.name,
- "where": self._substitute_uuids(
- _where_uuid_equals(fetch["row"].uuid)),
- "columns": [fetch["column_name"]]})
- any_updates = True
-
- # Add increment.
- if self._inc_row and any_updates:
- self._inc_index = len(operations) - 1
-
- operations.append({"op": "mutate",
- "table": self._inc_row._table.name,
- "where": self._substitute_uuids(
- _where_uuid_equals(self._inc_row.uuid)),
- "mutations": [[self._inc_column, "+=", 1]]})
- operations.append({"op": "select",
- "table": self._inc_row._table.name,
- "where": self._substitute_uuids(
- _where_uuid_equals(self._inc_row.uuid)),
- "columns": [self._inc_column]})
-
- # Add comment.
- if self._comments:
- operations.append({"op": "comment",
- "comment": "\n".join(self._comments)})
-
- # Dry run?
- if self.dry_run:
- operations.append({"op": "abort"})
-
- if not any_updates:
- self._status = Transaction.UNCHANGED
- else:
- msg = ovs.jsonrpc.Message.create_request("transact", operations)
- self._request_id = msg.id
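- # Session.send() returns 0 on success and a nonzero errno value
- # on failure.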
- if not self.idl._session.send(msg):
- self.idl._outstanding_txns[self._request_id] = self
- self._status = Transaction.INCOMPLETE
- else:
- self._status = Transaction.TRY_AGAIN
-
- self.__disassemble()
- return self._status
-
- def commit_block(self):
- """Attempts to commit this transaction, blocking until the commit
- either succeeds or fails. Returns the final commit status, which may
- be any Transaction.* value other than Transaction.INCOMPLETE.
-
- This function calls Idl.run() on this transaction's IDL, so it may
- cause Idl.change_seqno to change."""
- while True:
- status = self.commit()
- if status != Transaction.INCOMPLETE:
- return status
-
- self.idl.run()
-
- poller = ovs.poller.Poller()
- self.idl.wait(poller)
- self.wait(poller)
- poller.block()
-
- def get_increment_new_value(self):
- """Returns the final (incremented) value of the column in this
- transaction that was set to be incremented by Row.increment. This
- transaction must have committed successfully."""
- assert self._status == Transaction.SUCCESS
- return self._inc_new_value
-
- def abort(self):
- """Aborts this transaction. If Transaction.commit() has already been
- called then the transaction might get committed anyhow."""
- self.__disassemble()
- if self._status in (Transaction.UNCOMMITTED,
- Transaction.INCOMPLETE):
- self._status = Transaction.ABORTED
-
- def get_error(self):
- """Returns a string representing this transaction's current status,
- suitable for use in log messages."""
- if self._status != Transaction.ERROR:
- return Transaction.status_to_string(self._status)
- elif self._error:
- return self._error
- else:
- return "no error details available"
-
- def __set_error_json(self, json):
- if self._error is None:
- self._error = ovs.json.to_string(json)
-
- def get_insert_uuid(self, uuid):
- """Finds and returns the permanent UUID that the database assigned to a
- newly inserted row, given the UUID that Transaction.insert() assigned
- locally to that row.
-
- Returns None if 'uuid' is not a UUID assigned by Transaction.insert()
- or if it was assigned by that function and then deleted by Row.delete()
- within the same transaction. (Rows that are inserted and then deleted
- within a single transaction are never sent to the database server, so
- it never assigns them a permanent UUID.)
-
- This transaction must have completed successfully."""
- assert self._status in (Transaction.SUCCESS,
- Transaction.UNCHANGED)
- inserted_row = self._inserted_rows.get(uuid)
- if inserted_row:
- return inserted_row.real
- return None
-
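- # Example sketch: mapping a provisional insert UUID to the UUID the
- # server assigned; the "Bridge" table is an illustrative assumption.
- #
- #     row = txn.insert(idl.tables["Bridge"])
- #     provisional = row.uuid
- #     if txn.commit_block() == Transaction.SUCCESS:
- #         real_uuid = txn.get_insert_uuid(provisional)
-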
- def _increment(self, row, column):
- assert not self._inc_row
- self._inc_row = row
- self._inc_column = column
-
- def _fetch(self, row, column_name):
- self._fetch_requests.append({"row": row, "column_name": column_name})
-
- def _write(self, row, column, datum):
- assert row._changes is not None
- assert row._mutations is not None
-
- txn = row._idl.txn
-
- # If this is a write-only column and the datum being written is the
- # same as the one already there, just skip the update entirely. This
- # is worth optimizing because we have a lot of columns that get
- # periodically refreshed into the database but don't actually change
- # that often.
- #
- # We don't do this for read/write columns because that would break
- # atomicity of transactions--some other client might have written a
- # different value in that column since we read it. (But if a whole
- # transaction only does writes of existing values, without making any
- # real changes, we will drop the whole transaction later in
- # Transaction.commit().)
- if (not column.alert and row._data and
- row._data.get(column.name) == datum):
- new_value = row._changes.get(column.name)
- if new_value is None or new_value == datum:
- return
-
- txn._txn_rows[row.uuid] = row
- if '_inserts' in row._mutations:
- row._mutations['_inserts'].pop(column.name, None)
- if '_removes' in row._mutations:
- row._mutations['_removes'].pop(column.name, None)
- row._changes[column.name] = datum.copy()
-
- def insert(self, table, new_uuid=None):
- """Inserts and returns a new row in 'table', which must be one of the
- ovs.db.schema.TableSchema objects in the Idl's 'tables' dict.
-
- The new row is assigned a provisional UUID. If 'new_uuid' is None then
- one is randomly generated; otherwise 'new_uuid' should specify a
- randomly generated uuid.UUID not otherwise in use. ovsdb-server will
- assign a different UUID when the transaction is committed, but the IDL
- will replace any uses of the provisional UUID in the data to be
- committed by the UUID assigned by ovsdb-server."""
- assert self._status == Transaction.UNCOMMITTED
- if new_uuid is None:
- new_uuid = uuid.uuid4()
- row = Row(self.idl, table, new_uuid, None)
- table.rows[row.uuid] = row
- self._txn_rows[row.uuid] = row
- return row
-
- def _process_reply(self, msg):
- if msg.type == ovs.jsonrpc.Message.T_ERROR:
- self._status = Transaction.ERROR
- elif not isinstance(msg.result, (list, tuple)):
- # XXX rate-limit
- vlog.warn('reply to "transact" is not JSON array')
- else:
- hard_errors = False
- soft_errors = False
- lock_errors = False
-
- ops = msg.result
- for op in ops:
- if op is None:
- # This isn't an error in itself but indicates that some
- # prior operation failed, so make sure that we know about
- # it.
- soft_errors = True
- elif isinstance(op, dict):
- error = op.get("error")
- if error is not None:
- if error == "timed out":
- soft_errors = True
- elif error == "not owner":
- lock_errors = True
- elif error == "aborted":
- pass
- else:
- hard_errors = True
- self.__set_error_json(op)
- else:
- hard_errors = True
- self.__set_error_json(op)
- # XXX rate-limit
- vlog.warn("operation reply is not JSON null or object")
-
- if not soft_errors and not hard_errors and not lock_errors:
- if self._inc_row and not self.__process_inc_reply(ops):
- hard_errors = True
- if self._fetch_requests:
- if self.__process_fetch_reply(ops):
- self.idl.change_seqno += 1
- else:
- hard_errors = True
-
- for insert in six.itervalues(self._inserted_rows):
- if not self.__process_insert_reply(insert, ops):
- hard_errors = True
-
- if hard_errors:
- self._status = Transaction.ERROR
- elif lock_errors:
- self._status = Transaction.NOT_LOCKED
- elif soft_errors:
- self._status = Transaction.TRY_AGAIN
- else:
- self._status = Transaction.SUCCESS
-
- @staticmethod
- def __check_json_type(json, types, name):
- if not json:
- # XXX rate-limit
- vlog.warn("%s is missing" % name)
- return False
- elif not isinstance(json, tuple(types)):
- # XXX rate-limit
- vlog.warn("%s has unexpected type %s" % (name, type(json)))
- return False
- else:
- return True
-
- def __process_fetch_reply(self, ops):
- update = False
- for fetch_request in self._fetch_requests:
- row = fetch_request["row"]
- column_name = fetch_request["column_name"]
- index = fetch_request["index"]
- table = row._table
-
- select = ops[index]
- fetched_rows = select.get("rows")
- if not Transaction.__check_json_type(fetched_rows, (list, tuple),
- '"select" reply "rows"'):
- return False
- if len(fetched_rows) != 1:
- # XXX rate-limit
- vlog.warn('"select" reply "rows" has %d elements '
- 'instead of 1' % len(fetched_rows))
- continue
- fetched_row = fetched_rows[0]
- if not Transaction.__check_json_type(fetched_row, (dict,),
- '"select" reply row'):
- continue
-
- column = table.columns.get(column_name)
- datum_json = fetched_row.get(column_name)
- datum = data.Datum.from_json(column.type, datum_json)
-
- row._data[column_name] = datum
- update = True
-
- return update
-
- def __process_inc_reply(self, ops):
- if self._inc_index + 2 > len(ops):
- # XXX rate-limit
- vlog.warn("reply does not contain enough operations for "
- "increment (has %d, needs %d)" %
- (len(ops), self._inc_index + 2))
- return False
-
- # We know that this is a JSON object because the loop in
- # __process_reply() already checked.
- mutate = ops[self._inc_index]
- count = mutate.get("count")
- if not Transaction.__check_json_type(count, six.integer_types,
- '"mutate" reply "count"'):
- return False
- if count != 1:
- # XXX rate-limit
- vlog.warn('"mutate" reply "count" is %d instead of 1' % count)
- return False
-
- select = ops[self._inc_index + 1]
- rows = select.get("rows")
- if not Transaction.__check_json_type(rows, (list, tuple),
- '"select" reply "rows"'):
- return False
- if len(rows) != 1:
- # XXX rate-limit
- vlog.warn('"select" reply "rows" has %d elements '
- 'instead of 1' % len(rows))
- return False
- row = rows[0]
- if not Transaction.__check_json_type(row, (dict,),
- '"select" reply row'):
- return False
- column = row.get(self._inc_column)
- if not Transaction.__check_json_type(column, six.integer_types,
- '"select" reply inc column'):
- return False
- self._inc_new_value = column
- return True
-
- def __process_insert_reply(self, insert, ops):
- if insert.op_index >= len(ops):
- # XXX rate-limit
- vlog.warn("reply does not contain enough operations "
- "for insert (has %d, needs %d)"
- % (len(ops), insert.op_index))
- return False
-
- # We know that this is a JSON object because the loop in
- # __process_reply() already checked.
- reply = ops[insert.op_index]
- json_uuid = reply.get("uuid")
- if not Transaction.__check_json_type(json_uuid, (tuple, list),
- '"insert" reply "uuid"'):
- return False
-
- try:
- uuid_ = ovs.ovsuuid.from_json(json_uuid)
- except error.Error:
- # XXX rate-limit
- vlog.warn('"insert" reply "uuid" is not a JSON UUID')
- return False
-
- insert.real = uuid_
- return True
-
-
-class SchemaHelper(object):
- """IDL Schema helper.
-
- This class encapsulates the logic required to generate schemas suitable
- for creating 'ovs.db.idl.Idl' objects. Clients should register columns
- they are interested in using register_columns(). When finished, the
- get_idl_schema() function may be called.
-
- The location on disk of the schema used may be found in the
- 'schema_location' variable."""
-
- def __init__(self, location=None, schema_json=None):
-        """Creates a new SchemaHelper object.
-
-        'location' file path to the OVS schema. None means the default
-        location.
-        'schema_json' schema in JSON representation, in memory.
-        """
-
- if location and schema_json:
-            raise ValueError("specifying both 'location' and "
-                             "'schema_json' is ambiguous")
- if schema_json is None:
- if location is None:
- location = "%s/vswitch.ovsschema" % ovs.dirs.PKGDATADIR
- schema_json = ovs.json.from_file(location)
-
- self.schema_json = schema_json
- self._tables = {}
- self._readonly = {}
- self._all = False
-
- def register_columns(self, table, columns, readonly=[]):
- """Registers interest in the given 'columns' of 'table'. Future calls
- to get_idl_schema() will include 'table':column for each column in
- 'columns'. This function automatically avoids adding duplicate entries
- to the schema.
- A subset of 'columns' can be specified as 'readonly'. The readonly
- columns are not replicated but can be fetched on-demand by the user
- with Row.fetch().
-
- 'table' must be a string.
- 'columns' must be a list of strings.
- 'readonly' must be a list of strings.
- """
-
- assert isinstance(table, six.string_types)
- assert isinstance(columns, list)
-
- columns = set(columns) | self._tables.get(table, set())
- self._tables[table] = columns
- self._readonly[table] = readonly
-
- def register_table(self, table):
-        """Registers interest in all columns of the given 'table'. Future
-        calls to get_idl_schema() will include all columns of 'table'.
-
-        'table' must be a string.
-        """
- assert isinstance(table, six.string_types)
- self._tables[table] = set() # empty set means all columns in the table
-
- def register_all(self):
- """Registers interest in every column of every table."""
- self._all = True
-
- def get_idl_schema(self):
-        """Gets a schema appropriate for the creation of an 'ovs.db.idl.Idl'
- object based on columns registered using the register_columns()
- function."""
-
- schema = ovs.db.schema.DbSchema.from_json(self.schema_json)
- self.schema_json = None
-
- if not self._all:
- schema_tables = {}
- for table, columns in six.iteritems(self._tables):
- schema_tables[table] = (
- self._keep_table_columns(schema, table, columns))
-
- schema.tables = schema_tables
- schema.readonly = self._readonly
- return schema
-
- def _keep_table_columns(self, schema, table_name, columns):
- assert table_name in schema.tables
- table = schema.tables[table_name]
-
- if not columns:
- # empty set means all columns in the table
- return table
-
- new_columns = {}
- for column_name in columns:
- assert isinstance(column_name, six.string_types)
- assert column_name in table.columns
-
- new_columns[column_name] = table.columns[column_name]
-
- table.columns = new_columns
- return table
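
For readers tracking what OVN will now consume from the configured OVS tree:
SchemaHelper is the usual entry point for IDL clients. A minimal sketch of
that usage follows; the schema path and remote are illustrative assumptions,
and passing the helper straight to Idl matches how the in-tree tests do it:

    import ovs.db.idl

    SCHEMA = "/usr/local/share/openvswitch/vswitch.ovsschema"  # assumed path
    REMOTE = "unix:/usr/local/var/run/openvswitch/db.sock"     # assumed socket

    helper = ovs.db.idl.SchemaHelper(location=SCHEMA)
    # Replicate only what we need; "statistics" is registered read-only,
    # so it is not replicated but can be pulled on demand with Row.fetch().
    helper.register_columns("Interface", ["name", "statistics"],
                            readonly=["statistics"])
    idl = ovs.db.idl.Idl(REMOTE, helper)
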
diff --git a/python/ovs/db/parser.py b/python/ovs/db/parser.py
deleted file mode 100644
index b39de39ff..000000000
--- a/python/ovs/db/parser.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (c) 2010, 2011 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-
-from ovs.db import error
-
-import six
-
-
-class Parser(object):
- def __init__(self, json, name):
- self.name = name
- self.json = json
- if not isinstance(json, dict):
- self.__raise_error("Object expected.")
- self.used = set()
-
- def __get(self, name, types, optional, default=None):
- if name in self.json:
- self.used.add(name)
- member = float_to_int(self.json[name])
- if is_identifier(member) and "id" in types:
- return member
- try:
- if len(types) and not isinstance(member, tuple(types)):
- self.__raise_error("Type mismatch for member '%s'." % name)
- except TypeError:
- self.__raise_error("Type mismatch for member '%s'." % name)
- return member
- else:
- if not optional:
- self.__raise_error("Required '%s' member is missing." % name)
- return default
-
- def get(self, name, types):
- return self.__get(name, types, False)
-
- def get_optional(self, name, types, default=None):
- return self.__get(name, types, True, default)
-
- def __raise_error(self, message):
- raise error.Error("Parsing %s failed: %s" % (self.name, message),
- self.json)
-
- def finish(self):
- missing = set(self.json) - set(self.used)
- if missing:
- name = missing.pop()
- if len(missing) > 1:
- present = "and %d other members are" % len(missing)
- elif missing:
- present = "and 1 other member are"
- else:
- present = "is"
- self.__raise_error("Member '%s' %s present but not allowed here" %
- (name, present))
-
-
-def float_to_int(x):
- # XXX still needed?
- if isinstance(x, float):
- integer = int(x)
- if integer == x and -2 ** 53 <= integer < 2 ** 53:
- return integer
- return x
-
-
-id_re = re.compile("[_a-zA-Z][_a-zA-Z0-9]*$")
-
-
-def is_identifier(s):
- return isinstance(s, six.string_types) and id_re.match(s)
-
-
-def json_type_to_string(type_):
- number_types = list(six.integer_types)
- number_types.extend([float])
- number_types = tuple(number_types)
- if type_ is None:
- return "null"
- elif issubclass(type_, bool):
- return "boolean"
- elif issubclass(type_, dict):
- return "object"
- elif issubclass(type_, list):
- return "array"
- elif issubclass(type_, number_types):
- return "number"
- elif issubclass(type_, six.string_types):
- return "string"
- else:
- return "<invalid>"
-
-
-def unwrap_json(json, name, types, desc):
- if (not isinstance(json, (list, tuple))
- or len(json) != 2 or json[0] != name
- or not isinstance(json[1], tuple(types))):
- raise error.Error('expected ["%s", <%s>]' % (name, desc), json)
- return json[1]
-
-
-def parse_json_pair(json):
- if not isinstance(json, list) or len(json) != 2:
- raise error.Error("expected 2-element array", json)
- return json
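
To make the Parser contract above concrete -- get() for required members,
get_optional() for optional ones, finish() to reject anything unused -- a
small self-contained sketch (the input dict is invented):

    import ovs.db.parser
    from ovs.db import error

    json = {"name": "Bridge", "maxRows": 100, "bogus": True}
    parser = ovs.db.parser.Parser(json, "example schema")
    name = parser.get("name", ['id'])                 # required identifier
    max_rows = parser.get_optional("maxRows", [int])  # optional integer
    try:
        parser.finish()   # "bogus" was never consumed, so this raises
    except error.Error as e:
        print(e)   # Parsing example schema failed: Member 'bogus' is ...
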
diff --git a/python/ovs/db/schema.py b/python/ovs/db/schema.py
deleted file mode 100644
index 44b030757..000000000
--- a/python/ovs/db/schema.py
+++ /dev/null
@@ -1,304 +0,0 @@
-# Copyright (c) 2009, 2010, 2011, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import sys
-
-import ovs.db.parser
-import ovs.db.types
-from ovs.db import error
-
-import six
-
-
-def _check_id(name, json):
- if name.startswith('_'):
- raise error.Error('names beginning with "_" are reserved', json)
- elif not ovs.db.parser.is_identifier(name):
- raise error.Error("name must be a valid id", json)
-
-
-class DbSchema(object):
- """Schema for an OVSDB database."""
-
- def __init__(self, name, version, tables, allow_extensions=False):
- self.name = name
- self.version = version
- self.tables = tables
-
- # "isRoot" was not part of the original schema definition. Before it
- # was added, there was no support for garbage collection. So, for
- # backward compatibility, if the root set is empty then assume that
- # every table is in the root set.
- if self.__root_set_size() == 0:
- for table in six.itervalues(self.tables):
- table.is_root = True
-
- # Find the "ref_table"s referenced by "ref_table_name"s.
- #
- # Also force certain columns to be persistent, as explained in
- # __check_ref_table(). This requires 'is_root' to be known, so this
- # must follow the loop updating 'is_root' above.
- for table in six.itervalues(self.tables):
- for column in six.itervalues(table.columns):
- self.__follow_ref_table(column, column.type.key, "key")
- self.__follow_ref_table(column, column.type.value, "value")
-
- def __root_set_size(self):
- """Returns the number of tables in the schema's root set."""
- n_root = 0
- for table in six.itervalues(self.tables):
- if table.is_root:
- n_root += 1
- return n_root
-
- @staticmethod
- def from_json(json, allow_extensions=False):
- parser = ovs.db.parser.Parser(json, "database schema")
- name = parser.get("name", ['id'])
- version = parser.get_optional("version", six.string_types)
- parser.get_optional("cksum", six.string_types)
- tablesJson = parser.get("tables", [dict])
- parser.finish()
-
- if (version is not None and
- not re.match(r'[0-9]+\.[0-9]+\.[0-9]+$', version)):
- raise error.Error('schema version "%s" not in format x.y.z'
- % version)
-
- tables = {}
- for tableName, tableJson in six.iteritems(tablesJson):
- _check_id(tableName, json)
- tables[tableName] = TableSchema.from_json(tableJson, tableName,
- allow_extensions)
-
- return DbSchema(name, version, tables)
-
- def to_json(self):
- # "isRoot" was not part of the original schema definition. Before it
- # was added, there was no support for garbage collection. So, for
- # backward compatibility, if every table is in the root set then do not
- # output "isRoot" in table schemas.
- default_is_root = self.__root_set_size() == len(self.tables)
-
- tables = {}
- for table in six.itervalues(self.tables):
- tables[table.name] = table.to_json(default_is_root)
- json = {"name": self.name, "tables": tables}
- if self.version:
- json["version"] = self.version
- return json
-
- def copy(self):
- return DbSchema.from_json(self.to_json())
-
- def __follow_ref_table(self, column, base, base_name):
- if (not base or base.type != ovs.db.types.UuidType
- or not base.ref_table_name):
- return
-
- base.ref_table = self.tables.get(base.ref_table_name)
- if not base.ref_table:
- raise error.Error("column %s %s refers to undefined table %s"
- % (column.name, base_name, base.ref_table_name),
- tag="syntax error")
-
- if base.is_strong_ref() and not base.ref_table.is_root:
- # We cannot allow a strong reference to a non-root table to be
- # ephemeral: if it is the only reference to a row, then replaying
- # the database log from disk will cause the referenced row to be
- # deleted, even though it did exist in memory. If there are
- # references to that row later in the log (to modify it, to delete
- # it, or just to point to it), then this will yield a transaction
- # error.
- column.persistent = True
-
-
-class IdlSchema(DbSchema):
- def __init__(self, name, version, tables, idlPrefix, idlHeader,
- cDecls, hDecls):
- DbSchema.__init__(self, name, version, tables)
- self.idlPrefix = idlPrefix
- self.idlHeader = idlHeader
- self.cDecls = cDecls
- self.hDecls = hDecls
-
- @staticmethod
- def from_json(json):
- parser = ovs.db.parser.Parser(json, "IDL schema")
- idlPrefix = parser.get("idlPrefix", six.string_types)
- idlHeader = parser.get("idlHeader", six.string_types)
- cDecls = parser.get_optional("cDecls", six.string_types, "")
- hDecls = parser.get_optional("hDecls", six.string_types, "")
-
- subjson = dict(json)
- del subjson["idlPrefix"]
- del subjson["idlHeader"]
- subjson.pop("cDecls", None)
- subjson.pop("hDecls", None)
- schema = DbSchema.from_json(subjson, allow_extensions=True)
-
- return IdlSchema(schema.name, schema.version, schema.tables,
- idlPrefix, idlHeader, cDecls, hDecls)
-
-
-def column_set_from_json(json, columns):
- if json is None:
- return tuple(columns)
- elif not isinstance(json, list):
- raise error.Error("array of distinct column names expected", json)
- else:
- for column_name in json:
- if not isinstance(column_name, six.string_types):
- raise error.Error("array of distinct column names expected",
- json)
- elif column_name not in columns:
- raise error.Error("%s is not a valid column name"
- % column_name, json)
- if len(set(json)) != len(json):
- # Duplicate.
- raise error.Error("array of distinct column names expected", json)
- return tuple([columns[column_name] for column_name in json])
-
-
-class TableSchema(object):
- def __init__(self, name, columns, mutable=True, max_rows=sys.maxsize,
- is_root=True, indexes=[], extensions={}):
- self.name = name
- self.columns = columns
- self.mutable = mutable
- self.max_rows = max_rows
- self.is_root = is_root
- self.indexes = indexes
- self.extensions = extensions
-
- @staticmethod
- def from_json(json, name, allow_extensions=False):
- parser = ovs.db.parser.Parser(json, "table schema for table %s" % name)
- columns_json = parser.get("columns", [dict])
- mutable = parser.get_optional("mutable", [bool], True)
- max_rows = parser.get_optional("maxRows", [int])
- is_root = parser.get_optional("isRoot", [bool], False)
- indexes_json = parser.get_optional("indexes", [list], [])
- if allow_extensions:
- extensions = parser.get_optional("extensions", [dict], {})
- else:
- extensions = {}
- parser.finish()
-
- if max_rows is None:
- max_rows = sys.maxsize
- elif max_rows <= 0:
- raise error.Error("maxRows must be at least 1", json)
-
- if not columns_json:
- raise error.Error("table must have at least one column", json)
-
- columns = {}
- for column_name, column_json in six.iteritems(columns_json):
- _check_id(column_name, json)
- columns[column_name] = ColumnSchema.from_json(column_json,
- column_name,
- allow_extensions)
-
- indexes = []
- for index_json in indexes_json:
- index = column_set_from_json(index_json, columns)
- if not index:
- raise error.Error("index must have at least one column", json)
- elif len(index) == 1:
- index[0].unique = True
- for column in index:
- if not column.persistent:
- raise error.Error("ephemeral columns (such as %s) may "
- "not be indexed" % column.name, json)
- indexes.append(index)
-
- return TableSchema(name, columns, mutable, max_rows, is_root, indexes,
- extensions)
-
- def to_json(self, default_is_root=False):
- """Returns this table schema serialized into JSON.
-
- The "isRoot" member is included in the JSON only if its value would
- differ from 'default_is_root'. Ordinarily 'default_is_root' should be
-        false, because ordinarily a table would not be part of the root set
- if its "isRoot" member is omitted. However, garbage collection was not
- originally included in OVSDB, so in older schemas that do not include
- any "isRoot" members, every table is implicitly part of the root set.
- To serialize such a schema in a way that can be read by older OVSDB
- tools, specify 'default_is_root' as True.
- """
- json = {}
- if not self.mutable:
- json["mutable"] = False
- if default_is_root != self.is_root:
- json["isRoot"] = self.is_root
-
- json["columns"] = columns = {}
- for column in six.itervalues(self.columns):
- if not column.name.startswith("_"):
- columns[column.name] = column.to_json()
-
- if self.max_rows != sys.maxsize:
- json["maxRows"] = self.max_rows
-
- if self.indexes:
- json["indexes"] = []
- for index in self.indexes:
- json["indexes"].append([column.name for column in index])
-
- return json
-
-
-class ColumnSchema(object):
- def __init__(self, name, mutable, persistent, type_, extensions={}):
- self.name = name
- self.mutable = mutable
- self.persistent = persistent
- self.type = type_
- self.unique = False
- self.extensions = extensions
-
- @staticmethod
- def from_json(json, name, allow_extensions=False):
- parser = ovs.db.parser.Parser(json, "schema for column %s" % name)
- mutable = parser.get_optional("mutable", [bool], True)
- ephemeral = parser.get_optional("ephemeral", [bool], False)
- _types = list(six.string_types)
- _types.extend([dict])
- type_ = ovs.db.types.Type.from_json(parser.get("type", _types))
- if allow_extensions:
- extensions = parser.get_optional("extensions", [dict], {})
- else:
- extensions = {}
- parser.finish()
-
- if not mutable and (type_.key.is_weak_ref()
- or (type_.value and type_.value.is_weak_ref())):
- # We cannot allow a weak reference to be immutable: if referenced
- # rows are deleted, then the weak reference needs to change.
- mutable = True
-
- return ColumnSchema(name, mutable, not ephemeral, type_, extensions)
-
- def to_json(self):
- json = {"type": self.type.to_json()}
- if not self.mutable:
- json["mutable"] = False
- if not self.persistent:
- json["ephemeral"] = True
- if self.extensions:
- json["extensions"] = self.extensions
- return json
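
The from_json()/to_json() pair above round-trips cleanly; a hedged sketch
with a toy schema (invented for illustration, not a real OVS schema):

    import ovs.db.schema

    TOY = {
        "name": "Toy",
        "version": "1.0.0",
        "tables": {
            "Host": {"columns": {"hostname": {"type": "string"}}},
        },
    }

    schema = ovs.db.schema.DbSchema.from_json(TOY)
    # No table sets "isRoot", so every table lands in the root set...
    assert schema.tables["Host"].is_root
    # ...and to_json() therefore omits "isRoot" again on the way out.
    assert "isRoot" not in schema.to_json()["tables"]["Host"]
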
diff --git a/python/ovs/db/types.py b/python/ovs/db/types.py
deleted file mode 100644
index 54f577405..000000000
--- a/python/ovs/db/types.py
+++ /dev/null
@@ -1,647 +0,0 @@
-# Copyright (c) 2009, 2010, 2011, 2012, 2013, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import uuid
-
-import ovs.db.data
-import ovs.db.parser
-import ovs.ovsuuid
-from ovs.db import error
-
-import six
-
-
-class AtomicType(object):
- def __init__(self, name, default, python_types):
- self.name = name
- self.default = default
- self.python_types = python_types
-
- @staticmethod
- def from_string(s):
- if s != "void":
- for atomic_type in ATOMIC_TYPES:
- if s == atomic_type.name:
- return atomic_type
- raise error.Error('"%s" is not an atomic-type' % s, s)
-
- @staticmethod
- def from_json(json):
- if not isinstance(json, six.string_types):
- raise error.Error("atomic-type expected", json)
- else:
- return AtomicType.from_string(json)
-
- def __str__(self):
- return self.name
-
- def to_string(self):
- return self.name
-
- def to_json(self):
- return self.name
-
- def default_atom(self):
- return ovs.db.data.Atom(self, self.default)
-
-
-REAL_PYTHON_TYPES = list(six.integer_types)
-REAL_PYTHON_TYPES.extend([float])
-REAL_PYTHON_TYPES = tuple(REAL_PYTHON_TYPES)
-
-VoidType = AtomicType("void", None, ())
-IntegerType = AtomicType("integer", 0, six.integer_types)
-RealType = AtomicType("real", 0.0, REAL_PYTHON_TYPES)
-BooleanType = AtomicType("boolean", False, (bool,))
-StringType = AtomicType("string", "", six.string_types)
-UuidType = AtomicType("uuid", ovs.ovsuuid.zero(), (uuid.UUID,))
-
-ATOMIC_TYPES = [VoidType, IntegerType, RealType, BooleanType, StringType,
- UuidType]
-
-
-def escapeCString(src):
- dst = ""
- for c in src:
- if c in "\\\"":
- dst += "\\" + c
- elif ord(c) < 32:
- if c == '\n':
- dst += '\\n'
- elif c == '\r':
- dst += '\\r'
- elif c == '\a':
- dst += '\\a'
- elif c == '\b':
- dst += '\\b'
- elif c == '\f':
- dst += '\\f'
- elif c == '\t':
- dst += '\\t'
- elif c == '\v':
- dst += '\\v'
- else:
- dst += '\\%03o' % ord(c)
- else:
- dst += c
- return dst
-
-
-def commafy(x):
- """Returns integer x formatted in decimal with thousands set off by
- commas."""
- return _commafy("%d" % x)
-
-
-def _commafy(s):
- if s.startswith('-'):
- return '-' + _commafy(s[1:])
- elif len(s) <= 3:
- return s
- else:
- return _commafy(s[:-3]) + ',' + _commafy(s[-3:])
-
-
-def returnUnchanged(x):
- return x
-
-
-class BaseType(object):
- def __init__(self, type_, enum=None, min=None, max=None,
- min_length=0, max_length=sys.maxsize, ref_table_name=None):
- assert isinstance(type_, AtomicType)
- self.type = type_
- self.enum = enum
- self.min = min
- self.max = max
- self.min_length = min_length
- self.max_length = max_length
- self.ref_table_name = ref_table_name
- if ref_table_name:
- self.ref_type = 'strong'
- else:
- self.ref_type = None
- self.ref_table = None
-
- def default(self):
- return ovs.db.data.Atom.default(self.type)
-
- def __eq__(self, other):
- if not isinstance(other, BaseType):
- return NotImplemented
- return (self.type == other.type and self.enum == other.enum and
- self.min == other.min and self.max == other.max and
- self.min_length == other.min_length and
- self.max_length == other.max_length and
- self.ref_table_name == other.ref_table_name)
-
- def __ne__(self, other):
- if not isinstance(other, BaseType):
- return NotImplemented
- else:
- return not (self == other)
-
- @staticmethod
- def __parse_uint(parser, name, default):
- value = parser.get_optional(name, six.integer_types)
- if value is None:
- value = default
- else:
- max_value = 2 ** 32 - 1
- if not (0 <= value <= max_value):
- raise error.Error("%s out of valid range 0 to %d"
- % (name, max_value), value)
- return value
-
- @staticmethod
- def from_json(json):
- if isinstance(json, six.string_types):
- return BaseType(AtomicType.from_json(json))
-
- parser = ovs.db.parser.Parser(json, "ovsdb type")
- atomic_type = AtomicType.from_json(parser.get("type",
- six.string_types))
-
- base = BaseType(atomic_type)
-
- enum = parser.get_optional("enum", [])
- if enum is not None:
- base.enum = ovs.db.data.Datum.from_json(
- BaseType.get_enum_type(base.type), enum)
- elif base.type == IntegerType:
- base.min = parser.get_optional("minInteger", six.integer_types)
- base.max = parser.get_optional("maxInteger", six.integer_types)
- if (base.min is not None and base.max is not None
- and base.min > base.max):
- raise error.Error("minInteger exceeds maxInteger", json)
- elif base.type == RealType:
- base.min = parser.get_optional("minReal", REAL_PYTHON_TYPES)
- base.max = parser.get_optional("maxReal", REAL_PYTHON_TYPES)
- if (base.min is not None and base.max is not None
- and base.min > base.max):
- raise error.Error("minReal exceeds maxReal", json)
- elif base.type == StringType:
- base.min_length = BaseType.__parse_uint(parser, "minLength", 0)
- base.max_length = BaseType.__parse_uint(parser, "maxLength",
- sys.maxsize)
- if base.min_length > base.max_length:
- raise error.Error("minLength exceeds maxLength", json)
- elif base.type == UuidType:
- base.ref_table_name = parser.get_optional("refTable", ['id'])
- if base.ref_table_name:
- base.ref_type = parser.get_optional("refType",
- six.string_types,
- "strong")
- if base.ref_type not in ['strong', 'weak']:
- raise error.Error('refType must be "strong" or "weak" '
- '(not "%s")' % base.ref_type)
- parser.finish()
-
- return base
-
- def to_json(self):
- if not self.has_constraints():
- return self.type.to_json()
-
- json = {'type': self.type.to_json()}
-
- if self.enum:
- json['enum'] = self.enum.to_json()
-
- if self.type == IntegerType:
- if self.min is not None:
- json['minInteger'] = self.min
- if self.max is not None:
- json['maxInteger'] = self.max
- elif self.type == RealType:
- if self.min is not None:
- json['minReal'] = self.min
- if self.max is not None:
- json['maxReal'] = self.max
- elif self.type == StringType:
- if self.min_length != 0:
- json['minLength'] = self.min_length
- if self.max_length != sys.maxsize:
- json['maxLength'] = self.max_length
- elif self.type == UuidType:
- if self.ref_table_name:
- json['refTable'] = self.ref_table_name
- if self.ref_type != 'strong':
- json['refType'] = self.ref_type
- return json
-
-    def copy(self):
-        # 'enum' may be None, in which case there is nothing to copy.
-        base = BaseType(self.type,
-                        self.enum.copy() if self.enum else None,
-                        self.min, self.max, self.min_length,
-                        self.max_length, self.ref_table_name)
- base.ref_table = self.ref_table
- return base
-
- def is_valid(self):
- if self.type in (VoidType, BooleanType, UuidType):
- return True
- elif self.type in (IntegerType, RealType):
- return self.min is None or self.max is None or self.min <= self.max
- elif self.type == StringType:
- return self.min_length <= self.max_length
- else:
- return False
-
- def has_constraints(self):
- return (self.enum is not None or self.min is not None or
- self.max is not None or
- self.min_length != 0 or self.max_length != sys.maxsize or
- self.ref_table_name is not None)
-
- def without_constraints(self):
- return BaseType(self.type)
-
- @staticmethod
- def get_enum_type(atomic_type):
- """Returns the type of the 'enum' member for a BaseType whose
- 'type' is 'atomic_type'."""
- return Type(BaseType(atomic_type), None, 1, sys.maxsize)
-
- def is_ref(self):
- return self.type == UuidType and self.ref_table_name is not None
-
- def is_strong_ref(self):
- return self.is_ref() and self.ref_type == 'strong'
-
- def is_weak_ref(self):
- return self.is_ref() and self.ref_type == 'weak'
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- if self.type == UuidType and self.ref_table_name:
- s = escapeLiteral(self.ref_table_name)
- if self.ref_type == 'weak':
- s = "weak reference to " + s
- return s
- else:
- return self.type.to_string()
-
- def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
- escapeNumber=returnUnchanged):
- if self.enum:
- literals = [value.toEnglish(escapeLiteral)
- for value in self.enum.values]
- literals.sort()
- if len(literals) == 1:
- english = 'must be %s' % (literals[0])
- elif len(literals) == 2:
- english = 'either %s or %s' % (literals[0], literals[1])
- else:
- english = 'one of %s, %s, or %s' % (literals[0],
- ', '.join(literals[1:-1]),
- literals[-1])
- elif self.min is not None and self.max is not None:
- if self.type == IntegerType:
- english = 'in range %s to %s' % (
- escapeNumber(commafy(self.min)),
- escapeNumber(commafy(self.max)))
- else:
- english = 'in range %s to %s' % (
- escapeNumber("%g" % self.min),
- escapeNumber("%g" % self.max))
- elif self.min is not None:
- if self.type == IntegerType:
- english = 'at least %s' % escapeNumber(commafy(self.min))
- else:
- english = 'at least %s' % escapeNumber("%g" % self.min)
- elif self.max is not None:
- if self.type == IntegerType:
- english = 'at most %s' % escapeNumber(commafy(self.max))
- else:
- english = 'at most %s' % escapeNumber("%g" % self.max)
- elif self.min_length != 0 and self.max_length != sys.maxsize:
- if self.min_length == self.max_length:
- english = ('exactly %s characters long'
- % commafy(self.min_length))
- else:
- english = ('between %s and %s characters long'
- % (commafy(self.min_length),
- commafy(self.max_length)))
- elif self.min_length != 0:
-            english = ('at least %s characters long'
-                       % commafy(self.min_length))
- elif self.max_length != sys.maxsize:
- english = 'at most %s characters long' % commafy(self.max_length)
- else:
- english = ''
-
- return english
-
- def toCType(self, prefix, refTable=True):
- if self.ref_table_name:
- if not refTable:
- assert self.type == UuidType
- return 'struct uuid *'
- return "struct %s%s *" % (prefix, self.ref_table_name.lower())
- else:
- return {IntegerType: 'int64_t ',
- RealType: 'double ',
- UuidType: 'struct uuid ',
- BooleanType: 'bool ',
- StringType: 'char *'}[self.type]
-
- def to_const_c_type(self, prefix, refTable=True):
- nonconst = self.toCType(prefix, refTable)
-
- # A "const" prefix works OK for the types we use, but it's a little
- # weird to write "const bool" as, e.g., a function parameter since
- # there's no real "const"ness there. So, omit the "const" except
- # when a pointer is involved.
- if '*' in nonconst:
- return 'const ' + nonconst
- else:
- return nonconst
-
- def toAtomicType(self):
- return "OVSDB_TYPE_%s" % self.type.to_string().upper()
-
- def copyCValue(self, dst, src, refTable=True):
- args = {'dst': dst, 'src': src}
- if self.ref_table_name:
- if not refTable:
- return "%(dst)s = *%(src)s;" % args
- return ("%(dst)s = %(src)s->header_.uuid;") % args
- elif self.type == StringType:
- return "%(dst)s = xstrdup(%(src)s);" % args
- else:
- return "%(dst)s = %(src)s;" % args
-
- def assign_c_value_casting_away_const(self, dst, src, refTable=True):
- args = {'dst': dst, 'src': src}
- if self.ref_table_name:
- if not refTable:
- return "%(dst)s = *%(src)s;" % args
- return ("%(dst)s = %(src)s->header_.uuid;") % args
- elif self.type == StringType:
- return "%(dst)s = CONST_CAST(char *, %(src)s);" % args
- else:
- return "%(dst)s = %(src)s;" % args
-
- def initCDefault(self, var, is_optional):
- if self.ref_table_name:
- return "%s = NULL;" % var
- elif self.type == StringType and not is_optional:
- return '%s = "";' % var
- else:
- pattern = {IntegerType: '%s = 0;',
- RealType: '%s = 0.0;',
- UuidType: 'uuid_zero(&%s);',
- BooleanType: '%s = false;',
- StringType: '%s = NULL;'}[self.type]
- return pattern % var
-
- def cInitBaseType(self, prefix, prereqs):
- init = [".type = %s," % self.toAtomicType()]
- if self.enum:
- datum_name = "%s_enum" % prefix
- init += [".enum_ = &%s," % datum_name]
- prereqs += self.enum.cDeclareDatum(datum_name)
- if self.type == IntegerType:
- if self.min is None:
- low = "INT64_MIN"
- else:
- low = "INT64_C(%d)" % self.min
- if self.max is None:
- high = "INT64_MAX"
- else:
- high = "INT64_C(%d)" % self.max
- init.append(".integer = { .min = %s, .max = %s }," % (low, high))
- elif self.type == RealType:
- if self.min is None:
- low = "-DBL_MAX"
- else:
- low = self.min
- if self.max is None:
- high = "DBL_MAX"
- else:
- high = self.max
- init.append(".real = { .min = %s, .max = %s }," % (low, high))
- elif self.type == StringType:
-            # String length limits live in min_length/max_length; 'min'
-            # and 'max' are never set for strings.
-            low = self.min_length
-            if self.max_length == sys.maxsize:
-                high = "UINT_MAX"
-            else:
-                high = self.max_length
- init.append(".string = { .minLen = %s, .maxLen = %s }," % (
- low, high))
- elif self.type == UuidType:
- if self.ref_table_name is not None:
- init.append(".uuid = { .refTableName = \"%s\", "
- ".refType = OVSDB_REF_%s }," % (
- escapeCString(self.ref_table_name),
- self.ref_type.upper()))
- return init
-
-
-class Type(object):
- DEFAULT_MIN = 1
- DEFAULT_MAX = 1
-
- def __init__(self, key, value=None, n_min=DEFAULT_MIN, n_max=DEFAULT_MAX):
- self.key = key
- self.value = value
- self.n_min = n_min
- self.n_max = n_max
-
- def copy(self):
- if self.value is None:
- value = None
- else:
- value = self.value.copy()
- return Type(self.key.copy(), value, self.n_min, self.n_max)
-
- def __eq__(self, other):
- if not isinstance(other, Type):
- return NotImplemented
- return (self.key == other.key and self.value == other.value and
- self.n_min == other.n_min and self.n_max == other.n_max)
-
- def __ne__(self, other):
- if not isinstance(other, Type):
- return NotImplemented
- else:
- return not (self == other)
-
- def is_valid(self):
- return (self.key.type != VoidType and self.key.is_valid() and
- (self.value is None or
- (self.value.type != VoidType and self.value.is_valid())) and
- self.n_min <= 1 <= self.n_max)
-
- def is_scalar(self):
- return self.n_min == 1 and self.n_max == 1 and not self.value
-
- def is_optional(self):
- return self.n_min == 0 and self.n_max == 1
-
- def is_composite(self):
- return self.n_max > 1
-
- def is_set(self):
- return self.value is None and (self.n_min != 1 or self.n_max != 1)
-
- def is_map(self):
- return self.value is not None
-
- def is_smap(self):
- return (self.is_map()
- and self.key.type == StringType
- and self.value.type == StringType)
-
- def is_optional_pointer(self):
- return (self.is_optional() and not self.value
- and (self.key.type == StringType or self.key.ref_table_name))
-
- @staticmethod
- def __n_from_json(json, default):
- if json is None:
- return default
- elif isinstance(json, int) and 0 <= json <= sys.maxsize:
- return json
- else:
- raise error.Error("bad min or max value", json)
-
- @staticmethod
- def from_json(json):
- if isinstance(json, six.string_types):
- return Type(BaseType.from_json(json))
-
- parser = ovs.db.parser.Parser(json, "ovsdb type")
- _types = list(six.string_types)
- _types.extend([dict])
- key_json = parser.get("key", _types)
- value_json = parser.get_optional("value", _types)
- min_json = parser.get_optional("min", [int])
- _types = list(six.string_types)
- _types.extend([int])
- max_json = parser.get_optional("max", _types)
- parser.finish()
-
- key = BaseType.from_json(key_json)
- if value_json:
- value = BaseType.from_json(value_json)
- else:
- value = None
-
- n_min = Type.__n_from_json(min_json, Type.DEFAULT_MIN)
-
- if max_json == 'unlimited':
- n_max = sys.maxsize
- else:
- n_max = Type.__n_from_json(max_json, Type.DEFAULT_MAX)
-
- type_ = Type(key, value, n_min, n_max)
- if not type_.is_valid():
- raise error.Error("ovsdb type fails constraint checks", json)
- return type_
-
- def to_json(self):
- if self.is_scalar() and not self.key.has_constraints():
- return self.key.to_json()
-
- json = {"key": self.key.to_json()}
- if self.value is not None:
- json["value"] = self.value.to_json()
- if self.n_min != Type.DEFAULT_MIN:
- json["min"] = self.n_min
- if self.n_max == sys.maxsize:
- json["max"] = "unlimited"
- elif self.n_max != Type.DEFAULT_MAX:
- json["max"] = self.n_max
- return json
-
- def toEnglish(self, escapeLiteral=returnUnchanged):
- keyName = self.key.toEnglish(escapeLiteral)
- if self.value:
- valueName = self.value.toEnglish(escapeLiteral)
-
- if self.is_scalar():
- return keyName
- elif self.is_optional():
- if self.value:
- return "optional %s-%s pair" % (keyName, valueName)
- else:
- return "optional %s" % keyName
- else:
- if self.n_max == sys.maxsize:
- if self.n_min:
- quantity = "%s or more " % commafy(self.n_min)
- else:
- quantity = ""
- elif self.n_min:
- quantity = "%s to %s " % (commafy(self.n_min),
- commafy(self.n_max))
- else:
- quantity = "up to %s " % commafy(self.n_max)
-
- if self.value:
- return "map of %s%s-%s pairs" % (quantity, keyName, valueName)
- else:
- if keyName.endswith('s'):
- plural = keyName + "es"
- else:
- plural = keyName + "s"
- return "set of %s%s" % (quantity, plural)
-
- def constraintsToEnglish(self, escapeLiteral=returnUnchanged,
- escapeNumber=returnUnchanged):
- constraints = []
- keyConstraints = self.key.constraintsToEnglish(escapeLiteral,
- escapeNumber)
- if keyConstraints:
- if self.value:
- constraints.append('key %s' % keyConstraints)
- else:
- constraints.append(keyConstraints)
-
- if self.value:
- valueConstraints = self.value.constraintsToEnglish(escapeLiteral,
- escapeNumber)
- if valueConstraints:
- constraints.append('value %s' % valueConstraints)
-
- return ', '.join(constraints)
-
- def cDeclComment(self):
- if self.n_min == 1 and self.n_max == 1 and self.key.type == StringType:
- return "\t/* Always nonnull. */"
- else:
- return ""
-
- def cInitType(self, prefix, prereqs):
- init = [".key = {"]
- init += [" " + x for x in self.key.cInitBaseType(prefix + "_key",
- prereqs)]
- init += ["},"]
- if self.value:
- init += [".value = {"]
- init += [" " + x
- for x in self.value.cInitBaseType(prefix + "_value",
- prereqs)]
- init += ["},"]
- else:
- init.append(".value = OVSDB_BASE_VOID_INIT,")
- init.append(".n_min = %s," % self.n_min)
- if self.n_max == sys.maxsize:
- n_max = "UINT_MAX"
- else:
- n_max = self.n_max
- init.append(".n_max = %s," % n_max)
- return init
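
A short sketch of the type grammar these classes implement (the JSON below
is invented; "max": "unlimited" is how OVSDB spells an unbounded set):

    import sys

    import ovs.db.types

    # A map of up to 32 string-string pairs, external_ids-style.
    t = ovs.db.types.Type.from_json({"key": "string", "value": "string",
                                     "min": 0, "max": 32})
    assert t.is_map() and t.is_smap()
    print(t.toEnglish())   # -> map of up to 32 string-string pairs

    # An unbounded set of UUIDs; "unlimited" becomes sys.maxsize.
    u = ovs.db.types.Type.from_json({"key": "uuid", "min": 0,
                                     "max": "unlimited"})
    assert u.is_set() and u.n_max == sys.maxsize
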
diff --git a/python/ovs/dirs.py b/python/ovs/dirs.py
deleted file mode 100644
index c67aecbb4..000000000
--- a/python/ovs/dirs.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The @variables@ in this file are replaced by default directories for
-# use in python/ovs/dirs.py in the source directory and replaced by the
-# configured directories for use in the installed python/ovs/dirs.py.
-#
-import os
-
-# Note that the use of """ is to aid in dealing with paths with quotes in them.
-PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """/usr/local/share/openvswitch""")
-RUNDIR = os.environ.get("OVS_RUNDIR", """/var/run""")
-LOGDIR = os.environ.get("OVS_LOGDIR", """/usr/local/var/log""")
-BINDIR = os.environ.get("OVS_BINDIR", """/usr/local/bin""")
-
-DBDIR = os.environ.get("OVS_DBDIR")
-if not DBDIR:
- sysconfdir = os.environ.get("OVS_SYSCONFDIR")
- if sysconfdir:
- DBDIR = "%s/openvswitch" % sysconfdir
- else:
- DBDIR = """/usr/local/etc/openvswitch"""
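
Because these values are computed at import time, overrides have to be in
the environment before ovs.dirs is first imported anywhere in the process;
a sketch (the sandbox paths are invented):

    import os

    os.environ["OVS_RUNDIR"] = "/tmp/ovs-sandbox/run"   # assumed path
    os.environ["OVS_DBDIR"] = "/tmp/ovs-sandbox/db"     # assumed path

    import ovs.dirs   # reads the environment only at this point

    print(ovs.dirs.RUNDIR)   # -> /tmp/ovs-sandbox/run
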
diff --git a/python/ovs/dirs.py.template b/python/ovs/dirs.py.template
deleted file mode 100644
index fb31b7475..000000000
--- a/python/ovs/dirs.py.template
+++ /dev/null
@@ -1,31 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# The @variables@ in this file are replaced by default directories for
-# use in python/ovs/dirs.py in the source directory and replaced by the
-# configured directories for use in the installed python/ovs/dirs.py.
-#
-import os
-
-# Note that the use of """ is to aid in dealing with paths with quotes in them.
-PKGDATADIR = os.environ.get("OVS_PKGDATADIR", """@pkgdatadir@""")
-RUNDIR = os.environ.get("OVS_RUNDIR", """@RUNDIR@""")
-LOGDIR = os.environ.get("OVS_LOGDIR", """@LOGDIR@""")
-BINDIR = os.environ.get("OVS_BINDIR", """@bindir@""")
-
-DBDIR = os.environ.get("OVS_DBDIR")
-if not DBDIR:
- sysconfdir = os.environ.get("OVS_SYSCONFDIR")
- if sysconfdir:
- DBDIR = "%s/openvswitch" % sysconfdir
- else:
- DBDIR = """@DBDIR@"""
diff --git a/python/ovs/fatal_signal.py b/python/ovs/fatal_signal.py
deleted file mode 100644
index cb2e99e87..000000000
--- a/python/ovs/fatal_signal.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (c) 2010, 2011 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import atexit
-import os
-import signal
-import sys
-
-import ovs.vlog
-
-_hooks = []
-vlog = ovs.vlog.Vlog("fatal-signal")
-
-
-def add_hook(hook, cancel, run_at_exit):
- _init()
- _hooks.append((hook, cancel, run_at_exit))
-
-
-def fork():
- """Clears all of the fatal signal hooks without executing them. If any of
- the hooks passed a 'cancel' function to add_hook(), then those functions
- will be called, allowing them to free resources, etc.
-
- Following a fork, one of the resulting processes can call this function to
- allow it to terminate without calling the hooks registered before calling
- this function. New hooks registered after calling this function will take
- effect normally."""
- global _hooks
- for hook, cancel, run_at_exit in _hooks:
- if cancel:
- cancel()
-
- _hooks = []
-
-
-_added_hook = False
-_files = {}
-
-
-def add_file_to_unlink(file):
- """Registers 'file' to be unlinked when the program terminates via
- sys.exit() or a fatal signal."""
- global _added_hook
- if not _added_hook:
- _added_hook = True
- add_hook(_unlink_files, _cancel_files, True)
- _files[file] = None
-
-
-def add_file_to_close_and_unlink(file, fd=None):
- """Registers 'file' to be unlinked when the program terminates via
- sys.exit() or a fatal signal and the 'fd' to be closed. On Windows a file
- cannot be removed while it is open for writing."""
- global _added_hook
- if not _added_hook:
- _added_hook = True
- add_hook(_unlink_files, _cancel_files, True)
- _files[file] = fd
-
-
-def remove_file_to_unlink(file):
- """Unregisters 'file' from being unlinked when the program terminates via
- sys.exit() or a fatal signal."""
- if file in _files:
- del _files[file]
-
-
-def unlink_file_now(file):
-    """Like remove_file_to_unlink(), but also unlinks 'file'.
- Returns 0 if successful, otherwise a positive errno value."""
- error = _unlink(file)
- if error:
- vlog.warn("could not unlink \"%s\" (%s)" % (file, os.strerror(error)))
- remove_file_to_unlink(file)
- return error
-
-
-def _unlink_files():
- for file_ in _files:
- if sys.platform == "win32" and _files[file_]:
- _files[file_].close()
- _unlink(file_)
-
-
-def _cancel_files():
- global _added_hook
- global _files
- _added_hook = False
- _files = {}
-
-
-def _unlink(file_):
- try:
- os.unlink(file_)
- return 0
- except OSError as e:
- return e.errno
-
-
-def _signal_handler(signr, _):
- _call_hooks(signr)
-
- # Re-raise the signal with the default handling so that the program
- # termination status reflects that we were killed by this signal.
- signal.signal(signr, signal.SIG_DFL)
- os.kill(os.getpid(), signr)
-
-
-def _atexit_handler():
- _call_hooks(0)
-
-
-recurse = False
-
-
-def _call_hooks(signr):
- global recurse
- if recurse:
- return
- recurse = True
-
- for hook, cancel, run_at_exit in _hooks:
- if signr != 0 or run_at_exit:
- hook()
-
-
-_inited = False
-
-
-def _init():
- global _inited
- if not _inited:
- _inited = True
- if sys.platform == "win32":
- signals = [signal.SIGTERM, signal.SIGINT]
- else:
- signals = [signal.SIGTERM, signal.SIGINT, signal.SIGHUP,
- signal.SIGALRM]
-
- for signr in signals:
- if signal.getsignal(signr) == signal.SIG_DFL:
- signal.signal(signr, _signal_handler)
- atexit.register(_atexit_handler)
-
-
-def signal_alarm(timeout):
- if not timeout:
- env_timeout = os.environ.get('OVS_CTL_TIMEOUT')
- if env_timeout:
- timeout = int(env_timeout)
- if not timeout:
- return
-
- if sys.platform == "win32":
- import time
- import threading
-
-        class Alarm(threading.Thread):
- def __init__(self, timeout):
- super(Alarm, self).__init__()
- self.timeout = timeout
- self.setDaemon(True)
-
- def run(self):
- time.sleep(self.timeout)
- os._exit(1)
-
- alarm = Alarm(timeout)
- alarm.start()
- else:
- signal.alarm(timeout)
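
A minimal sketch of the pidfile-style pattern these hooks support (the
file name is invented):

    import os

    import ovs.fatal_signal

    PIDFILE = "/tmp/example.pid"   # illustrative path

    with open(PIDFILE, "w") as f:
        f.write("%d\n" % os.getpid())

    # From here on, SIGTERM/SIGINT/SIGHUP or sys.exit() removes the file.
    ovs.fatal_signal.add_file_to_unlink(PIDFILE)

    # ... do work ..., then clean up explicitly once it is no longer needed:
    ovs.fatal_signal.unlink_file_now(PIDFILE)
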
diff --git a/python/ovs/fcntl_win.py b/python/ovs/fcntl_win.py
deleted file mode 100644
index a0ae970fe..000000000
--- a/python/ovs/fcntl_win.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-
-import msvcrt
-
-import pywintypes
-
-import win32con
-
-import win32file
-
-LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
-LOCK_SH = 0 # the default
-LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
-LOCK_UN = 0x80000000 # unlock - non-standard
-
-
-def lockf(fd, flags, length=0xFFFF0000, start=0, whence=0):
- overlapped = pywintypes.OVERLAPPED()
- hfile = msvcrt.get_osfhandle(fd.fileno())
- if LOCK_UN & flags:
- ret = win32file.UnlockFileEx(hfile, 0, start, length, overlapped)
- else:
- try:
- ret = win32file.LockFileEx(hfile, flags, start, length, overlapped)
-        except pywintypes.error:
- raise IOError(errno.EAGAIN, "", "")
-
- return ret
-
-
-def flock(fd, flags):
- lockf(fd, flags, 0xFFFF0000, 0, 0)
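
The module exists so portable callers can swap it in for the POSIX fcntl; a
sketch of that selection (this mirrors the common shim pattern rather than
quoting any particular caller; the win32 branch needs pywin32 installed):

    import sys

    if sys.platform == "win32":
        import ovs.fcntl_win as fcntl
    else:
        import fcntl

    def lock_exclusive(fileobj):
        # LOCK_EX | LOCK_NB means the same thing on either backend.
        fcntl.lockf(fileobj, fcntl.LOCK_EX | fcntl.LOCK_NB)
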
diff --git a/python/ovs/json.py b/python/ovs/json.py
deleted file mode 100644
index 96a07513d..000000000
--- a/python/ovs/json.py
+++ /dev/null
@@ -1,531 +0,0 @@
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import absolute_import
-
-import functools
-import json
-import re
-import sys
-
-import six
-
-PARSER_C = 'C'
-PARSER_PY = 'PYTHON'
-try:
- import ovs._json
- PARSER = PARSER_C
-except ImportError:
- PARSER = PARSER_PY
-
-__pychecker__ = 'no-stringiter'
-
-SPACES_PER_LEVEL = 2
-_dumper = functools.partial(json.dumps, separators=(",", ":"))
-
-if six.PY2:
- def dumper(*args, **kwargs):
- return _dumper(*args, **kwargs).decode('raw-unicode-escape')
-else:
- dumper = _dumper
-
-
-def to_stream(obj, stream, pretty=False, sort_keys=True):
- stream.write(dumper(obj, indent=SPACES_PER_LEVEL if pretty else None,
- sort_keys=sort_keys))
-
-
-def to_file(obj, name, pretty=False, sort_keys=True):
- with open(name, "w") as stream:
- to_stream(obj, stream, pretty, sort_keys)
-
-
-def to_string(obj, pretty=False, sort_keys=True):
- return dumper(obj, indent=SPACES_PER_LEVEL if pretty else None,
- sort_keys=sort_keys)
-
-
-def from_stream(stream):
- p = Parser(check_trailer=True)
- while True:
- buf = stream.read(4096)
- if buf == "" or p.feed(buf) != len(buf):
- break
- return p.finish()
-
-
-def from_file(name):
- stream = open(name, "r")
- try:
- return from_stream(stream)
- finally:
- stream.close()
-
-
-def from_string(s):
- if not isinstance(s, six.text_type):
- # We assume the input is a string. We will only hit this case for a
- # str in Python 2 which is not unicode, so we need to go ahead and
- # decode it.
- try:
- s = six.text_type(s, 'utf-8')
- except UnicodeDecodeError as e:
- seq = ' '.join(["0x%2x" % ord(c)
- for c in e.object[e.start:e.end] if ord(c) >= 0x80])
- return "not a valid UTF-8 string: invalid UTF-8 sequence %s" % seq
- p = Parser(check_trailer=True)
- p.feed(s)
- return p.finish()
-
-
-class Parser(object):
-    # Maximum height of parsing stack.
- MAX_HEIGHT = 1000
-
- def __new__(cls, *args, **kwargs):
- if PARSER == PARSER_C:
- return ovs._json.Parser(*args, **kwargs)
- return super(Parser, cls).__new__(cls)
-
- def __init__(self, check_trailer=False):
- self.check_trailer = check_trailer
-
- # Lexical analysis.
- self.lex_state = Parser.__lex_start
- self.buffer = ""
- self.line_number = 0
- self.column_number = 0
- self.byte_number = 0
-
- # Parsing.
- self.parse_state = Parser.__parse_start
- self.stack = []
- self.member_name = None
-
- # Parse status.
- self.done = False
- self.error = None
-
- def __lex_start_space(self, c):
- pass
-
- def __lex_start_alpha(self, c):
- self.buffer = c
- self.lex_state = Parser.__lex_keyword
-
- def __lex_start_token(self, c):
- self.__parser_input(c)
-
- def __lex_start_number(self, c):
- self.buffer = c
- self.lex_state = Parser.__lex_number
-
- def __lex_start_string(self, _):
- self.lex_state = Parser.__lex_string
-
- def __lex_start_error(self, c):
- if ord(c) >= 32 and ord(c) < 128:
- self.__error("invalid character '%s'" % c)
- else:
- self.__error("invalid character U+%04x" % ord(c))
-
- __lex_start_actions = {}
- for c in " \t\n\r":
- __lex_start_actions[c] = __lex_start_space
- for c in "abcdefghijklmnopqrstuvwxyz":
- __lex_start_actions[c] = __lex_start_alpha
- for c in "[{]}:,":
- __lex_start_actions[c] = __lex_start_token
- for c in "-0123456789":
- __lex_start_actions[c] = __lex_start_number
- __lex_start_actions['"'] = __lex_start_string
-
- def __lex_start(self, c):
- Parser.__lex_start_actions.get(
- c, Parser.__lex_start_error)(self, c)
- return True
-
- __lex_alpha = {}
- for c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
- __lex_alpha[c] = True
-
- def __lex_finish_keyword(self):
- if self.buffer == "false":
- self.__parser_input(False)
- elif self.buffer == "true":
- self.__parser_input(True)
- elif self.buffer == "null":
- self.__parser_input(None)
- else:
- self.__error("invalid keyword '%s'" % self.buffer)
-
- def __lex_keyword(self, c):
- if c in Parser.__lex_alpha:
- self.buffer += c
- return True
- else:
- self.__lex_finish_keyword()
- return False
-
- __number_re = re.compile("(-)?(0|[1-9][0-9]*)"
- r"(?:\.([0-9]+))?(?:[eE]([-+]?[0-9]+))?$")
-
- def __lex_finish_number(self):
- s = self.buffer
- m = Parser.__number_re.match(s)
- if m:
- sign, integer, fraction, exp = m.groups()
- if (exp is not None and
- (int(exp) > sys.maxsize or int(exp) < -sys.maxsize - 1)):
- self.__error("exponent outside valid range")
- return
-
- if fraction is not None and len(fraction.lstrip('0')) == 0:
- fraction = None
-
- sig_string = integer
- if fraction is not None:
- sig_string += fraction
- significand = int(sig_string)
-
- pow10 = 0
- if fraction is not None:
- pow10 -= len(fraction)
- if exp is not None:
- pow10 += int(exp)
-
- if significand == 0:
- self.__parser_input(0)
- return
- elif significand <= 2 ** 63:
- while pow10 > 0 and significand <= 2 ** 63:
- significand *= 10
- pow10 -= 1
- while pow10 < 0 and significand % 10 == 0:
- significand //= 10
- pow10 += 1
- if (pow10 == 0 and
- ((not sign and significand < 2 ** 63) or
- (sign and significand <= 2 ** 63))):
- if sign:
- self.__parser_input(-significand)
- else:
- self.__parser_input(significand)
- return
-
- value = float(s)
- if value == float("inf") or value == float("-inf"):
- self.__error("number outside valid range")
- return
- if value == 0:
- # Suppress negative zero.
- value = 0
- self.__parser_input(value)
- elif re.match("-?0[0-9]", s):
- self.__error("leading zeros not allowed")
- elif re.match("-([^0-9]|$)", s):
- self.__error("'-' must be followed by digit")
- elif re.match(r"-?(0|[1-9][0-9]*)\.([^0-9]|$)", s):
- self.__error("decimal point must be followed by digit")
- elif re.search("e[-+]?([^0-9]|$)", s):
- self.__error("exponent must contain at least one digit")
- else:
- self.__error("syntax error in number")
-
- def __lex_number(self, c):
- if c in ".0123456789eE-+":
- self.buffer += c
- return True
- else:
- self.__lex_finish_number()
- return False
-
- __4hex_re = re.compile("[0-9a-fA-F]{4}")
-
- def __lex_4hex(self, s):
- if len(s) < 4:
- self.__error("quoted string ends within \\u escape")
- elif not Parser.__4hex_re.match(s):
- self.__error("malformed \\u escape")
- elif s == "0000":
- self.__error("null bytes not supported in quoted strings")
- else:
- return int(s, 16)
-
- @staticmethod
- def __is_leading_surrogate(c):
- """Returns true if 'c' is a Unicode code point for a leading
- surrogate."""
- return c >= 0xd800 and c <= 0xdbff
-
- @staticmethod
- def __is_trailing_surrogate(c):
- """Returns true if 'c' is a Unicode code point for a trailing
- surrogate."""
- return c >= 0xdc00 and c <= 0xdfff
-
- @staticmethod
- def __utf16_decode_surrogate_pair(leading, trailing):
- """Returns the unicode code point corresponding to leading surrogate
- 'leading' and trailing surrogate 'trailing'. The return value will not
- make any sense if 'leading' or 'trailing' are not in the correct ranges
- for leading or trailing surrogates."""
- # Leading surrogate: 110110wwwwxxxxxx
- # Trailing surrogate: 110111xxxxxxxxxx
- # Code point: 000uuuuuxxxxxxxxxxxxxxxx
- w = (leading >> 6) & 0xf
- u = w + 1
- x0 = leading & 0x3f
- x1 = trailing & 0x3ff
- return (u << 16) | (x0 << 10) | x1
-
-    __unescape = {'"': u'"',
- "\\": u"\\",
- "/": u"/",
- "b": u"\b",
- "f": u"\f",
- "n": u"\n",
- "r": u"\r",
- "t": u"\t"}
-
- def __lex_finish_string(self):
- inp = self.buffer
- out = u""
- while len(inp):
- backslash = inp.find('\\')
- if backslash == -1:
- out += inp
- break
- out += inp[:backslash]
- inp = inp[backslash + 1:]
- if inp == "":
- self.__error("quoted string may not end with backslash")
- return
-
- replacement = Parser.__unescape.get(inp[0])
- if replacement is not None:
- out += replacement
- inp = inp[1:]
- continue
- elif inp[0] != u'u':
- self.__error("bad escape \\%s" % inp[0])
- return
-
- c0 = self.__lex_4hex(inp[1:5])
- if c0 is None:
- return
- inp = inp[5:]
-
- if Parser.__is_leading_surrogate(c0):
- if inp[:2] != u'\\u':
- self.__error("malformed escaped surrogate pair")
- return
- c1 = self.__lex_4hex(inp[2:6])
- if c1 is None:
- return
- if not Parser.__is_trailing_surrogate(c1):
- self.__error("second half of escaped surrogate pair is "
- "not trailing surrogate")
- return
- code_point = Parser.__utf16_decode_surrogate_pair(c0, c1)
- inp = inp[6:]
- else:
- code_point = c0
- out += six.unichr(code_point)
- self.__parser_input('string', out)
-
- def __lex_string_escape(self, c):
- self.buffer += c
- self.lex_state = Parser.__lex_string
- return True
-
- def __lex_string(self, c):
- if c == '\\':
- self.buffer += c
- self.lex_state = Parser.__lex_string_escape
- elif c == '"':
- self.__lex_finish_string()
- elif ord(c) >= 0x20:
- self.buffer += c
- else:
- self.__error("U+%04X must be escaped in quoted string" % ord(c))
- return True
-
- def __lex_input(self, c):
- eat = self.lex_state(self, c)
- assert eat is True or eat is False
- return eat
-
- def __parse_start(self, token, unused_string):
- if token == '{':
- self.__push_object()
- elif token == '[':
- self.__push_array()
- else:
- self.__error("syntax error at beginning of input")
-
- def __parse_end(self, unused_token, unused_string):
- self.__error("trailing garbage at end of input")
-
- def __parse_object_init(self, token, string):
- if token == '}':
- self.__parser_pop()
- else:
- self.__parse_object_name(token, string)
-
- def __parse_object_name(self, token, string):
- if token == 'string':
- self.member_name = string
- self.parse_state = Parser.__parse_object_colon
- else:
- self.__error("syntax error parsing object expecting string")
-
- def __parse_object_colon(self, token, unused_string):
- if token == ":":
- self.parse_state = Parser.__parse_object_value
- else:
- self.__error("syntax error parsing object expecting ':'")
-
- def __parse_object_value(self, token, string):
- self.__parse_value(token, string, Parser.__parse_object_next)
-
- def __parse_object_next(self, token, unused_string):
- if token == ",":
- self.parse_state = Parser.__parse_object_name
- elif token == "}":
- self.__parser_pop()
- else:
- self.__error("syntax error expecting '}' or ','")
-
- def __parse_array_init(self, token, string):
- if token == ']':
- self.__parser_pop()
- else:
- self.__parse_array_value(token, string)
-
- def __parse_array_value(self, token, string):
- self.__parse_value(token, string, Parser.__parse_array_next)
-
- def __parse_array_next(self, token, unused_string):
- if token == ",":
- self.parse_state = Parser.__parse_array_value
- elif token == "]":
- self.__parser_pop()
- else:
- self.__error("syntax error expecting ']' or ','")
-
- def __parser_input(self, token, string=None):
- self.lex_state = Parser.__lex_start
- self.buffer = ""
- self.parse_state(self, token, string)
-
- def __put_value(self, value):
- top = self.stack[-1]
- if isinstance(top, dict):
- top[self.member_name] = value
- else:
- top.append(value)
-
- def __parser_push(self, new_json, next_state):
- if len(self.stack) < Parser.MAX_HEIGHT:
- if len(self.stack) > 0:
- self.__put_value(new_json)
- self.stack.append(new_json)
- self.parse_state = next_state
- else:
- self.__error("input exceeds maximum nesting depth %d" %
- Parser.MAX_HEIGHT)
-
- def __push_object(self):
- self.__parser_push({}, Parser.__parse_object_init)
-
- def __push_array(self):
- self.__parser_push([], Parser.__parse_array_init)
-
- def __parser_pop(self):
- if len(self.stack) == 1:
- self.parse_state = Parser.__parse_end
- if not self.check_trailer:
- self.done = True
- else:
- self.stack.pop()
- top = self.stack[-1]
- if isinstance(top, list):
- self.parse_state = Parser.__parse_array_next
- else:
- self.parse_state = Parser.__parse_object_next
-
- def __parse_value(self, token, string, next_state):
- number_types = list(six.integer_types)
- number_types.extend([float])
- number_types = tuple(number_types)
- if token in [False, None, True] or isinstance(token, number_types):
- self.__put_value(token)
- elif token == 'string':
- self.__put_value(string)
- else:
- if token == '{':
- self.__push_object()
- elif token == '[':
- self.__push_array()
- else:
- self.__error("syntax error expecting value")
- return
- self.parse_state = next_state
-
- def __error(self, message):
- if self.error is None:
- self.error = ("line %d, column %d, byte %d: %s"
- % (self.line_number, self.column_number,
- self.byte_number, message))
- self.done = True
-
- def feed(self, s):
- i = 0
- while True:
- if self.done or i >= len(s):
- return i
-
- c = s[i]
- if self.__lex_input(c):
- self.byte_number += 1
- if c == '\n':
- self.column_number = 0
- self.line_number += 1
- else:
- self.column_number += 1
-
- i += 1
-
- def is_done(self):
- return self.done
-
- def finish(self):
- if self.lex_state == Parser.__lex_start:
- pass
- elif self.lex_state in (Parser.__lex_string,
- Parser.__lex_string_escape):
- self.__error("unexpected end of input in quoted string")
- else:
- self.__lex_input(" ")
-
- if self.parse_state == Parser.__parse_start:
- self.__error("empty input stream")
- elif self.parse_state != Parser.__parse_end:
- self.__error("unexpected end of input")
-
- if self.error is None:
- assert len(self.stack) == 1
- return self.stack.pop()
- else:
- return self.error
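For readers skimming the removed json.py, a minimal sketch of how this
incremental parser is driven: feed() consumes a chunk at a time and returns
how many characters it ate, is_done() reports completion, and finish()
yields either the parsed value or an error string. The chunk boundaries
below are illustrative only:

    import ovs.json

    parser = ovs.json.Parser()
    for chunk in ('{"method": "ec', 'ho", "params": [], "id": 0}'):
        parser.feed(chunk)        # Returns the number of characters consumed.

    result = parser.finish()      # Parsed value on success; on failure an
    if isinstance(result, str):   # error string like "line 0, column 5: ...".
        raise ValueError(result)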
diff --git a/python/ovs/jsonrpc.py b/python/ovs/jsonrpc.py
deleted file mode 100644
index 4a3027e9e..000000000
--- a/python/ovs/jsonrpc.py
+++ /dev/null
@@ -1,616 +0,0 @@
-# Copyright (c) 2010, 2011, 2012, 2013 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import codecs
-import errno
-import os
-import random
-import sys
-
-import ovs.json
-import ovs.poller
-import ovs.reconnect
-import ovs.stream
-import ovs.timeval
-import ovs.util
-import ovs.vlog
-
-import six
-
-EOF = ovs.util.EOF
-vlog = ovs.vlog.Vlog("jsonrpc")
-
-
-class Message(object):
- T_REQUEST = 0 # Request.
- T_NOTIFY = 1 # Notification.
- T_REPLY = 2 # Successful reply.
- T_ERROR = 3 # Error reply.
-
- __types = {T_REQUEST: "request",
- T_NOTIFY: "notification",
- T_REPLY: "reply",
- T_ERROR: "error"}
-
- def __init__(self, type_, method, params, result, error, id):
- self.type = type_
- self.method = method
- self.params = params
- self.result = result
- self.error = error
- self.id = id
-
- _next_id = 0
-
- @staticmethod
- def _create_id():
- this_id = Message._next_id
- Message._next_id += 1
- return this_id
-
- @staticmethod
- def create_request(method, params):
- return Message(Message.T_REQUEST, method, params, None, None,
- Message._create_id())
-
- @staticmethod
- def create_notify(method, params):
- return Message(Message.T_NOTIFY, method, params, None, None,
- None)
-
- @staticmethod
- def create_reply(result, id):
- return Message(Message.T_REPLY, None, None, result, None, id)
-
- @staticmethod
- def create_error(error, id):
- return Message(Message.T_ERROR, None, None, None, error, id)
-
- @staticmethod
- def type_to_string(type_):
- return Message.__types[type_]
-
- def __validate_arg(self, value, name, must_have):
- if (value is not None) == (must_have != 0):
- return None
- else:
- type_name = Message.type_to_string(self.type)
- if must_have:
- verb = "must"
- else:
- verb = "must not"
- return "%s %s have \"%s\"" % (type_name, verb, name)
-
- def is_valid(self):
- if self.params is not None and not isinstance(self.params, list):
- return "\"params\" must be JSON array"
-
- pattern = {Message.T_REQUEST: 0x11001,
- Message.T_NOTIFY: 0x11000,
- Message.T_REPLY: 0x00101,
- Message.T_ERROR: 0x00011}.get(self.type)
- if pattern is None:
- return "invalid JSON-RPC message type %s" % self.type
-
- return (
- self.__validate_arg(self.method, "method", pattern & 0x10000) or
- self.__validate_arg(self.params, "params", pattern & 0x1000) or
- self.__validate_arg(self.result, "result", pattern & 0x100) or
- self.__validate_arg(self.error, "error", pattern & 0x10) or
- self.__validate_arg(self.id, "id", pattern & 0x1))
-
- @staticmethod
- def from_json(json):
- if not isinstance(json, dict):
- return "message is not a JSON object"
-
- # Make a copy to avoid modifying the caller's dict.
- json = dict(json)
-
- if "method" in json:
- method = json.pop("method")
- if not isinstance(method, six.string_types):
- return "method is not a JSON string"
- else:
- method = None
-
- params = json.pop("params", None)
- result = json.pop("result", None)
- error = json.pop("error", None)
- id_ = json.pop("id", None)
- if len(json):
- return "message has unexpected member \"%s\"" % json.popitem()[0]
-
- if result is not None:
- msg_type = Message.T_REPLY
- elif error is not None:
- msg_type = Message.T_ERROR
- elif id_ is not None:
- msg_type = Message.T_REQUEST
- else:
- msg_type = Message.T_NOTIFY
-
- msg = Message(msg_type, method, params, result, error, id_)
- validation_error = msg.is_valid()
- if validation_error is not None:
- return validation_error
- else:
- return msg
-
- def to_json(self):
- json = {}
-
- if self.method is not None:
- json["method"] = self.method
-
- if self.params is not None:
- json["params"] = self.params
-
- if self.result is not None or self.type == Message.T_ERROR:
- json["result"] = self.result
-
- if self.error is not None or self.type == Message.T_REPLY:
- json["error"] = self.error
-
- if self.id is not None or self.type == Message.T_NOTIFY:
- json["id"] = self.id
-
- return json
-
- def __str__(self):
- s = [Message.type_to_string(self.type)]
- if self.method is not None:
- s.append("method=\"%s\"" % self.method)
- if self.params is not None:
- s.append("params=" + ovs.json.to_string(self.params))
- if self.result is not None:
- s.append("result=" + ovs.json.to_string(self.result))
- if self.error is not None:
- s.append("error=" + ovs.json.to_string(self.error))
- if self.id is not None:
- s.append("id=" + ovs.json.to_string(self.id))
- return ", ".join(s)
-
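The hex patterns in is_valid() above pack the five required/forbidden
members into a single literal, one nibble each for method, params, result,
error, and id. A small illustration (values copied from the table above):

    pattern = 0x11001             # T_REQUEST: method, params, and id required.
    assert pattern & 0x10000      # "method" nibble set: must be present.
    assert not (pattern & 0x100)  # "result" nibble clear: must be absent.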
-
-class Connection(object):
- def __init__(self, stream):
- self.name = stream.name
- self.stream = stream
- self.status = 0
- self.input = ""
- self.output = ""
- self.parser = None
- self.received_bytes = 0
-
- def close(self):
- self.stream.close()
- self.stream = None
-
- def run(self):
- if self.status:
- return
-
- while len(self.output):
- retval = self.stream.send(self.output)
- if retval >= 0:
- self.output = self.output[retval:]
- else:
-                if retval != -errno.EAGAIN:
-                    vlog.warn("%s: send error: %s" %
-                              (self.name, os.strerror(-retval)))
-                    self.error(-retval)
-                break
-
- def wait(self, poller):
- if not self.status:
- self.stream.run_wait(poller)
- if len(self.output):
- self.stream.send_wait(poller)
-
- def get_status(self):
- return self.status
-
- def get_backlog(self):
- if self.status != 0:
- return 0
- else:
- return len(self.output)
-
- def get_received_bytes(self):
- return self.received_bytes
-
- def __log_msg(self, title, msg):
- if vlog.dbg_is_enabled():
- vlog.dbg("%s: %s %s" % (self.name, title, msg))
-
- def send(self, msg):
- if self.status:
- return self.status
-
- self.__log_msg("send", msg)
-
- was_empty = len(self.output) == 0
- self.output += ovs.json.to_string(msg.to_json())
- if was_empty:
- self.run()
- return self.status
-
- def send_block(self, msg):
- error = self.send(msg)
- if error:
- return error
-
- while True:
- self.run()
- if not self.get_backlog() or self.get_status():
- return self.status
-
- poller = ovs.poller.Poller()
- self.wait(poller)
- poller.block()
-
- def recv(self):
- if self.status:
- return self.status, None
-
- decoder = codecs.getincrementaldecoder('utf-8')()
- while True:
- if not self.input:
- error, data = self.stream.recv(4096)
- # Python 3 has separate types for strings and bytes. We
- # received bytes from a socket. We expect it to be string
- # data, so we convert it here as soon as possible.
- if data and not error:
- try:
- if six.PY3 or ovs.json.PARSER == ovs.json.PARSER_PY:
- data = decoder.decode(data)
- except UnicodeError:
- error = errno.EILSEQ
- if error:
- if (sys.platform == "win32" and
- error == errno.WSAEWOULDBLOCK):
- # WSAEWOULDBLOCK would be the equivalent on Windows
- # for EAGAIN on Unix.
- error = errno.EAGAIN
- if error == errno.EAGAIN:
- return error, None
- else:
- # XXX rate-limit
- vlog.warn("%s: receive error: %s"
- % (self.name, os.strerror(error)))
- self.error(error)
- return self.status, None
- elif not data:
- self.error(EOF)
- return EOF, None
- else:
- self.input += data
- self.received_bytes += len(data)
- else:
- if self.parser is None:
- self.parser = ovs.json.Parser()
- if six.PY3 and ovs.json.PARSER == ovs.json.PARSER_C:
- self.input = self.input.encode('utf-8')[
- self.parser.feed(self.input):].decode()
- else:
- self.input = self.input[self.parser.feed(self.input):]
- if self.parser.is_done():
- msg = self.__process_msg()
- if msg:
- return 0, msg
- else:
- return self.status, None
-
- def recv_block(self):
- while True:
- error, msg = self.recv()
- if error != errno.EAGAIN:
- return error, msg
-
- self.run()
-
- poller = ovs.poller.Poller()
- self.wait(poller)
- self.recv_wait(poller)
- poller.block()
-
- def transact_block(self, request):
- id_ = request.id
-
- error = self.send(request)
- reply = None
- while not error:
- error, reply = self.recv_block()
- if (reply
- and (reply.type == Message.T_REPLY
- or reply.type == Message.T_ERROR)
- and reply.id == id_):
- break
- return error, reply
-
- def __process_msg(self):
- json = self.parser.finish()
- self.parser = None
- if isinstance(json, six.string_types):
- # XXX rate-limit
- vlog.warn("%s: error parsing stream: %s" % (self.name, json))
- self.error(errno.EPROTO)
- return
-
- msg = Message.from_json(json)
- if not isinstance(msg, Message):
- # XXX rate-limit
- vlog.warn("%s: received bad JSON-RPC message: %s"
- % (self.name, msg))
- self.error(errno.EPROTO)
- return
-
- self.__log_msg("received", msg)
- return msg
-
- def recv_wait(self, poller):
- if self.status or self.input:
- poller.immediate_wake()
- else:
- self.stream.recv_wait(poller)
-
- def error(self, error):
- if self.status == 0:
- self.status = error
- self.stream.close()
- self.output = ""
-
-
-class Session(object):
- """A JSON-RPC session with reconnection."""
-
- def __init__(self, reconnect, rpc, remotes):
- self.reconnect = reconnect
- self.rpc = rpc
- self.stream = None
- self.pstream = None
- self.seqno = 0
- if type(remotes) != list:
- remotes = [remotes]
- self.remotes = remotes
- random.shuffle(self.remotes)
- self.next_remote = 0
-
- @staticmethod
- def open(name, probe_interval=None):
- """Creates and returns a Session that maintains a JSON-RPC session to
- 'name', which should be a string acceptable to ovs.stream.Stream or
- ovs.stream.PassiveStream's initializer.
-
- If 'name' is an active connection method, e.g. "tcp:127.1.2.3", the new
- session connects and reconnects, with back-off, to 'name'.
-
- If 'name' is a passive connection method, e.g. "ptcp:", the new session
- listens for connections to 'name'. It maintains at most one connection
- at any given time. Any new connection causes the previous one (if any)
- to be dropped.
-
- If "probe_interval" is zero it disables the connection keepalive
- feature. If non-zero the value will be forced to at least 1000
- milliseconds. If None it will just use the default value in OVS.
- """
- return Session.open_multiple([name], probe_interval=probe_interval)
-
- @staticmethod
- def open_multiple(remotes, probe_interval=None):
- reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
- session = Session(reconnect, None, remotes)
- session.pick_remote()
- reconnect.enable(ovs.timeval.msec())
- reconnect.set_backoff_free_tries(len(remotes))
- if ovs.stream.PassiveStream.is_valid_name(reconnect.get_name()):
- reconnect.set_passive(True, ovs.timeval.msec())
-
- if not ovs.stream.stream_or_pstream_needs_probes(reconnect.get_name()):
- reconnect.set_probe_interval(0)
- elif probe_interval is not None:
- reconnect.set_probe_interval(probe_interval)
-
- return session
-
- @staticmethod
- def open_unreliably(jsonrpc):
- reconnect = ovs.reconnect.Reconnect(ovs.timeval.msec())
- session = Session(reconnect, None, [jsonrpc.name])
- reconnect.set_quiet(True)
- session.pick_remote()
- reconnect.set_max_tries(0)
- reconnect.connected(ovs.timeval.msec())
- return session
-
- def pick_remote(self):
- self.reconnect.set_name(self.remotes[self.next_remote])
- self.next_remote = (self.next_remote + 1) % len(self.remotes)
-
- def close(self):
- if self.rpc is not None:
- self.rpc.close()
- self.rpc = None
- if self.stream is not None:
- self.stream.close()
- self.stream = None
- if self.pstream is not None:
- self.pstream.close()
- self.pstream = None
-
- def __disconnect(self):
- if self.rpc is not None:
- self.rpc.error(EOF)
- self.rpc.close()
- self.rpc = None
- elif self.stream is not None:
- self.stream.close()
- self.stream = None
- else:
- return
-
- self.seqno += 1
- self.pick_remote()
-
- def __connect(self):
- self.__disconnect()
-
- name = self.reconnect.get_name()
- if not self.reconnect.is_passive():
- error, self.stream = ovs.stream.Stream.open(name)
- if not error:
- self.reconnect.connecting(ovs.timeval.msec())
- else:
- self.reconnect.connect_failed(ovs.timeval.msec(), error)
- self.stream = None
- self.pick_remote()
- elif self.pstream is None:
- error, self.pstream = ovs.stream.PassiveStream.open(name)
- if not error:
- self.reconnect.listening(ovs.timeval.msec())
- else:
- self.reconnect.connect_failed(ovs.timeval.msec(), error)
- self.pick_remote()
-
- self.seqno += 1
-
- def run(self):
- if self.pstream is not None:
- error, stream = self.pstream.accept()
- if error == 0:
- if self.rpc or self.stream:
- # XXX rate-limit
- vlog.info("%s: new connection replacing active "
- "connection" % self.reconnect.get_name())
- self.__disconnect()
- self.reconnect.connected(ovs.timeval.msec())
- self.rpc = Connection(stream)
- elif error != errno.EAGAIN:
- self.reconnect.listen_error(ovs.timeval.msec(), error)
- self.pstream.close()
- self.pstream = None
-
- if self.rpc:
- backlog = self.rpc.get_backlog()
- self.rpc.run()
- if self.rpc.get_backlog() < backlog:
- # Data previously caught in a queue was successfully sent (or
- # there's an error, which we'll catch below).
- #
- # We don't count data that is successfully sent immediately as
- # activity, because there's a lot of queuing downstream from
- # us, which means that we can push a lot of data into a
- # connection that has stalled and won't ever recover.
- self.reconnect.activity(ovs.timeval.msec())
-
- error = self.rpc.get_status()
- if error != 0:
- self.reconnect.disconnected(ovs.timeval.msec(), error)
- self.__disconnect()
- elif self.stream is not None:
- self.stream.run()
- error = self.stream.connect()
- if error == 0:
- self.reconnect.connected(ovs.timeval.msec())
- self.rpc = Connection(self.stream)
- self.stream = None
- elif error != errno.EAGAIN:
- self.reconnect.connect_failed(ovs.timeval.msec(), error)
- self.pick_remote()
- self.stream.close()
- self.stream = None
-
- action = self.reconnect.run(ovs.timeval.msec())
- if action == ovs.reconnect.CONNECT:
- self.__connect()
- elif action == ovs.reconnect.DISCONNECT:
- self.reconnect.disconnected(ovs.timeval.msec(), 0)
- self.__disconnect()
- elif action == ovs.reconnect.PROBE:
- if self.rpc:
- request = Message.create_request("echo", [])
- request.id = "echo"
- self.rpc.send(request)
- else:
- assert action is None
-
- def wait(self, poller):
- if self.rpc is not None:
- self.rpc.wait(poller)
- elif self.stream is not None:
- self.stream.run_wait(poller)
- self.stream.connect_wait(poller)
- if self.pstream is not None:
- self.pstream.wait(poller)
- self.reconnect.wait(poller, ovs.timeval.msec())
-
- def get_backlog(self):
- if self.rpc is not None:
- return self.rpc.get_backlog()
- else:
- return 0
-
- def get_name(self):
- return self.reconnect.get_name()
-
- def send(self, msg):
- if self.rpc is not None:
- return self.rpc.send(msg)
- else:
- return errno.ENOTCONN
-
- def recv(self):
- if self.rpc is not None:
- received_bytes = self.rpc.get_received_bytes()
- error, msg = self.rpc.recv()
- if received_bytes != self.rpc.get_received_bytes():
- # Data was successfully received.
- #
- # Previously we only counted receiving a full message as
- # activity, but with large messages or a slow connection that
- # policy could time out the session mid-message.
- self.reconnect.activity(ovs.timeval.msec())
-
- if not error:
- if msg.type == Message.T_REQUEST and msg.method == "echo":
- # Echo request. Send reply.
- self.send(Message.create_reply(msg.params, msg.id))
- elif msg.type == Message.T_REPLY and msg.id == "echo":
- # It's a reply to our echo request. Suppress it.
- pass
- else:
- return msg
- return None
-
- def recv_wait(self, poller):
- if self.rpc is not None:
- self.rpc.recv_wait(poller)
-
- def is_alive(self):
- if self.rpc is not None or self.stream is not None:
- return True
- else:
- max_tries = self.reconnect.get_max_tries()
- return max_tries is None or max_tries > 0
-
- def is_connected(self):
- return self.rpc is not None
-
- def get_seqno(self):
- return self.seqno
-
- def force_reconnect(self):
- self.reconnect.force_reconnect(ovs.timeval.msec())
-
- def get_num_of_remotes(self):
- return len(self.remotes)
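Connection and Session above are driven with the usual OVS poll-loop idiom.
A hedged sketch (the remote "tcp:127.0.0.1:6640" is only an example):

    import ovs.jsonrpc
    import ovs.poller

    session = ovs.jsonrpc.Session.open("tcp:127.0.0.1:6640")
    while True:
        session.run()             # Drive the reconnect FSM and stream I/O.
        msg = session.recv()      # Echo probes are handled internally.
        if msg is not None:
            print(msg)            # A Message instance.
        poller = ovs.poller.Poller()
        session.wait(poller)
        session.recv_wait(poller)
        poller.block()            # Sleep until I/O is ready or a timer fires.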
diff --git a/python/ovs/ovsuuid.py b/python/ovs/ovsuuid.py
deleted file mode 100644
index 35c5bd29f..000000000
--- a/python/ovs/ovsuuid.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (c) 2009, 2010, 2011, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import uuid
-
-import ovs.db.parser
-from ovs.db import error
-
-import six
-from six.moves import range
-
-uuidRE = re.compile("^xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx$"
- .replace('x', '[0-9a-fA-F]'))
-
-
-def zero():
- return uuid.UUID(int=0)
-
-
-def is_valid_string(s):
- return uuidRE.match(s) is not None
-
-
-def from_string(s):
- if not is_valid_string(s):
- raise error.Error("%s is not a valid UUID" % s)
- return uuid.UUID(s)
-
-
-def from_json(json, symtab=None):
- try:
- s = ovs.db.parser.unwrap_json(json, "uuid", six.string_types, "string")
- if not uuidRE.match(s):
- raise error.Error("\"%s\" is not a valid UUID" % s, json)
- return uuid.UUID(s)
- except error.Error as e:
- if not symtab:
- raise e
- try:
- name = ovs.db.parser.unwrap_json(json, "named-uuid",
- six.string_types, "string")
- except error.Error:
- raise e
-
- if name not in symtab:
- symtab[name] = uuid.uuid4()
- return symtab[name]
-
-
-def to_json(uuid_):
- return ["uuid", str(uuid_)]
-
-
-def to_c_initializer(uuid_, var):
- hex_string = uuid_.hex
- parts = ["0x%s" % (hex_string[x * 8:(x + 1) * 8])
- for x in range(4)]
- return "{ %s }," % ", ".join(parts)
diff --git a/python/ovs/poller.py b/python/ovs/poller.py
deleted file mode 100644
index 3624ec865..000000000
--- a/python/ovs/poller.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright (c) 2010, 2015 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-import os
-
-import select
-import socket
-import sys
-
-import ovs.timeval
-import ovs.vlog
-
-if sys.platform == "win32":
- import ovs.winutils as winutils
-
-try:
- from OpenSSL import SSL
-except ImportError:
- SSL = None
-
-try:
- from eventlet import patcher as eventlet_patcher
-
- def _using_eventlet_green_select():
- return eventlet_patcher.is_monkey_patched(select)
-except:
- eventlet_patcher = None
-
- def _using_eventlet_green_select():
- return False
-
-try:
- from gevent import monkey as gevent_monkey
-except:
- gevent_monkey = None
-
-
-vlog = ovs.vlog.Vlog("poller")
-
-POLLIN = 0x001
-POLLOUT = 0x004
-POLLERR = 0x008
-POLLHUP = 0x010
-POLLNVAL = 0x020
-
-
-# eventlet/gevent doesn't support select.poll. If select.poll is used,
-# python interpreter is blocked as a whole instead of switching from the
-# current thread that is about to block to other runnable thread.
-# So emulate select.poll by select.select because using python means that
-# performance isn't so important.
-class _SelectSelect(object):
- """ select.poll emulation by using select.select.
- Only register and poll are needed at the moment.
- """
- def __init__(self):
- self.rlist = []
- self.wlist = []
- self.xlist = []
-
- def register(self, fd, events):
- if isinstance(fd, socket.socket):
- fd = fd.fileno()
- if SSL and isinstance(fd, SSL.Connection):
- fd = fd.fileno()
-
- if sys.platform != 'win32':
-            # Skip this on Windows, it also registers events
- assert isinstance(fd, int)
- if events & POLLIN:
- self.rlist.append(fd)
- events &= ~POLLIN
- if events & POLLOUT:
- self.wlist.append(fd)
- events &= ~POLLOUT
- if events:
- self.xlist.append(fd)
-
- def poll(self, timeout):
- # XXX workaround a bug in eventlet
- # see https://github.com/eventlet/eventlet/pull/25
- if timeout == 0 and _using_eventlet_green_select():
- timeout = 0.1
- if sys.platform == 'win32':
- events = self.rlist + self.wlist + self.xlist
- if not events:
- return []
- if len(events) > winutils.win32event.MAXIMUM_WAIT_OBJECTS:
-                raise WindowsError("Cannot handle more than maximum wait "
-                                   "objects\n")
-
- # win32event.INFINITE timeout is -1
- # timeout must be an int number, expressed in ms
- if timeout == 0.1:
- timeout = 100
- else:
- timeout = int(timeout)
-
- # Wait until any of the events is set to signaled
- try:
- retval = winutils.win32event.WaitForMultipleObjects(
- events,
- False, # Wait all
- timeout)
- except winutils.pywintypes.error:
- return [(0, POLLERR)]
-
- if retval == winutils.winerror.WAIT_TIMEOUT:
- return []
-
- if events[retval] in self.rlist:
- revent = POLLIN
- elif events[retval] in self.wlist:
- revent = POLLOUT
- else:
- revent = POLLERR
-
- return [(events[retval], revent)]
- else:
- if timeout == -1:
- # epoll uses -1 for infinite timeout, select uses None.
- timeout = None
- else:
- timeout = float(timeout) / 1000
- rlist, wlist, xlist = select.select(self.rlist,
- self.wlist,
- self.xlist,
- timeout)
- events_dict = {}
- for fd in rlist:
- events_dict[fd] = events_dict.get(fd, 0) | POLLIN
- for fd in wlist:
- events_dict[fd] = events_dict.get(fd, 0) | POLLOUT
- for fd in xlist:
- events_dict[fd] = events_dict.get(fd, 0) | (POLLERR |
- POLLHUP |
- POLLNVAL)
- return list(events_dict.items())
-
-
-SelectPoll = _SelectSelect
-# If eventlet/gevent isn't used, select.poll can be used directly by
-# replacing SelectPoll above with the select.poll class:
-# SelectPoll = select.poll
-
-
-class Poller(object):
- """High-level wrapper around the "poll" system call.
-
- Intended usage is for the program's main loop to go about its business
- servicing whatever events it needs to. Then, when it runs out of immediate
- tasks, it calls each subordinate module or object's "wait" function, which
- in turn calls one (or more) of the functions Poller.fd_wait(),
- Poller.immediate_wake(), and Poller.timer_wait() to register to be awakened
- when the appropriate event occurs. Then the main loop calls
- Poller.block(), which blocks until one of the registered events happens."""
-
- def __init__(self):
- self.__reset()
-
- def fd_wait(self, fd, events):
- """Registers 'fd' as waiting for the specified 'events' (which should
- be select.POLLIN or select.POLLOUT or their bitwise-OR). The following
- call to self.block() will wake up when 'fd' becomes ready for one or
- more of the requested events.
-
- The event registration is one-shot: only the following call to
- self.block() is affected. The event will need to be re-registered
- after self.block() is called if it is to persist.
-
- 'fd' may be an integer file descriptor or an object with a fileno()
- method that returns an integer file descriptor."""
- self.poll.register(fd, events)
-
- def __timer_wait(self, msec):
- if self.timeout < 0 or msec < self.timeout:
- self.timeout = msec
-
- def timer_wait(self, msec):
- """Causes the following call to self.block() to block for no more than
- 'msec' milliseconds. If 'msec' is nonpositive, the following call to
- self.block() will not block at all.
-
- The timer registration is one-shot: only the following call to
- self.block() is affected. The timer will need to be re-registered
- after self.block() is called if it is to persist."""
- if msec <= 0:
- self.immediate_wake()
- else:
- self.__timer_wait(msec)
-
- def timer_wait_until(self, msec):
- """Causes the following call to self.block() to wake up when the
- current time, as returned by ovs.timeval.msec(), reaches 'msec' or
- later. If 'msec' is earlier than the current time, the following call
- to self.block() will not block at all.
-
- The timer registration is one-shot: only the following call to
- self.block() is affected. The timer will need to be re-registered
- after self.block() is called if it is to persist."""
- now = ovs.timeval.msec()
- if msec <= now:
- self.immediate_wake()
- else:
- self.__timer_wait(msec - now)
-
- def immediate_wake(self):
- """Causes the following call to self.block() to wake up immediately,
- without blocking."""
- self.timeout = 0
-
- def block(self):
- """Blocks until one or more of the events registered with
- self.fd_wait() occurs, or until the minimum duration registered with
- self.timer_wait() elapses, or not at all if self.immediate_wake() has
- been called."""
- try:
- try:
- events = self.poll.poll(self.timeout)
- self.__log_wakeup(events)
- except OSError as e:
- """ On Windows, the select function from poll raises OSError
- exception if the polled array is empty."""
- if e.errno != errno.EINTR:
- vlog.err("poll: %s" % os.strerror(e.errno))
- except select.error as e:
- # XXX rate-limit
- error, msg = e
- if error != errno.EINTR:
- vlog.err("poll: %s" % e[1])
- finally:
- self.__reset()
-
- def __log_wakeup(self, events):
- if not events:
- vlog.dbg("%d-ms timeout" % self.timeout)
- else:
- for fd, revents in events:
- if revents != 0:
- s = ""
- if revents & POLLIN:
- s += "[POLLIN]"
- if revents & POLLOUT:
- s += "[POLLOUT]"
- if revents & POLLERR:
- s += "[POLLERR]"
- if revents & POLLHUP:
- s += "[POLLHUP]"
- if revents & POLLNVAL:
- s += "[POLLNVAL]"
- vlog.dbg("%s on fd %d" % (s, fd))
-
- def __reset(self):
- self.poll = SelectPoll()
- self.timeout = -1
-
-
-def get_system_poll():
- """Returns the original select.poll() object. If select.poll is
- monkey patched by eventlet or gevent library, it gets the original
- select.poll and returns an object of it using the
- eventlet.patcher.original/gevent.monkey.get_original functions.
-
- As a last resort, if there is any exception it returns the
- SelectPoll() object.
- """
- try:
- if _using_eventlet_green_select():
- _system_poll = eventlet_patcher.original("select").poll
- elif gevent_monkey and gevent_monkey.is_object_patched(
- 'select', 'poll'):
- _system_poll = gevent_monkey.get_original('select', 'poll')
- else:
- _system_poll = select.poll
- except:
- _system_poll = SelectPoll
-
- return _system_poll()
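As the Poller docstring stresses, registrations are one-shot, so callers
re-register on every trip around the loop. A minimal sketch:

    import socket

    import ovs.poller

    sock = socket.socket()
    poller = ovs.poller.Poller()
    poller.fd_wait(sock, ovs.poller.POLLIN)  # One-shot; redo each iteration.
    poller.timer_wait(1000)                  # Wake after at most 1000 ms.
    poller.block()                           # Returns on readiness or timeout.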
diff --git a/python/ovs/process.py b/python/ovs/process.py
deleted file mode 100644
index d7561310c..000000000
--- a/python/ovs/process.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright (c) 2010, 2011 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import signal
-
-
-def _signal_status_msg(type_, signr):
- s = "%s by signal %d" % (type_, signr)
- for name in signal.__dict__:
- if name.startswith("SIG") and getattr(signal, name) == signr:
- return "%s (%s)" % (s, name)
- return s
-
-
-def status_msg(status):
- """Given 'status', which is a process status in the form reported by
- waitpid(2) and returned by process_status(), returns a string describing
- how the process terminated."""
- if os.WIFEXITED(status):
- s = "exit status %d" % os.WEXITSTATUS(status)
- elif os.WIFSIGNALED(status):
- s = _signal_status_msg("killed", os.WTERMSIG(status))
- elif os.WIFSTOPPED(status):
- s = _signal_status_msg("stopped", os.WSTOPSIG(status))
- else:
- s = "terminated abnormally (%x)" % status
- if os.WCOREDUMP(status):
- s += ", core dumped"
- return s
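A quick sketch of status_msg() on a POSIX system (os.fork() and os.waitpid()
are Unix-only):

    import os

    import ovs.process

    pid = os.fork()
    if pid == 0:
        os._exit(3)                          # Child exits with status 3.
    _, status = os.waitpid(pid, 0)
    print(ovs.process.status_msg(status))    # -> "exit status 3"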
diff --git a/python/ovs/reconnect.py b/python/ovs/reconnect.py
deleted file mode 100644
index 574db7fdd..000000000
--- a/python/ovs/reconnect.py
+++ /dev/null
@@ -1,608 +0,0 @@
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import ovs.util
-import ovs.vlog
-
-# Values returned by Reconnect.run()
-CONNECT = 'connect'
-DISCONNECT = 'disconnect'
-PROBE = 'probe'
-
-EOF = ovs.util.EOF
-vlog = ovs.vlog.Vlog("reconnect")
-
-
-class Reconnect(object):
- """A finite-state machine for connecting and reconnecting to a network
- resource with exponential backoff. It also provides optional support for
- detecting a connection on which the peer is no longer responding.
-
- The library does not implement anything networking related, only an FSM for
- networking code to use.
-
- Many Reconnect methods take a "now" argument. This makes testing easier
- since there is no hidden state. When not testing, just pass the return
-    value of ovs.timeval.msec(). (Perhaps this design should be revisited
- later.)"""
-
- class Void(object):
- name = "VOID"
- is_connected = False
-
- @staticmethod
- def deadline(fsm):
- return None
-
- @staticmethod
- def run(fsm, now):
- return None
-
- class Listening(object):
- name = "LISTENING"
- is_connected = False
-
- @staticmethod
- def deadline(fsm):
- return None
-
- @staticmethod
- def run(fsm, now):
- return None
-
- class Backoff(object):
- name = "BACKOFF"
- is_connected = False
-
- @staticmethod
- def deadline(fsm):
- return fsm.state_entered + fsm.backoff
-
- @staticmethod
- def run(fsm, now):
- return CONNECT
-
- class ConnectInProgress(object):
- name = "CONNECTING"
- is_connected = False
-
- @staticmethod
- def deadline(fsm):
- return fsm.state_entered + max(1000, fsm.backoff)
-
- @staticmethod
- def run(fsm, now):
- return DISCONNECT
-
- class Active(object):
- name = "ACTIVE"
- is_connected = True
-
- @staticmethod
- def deadline(fsm):
- if fsm.probe_interval:
- base = max(fsm.last_activity, fsm.state_entered)
- return base + fsm.probe_interval
- return None
-
- @staticmethod
- def run(fsm, now):
- vlog.dbg("%s: idle %d ms, sending inactivity probe"
- % (fsm.name,
- now - max(fsm.last_activity, fsm.state_entered)))
- fsm._transition(now, Reconnect.Idle)
- return PROBE
-
- class Idle(object):
- name = "IDLE"
- is_connected = True
-
- @staticmethod
- def deadline(fsm):
- if fsm.probe_interval:
- return fsm.state_entered + fsm.probe_interval
- return None
-
- @staticmethod
- def run(fsm, now):
- vlog.err("%s: no response to inactivity probe after %.3g "
- "seconds, disconnecting"
- % (fsm.name, (now - fsm.state_entered) / 1000.0))
- return DISCONNECT
-
- class Reconnect(object):
- name = "RECONNECT"
- is_connected = False
-
- @staticmethod
- def deadline(fsm):
- return fsm.state_entered
-
- @staticmethod
- def run(fsm, now):
- return DISCONNECT
-
- def __init__(self, now):
- """Creates and returns a new reconnect FSM with default settings. The
- FSM is initially disabled. The caller will likely want to call
- self.enable() and self.set_name() on the returned object."""
-
- self.name = "void"
- self.min_backoff = 1000
- self.max_backoff = 8000
- self.probe_interval = 5000
- self.passive = False
- self.info_level = vlog.info
-
- self.state = Reconnect.Void
- self.state_entered = now
- self.backoff = 0
- self.last_activity = now
- self.last_connected = None
- self.last_disconnected = None
- self.max_tries = None
- self.backoff_free_tries = 0
-
- self.creation_time = now
- self.n_attempted_connections = 0
- self.n_successful_connections = 0
- self.total_connected_duration = 0
- self.seqno = 0
-
- def set_quiet(self, quiet):
- """If 'quiet' is true, this object will log informational messages at
- debug level, by default keeping them out of log files. This is
- appropriate if the connection is one that is expected to be
- short-lived, so that the log messages are merely distracting.
-
- If 'quiet' is false, this object logs informational messages at info
- level. This is the default.
-
- This setting has no effect on the log level of debugging, warning, or
- error messages."""
- if quiet:
- self.info_level = vlog.dbg
- else:
- self.info_level = vlog.info
-
- def get_name(self):
- return self.name
-
- def set_name(self, name):
- """Sets this object's name to 'name'. If 'name' is None, then "void"
- is used instead.
-
- The name is used in log messages."""
- if name is None:
- self.name = "void"
- else:
- self.name = name
-
- def get_min_backoff(self):
- """Return the minimum number of milliseconds to back off between
- consecutive connection attempts. The default is 1000 ms."""
- return self.min_backoff
-
- def get_max_backoff(self):
- """Return the maximum number of milliseconds to back off between
- consecutive connection attempts. The default is 8000 ms."""
- return self.max_backoff
-
- def get_probe_interval(self):
- """Returns the "probe interval" in milliseconds. If this is zero, it
- disables the connection keepalive feature. If it is nonzero, then if
- the interval passes while the FSM is connected and without
- self.activity() being called, self.run() returns ovs.reconnect.PROBE.
- If the interval passes again without self.activity() being called,
- self.run() returns ovs.reconnect.DISCONNECT."""
- return self.probe_interval
-
- def set_max_tries(self, max_tries):
- """Limits the maximum number of times that this object will ask the
- client to try to reconnect to 'max_tries'. None (the default) means an
- unlimited number of tries.
-
- After the number of tries has expired, the FSM will disable itself
- instead of backing off and retrying."""
- self.max_tries = max_tries
-
- def get_max_tries(self):
- """Returns the current remaining number of connection attempts,
- None if the number is unlimited."""
- return self.max_tries
-
- def set_backoff(self, min_backoff, max_backoff):
- """Configures the backoff parameters for this FSM. 'min_backoff' is
- the minimum number of milliseconds, and 'max_backoff' is the maximum,
- between connection attempts.
-
- 'min_backoff' must be at least 1000, and 'max_backoff' must be greater
- than or equal to 'min_backoff'."""
- self.min_backoff = max(min_backoff, 1000)
-        if max_backoff:
- self.max_backoff = max(max_backoff, 1000)
- else:
- self.max_backoff = 8000
- if self.min_backoff > self.max_backoff:
- self.max_backoff = self.min_backoff
-
- if (self.state == Reconnect.Backoff and
- self.backoff > self.max_backoff):
- self.backoff = self.max_backoff
-
- def set_backoff_free_tries(self, backoff_free_tries):
- """Sets the number of connection attempts that will be made without
- backoff to 'backoff_free_tries'. Values 0 and 1 both
- represent a single attempt."""
- self.backoff_free_tries = backoff_free_tries
-
- def set_probe_interval(self, probe_interval):
- """Sets the "probe interval" to 'probe_interval', in milliseconds. If
- this is zero, it disables the connection keepalive feature. If it is
- nonzero, then if the interval passes while this FSM is connected and
- without self.activity() being called, self.run() returns
- ovs.reconnect.PROBE. If the interval passes again without
- self.activity() being called, self.run() returns
- ovs.reconnect.DISCONNECT.
-
- If 'probe_interval' is nonzero, then it will be forced to a value of at
- least 1000 ms."""
- if probe_interval:
- self.probe_interval = max(1000, probe_interval)
- else:
- self.probe_interval = 0
-
- def is_passive(self):
- """Returns true if 'fsm' is in passive mode, false if 'fsm' is in
- active mode (the default)."""
- return self.passive
-
- def set_passive(self, passive, now):
- """Configures this FSM for active or passive mode. In active mode (the
- default), the FSM is attempting to connect to a remote host. In
- passive mode, the FSM is listening for connections from a remote
- host."""
- if self.passive != passive:
- self.passive = passive
-
- if ((passive and self.state in (Reconnect.ConnectInProgress,
- Reconnect.Reconnect)) or
- (not passive and self.state == Reconnect.Listening
- and self.__may_retry())):
- self._transition(now, Reconnect.Backoff)
- self.backoff = 0
-
- def is_enabled(self):
- """Returns true if this FSM has been enabled with self.enable().
- Calling another function that indicates a change in connection state,
- such as self.disconnected() or self.force_reconnect(), will also enable
- a reconnect FSM."""
- return self.state != Reconnect.Void
-
- def enable(self, now):
- """If this FSM is disabled (the default for newly created FSMs),
-        enables it, so that the next call to self.run() will
- return ovs.reconnect.CONNECT.
-
- If this FSM is not disabled, this function has no effect."""
- if self.state == Reconnect.Void and self.__may_retry():
- self._transition(now, Reconnect.Backoff)
- self.backoff = 0
-
- def disable(self, now):
- """Disables this FSM. Until 'fsm' is enabled again, self.run() will
-        always return None."""
- if self.state != Reconnect.Void:
- self._transition(now, Reconnect.Void)
-
- def force_reconnect(self, now):
- """If this FSM is enabled and currently connected (or attempting to
- connect), forces self.run() to return ovs.reconnect.DISCONNECT the next
- time it is called, which should cause the client to drop the connection
- (or attempt), back off, and then reconnect."""
- if self.state in (Reconnect.ConnectInProgress,
- Reconnect.Active,
- Reconnect.Idle):
- self._transition(now, Reconnect.Reconnect)
-
- def disconnected(self, now, error):
- """Tell this FSM that the connection dropped or that a connection
- attempt failed. 'error' specifies the reason: a positive value
- represents an errno value, EOF indicates that the connection was closed
- by the peer (e.g. read() returned 0), and 0 indicates no specific
- error.
-
- The FSM will back off, then reconnect."""
- if self.state not in (Reconnect.Backoff, Reconnect.Void):
- # Report what happened
- if self.state in (Reconnect.Active, Reconnect.Idle):
- if error > 0:
- vlog.warn("%s: connection dropped (%s)"
- % (self.name, os.strerror(error)))
- elif error == EOF:
- self.info_level("%s: connection closed by peer"
- % self.name)
- else:
- self.info_level("%s: connection dropped" % self.name)
- elif self.state == Reconnect.Listening:
- if error > 0:
- vlog.warn("%s: error listening for connections (%s)"
- % (self.name, os.strerror(error)))
- else:
- self.info_level("%s: error listening for connections"
- % self.name)
- elif self.state == Reconnect.Reconnect:
- self.info_level("%s: connection closed by client"
- % self.name)
- elif self.backoff < self.max_backoff:
- if self.passive:
- type_ = "listen"
- else:
- type_ = "connection"
- if error > 0:
- vlog.warn("%s: %s attempt failed (%s)"
- % (self.name, type_, os.strerror(error)))
- else:
- self.info_level("%s: %s attempt timed out"
- % (self.name, type_))
-
- if (self.state in (Reconnect.Active, Reconnect.Idle)):
- self.last_disconnected = now
-
- if not self.__may_retry():
- self._transition(now, Reconnect.Void)
- return
-
- # Back off
- if self.backoff_free_tries > 1:
- self.backoff_free_tries -= 1
- self.backoff = 0
- elif (self.state in (Reconnect.Active, Reconnect.Idle) and
- (self.last_activity - self.last_connected >= self.backoff or
- self.passive)):
- if self.passive:
- self.backoff = 0
- else:
- self.backoff = self.min_backoff
- else:
- if self.backoff < self.min_backoff:
- self.backoff = self.min_backoff
- elif self.backoff < self.max_backoff / 2:
- self.backoff *= 2
- if self.passive:
- action = "trying to listen again"
- else:
- action = "reconnect"
- self.info_level("%s: waiting %.3g seconds before %s"
- % (self.name, self.backoff / 1000.0,
- action))
- else:
- if self.backoff < self.max_backoff:
- if self.passive:
- action = "try to listen"
- else:
- action = "reconnect"
- self.info_level("%s: continuing to %s in the "
- "background but suppressing further "
- "logging" % (self.name, action))
- self.backoff = self.max_backoff
- self._transition(now, Reconnect.Backoff)
-
- def connecting(self, now):
- """Tell this FSM that a connection or listening attempt is in progress.
-
- The FSM will start a timer, after which the connection or listening
- attempt will be aborted (by returning ovs.reconnect.DISCONNECT from
- self.run())."""
- if self.state != Reconnect.ConnectInProgress:
- if self.passive:
- self.info_level("%s: listening..." % self.name)
- elif self.backoff < self.max_backoff:
- self.info_level("%s: connecting..." % self.name)
- self._transition(now, Reconnect.ConnectInProgress)
-
- def listening(self, now):
- """Tell this FSM that the client is listening for connection attempts.
-        This state lasts indefinitely until the client reports some change.
-
- The natural progression from this state is for the client to report
-        that a connection has been accepted or is in the process of being
- accepted, by calling self.connecting() or self.connected().
-
- The client may also report that listening failed (e.g. accept()
- returned an unexpected error such as ENOMEM) by calling
- self.listen_error(), in which case the FSM will back off and eventually
- return ovs.reconnect.CONNECT from self.run() to tell the client to try
- listening again."""
- if self.state != Reconnect.Listening:
- self.info_level("%s: listening..." % self.name)
- self._transition(now, Reconnect.Listening)
-
- def listen_error(self, now, error):
- """Tell this FSM that the client's attempt to accept a connection
- failed (e.g. accept() returned an unexpected error such as ENOMEM).
-
- If the FSM is currently listening (self.listening() was called), it
- will back off and eventually return ovs.reconnect.CONNECT from
- self.run() to tell the client to try listening again. If there is an
- active connection, this will be delayed until that connection drops."""
- if self.state == Reconnect.Listening:
- self.disconnected(now, error)
-
- def connected(self, now):
- """Tell this FSM that the connection was successful.
-
- The FSM will start the probe interval timer, which is reset by
- self.activity(). If the timer expires, a probe will be sent (by
-        returning ovs.reconnect.PROBE from self.run()). If the timer expires
-        again without being reset, the connection will be aborted (by returning
-        ovs.reconnect.DISCONNECT from self.run())."""
- if not self.state.is_connected:
- self.connecting(now)
-
- self.info_level("%s: connected" % self.name)
- self._transition(now, Reconnect.Active)
- self.last_connected = now
-
- def connect_failed(self, now, error):
- """Tell this FSM that the connection attempt failed.
-
- The FSM will back off and attempt to reconnect."""
- self.connecting(now)
- self.disconnected(now, error)
-
- def activity(self, now):
- """Tell this FSM that some activity occurred on the connection. This
- resets the probe interval timer, so that the connection is known not to
- be idle."""
- if self.state != Reconnect.Active:
- self._transition(now, Reconnect.Active)
- self.last_activity = now
-
- def _transition(self, now, state):
- if self.state == Reconnect.ConnectInProgress:
- self.n_attempted_connections += 1
- if state == Reconnect.Active:
- self.n_successful_connections += 1
-
- connected_before = self.state.is_connected
- connected_now = state.is_connected
- if connected_before != connected_now:
- if connected_before:
- self.total_connected_duration += now - self.last_connected
- self.seqno += 1
-
- vlog.dbg("%s: entering %s" % (self.name, state.name))
- self.state = state
- self.state_entered = now
-
- def run(self, now):
- """Assesses whether any action should be taken on this FSM. The return
- value is one of:
-
- - None: The client need not take any action.
-
- - Active client, ovs.reconnect.CONNECT: The client should start a
- connection attempt and indicate this by calling
- self.connecting(). If the connection attempt has definitely
- succeeded, it should call self.connected(). If the connection
- attempt has definitely failed, it should call
- self.connect_failed().
-
- The FSM is smart enough to back off correctly after successful
- connections that quickly abort, so it is OK to call
- self.connected() after a low-level successful connection
- (e.g. connect()) even if the connection might soon abort due to a
- failure at a high-level (e.g. SSL negotiation failure).
-
- - Passive client, ovs.reconnect.CONNECT: The client should try to
- listen for a connection, if it is not already listening. It
- should call self.listening() if successful, otherwise
-          self.connecting() or self.connect_failed() if the attempt
- is in progress or definitely failed, respectively.
-
- A listening passive client should constantly attempt to accept a
- new connection and report an accepted connection with
- self.connected().
-
- - ovs.reconnect.DISCONNECT: The client should abort the current
- connection or connection attempt or listen attempt and call
- self.disconnected() or self.connect_failed() to indicate it.
-
- - ovs.reconnect.PROBE: The client should send some kind of request
- to the peer that will elicit a response, to ensure that the
- connection is indeed in working order. (This will only be
- returned if the "probe interval" is nonzero--see
- self.set_probe_interval())."""
-
- deadline = self.state.deadline(self)
- if deadline is not None and now >= deadline:
- return self.state.run(self, now)
- else:
- return None
-
- def wait(self, poller, now):
- """Causes the next call to poller.block() to wake up when self.run()
- should be called."""
- timeout = self.timeout(now)
- if timeout is not None and timeout >= 0:
- poller.timer_wait(timeout)
-
- def timeout(self, now):
- """Returns the number of milliseconds after which self.run() should be
- called if nothing else notable happens in the meantime, or None if this
- is currently unnecessary."""
- deadline = self.state.deadline(self)
- if deadline is not None:
- remaining = deadline - now
- return max(0, remaining)
- else:
- return None
-
- def is_connected(self):
- """Returns True if this FSM is currently believed to be connected, that
- is, if self.connected() was called more recently than any call to
- self.connect_failed() or self.disconnected() or self.disable(), and
- False otherwise."""
- return self.state.is_connected
-
- def get_last_connect_elapsed(self, now):
- """Returns the number of milliseconds since 'fsm' was last connected
- to its peer. Returns None if never connected."""
- if self.last_connected:
- return now - self.last_connected
- else:
- return None
-
- def get_last_disconnect_elapsed(self, now):
- """Returns the number of milliseconds since 'fsm' was last disconnected
- from its peer. Returns None if never disconnected."""
- if self.last_disconnected:
- return now - self.last_disconnected
- else:
- return None
-
- def get_stats(self, now):
- class Stats(object):
- pass
- stats = Stats()
- stats.creation_time = self.creation_time
- stats.last_connected = self.last_connected
- stats.last_disconnected = self.last_disconnected
- stats.last_activity = self.last_activity
- stats.backoff = self.backoff
- stats.seqno = self.seqno
- stats.is_connected = self.is_connected()
- stats.msec_since_connect = self.get_last_connect_elapsed(now)
- stats.msec_since_disconnect = self.get_last_disconnect_elapsed(now)
- stats.total_connected_duration = self.total_connected_duration
- if self.is_connected():
- stats.total_connected_duration += (
- self.get_last_connect_elapsed(now))
- stats.n_attempted_connections = self.n_attempted_connections
- stats.n_successful_connections = self.n_successful_connections
- stats.state = self.state.name
- stats.state_elapsed = now - self.state_entered
- return stats
-
- def __may_retry(self):
- if self.max_tries is None:
- return True
- elif self.max_tries > 0:
- self.max_tries -= 1
- return True
- else:
- return False
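Per the run() docstring above, the intended calling pattern looks roughly
like this (the remote name is illustrative):

    import ovs.reconnect
    import ovs.timeval

    now = ovs.timeval.msec()
    fsm = ovs.reconnect.Reconnect(now)
    fsm.set_name("tcp:127.0.0.1:6640")
    fsm.enable(now)

    action = fsm.run(ovs.timeval.msec())
    if action == ovs.reconnect.CONNECT:
        fsm.connecting(ovs.timeval.msec())
        # ...then fsm.connected(now) on success, or
        # fsm.connect_failed(now, error) on failure.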
diff --git a/python/ovs/socket_util.py b/python/ovs/socket_util.py
deleted file mode 100644
index 8f9d31825..000000000
--- a/python/ovs/socket_util.py
+++ /dev/null
@@ -1,335 +0,0 @@
-# Copyright (c) 2010, 2012, 2014, 2015 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-import os
-import os.path
-import random
-import socket
-import sys
-
-import ovs.fatal_signal
-import ovs.poller
-import ovs.vlog
-
-import six
-from six.moves import range
-
-if sys.platform == 'win32':
- import ovs.winutils as winutils
- import win32file
-
-vlog = ovs.vlog.Vlog("socket_util")
-
-
-def make_short_name(long_name):
- if long_name is None:
- return None
- long_name = os.path.abspath(long_name)
- long_dirname = os.path.dirname(long_name)
- tmpdir = os.getenv('TMPDIR', '/tmp')
- for x in range(0, 1000):
- link_name = \
- '%s/ovs-un-py-%d-%d' % (tmpdir, random.randint(0, 10000), x)
- try:
- os.symlink(long_dirname, link_name)
- ovs.fatal_signal.add_file_to_unlink(link_name)
- return os.path.join(link_name, os.path.basename(long_name))
- except OSError as e:
- if e.errno != errno.EEXIST:
- break
- raise Exception("Failed to create temporary symlink")
-
-
-def free_short_name(short_name):
- if short_name is None:
- return
- link_name = os.path.dirname(short_name)
- ovs.fatal_signal.unlink_file_now(link_name)
-
-
-def make_unix_socket(style, nonblock, bind_path, connect_path, short=False):
- """Creates a Unix domain socket in the given 'style' (either
- socket.SOCK_DGRAM or socket.SOCK_STREAM) that is bound to 'bind_path' (if
- 'bind_path' is not None) and connected to 'connect_path' (if 'connect_path'
- is not None). If 'nonblock' is true, the socket is made non-blocking.
-
- Returns (error, socket): on success 'error' is 0 and 'socket' is a new
- socket object, on failure 'error' is a positive errno value and 'socket' is
- None."""
-
- try:
- sock = socket.socket(socket.AF_UNIX, style)
- except socket.error as e:
- return get_exception_errno(e), None
-
- try:
- if nonblock:
- set_nonblocking(sock)
- if bind_path is not None:
- # Delete bind_path but ignore ENOENT.
- try:
- os.unlink(bind_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- return e.errno, None
-
- ovs.fatal_signal.add_file_to_unlink(bind_path)
- sock.bind(bind_path)
-
- try:
- os.fchmod(sock.fileno(), 0o700)
- except OSError:
- pass
- if connect_path is not None:
- try:
- sock.connect(connect_path)
- except socket.error as e:
- if get_exception_errno(e) != errno.EINPROGRESS:
- raise
- return 0, sock
- except socket.error as e:
- sock.close()
- if (bind_path is not None and
- os.path.exists(bind_path)):
- ovs.fatal_signal.unlink_file_now(bind_path)
- eno = ovs.socket_util.get_exception_errno(e)
- if (eno == "AF_UNIX path too long" and
- os.uname()[0] == "Linux"):
- short_connect_path = None
- short_bind_path = None
- connect_dirfd = None
- bind_dirfd = None
- # Try workaround using /proc/self/fd
- if connect_path is not None:
- dirname = os.path.dirname(connect_path)
- basename = os.path.basename(connect_path)
- try:
- connect_dirfd = os.open(dirname,
- os.O_DIRECTORY | os.O_RDONLY)
- except OSError as err:
- return get_exception_errno(err), None
- short_connect_path = "/proc/self/fd/%d/%s" % (connect_dirfd,
- basename)
-
- if bind_path is not None:
- dirname = os.path.dirname(bind_path)
- basename = os.path.basename(bind_path)
- try:
- bind_dirfd = os.open(dirname, os.O_DIRECTORY | os.O_RDONLY)
- except OSError as err:
- return get_exception_errno(err), None
- short_bind_path = "/proc/self/fd/%d/%s" % (bind_dirfd,
- basename)
-
- try:
- return make_unix_socket(style, nonblock, short_bind_path,
- short_connect_path)
- finally:
- if connect_dirfd is not None:
- os.close(connect_dirfd)
- if bind_dirfd is not None:
- os.close(bind_dirfd)
- elif (eno == "AF_UNIX path too long"):
- if short:
- return get_exception_errno(e), None
- short_bind_path = None
- try:
- short_bind_path = make_short_name(bind_path)
- short_connect_path = make_short_name(connect_path)
- except:
- free_short_name(short_bind_path)
- return errno.ENAMETOOLONG, None
- try:
- return make_unix_socket(style, nonblock, short_bind_path,
- short_connect_path, short=True)
- finally:
- free_short_name(short_bind_path)
- free_short_name(short_connect_path)
- else:
- return get_exception_errno(e), None
-
-
-def check_connection_completion(sock):
- if sys.platform == "win32":
- p = ovs.poller.SelectPoll()
- event = winutils.get_new_event(None, False, True, None)
- # Receive notification of readiness for writing, of completed
- # connection or multipoint join operation, and of socket closure.
- win32file.WSAEventSelect(sock, event,
- win32file.FD_WRITE |
- win32file.FD_CONNECT |
- win32file.FD_CLOSE)
- p.register(event, ovs.poller.POLLOUT)
- else:
- p = ovs.poller.get_system_poll()
- p.register(sock, ovs.poller.POLLOUT)
- pfds = p.poll(0)
- if len(pfds) == 1:
- revents = pfds[0][1]
- if revents & ovs.poller.POLLERR or revents & ovs.poller.POLLHUP:
- try:
- # The following should raise an exception.
- sock.send("\0".encode(), socket.MSG_DONTWAIT)
-
- # (Here's where we end up if it didn't.)
- # XXX rate-limit
- vlog.err("poll return POLLERR but send succeeded")
- return errno.EPROTO
- except socket.error as e:
- return get_exception_errno(e)
- else:
- return 0
- else:
- return errno.EAGAIN
-
-
-def is_valid_ipv4_address(address):
- try:
- socket.inet_pton(socket.AF_INET, address)
- except AttributeError:
- try:
- socket.inet_aton(address)
- except socket.error:
- return False
- except socket.error:
- return False
-
- return True
-
-
-def inet_parse_active(target, default_port):
- address = target.split(":")
- if len(address) >= 2:
- host_name = ":".join(address[0:-1]).lstrip('[').rstrip(']')
- port = int(address[-1])
- else:
- if default_port:
- port = default_port
- else:
- raise ValueError("%s: port number must be specified" % target)
- host_name = address[0]
- if not host_name:
- raise ValueError("%s: bad peer name format" % target)
- return (host_name, port)
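
For reference, a quick sketch (not part of the patch) of what
inet_parse_active() accepts; the targets below are illustrative:

    >>> inet_parse_active("127.0.0.1:6640", 0)
    ('127.0.0.1', 6640)
    >>> inet_parse_active("[::1]:6640", 0)
    ('::1', 6640)
    >>> inet_parse_active("127.0.0.1", 6640)  # falls back to default_port
    ('127.0.0.1', 6640)
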
-
-
-def inet_open_active(style, target, default_port, dscp):
- address = inet_parse_active(target, default_port)
- try:
- is_addr_inet = is_valid_ipv4_address(address[0])
- if is_addr_inet:
- sock = socket.socket(socket.AF_INET, style, 0)
- family = socket.AF_INET
- else:
- sock = socket.socket(socket.AF_INET6, style, 0)
- family = socket.AF_INET6
- except socket.error as e:
- return get_exception_errno(e), None
-
- try:
- set_nonblocking(sock)
- set_dscp(sock, family, dscp)
- try:
- sock.connect(address)
- except socket.error as e:
- error = get_exception_errno(e)
- if sys.platform == 'win32' and error == errno.WSAEWOULDBLOCK:
- # WSAEWOULDBLOCK would be the equivalent on Windows
- # for EINPROGRESS on Unix.
- error = errno.EINPROGRESS
- if error != errno.EINPROGRESS:
- raise
- return 0, sock
- except socket.error as e:
- sock.close()
- return get_exception_errno(e), None
-
-
-def get_exception_errno(e):
- """A lot of methods on Python socket objects raise socket.error, but that
- exception is documented as having two completely different forms of
- arguments: either a string or a (errno, string) tuple. We only want the
- errno."""
- if isinstance(e.args, tuple):
- return e.args[0]
- else:
- return errno.EPROTO
-
-
-null_fd = -1
-
-
-def get_null_fd():
- """Returns a readable and writable fd for /dev/null, if successful,
- otherwise a negative errno value. The caller must not close the returned
- fd (because the same fd will be handed out to subsequent callers)."""
- global null_fd
- if null_fd < 0:
- try:
-            # os.devnull is '/dev/null' on Unix and 'nul' on Windows, so
-            # this works on both platforms.
- null_fd = os.open(os.devnull, os.O_RDWR)
- except OSError as e:
- vlog.err("could not open %s: %s" % (os.devnull,
- os.strerror(e.errno)))
- return -e.errno
- return null_fd
-
-
-def write_fully(fd, buf):
- """Returns an (error, bytes_written) tuple where 'error' is 0 on success,
- otherwise a positive errno value, and 'bytes_written' is the number of
- bytes that were written before the error occurred. 'error' is 0 if and
- only if 'bytes_written' is len(buf)."""
- bytes_written = 0
- if len(buf) == 0:
- return 0, 0
- if six.PY3 and not isinstance(buf, six.binary_type):
- buf = six.binary_type(buf, 'utf-8')
- while True:
- try:
- retval = os.write(fd, buf)
- assert retval >= 0
- if retval == len(buf):
- return 0, bytes_written + len(buf)
- elif retval == 0:
- vlog.warn("write returned 0")
- return errno.EPROTO, bytes_written
- else:
- bytes_written += retval
-                buf = buf[retval:]
- except OSError as e:
- return e.errno, bytes_written
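
A minimal usage sketch (not part of the patch) pairing get_null_fd() and
write_fully() from above; note that the caller must not close the fd:

    import ovs.socket_util

    fd = ovs.socket_util.get_null_fd()
    assert fd >= 0, "get_null_fd() failed"
    error, n = ovs.socket_util.write_fully(fd, b"discarded payload")
    assert error == 0 and n == 17  # full 17-byte buffer written
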
-
-
-def set_nonblocking(sock):
- try:
- sock.setblocking(0)
- except socket.error as e:
- vlog.err("could not set nonblocking mode on socket: %s"
- % os.strerror(get_exception_errno(e)))
-
-
-def set_dscp(sock, family, dscp):
- if dscp > 63:
- raise ValueError("Invalid dscp %d" % dscp)
-
- val = dscp << 2
- if family == socket.AF_INET:
- sock.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, val)
- elif family == socket.AF_INET6:
- sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_TCLASS, val)
- else:
- raise ValueError('Invalid family %d' % family)
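
As a usage sketch (not part of the patch) of the non-blocking connect
pattern this module supports, assuming the ovs package from a configured
OVS source tree is importable; the host and port are placeholders:

    import errno
    import socket
    import time

    import ovs.socket_util

    # inet_open_active() returns immediately with a connecting socket.
    error, sock = ovs.socket_util.inet_open_active(
        socket.SOCK_STREAM, "127.0.0.1:6640", 0, 0)
    if error:
        raise OSError(error, "could not start connecting")

    # Poll until the connection completes (a real caller would use
    # ovs.poller instead of sleeping).
    while True:
        error = ovs.socket_util.check_connection_completion(sock)
        if error != errno.EAGAIN:
            break
        time.sleep(0.1)

    print("connected" if not error else "connect failed: %d" % error)
    sock.close()
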
diff --git a/python/ovs/stream.py b/python/ovs/stream.py
deleted file mode 100644
index c15be4b3e..000000000
--- a/python/ovs/stream.py
+++ /dev/null
@@ -1,831 +0,0 @@
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import errno
-import os
-import socket
-import sys
-
-import ovs.poller
-import ovs.socket_util
-import ovs.vlog
-
-import six
-
-try:
- from OpenSSL import SSL
-except ImportError:
- SSL = None
-
-if sys.platform == 'win32':
- import ovs.winutils as winutils
- import pywintypes
- import win32event
- import win32file
- import win32pipe
-
-vlog = ovs.vlog.Vlog("stream")
-
-
-def stream_or_pstream_needs_probes(name):
-    """Returns True if the stream or pstream specified by 'name' needs
-    periodic probes to verify connectivity.  For [p]streams which need
-    probes, it can take a long time to notice that the connection was
-    dropped.  Returns False if probes aren't needed, and None if 'name' is
-    invalid."""
-
- cls = Stream._find_method(name)
- if cls:
- return cls.needs_probes()
- elif PassiveStream.is_valid_name(name):
- return PassiveStream.needs_probes(name)
- else:
- return None
-
-
-class Stream(object):
-    """Bidirectional byte stream.  Unix domain sockets, TCP, and SSL are
-    implemented."""
-
- # States.
- __S_CONNECTING = 0
- __S_CONNECTED = 1
- __S_DISCONNECTED = 2
-
- # Kinds of events that one might wait for.
- W_CONNECT = 0 # Connect complete (success or failure).
- W_RECV = 1 # Data received.
- W_SEND = 2 # Send buffer room available.
-
- _SOCKET_METHODS = {}
-
- _SSL_private_key_file = None
- _SSL_certificate_file = None
- _SSL_ca_cert_file = None
-
- # Windows only
- _write = None # overlapped for write operation
- _read = None # overlapped for read operation
- _write_pending = False
- _read_pending = False
- _retry_connect = False
-
- @staticmethod
- def register_method(method, cls):
- Stream._SOCKET_METHODS[method + ":"] = cls
-
- @staticmethod
- def _find_method(name):
- for method, cls in six.iteritems(Stream._SOCKET_METHODS):
- if name.startswith(method):
- return cls
- return None
-
- @staticmethod
- def is_valid_name(name):
- """Returns True if 'name' is a stream name in the form "TYPE:ARGS" and
- TYPE is a supported stream type ("unix:", "tcp:" and "ssl:"),
- otherwise False."""
- return bool(Stream._find_method(name))
-
- def __init__(self, socket, name, status, pipe=None, is_server=False):
- self.socket = socket
- self.pipe = pipe
- if sys.platform == 'win32':
- if pipe is not None:
- # Flag to check if fd is a server HANDLE. In the case of a
- # server handle we have to issue a disconnect before closing
- # the actual handle.
- self._server = is_server
- suffix = name.split(":", 1)[1]
- suffix = ovs.util.abs_file_name(ovs.dirs.RUNDIR, suffix)
- self._pipename = winutils.get_pipe_name(suffix)
- self._read = pywintypes.OVERLAPPED()
- self._read.hEvent = winutils.get_new_event()
- self._write = pywintypes.OVERLAPPED()
- self._write.hEvent = winutils.get_new_event()
- else:
- self._wevent = winutils.get_new_event(bManualReset=False,
- bInitialState=False)
-
- self.name = name
- if status == errno.EAGAIN:
- self.state = Stream.__S_CONNECTING
- elif status == 0:
- self.state = Stream.__S_CONNECTED
- else:
- self.state = Stream.__S_DISCONNECTED
-
- self.error = 0
-
-    # Default value of the DSCP bits for connections between the controller
-    # and the manager.  IPTOS_PREC_INTERNETCONTROL = 0xc0, defined in
-    # <netinet/ip.h>, is used.
- IPTOS_PREC_INTERNETCONTROL = 0xc0
- DSCP_DEFAULT = IPTOS_PREC_INTERNETCONTROL >> 2
-
- @staticmethod
- def open(name, dscp=DSCP_DEFAULT):
- """Attempts to connect a stream to a remote peer. 'name' is a
- connection name in the form "TYPE:ARGS", where TYPE is an active stream
- class's name and ARGS are stream class-specific. The supported TYPEs
- include "unix", "tcp", and "ssl".
-
- Returns (error, stream): on success 'error' is 0 and 'stream' is the
- new Stream, on failure 'error' is a positive errno value and 'stream'
- is None.
-
- Never returns errno.EAGAIN or errno.EINPROGRESS. Instead, returns 0
- and a new Stream. The connect() method can be used to check for
- successful connection completion."""
- cls = Stream._find_method(name)
- if not cls:
- return errno.EAFNOSUPPORT, None
-
- suffix = name.split(":", 1)[1]
- if name.startswith("unix:"):
- suffix = ovs.util.abs_file_name(ovs.dirs.RUNDIR, suffix)
- if sys.platform == 'win32':
- pipename = winutils.get_pipe_name(suffix)
-
- if len(suffix) > 255:
-                # Return ENOENT if the name is too long.
- return errno.ENOENT, None
-
- try:
- # In case of "unix:" argument, the assumption is that
- # there is a file created in the path (suffix).
- open(suffix, 'r').close()
- except:
- return errno.ENOENT, None
-
- try:
- npipe = winutils.create_file(pipename)
- try:
- winutils.set_pipe_mode(npipe,
- win32pipe.PIPE_READMODE_BYTE)
- except pywintypes.error:
- return errno.ENOENT, None
- except pywintypes.error as e:
- if e.winerror == winutils.winerror.ERROR_PIPE_BUSY:
- # Pipe is busy, set the retry flag to true and retry
- # again during the connect function.
-                    Stream._retry_connect = True
- return 0, cls(None, name, errno.EAGAIN,
- pipe=win32file.INVALID_HANDLE_VALUE,
- is_server=False)
- return errno.ENOENT, None
- return 0, cls(None, name, 0, pipe=npipe, is_server=False)
-
- error, sock = cls._open(suffix, dscp)
- if error:
- return error, None
- else:
- err = ovs.socket_util.check_connection_completion(sock)
- if err == errno.EAGAIN or err == errno.EINPROGRESS:
- status = errno.EAGAIN
- err = 0
- elif err == 0:
- status = 0
- else:
- status = err
- return err, cls(sock, name, status)
-
- @staticmethod
- def _open(suffix, dscp):
-        raise NotImplementedError("This method must be overridden by a "
-                                  "subclass")
-
- @staticmethod
- def open_block(error_stream, timeout=None):
- """Blocks until a Stream completes its connection attempt, either
- succeeding or failing, but no more than 'timeout' milliseconds.
- (error, stream) should be the tuple returned by Stream.open().
- Negative value of 'timeout' means infinite waiting.
- Returns a tuple of the same form.
-
- Typical usage:
- error, stream = Stream.open_block(Stream.open("unix:/tmp/socket"))"""
-
- # Py3 doesn't support tuple parameter unpacking - PEP 3113
- error, stream = error_stream
- if not error:
- deadline = None
- if timeout is not None and timeout >= 0:
- deadline = ovs.timeval.msec() + timeout
- while True:
- error = stream.connect()
- if sys.platform == 'win32' and error == errno.WSAEWOULDBLOCK:
- # WSAEWOULDBLOCK would be the equivalent on Windows
- # for EAGAIN on Unix.
- error = errno.EAGAIN
- if error != errno.EAGAIN:
- break
- if deadline is not None and ovs.timeval.msec() > deadline:
- error = errno.ETIMEDOUT
- break
- stream.run()
- poller = ovs.poller.Poller()
- stream.run_wait(poller)
- stream.connect_wait(poller)
- if deadline is not None:
- poller.timer_wait_until(deadline)
- poller.block()
- if stream.socket is not None:
- assert error != errno.EINPROGRESS
-
- if error and stream:
- stream.close()
- stream = None
- return error, stream
-
- def close(self):
- if self.socket is not None:
- self.socket.close()
- if self.pipe is not None:
- if self._server:
- # Flush the pipe to allow the client to read the pipe
- # before disconnecting.
- win32pipe.FlushFileBuffers(self.pipe)
- win32pipe.DisconnectNamedPipe(self.pipe)
- winutils.close_handle(self.pipe, vlog.warn)
- winutils.close_handle(self._read.hEvent, vlog.warn)
- winutils.close_handle(self._write.hEvent, vlog.warn)
-
- def __scs_connecting(self):
- if self.socket is not None:
- retval = ovs.socket_util.check_connection_completion(self.socket)
- assert retval != errno.EINPROGRESS
- elif sys.platform == 'win32':
-            if self._retry_connect:
- try:
- self.pipe = winutils.create_file(self._pipename)
- self._retry_connect = False
- retval = 0
- except pywintypes.error as e:
- if e.winerror == winutils.winerror.ERROR_PIPE_BUSY:
- retval = errno.EAGAIN
- else:
- self._retry_connect = False
- retval = errno.ENOENT
- else:
-                # If _retry_connect is false, the pipe is already
-                # connected, so report success.
- retval = 0
-
- if retval == 0:
- self.state = Stream.__S_CONNECTED
- elif retval != errno.EAGAIN:
- self.state = Stream.__S_DISCONNECTED
- self.error = retval
-
- def connect(self):
- """Tries to complete the connection on this stream. If the connection
- is complete, returns 0 if the connection was successful or a positive
- errno value if it failed. If the connection is still in progress,
- returns errno.EAGAIN."""
-
- if self.state == Stream.__S_CONNECTING:
- self.__scs_connecting()
-
- if self.state == Stream.__S_CONNECTING:
- return errno.EAGAIN
- elif self.state == Stream.__S_CONNECTED:
- return 0
- else:
- assert self.state == Stream.__S_DISCONNECTED
- return self.error
-
- def recv(self, n):
- """Tries to receive up to 'n' bytes from this stream. Returns a
- (error, string) tuple:
-
- - If successful, 'error' is zero and 'string' contains between 1
- and 'n' bytes of data.
-
- - On error, 'error' is a positive errno value.
-
- - If the connection has been closed in the normal fashion or if 'n'
- is 0, the tuple is (0, "").
-
- The recv function will not block waiting for data to arrive. If no
- data have been received, it returns (errno.EAGAIN, "") immediately."""
-
- retval = self.connect()
- if retval != 0:
- return (retval, "")
- elif n == 0:
- return (0, "")
-
- if sys.platform == 'win32' and self.socket is None:
- return self.__recv_windows(n)
-
- try:
- return (0, self.socket.recv(n))
- except socket.error as e:
- return (ovs.socket_util.get_exception_errno(e), "")
-
- def __recv_windows(self, n):
- if self._read_pending:
- try:
- nBytesRead = winutils.get_overlapped_result(self.pipe,
- self._read,
- False)
- self._read_pending = False
- except pywintypes.error as e:
- if e.winerror == winutils.winerror.ERROR_IO_INCOMPLETE:
- # The operation is still pending, try again
- self._read_pending = True
- return (errno.EAGAIN, "")
- elif e.winerror in winutils.pipe_disconnected_errors:
- # If the pipe was disconnected, return 0.
- return (0, "")
- else:
- return (errno.EINVAL, "")
- else:
- (errCode, self._read_buffer) = winutils.read_file(self.pipe,
- n,
- self._read)
- if errCode:
- if errCode == winutils.winerror.ERROR_IO_PENDING:
- self._read_pending = True
- return (errno.EAGAIN, "")
- elif errCode in winutils.pipe_disconnected_errors:
- # If the pipe was disconnected, return 0.
- return (0, "")
- else:
- return (errCode, "")
-
- try:
- nBytesRead = winutils.get_overlapped_result(self.pipe,
- self._read,
- False)
- winutils.win32event.SetEvent(self._read.hEvent)
- except pywintypes.error as e:
- if e.winerror in winutils.pipe_disconnected_errors:
- # If the pipe was disconnected, return 0.
- return (0, "")
- else:
- return (e.winerror, "")
-
- recvBuffer = self._read_buffer[:nBytesRead]
- # recvBuffer will have the type memoryview in Python3.
- # We can use bytes to convert it to type bytes which works on
- # both Python2 and Python3.
- return (0, bytes(recvBuffer))
-
- def send(self, buf):
- """Tries to send 'buf' on this stream.
-
- If successful, returns the number of bytes sent, between 1 and
- len(buf). 0 is only a valid return value if len(buf) is 0.
-
- On error, returns a negative errno value.
-
- Will not block. If no bytes can be immediately accepted for
- transmission, returns -errno.EAGAIN immediately."""
-
- retval = self.connect()
- if retval != 0:
- return -retval
- elif len(buf) == 0:
- return 0
-
- # We must have bytes for sending.
- if isinstance(buf, six.text_type):
- buf = buf.encode('utf-8')
-
- if sys.platform == 'win32' and self.socket is None:
- return self.__send_windows(buf)
-
- try:
- return self.socket.send(buf)
- except socket.error as e:
- return -ovs.socket_util.get_exception_errno(e)
-
- def __send_windows(self, buf):
- if self._write_pending:
- try:
- nBytesWritten = winutils.get_overlapped_result(self.pipe,
- self._write,
- False)
- self._write_pending = False
- except pywintypes.error as e:
- if e.winerror == winutils.winerror.ERROR_IO_INCOMPLETE:
- # The operation is still pending, try again
-                    self._write_pending = True
- return -errno.EAGAIN
- elif e.winerror in winutils.pipe_disconnected_errors:
- # If the pipe was disconnected, return connection reset.
- return -errno.ECONNRESET
- else:
- return -errno.EINVAL
- else:
- (errCode, nBytesWritten) = winutils.write_file(self.pipe,
- buf,
- self._write)
- if errCode:
- if errCode == winutils.winerror.ERROR_IO_PENDING:
- self._write_pending = True
- return -errno.EAGAIN
- if (not nBytesWritten and
- errCode in winutils.pipe_disconnected_errors):
- # If the pipe was disconnected, return connection reset.
- return -errno.ECONNRESET
- return nBytesWritten
-
- def run(self):
- pass
-
- def run_wait(self, poller):
- pass
-
- def wait(self, poller, wait):
- assert wait in (Stream.W_CONNECT, Stream.W_RECV, Stream.W_SEND)
-
- if self.state == Stream.__S_DISCONNECTED:
- poller.immediate_wake()
- return
-
- if self.state == Stream.__S_CONNECTING:
- wait = Stream.W_CONNECT
-
- if sys.platform == 'win32':
- self.__wait_windows(poller, wait)
- return
-
- if wait == Stream.W_RECV:
- poller.fd_wait(self.socket, ovs.poller.POLLIN)
- else:
- poller.fd_wait(self.socket, ovs.poller.POLLOUT)
-
- def __wait_windows(self, poller, wait):
- if self.socket is not None:
- if wait == Stream.W_RECV:
- mask = (win32file.FD_READ |
- win32file.FD_ACCEPT |
- win32file.FD_CLOSE)
- event = ovs.poller.POLLIN
- else:
- mask = (win32file.FD_WRITE |
- win32file.FD_CONNECT |
- win32file.FD_CLOSE)
- event = ovs.poller.POLLOUT
-
- try:
- win32file.WSAEventSelect(self.socket,
- self._wevent,
- mask)
- except pywintypes.error as e:
- vlog.err("failed to associate events with socket: %s"
- % e.strerror)
- poller.fd_wait(self._wevent, event)
- else:
- if wait == Stream.W_RECV:
- if self._read:
- poller.fd_wait(self._read.hEvent, ovs.poller.POLLIN)
- elif wait == Stream.W_SEND:
- if self._write:
- poller.fd_wait(self._write.hEvent, ovs.poller.POLLOUT)
- elif wait == Stream.W_CONNECT:
- return
-
- def connect_wait(self, poller):
- self.wait(poller, Stream.W_CONNECT)
-
- def recv_wait(self, poller):
- self.wait(poller, Stream.W_RECV)
-
- def send_wait(self, poller):
- self.wait(poller, Stream.W_SEND)
-
- def __del__(self):
- # Don't delete the file: we might have forked.
- if self.socket is not None:
- self.socket.close()
- if self.pipe is not None:
- # Check if there are any remaining valid handles and close them
- if self.pipe:
- winutils.close_handle(self.pipe)
- if self._read.hEvent:
- winutils.close_handle(self._read.hEvent)
- if self._write.hEvent:
- winutils.close_handle(self._write.hEvent)
-
- @staticmethod
- def ssl_set_private_key_file(file_name):
- Stream._SSL_private_key_file = file_name
-
- @staticmethod
- def ssl_set_certificate_file(file_name):
- Stream._SSL_certificate_file = file_name
-
- @staticmethod
- def ssl_set_ca_cert_file(file_name):
- Stream._SSL_ca_cert_file = file_name
-
-
-class PassiveStream(object):
- # Windows only
-    connect = None  # overlapped for connect operation
- connect_pending = False
-
- @staticmethod
- def needs_probes(name):
-        return not name.startswith("punix:")
-
- @staticmethod
- def is_valid_name(name):
- """Returns True if 'name' is a passive stream name in the form
- "TYPE:ARGS" and TYPE is a supported passive stream type (currently
-        "punix:" or "ptcp:"), otherwise False."""
-        return name.startswith("punix:") or name.startswith("ptcp:")
-
- def __init__(self, sock, name, bind_path, pipe=None):
- self.name = name
- self.pipe = pipe
- self.socket = sock
- if pipe is not None:
- self.connect = pywintypes.OVERLAPPED()
- self.connect.hEvent = winutils.get_new_event()
- self.connect_pending = False
- suffix = name.split(":", 1)[1]
- suffix = ovs.util.abs_file_name(ovs.dirs.RUNDIR, suffix)
- self._pipename = winutils.get_pipe_name(suffix)
-
- self.bind_path = bind_path
-
- @staticmethod
- def open(name):
- """Attempts to start listening for remote stream connections. 'name'
-        is a connection name in the form "TYPE:ARGS", where TYPE is a passive
- stream class's name and ARGS are stream class-specific. Currently the
- supported values for TYPE are "punix" and "ptcp".
-
- Returns (error, pstream): on success 'error' is 0 and 'pstream' is the
- new PassiveStream, on failure 'error' is a positive errno value and
- 'pstream' is None."""
- if not PassiveStream.is_valid_name(name):
- return errno.EAFNOSUPPORT, None
-
- bind_path = name[6:]
- if name.startswith("punix:"):
- bind_path = ovs.util.abs_file_name(ovs.dirs.RUNDIR, bind_path)
- if sys.platform != 'win32':
- error, sock = ovs.socket_util.make_unix_socket(
- socket.SOCK_STREAM, True, bind_path, None)
- if error:
- return error, None
- else:
- # Branch used only on Windows
- try:
- open(bind_path, 'w').close()
- except:
- return errno.ENOENT, None
-
- pipename = winutils.get_pipe_name(bind_path)
- if len(pipename) > 255:
-                # Return ENOENT if the name is too long.
- return errno.ENOENT, None
-
- npipe = winutils.create_named_pipe(pipename)
- if not npipe:
- return errno.ENOENT, None
- return 0, PassiveStream(None, name, bind_path, pipe=npipe)
-
- elif name.startswith("ptcp:"):
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- remote = name.split(':')
- sock.bind((remote[1], int(remote[2])))
-
- else:
- raise Exception('Unknown connection string')
-
- try:
- sock.listen(10)
- except socket.error as e:
-            error = ovs.socket_util.get_exception_errno(e)
-            vlog.err("%s: listen: %s" % (name, os.strerror(error)))
-            sock.close()
-            return error, None
-
- return 0, PassiveStream(sock, name, bind_path)
-
- def close(self):
- """Closes this PassiveStream."""
- if self.socket is not None:
- self.socket.close()
- if self.pipe is not None:
- winutils.close_handle(self.pipe, vlog.warn)
- winutils.close_handle(self.connect.hEvent, vlog.warn)
- if self.bind_path is not None:
- ovs.fatal_signal.unlink_file_now(self.bind_path)
- self.bind_path = None
-
- def accept(self):
- """Tries to accept a new connection on this passive stream. Returns
- (error, stream): if successful, 'error' is 0 and 'stream' is the new
- Stream object, and on failure 'error' is a positive errno value and
- 'stream' is None.
-
- Will not block waiting for a connection. If no connection is ready to
- be accepted, returns (errno.EAGAIN, None) immediately."""
- if sys.platform == 'win32' and self.socket is None:
- return self.__accept_windows()
- while True:
- try:
- sock, addr = self.socket.accept()
- ovs.socket_util.set_nonblocking(sock)
- if (sys.platform != 'win32' and sock.family == socket.AF_UNIX):
- return 0, Stream(sock, "unix:%s" % addr, 0)
- return 0, Stream(sock, 'ptcp:%s:%s' % (addr[0],
- str(addr[1])), 0)
- except socket.error as e:
- error = ovs.socket_util.get_exception_errno(e)
- if sys.platform == 'win32' and error == errno.WSAEWOULDBLOCK:
- # WSAEWOULDBLOCK would be the equivalent on Windows
- # for EAGAIN on Unix.
- error = errno.EAGAIN
- if error != errno.EAGAIN:
- # XXX rate-limit
- vlog.dbg("accept: %s" % os.strerror(error))
- return error, None
-
- def __accept_windows(self):
- if self.connect_pending:
- try:
- winutils.get_overlapped_result(self.pipe, self.connect, False)
- except pywintypes.error as e:
- if e.winerror == winutils.winerror.ERROR_IO_INCOMPLETE:
- # The operation is still pending, try again
- self.connect_pending = True
- return errno.EAGAIN, None
- else:
- if self.pipe:
- win32pipe.DisconnectNamedPipe(self.pipe)
- return errno.EINVAL, None
- self.connect_pending = False
-
- error = winutils.connect_named_pipe(self.pipe, self.connect)
- if error:
- if error == winutils.winerror.ERROR_IO_PENDING:
- self.connect_pending = True
- return errno.EAGAIN, None
- elif error != winutils.winerror.ERROR_PIPE_CONNECTED:
- if self.pipe:
- win32pipe.DisconnectNamedPipe(self.pipe)
- self.connect_pending = False
- return errno.EINVAL, None
- else:
- win32event.SetEvent(self.connect.hEvent)
-
- npipe = winutils.create_named_pipe(self._pipename)
- if not npipe:
- return errno.ENOENT, None
-
- old_pipe = self.pipe
- self.pipe = npipe
- winutils.win32event.ResetEvent(self.connect.hEvent)
- return 0, Stream(None, self.name, 0, pipe=old_pipe)
-
- def wait(self, poller):
- if sys.platform != 'win32' or self.socket is not None:
- poller.fd_wait(self.socket, ovs.poller.POLLIN)
- else:
- poller.fd_wait(self.connect.hEvent, ovs.poller.POLLIN)
-
- def __del__(self):
- # Don't delete the file: we might have forked.
- if self.socket is not None:
- self.socket.close()
- if self.pipe is not None:
- # Check if there are any remaining valid handles and close them
- if self.pipe:
- winutils.close_handle(self.pipe)
-            if self.connect.hEvent:
-                winutils.close_handle(self.connect.hEvent)
-
-
-def usage(name):
- return """
-Active %s connection methods:
- unix:FILE Unix domain socket named FILE
-  tcp:HOST:PORT           TCP socket to HOST on port PORT
-  ssl:HOST:PORT           SSL socket to HOST on port PORT
-
-Passive %s connection methods:
- punix:FILE Listen on Unix domain socket FILE""" % (name, name)
-
-
-class UnixStream(Stream):
- @staticmethod
- def needs_probes():
- return False
-
- @staticmethod
- def _open(suffix, dscp):
- connect_path = suffix
- return ovs.socket_util.make_unix_socket(socket.SOCK_STREAM,
- True, None, connect_path)
-
-
-Stream.register_method("unix", UnixStream)
-
-
-class TCPStream(Stream):
- @staticmethod
- def needs_probes():
- return True
-
- @staticmethod
- def _open(suffix, dscp):
- error, sock = ovs.socket_util.inet_open_active(socket.SOCK_STREAM,
- suffix, 0, dscp)
- if not error:
- try:
- sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
- except socket.error as e:
- sock.close()
- return ovs.socket_util.get_exception_errno(e), None
- return error, sock
-
-
-Stream.register_method("tcp", TCPStream)
-
-
-class SSLStream(Stream):
- @staticmethod
- def needs_probes():
- return True
-
- @staticmethod
- def verify_cb(conn, cert, errnum, depth, ok):
- return ok
-
- @staticmethod
- def _open(suffix, dscp):
- error, sock = TCPStream._open(suffix, dscp)
- if error:
- return error, None
-
- # Create an SSL context
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- ctx.set_verify(SSL.VERIFY_PEER, SSLStream.verify_cb)
- ctx.set_options(SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
-        # If the client has not set the SSL configuration files,
-        # an exception will be raised.
- ctx.use_privatekey_file(Stream._SSL_private_key_file)
- ctx.use_certificate_file(Stream._SSL_certificate_file)
- ctx.load_verify_locations(Stream._SSL_ca_cert_file)
-
- ssl_sock = SSL.Connection(ctx, sock)
- ssl_sock.set_connect_state()
- return error, ssl_sock
-
- def connect(self):
- retval = super(SSLStream, self).connect()
-
- if retval:
- return retval
-
- # TCP Connection is successful. Now do the SSL handshake
- try:
- self.socket.do_handshake()
- except SSL.WantReadError:
- return errno.EAGAIN
- except SSL.SysCallError as e:
- return ovs.socket_util.get_exception_errno(e)
-
- return 0
-
- def recv(self, n):
- try:
- return super(SSLStream, self).recv(n)
- except SSL.WantReadError:
- return (errno.EAGAIN, "")
- except SSL.SysCallError as e:
- return (ovs.socket_util.get_exception_errno(e), "")
- except SSL.ZeroReturnError:
- return (0, "")
-
- def send(self, buf):
- try:
- return super(SSLStream, self).send(buf)
- except SSL.WantWriteError:
- return -errno.EAGAIN
- except SSL.SysCallError as e:
- return -ovs.socket_util.get_exception_errno(e)
-
-
-if SSL:
- # Register SSL only if the OpenSSL module is available
- Stream.register_method("ssl", SSLStream)
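
For reference, a minimal sketch (not part of the patch) of the Stream API
above; the socket path is hypothetical and a listener must already exist
there:

    import errno

    import ovs.stream

    error, stream = ovs.stream.Stream.open_block(
        ovs.stream.Stream.open("unix:/tmp/example.sock"), timeout=5000)
    if error:
        raise OSError(error, "could not connect")

    n = stream.send(b"hello")              # >= 0 bytes sent, or -errno
    if n < 0 and n != -errno.EAGAIN:
        raise OSError(-n, "send failed")

    error, data = stream.recv(4096)        # (0, b"...") or (errno, "")
    if not error:
        print("received %d bytes" % len(data))
    stream.close()
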
diff --git a/python/ovs/timeval.py b/python/ovs/timeval.py
deleted file mode 100644
index 9a0cf6762..000000000
--- a/python/ovs/timeval.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (c) 2009, 2010 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-import time
-
-try:
- import ctypes
-
- LIBRT = 'librt.so.1'
- clock_gettime_name = 'clock_gettime'
-
- if sys.platform.startswith("linux"):
- CLOCK_MONOTONIC = 1
- time_t = ctypes.c_long
- elif sys.platform.startswith("netbsd"):
- # NetBSD uses function renaming for ABI versioning. While the proper
- # way to get the appropriate version is of course "#include <time.h>",
- # it is difficult with ctypes. The following is appropriate for
- # recent versions of NetBSD, including NetBSD-6.
- LIBRT = 'libc.so.12'
- clock_gettime_name = '__clock_gettime50'
- CLOCK_MONOTONIC = 3
- time_t = ctypes.c_int64
- elif sys.platform.startswith("freebsd"):
- CLOCK_MONOTONIC = 4
- time_t = ctypes.c_int64
- else:
- raise Exception
-
- class timespec(ctypes.Structure):
- _fields_ = [
- ('tv_sec', time_t),
- ('tv_nsec', ctypes.c_long),
- ]
-
- librt = ctypes.CDLL(LIBRT)
- clock_gettime = getattr(librt, clock_gettime_name)
- clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)]
-except:
- # Librt shared library could not be loaded
- librt = None
-
-
-def monotonic():
- if not librt:
- return time.time()
-
- t = timespec()
- if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(t)) == 0:
- return t.tv_sec + t.tv_nsec * 1e-9
- # Kernel does not support CLOCK_MONOTONIC
- return time.time()
-
-
-# Use the stdlib time.monotonic() if available (Python >= 3.3); otherwise
-# fall back to the ctypes-based implementation above.
-if not hasattr(time, 'monotonic'):
- time.monotonic = monotonic
-
-
-def msec():
- """ Returns the system's monotonic time if possible, otherwise returns the
- current time as the amount of time since the epoch, in milliseconds, as a
- float."""
- return time.monotonic() * 1000.0
-
-
-def postfork():
- # Just a stub for now
- pass
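
msec() is what Stream.open_block() uses for its deadline; a trivial sketch
(not part of the patch):

    import ovs.timeval

    start = ovs.timeval.msec()
    # ... run some non-blocking operation ...
    if ovs.timeval.msec() - start > 5000:
        print("took longer than five seconds")
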
diff --git a/python/ovs/unixctl/__init__.py b/python/ovs/unixctl/__init__.py
deleted file mode 100644
index c2e5aca8d..000000000
--- a/python/ovs/unixctl/__init__.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-import ovs.util
-
-import six
-
-commands = {}
-strtypes = six.string_types
-
-
-class _UnixctlCommand(object):
- def __init__(self, usage, min_args, max_args, callback, aux):
- self.usage = usage
- self.min_args = min_args
- self.max_args = max_args
- self.callback = callback
- self.aux = aux
-
-
-def _unixctl_help(conn, unused_argv, unused_aux):
- reply = "The available commands are:\n"
- command_names = sorted(commands.keys())
- for name in command_names:
- reply += " "
- usage = commands[name].usage
- if usage:
- reply += "%-23s %s" % (name, usage)
- else:
- reply += name
- reply += "\n"
- conn.reply(reply)
-
-
-def command_register(name, usage, min_args, max_args, callback, aux):
- """ Registers a command with the given 'name' to be exposed by the
- UnixctlServer. 'usage' describes the arguments to the command; it is used
- only for presentation to the user in "help" output.
-
- 'callback' is called when the command is received. It is passed a
- UnixctlConnection object, the list of arguments as unicode strings, and
- 'aux'. Normally 'callback' should reply by calling
- UnixctlConnection.reply() or UnixctlConnection.reply_error() before it
- returns, but if the command cannot be handled immediately, then it can
- defer the reply until later. A given connection can only process a single
- request at a time, so a reply must be made eventually to avoid blocking
- that connection."""
-
- assert isinstance(name, strtypes)
- assert isinstance(usage, strtypes)
- assert isinstance(min_args, int)
- assert isinstance(max_args, int)
- assert callable(callback)
-
- if name not in commands:
- commands[name] = _UnixctlCommand(usage, min_args, max_args, callback,
- aux)
-
-
-def socket_name_from_target(target):
- assert isinstance(target, strtypes)
-
-    # On Windows an absolute path contains ':' (e.g., C:\\).
- if target.startswith('/') or target.find(':') > -1:
- return 0, target
-
- pidfile_name = "%s/%s.pid" % (ovs.dirs.RUNDIR, target)
- pid = ovs.daemon.read_pidfile(pidfile_name)
- if pid < 0:
- return -pid, "cannot read pidfile \"%s\"" % pidfile_name
-
- if sys.platform == "win32":
- return 0, "%s/%s.ctl" % (ovs.dirs.RUNDIR, target)
- else:
- return 0, "%s/%s.%d.ctl" % (ovs.dirs.RUNDIR, target, pid)
-
-
-command_register("help", "", 0, 0, _unixctl_help, None)
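
A sketch (not part of the patch) of registering a command with this
module; the command name and handler below are hypothetical:

    import ovs.unixctl

    def _echo(conn, argv, aux):
        # Reply with the arguments joined together; 'aux' is unused here.
        conn.reply(" ".join(argv) + "\n")

    # Takes between 1 and 2 arguments; the usage string shows up in "help".
    ovs.unixctl.command_register("example/echo", "ARG [ARG2]", 1, 2,
                                 _echo, None)
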
diff --git a/python/ovs/unixctl/client.py b/python/ovs/unixctl/client.py
deleted file mode 100644
index e07b0380c..000000000
--- a/python/ovs/unixctl/client.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import ovs.jsonrpc
-import ovs.stream
-import ovs.util
-import ovs.vlog
-
-import six
-
-vlog = ovs.vlog.Vlog("unixctl_client")
-strtypes = six.string_types
-
-
-class UnixctlClient(object):
- def __init__(self, conn):
- assert isinstance(conn, ovs.jsonrpc.Connection)
- self._conn = conn
-
- def transact(self, command, argv):
- assert isinstance(command, strtypes)
- assert isinstance(argv, list)
- for arg in argv:
- assert isinstance(arg, strtypes)
-
- request = ovs.jsonrpc.Message.create_request(command, argv)
- error, reply = self._conn.transact_block(request)
-
- if error:
- vlog.warn("error communicating with %s: %s"
- % (self._conn.name, os.strerror(error)))
- return error, None, None
-
- if reply.error is not None:
- return 0, str(reply.error), None
- else:
- assert reply.result is not None
- return 0, None, str(reply.result)
-
- def close(self):
- self._conn.close()
- self.conn = None
-        self._conn = None
- @staticmethod
- def create(path):
- assert isinstance(path, str)
-
- unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
- error, stream = ovs.stream.Stream.open_block(
- ovs.stream.Stream.open(unix))
-
- if error:
- vlog.warn("failed to connect to %s" % path)
- return error, None
-
- return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))
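
A sketch (not part of the patch) of driving a daemon with this client;
"ovs-vswitchd" stands in for any target with a pidfile in RUNDIR.  Note
that socket_name_from_target() relies on ovs.daemon without importing it,
so the caller imports it here:

    import ovs.daemon        # needed by socket_name_from_target()
    import ovs.unixctl
    import ovs.unixctl.client

    error, path = ovs.unixctl.socket_name_from_target("ovs-vswitchd")
    if error:
        raise OSError(error, path)

    error, client = ovs.unixctl.client.UnixctlClient.create(path)
    if error:
        raise OSError(error, "could not connect")

    err_no, cmd_error, result = client.transact("version", [])
    if err_no:
        print("transaction failed: %d" % err_no)
    elif cmd_error is not None:
        print("command error: %s" % cmd_error)
    else:
        print(result)
    client.close()
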
diff --git a/python/ovs/unixctl/server.py b/python/ovs/unixctl/server.py
deleted file mode 100644
index d5fb0807d..000000000
--- a/python/ovs/unixctl/server.py
+++ /dev/null
@@ -1,260 +0,0 @@
-# Copyright (c) 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import copy
-import errno
-import os
-import sys
-
-import ovs.dirs
-import ovs.jsonrpc
-import ovs.stream
-import ovs.unixctl
-import ovs.util
-import ovs.version
-import ovs.vlog
-
-import six
-from six.moves import range
-
-Message = ovs.jsonrpc.Message
-vlog = ovs.vlog.Vlog("unixctl_server")
-strtypes = six.string_types
-
-
-class UnixctlConnection(object):
- def __init__(self, rpc):
- assert isinstance(rpc, ovs.jsonrpc.Connection)
- self._rpc = rpc
- self._request_id = None
-
- def run(self):
- self._rpc.run()
- error = self._rpc.get_status()
- if error or self._rpc.get_backlog():
- return error
-
- for _ in range(10):
- if error or self._request_id:
- break
-
- error, msg = self._rpc.recv()
- if msg:
- if msg.type == Message.T_REQUEST:
- self._process_command(msg)
- else:
- # XXX: rate-limit
- vlog.warn("%s: received unexpected %s message"
- % (self._rpc.name,
- Message.type_to_string(msg.type)))
- error = errno.EINVAL
-
- if not error:
- error = self._rpc.get_status()
-
- return error
-
- def reply(self, body):
- self._reply_impl(True, body)
-
- def reply_error(self, body):
- self._reply_impl(False, body)
-
- # Called only by unixctl classes.
- def _close(self):
- self._rpc.close()
- self._request_id = None
-
- def _wait(self, poller):
- self._rpc.wait(poller)
- if not self._rpc.get_backlog():
- self._rpc.recv_wait(poller)
-
- def _reply_impl(self, success, body):
- assert isinstance(success, bool)
- assert body is None or isinstance(body, strtypes)
-
- assert self._request_id is not None
-
- if body is None:
- body = ""
-
- if body and not body.endswith("\n"):
- body += "\n"
-
- if success:
- reply = Message.create_reply(body, self._request_id)
- else:
- reply = Message.create_error(body, self._request_id)
-
- self._rpc.send(reply)
- self._request_id = None
-
- def _process_command(self, request):
- assert isinstance(request, ovs.jsonrpc.Message)
- assert request.type == ovs.jsonrpc.Message.T_REQUEST
-
- self._request_id = request.id
-
- error = None
- params = request.params
- method = request.method
- command = ovs.unixctl.commands.get(method)
- if command is None:
- error = '"%s" is not a valid command' % method
- elif len(params) < command.min_args:
- error = '"%s" command requires at least %d arguments' \
- % (method, command.min_args)
- elif len(params) > command.max_args:
- error = '"%s" command takes at most %d arguments' \
- % (method, command.max_args)
- else:
- for param in params:
- if not isinstance(param, strtypes):
- error = '"%s" command has non-string argument' % method
- break
-
- if error is None:
- unicode_params = [six.text_type(p) for p in params]
- command.callback(self, unicode_params, command.aux)
-
- if error:
- self.reply_error(error)
-
-
-def _unixctl_version(conn, unused_argv, version):
- assert isinstance(conn, UnixctlConnection)
- version = "%s (Open vSwitch) %s" % (ovs.util.PROGRAM_NAME, version)
- conn.reply(version)
-
-
-class UnixctlServer(object):
- def __init__(self, listener):
- assert isinstance(listener, ovs.stream.PassiveStream)
- self._listener = listener
- self._conns = []
-
- def run(self):
- for _ in range(10):
- error, stream = self._listener.accept()
- if sys.platform == "win32" and error == errno.WSAEWOULDBLOCK:
- # WSAEWOULDBLOCK would be the equivalent on Windows
- # for EAGAIN on Unix.
- error = errno.EAGAIN
- if not error:
- rpc = ovs.jsonrpc.Connection(stream)
- self._conns.append(UnixctlConnection(rpc))
- elif error == errno.EAGAIN:
- break
- else:
- # XXX: rate-limit
- vlog.warn("%s: accept failed: %s" % (self._listener.name,
- os.strerror(error)))
-
- for conn in copy.copy(self._conns):
- error = conn.run()
- if error and error != errno.EAGAIN:
- conn._close()
- self._conns.remove(conn)
-
- def wait(self, poller):
- self._listener.wait(poller)
- for conn in self._conns:
- conn._wait(poller)
-
- def close(self):
- for conn in self._conns:
- conn._close()
- self._conns = None
-
- self._listener.close()
- self._listener = None
-
- @staticmethod
- def create(path, version=None):
- """Creates a new UnixctlServer which listens on a unixctl socket
- created at 'path'. If 'path' is None, the default path is chosen.
- 'version' contains the version of the server as reported by the unixctl
- version command. If None, ovs.version.VERSION is used."""
-
- assert path is None or isinstance(path, strtypes)
-
- if path is not None:
- path = "punix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
- else:
- if sys.platform == "win32":
- path = "punix:%s/%s.ctl" % (ovs.dirs.RUNDIR,
- ovs.util.PROGRAM_NAME)
- else:
- path = "punix:%s/%s.%d.ctl" % (ovs.dirs.RUNDIR,
- ovs.util.PROGRAM_NAME,
- os.getpid())
-
- if version is None:
- version = ovs.version.VERSION
-
- error, listener = ovs.stream.PassiveStream.open(path)
- if error:
- ovs.util.ovs_error(error, "could not initialize control socket %s"
- % path)
- return error, None
-
- ovs.unixctl.command_register("version", "", 0, 0, _unixctl_version,
- version)
-
- return 0, UnixctlServer(listener)
-
-
-class UnixctlClient(object):
- def __init__(self, conn):
- assert isinstance(conn, ovs.jsonrpc.Connection)
- self._conn = conn
-
- def transact(self, command, argv):
- assert isinstance(command, strtypes)
- assert isinstance(argv, list)
- for arg in argv:
- assert isinstance(arg, strtypes)
-
- request = Message.create_request(command, argv)
- error, reply = self._conn.transact_block(request)
-
- if error:
- vlog.warn("error communicating with %s: %s"
- % (self._conn.name, os.strerror(error)))
- return error, None, None
-
- if reply.error is not None:
- return 0, str(reply.error), None
- else:
- assert reply.result is not None
- return 0, None, str(reply.result)
-
- def close(self):
- self._conn.close()
-        self._conn = None
-
- @staticmethod
- def create(path):
- assert isinstance(path, str)
-
- unix = "unix:%s" % ovs.util.abs_file_name(ovs.dirs.RUNDIR, path)
- error, stream = ovs.stream.Stream.open_block(
- ovs.stream.Stream.open(unix))
-
- if error:
- vlog.warn("failed to connect to %s" % path)
- return error, None
-
- return 0, UnixctlClient(ovs.jsonrpc.Connection(stream))
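
A sketch (not part of the patch) of the server-side loop; the
control-socket path is hypothetical, and ovs.version is generated at build
time, so this only runs from a built tree:

    import ovs.poller
    import ovs.unixctl.server

    error, server = ovs.unixctl.server.UnixctlServer.create(
        "/tmp/example.ctl")
    if error:
        raise OSError(error, "could not create control socket")

    while True:
        server.run()                   # accept connections, run commands
        poller = ovs.poller.Poller()
        server.wait(poller)
        poller.block()                 # sleep until there is work to do
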
diff --git a/python/ovs/util.py b/python/ovs/util.py
deleted file mode 100644
index 3dba022f8..000000000
--- a/python/ovs/util.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import os.path
-import sys
-
-PROGRAM_NAME = os.path.basename(sys.argv[0])
-EOF = -1
-
-
-def abs_file_name(dir_, file_name):
- """If 'file_name' starts with '/', returns a copy of 'file_name'.
- Otherwise, returns an absolute path to 'file_name' considering it relative
- to 'dir_', which itself must be absolute. 'dir_' may be None or the empty
- string, in which case the current working directory is used.
-
- Returns None if 'dir_' is None and getcwd() fails.
-
- This differs from os.path.abspath() in that it will never change the
- meaning of a file name.
-
-    On Windows an absolute path contains ':' (e.g., C:\\)."""
- if file_name.startswith('/') or file_name.find(':') > -1:
- return file_name
- else:
- if dir_ is None or dir_ == "":
- try:
- dir_ = os.getcwd()
- except OSError:
- return None
-
- if dir_.endswith('/'):
- return dir_ + file_name
- else:
- return "%s/%s" % (dir_, file_name)
-
-
-def ovs_retval_to_string(retval):
- """Many OVS functions return an int which is one of:
- - 0: no error yet
- - >0: errno value
- - EOF: end of file (not necessarily an error; depends on the function
- called)
-
- Returns the appropriate human-readable string."""
-
- if not retval:
- return ""
- if retval > 0:
- return os.strerror(retval)
- if retval == EOF:
- return "End of file"
- return "***unknown return value: %s***" % retval
-
-
-def ovs_error(err_no, message, vlog=None):
- """Prints 'message' on stderr and emits an ERROR level log message to
- 'vlog' if supplied. If 'err_no' is nonzero, then it is formatted with
- ovs_retval_to_string() and appended to the message inside parentheses.
-
- 'message' should not end with a new-line, because this function will add
- one itself."""
-
- err_msg = "%s: %s" % (PROGRAM_NAME, message)
- if err_no:
- err_msg += " (%s)" % ovs_retval_to_string(err_no)
-
- sys.stderr.write("%s\n" % err_msg)
- if vlog:
- vlog.err(err_msg)
-
-
-def ovs_fatal(*args, **kwargs):
- """Prints 'message' on stderr and emits an ERROR level log message to
- 'vlog' if supplied. If 'err_no' is nonzero, then it is formatted with
- ovs_retval_to_string() and appended to the message inside parentheses.
- Then, terminates with exit code 1 (indicating a failure).
-
- 'message' should not end with a new-line, because this function will add
- one itself."""
-
- ovs_error(*args, **kwargs)
- sys.exit(1)
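
A sketch (not part of the patch) of abs_file_name()'s behavior, which,
unlike os.path.abspath(), never reinterprets a name:

    import ovs.util

    assert ovs.util.abs_file_name("/var/run", "/etc/foo") == "/etc/foo"
    assert ovs.util.abs_file_name("/var/run", "x.sock") == "/var/run/x.sock"
    assert ovs.util.abs_file_name("/var/run/", "x.sock") == "/var/run/x.sock"
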
diff --git a/python/ovs/vlog.py b/python/ovs/vlog.py
deleted file mode 100644
index ae5156d60..000000000
--- a/python/ovs/vlog.py
+++ /dev/null
@@ -1,475 +0,0 @@
-
-# Copyright (c) 2011, 2012, 2013, 2015, 2016 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import logging
-import logging.handlers
-import os
-import re
-import socket
-import sys
-import threading
-
-import ovs.dirs
-import ovs.unixctl
-import ovs.util
-
-import six
-from six.moves import range
-
-DESTINATIONS = {"console": "info", "file": "info", "syslog": "info"}
-PATTERNS = {
- "console": "%D{%Y-%m-%dT%H:%M:%SZ}|%05N|%c%T|%p|%m",
- "file": "%D{%Y-%m-%dT%H:%M:%S.###Z}|%05N|%c%T|%p|%m",
- "syslog": "ovs|%05N|%c%T|%p|%m",
-}
-LEVELS = {
- "dbg": logging.DEBUG,
- "info": logging.INFO,
- "warn": logging.WARNING,
- "err": logging.ERROR,
- "emer": logging.CRITICAL,
- "off": logging.CRITICAL
-}
-FACILITIES = ['auth', 'authpriv', 'cron', 'daemon', 'ftp', 'kern', 'lpr',
- 'mail', 'news', 'syslog', 'user', 'uucp', 'local0', 'local1',
- 'local2', 'local3', 'local4', 'local5', 'local6', 'local7']
-syslog_facility = "daemon"
-syslog_handler = ''
-
-
-def get_level(level_str):
- return LEVELS.get(level_str.lower())
-
-
-class Vlog(object):
- __inited = False
- __msg_num = 0
- __start_time = 0
- __mfl = {} # Module -> destination -> level
- __log_file = None
- __file_handler = None
- __log_patterns = PATTERNS
-
- def __init__(self, name):
- """Creates a new Vlog object representing a module called 'name'. The
- created Vlog object will do nothing until the Vlog.init() static method
- is called. Once called, no more Vlog objects may be created."""
-
- assert not Vlog.__inited
- self.name = name.lower()
-        if self.name not in Vlog.__mfl:
- Vlog.__mfl[self.name] = DESTINATIONS.copy()
-
- def __log(self, level, message, **kwargs):
- if not Vlog.__inited:
- return
-
- level_num = LEVELS.get(level.lower(), logging.DEBUG)
- msg_num = Vlog.__msg_num
- Vlog.__msg_num += 1
-
- for f, f_level in six.iteritems(Vlog.__mfl[self.name]):
- f_level = LEVELS.get(f_level, logging.CRITICAL)
- if level_num >= f_level:
- msg = self._build_message(message, f, level, msg_num)
- logging.getLogger(f).log(level_num, msg, **kwargs)
-
- def _build_message(self, message, destination, level, msg_num):
- pattern = self.__log_patterns[destination]
- tmp = pattern
-
- tmp = self._format_time(tmp)
-
- matches = re.findall("(%-?[0]?[0-9]?[AcmNnpPrtT])", tmp)
- for m in matches:
- if "A" in m:
- tmp = self._format_field(tmp, m, ovs.util.PROGRAM_NAME)
- elif "c" in m:
- tmp = self._format_field(tmp, m, self.name)
- elif "m" in m:
- tmp = self._format_field(tmp, m, message)
- elif "N" in m:
- tmp = self._format_field(tmp, m, str(msg_num))
- elif "n" in m:
- tmp = re.sub(m, "\n", tmp)
- elif "p" in m:
- tmp = self._format_field(tmp, m, level.upper())
- elif "P" in m:
-                tmp = self._format_field(tmp, m, str(os.getpid()))
- elif "r" in m:
- now = datetime.datetime.utcnow()
- delta = now - self.__start_time
- ms = delta.microseconds / 1000
- tmp = self._format_field(tmp, m, str(ms))
- elif "t" in m:
- subprogram = threading.currentThread().getName()
- if subprogram == "MainThread":
- subprogram = "main"
- tmp = self._format_field(tmp, m, subprogram)
- elif "T" in m:
- subprogram = threading.currentThread().getName()
- if not subprogram == "MainThread":
- subprogram = "({})".format(subprogram)
- else:
- subprogram = ""
- tmp = self._format_field(tmp, m, subprogram)
- return tmp.strip()
-
- def _format_field(self, tmp, match, replace):
- formatting = re.compile("^%(0)?([1-9])?")
- matches = formatting.match(match)
- # Do we need to apply padding?
- if not matches.group(1) and replace != "":
- replace = replace.center(len(replace) + 2)
- # Does the field have a minimum width
- if matches.group(2):
- min_width = int(matches.group(2))
- if len(replace) < min_width:
- replace = replace.center(min_width)
- return re.sub(match, replace.replace('\\', r'\\'), tmp)
-
- def _format_time(self, tmp):
- date_regex = re.compile(r'(%(0?[1-9]?[dD])(\{(.*)\})?)')
- match = date_regex.search(tmp)
-
- if match is None:
- return tmp
-
- # UTC date or Local TZ?
- if match.group(2) == "d":
- now = datetime.datetime.now()
- elif match.group(2) == "D":
- now = datetime.datetime.utcnow()
-
- # Custom format or ISO format?
- if match.group(3):
- time = datetime.date.strftime(now, match.group(4))
- try:
- i = len(re.search("#+", match.group(4)).group(0))
-                msec = '{0:0>{i}.{i}}'.format(str(now.microsecond // 1000),
-                                              i=i)
- time = re.sub('#+', msec, time)
- except AttributeError:
- pass
- else:
- time = datetime.datetime.isoformat(now.replace(microsecond=0))
-
- return self._format_field(tmp, match.group(1), time)
-
- def emer(self, message, **kwargs):
- self.__log("EMER", message, **kwargs)
-
- def err(self, message, **kwargs):
- self.__log("ERR", message, **kwargs)
-
- def warn(self, message, **kwargs):
- self.__log("WARN", message, **kwargs)
-
- def info(self, message, **kwargs):
- self.__log("INFO", message, **kwargs)
-
- def dbg(self, message, **kwargs):
- self.__log("DBG", message, **kwargs)
-
- def __is_enabled(self, level):
- level = LEVELS.get(level.lower(), logging.DEBUG)
- for f, f_level in six.iteritems(Vlog.__mfl[self.name]):
- f_level = LEVELS.get(f_level, logging.CRITICAL)
- if level >= f_level:
- return True
- return False
-
- def emer_is_enabled(self):
- return self.__is_enabled("EMER")
-
- def err_is_enabled(self):
- return self.__is_enabled("ERR")
-
- def warn_is_enabled(self):
- return self.__is_enabled("WARN")
-
- def info_is_enabled(self):
- return self.__is_enabled("INFO")
-
- def dbg_is_enabled(self):
- return self.__is_enabled("DBG")
-
- def exception(self, message):
- """Logs 'message' at ERR log level. Includes a backtrace when in
- exception context."""
- self.err(message, exc_info=True)
-
- @staticmethod
- def init(log_file=None):
-        """Initializes the Vlog module. Causes Vlog to write to 'log_file' if
- not None. Should be called after all Vlog objects have been created.
- No logging will occur until this function is called."""
-
- if Vlog.__inited:
- return
-
- Vlog.__inited = True
- Vlog.__start_time = datetime.datetime.utcnow()
- logging.raiseExceptions = False
- Vlog.__log_file = log_file
- for f in DESTINATIONS:
- logger = logging.getLogger(f)
- logger.setLevel(logging.DEBUG)
-
- try:
- if f == "console":
- logger.addHandler(logging.StreamHandler(sys.stderr))
- elif f == "syslog":
- Vlog.add_syslog_handler()
- elif f == "file" and Vlog.__log_file:
- Vlog.__file_handler = logging.FileHandler(Vlog.__log_file)
- logger.addHandler(Vlog.__file_handler)
- except (IOError, socket.error):
- logger.disabled = True
-
- ovs.unixctl.command_register("vlog/reopen", "", 0, 0,
- Vlog._unixctl_vlog_reopen, None)
- ovs.unixctl.command_register("vlog/close", "", 0, 0,
- Vlog._unixctl_vlog_close, None)
- try:
-            # On 64-bit Windows with Python 2, sys.maxsize is a long, while
-            # sys.maxint is the largest plain int.  Python 3 only has
-            # sys.maxsize.
- maxsize_int = sys.maxint
- except AttributeError:
- maxsize_int = sys.maxsize
- ovs.unixctl.command_register("vlog/set", "spec", 1, maxsize_int,
- Vlog._unixctl_vlog_set, None)
- ovs.unixctl.command_register("vlog/list", "", 0, 0,
- Vlog._unixctl_vlog_list, None)
-
- @staticmethod
- def set_level(module, destination, level):
- """ Sets the log level of the 'module'-'destination' tuple to 'level'.
- All three arguments are strings which are interpreted the same as
- arguments to the --verbose flag. Should be called after all Vlog
- objects have already been created."""
-
- module = module.lower()
- destination = destination.lower()
- level = level.lower()
-
- if destination != "any" and destination not in DESTINATIONS:
- return
-
- if module != "any" and module not in Vlog.__mfl:
- return
-
- if level not in LEVELS:
- return
-
- if module == "any":
- modules = list(Vlog.__mfl.keys())
- else:
- modules = [module]
-
- if destination == "any":
- destinations = list(DESTINATIONS.keys())
- else:
- destinations = [destination]
-
- for m in modules:
- for f in destinations:
- Vlog.__mfl[m][f] = level
-
- @staticmethod
- def set_pattern(destination, pattern):
- """ Sets the log pattern of the 'destination' to 'pattern' """
- destination = destination.lower()
- Vlog.__log_patterns[destination] = pattern
-
- @staticmethod
- def add_syslog_handler(facility=None):
- global syslog_facility, syslog_handler
-
- # If handler is already added and there is no change in 'facility',
- # there is nothing to do.
- if (not facility or facility == syslog_facility) and syslog_handler:
- return
-
- logger = logging.getLogger('syslog')
- # Disable the logger if the "null" syslog method requested
- # by environment.
- if os.environ.get('OVS_SYSLOG_METHOD') == "null":
- logger.disabled = True
- return
-
- if facility is None:
- facility = syslog_facility
-
- new_handler = logging.handlers.SysLogHandler(address="/dev/log",
- facility=facility)
-
- if syslog_handler:
- logger.removeHandler(syslog_handler)
-
- syslog_handler = new_handler
- syslog_facility = facility
-
- logger.addHandler(syslog_handler)
- return
-
- @staticmethod
- def set_levels_from_string(s):
- module = None
- level = None
- destination = None
-
- words = re.split('[ :]', s)
- if words[0] == "pattern":
- try:
- if words[1] in DESTINATIONS and words[2]:
- segments = [words[i] for i in range(2, len(words))]
- pattern = "".join(segments)
- Vlog.set_pattern(words[1], pattern)
- return
- else:
- return "Destination %s does not exist" % words[1]
- except IndexError:
- return "Please supply a valid pattern and destination"
- elif words[0] == "FACILITY":
- if words[1] in FACILITIES:
- try:
- Vlog.add_syslog_handler(words[1])
- except (IOError, socket.error):
- logger = logging.getLogger('syslog')
- logger.disabled = True
- return
- else:
- return "Facility %s is invalid" % words[1]
-
- for word in [w.lower() for w in words]:
- if word == "any":
- pass
- elif word in DESTINATIONS:
- if destination:
- return "cannot specify multiple destinations"
- destination = word
- elif word in LEVELS:
- if level:
- return "cannot specify multiple levels"
- level = word
- elif word in Vlog.__mfl:
- if module:
- return "cannot specify multiple modules"
- module = word
- else:
- return "no destination, level, or module \"%s\"" % word
-
-        # set_level() ignores levels it does not know, so default to "dbg"
-        # (matching the C implementation) when no level was given.
-        Vlog.set_level(module or "any", destination or "any", level or "dbg")
-
- @staticmethod
- def get_levels():
- lines = [" console syslog file\n",
- " ------- ------ ------\n"]
- lines.extend(sorted(["%-16s %4s %4s %4s\n"
- % (m,
- Vlog.__mfl[m]["console"],
- Vlog.__mfl[m]["syslog"],
- Vlog.__mfl[m]["file"]) for m in Vlog.__mfl]))
- return ''.join(lines)
-
- @staticmethod
- def reopen_log_file():
- """Closes and then attempts to re-open the current log file. (This is
- useful just after log rotation, to ensure that the new log file starts
- being used.)"""
-
- if Vlog.__log_file:
- logger = logging.getLogger("file")
- logger.removeHandler(Vlog.__file_handler)
- Vlog.__file_handler = logging.FileHandler(Vlog.__log_file)
- logger.addHandler(Vlog.__file_handler)
-
- @staticmethod
- def close_log_file():
- """Closes the current log file. (This is useful on Windows, to ensure
- that a reference to the file is not kept by the daemon in case of
- detach.)"""
- if Vlog.__log_file:
- logger = logging.getLogger("file")
- logger.removeHandler(Vlog.__file_handler)
- Vlog.__file_handler.close()
-
- @staticmethod
- def _unixctl_vlog_reopen(conn, unused_argv, unused_aux):
- if Vlog.__log_file:
- Vlog.reopen_log_file()
- conn.reply(None)
- else:
- conn.reply("Logging to file not configured")
-
- @staticmethod
- def _unixctl_vlog_close(conn, unused_argv, unused_aux):
- if Vlog.__log_file:
- if sys.platform != 'win32':
- logger = logging.getLogger("file")
- logger.removeHandler(Vlog.__file_handler)
- else:
- Vlog.close_log_file()
- conn.reply(None)
-
- @staticmethod
- def _unixctl_vlog_set(conn, argv, unused_aux):
- for arg in argv:
- msg = Vlog.set_levels_from_string(arg)
- if msg:
- conn.reply(msg)
- return
- conn.reply(None)
-
- @staticmethod
- def _unixctl_vlog_list(conn, unused_argv, unused_aux):
- conn.reply(Vlog.get_levels())
-
-
-def add_args(parser):
- """Adds vlog related options to 'parser', an ArgumentParser object. The
- resulting arguments parsed by 'parser' should be passed to handle_args."""
-
- group = parser.add_argument_group(title="Logging Options")
- group.add_argument("--log-file", nargs="?", const="default",
- help="Enables logging to a file. Default log file"
- " is used if LOG_FILE is omitted.")
- group.add_argument("-v", "--verbose", nargs="*",
- help="Sets logging levels, see ovs-vswitchd(8)."
- " Defaults to dbg.")
-
-
-def handle_args(args):
- """ Handles command line arguments ('args') parsed by an ArgumentParser.
- The ArgumentParser should have been primed by add_args(). Also takes care
- of initializing the Vlog module."""
-
- log_file = args.log_file
- if log_file == "default":
- log_file = "%s/%s.log" % (ovs.dirs.LOGDIR, ovs.util.PROGRAM_NAME)
-
- if args.verbose is None:
- args.verbose = []
- elif args.verbose == []:
- args.verbose = ["any:any:dbg"]
-
- for verbose in args.verbose:
- msg = Vlog.set_levels_from_string(verbose)
- if msg:
- ovs.util.ovs_fatal(0, "processing \"%s\": %s" % (verbose, msg))
-
- Vlog.init(log_file)
diff --git a/python/ovs/winutils.py b/python/ovs/winutils.py
deleted file mode 100644
index 8f3151a36..000000000
--- a/python/ovs/winutils.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# Copyright (c) 2016 Cloudbase Solutions Srl
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import sys
-
-if sys.platform != 'win32':
- raise Exception("Intended to use only on Windows")
-else:
- import ntsecuritycon
- import pywintypes
- import win32con
- import win32event
- import win32file
- import win32pipe
- import win32security
- import winerror
-
-
-def close_handle(handle, logger=None):
- try:
- win32file.CloseHandle(handle)
- return None
- except pywintypes.error as e:
- if logger is not None:
- logger("failed to close handle: %s" % e.strerror)
- return e.winerror
-
-
-def windows_create_pipe(sAttrs=-1, nSize=None):
- # Default values if parameters are not passed
- if sAttrs == -1:
- sAttrs = win32security.SECURITY_ATTRIBUTES()
- sAttrs.bInheritHandle = 1
- if nSize is None:
- # If this parameter is zero, the system uses the default buffer size.
- nSize = 0
-
- try:
- (read_pipe, write_pipe) = win32pipe.CreatePipe(sAttrs, nSize)
- except pywintypes.error:
- raise
-
- return (read_pipe, write_pipe)
-
-
-def windows_read_pipe(fd, length):
- try:
- (error, data) = win32file.ReadFile(fd, length)
- return error, data
- except pywintypes.error as e:
- return e.winerror, ""
-
-
-def create_file(filename, desiredAccess=None, shareMode=None, attributes=-1,
- CreationDisposition=None, flagsAndAttributes=None,
- hTemplateFile=-1):
- # Default values if parameters are not passed
- if desiredAccess is None:
- desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
- if shareMode is None:
- shareMode = 0
- if attributes == -1:
- # attributes can be None
- attributes = None
- if CreationDisposition is None:
- CreationDisposition = win32file.OPEN_EXISTING
- if flagsAndAttributes is None:
- flagsAndAttributes = (win32file.FILE_ATTRIBUTE_NORMAL |
- win32file.FILE_FLAG_OVERLAPPED |
- win32file.FILE_FLAG_NO_BUFFERING)
- if hTemplateFile == -1:
- hTemplateFile = None
-
- try:
- npipe = win32file.CreateFile(filename,
- desiredAccess,
- shareMode,
- attributes,
- CreationDisposition,
- flagsAndAttributes,
- hTemplateFile)
- except pywintypes.error:
- raise
- return npipe
-
-
-def write_file(handle, data, overlapped=None):
- try:
- (errCode, nBytesWritten) = win32file.WriteFile(handle,
- data,
- overlapped)
- # Note: win32file.WriteFile doesn't throw an exception
- # in case it receives ERROR_IO_PENDING.
- return (errCode, nBytesWritten)
- except pywintypes.error as e:
- return (e.winerror, 0)
-
-
-def read_file(handle, bufsize, overlapped=None):
- try:
- # Note: win32file.ReadFile doesn't throw an exception
- # in case it receives ERROR_IO_PENDING.
- (errCode, read_buffer) = win32file.ReadFile(
- handle, bufsize, overlapped)
- return (errCode, read_buffer)
- except pywintypes.error as e:
- return (e.winerror, "")
-
-
-def create_named_pipe(pipename, openMode=None, pipeMode=None,
- nMaxInstances=None, nOutBufferSize=None,
- nInBufferSize=None, nDefaultTimeOut=None,
- saAttr=-1):
- # Default values if parameters are not passed
- if openMode is None:
- openMode = win32con.PIPE_ACCESS_DUPLEX | win32con.FILE_FLAG_OVERLAPPED
- if pipeMode is None:
- pipeMode = (win32con.PIPE_TYPE_MESSAGE |
- win32con.PIPE_READMODE_BYTE |
- win32con.PIPE_WAIT)
- if nMaxInstances is None:
- nMaxInstances = 64
- if nOutBufferSize is None:
- nOutBufferSize = 65000
- if nInBufferSize is None:
- nInBufferSize = 65000
- if nDefaultTimeOut is None:
- nDefaultTimeOut = 0
- if saAttr == -1:
- # saAttr can be None
- saAttr = win32security.SECURITY_ATTRIBUTES()
-
- # The identifier authority.
- sia = ntsecuritycon.SECURITY_NT_AUTHORITY
-
- # Initialize the SID.
- remoteAccessSid = win32security.SID()
- remoteAccessSid.Initialize(
- sia, # The identifier authority.
- 1) # The number of sub authorities to allocate.
- # Disable access over network.
- remoteAccessSid.SetSubAuthority(
- 0, # The index of the sub authority to set
- ntsecuritycon.SECURITY_NETWORK_RID)
-
- allowedPsids = []
- # Allow Windows Services to access the Named Pipe.
- allowedPsid_0 = win32security.SID()
- allowedPsid_0.Initialize(
- sia, # The identifier authority.
- 1) # The number of sub authorities to allocate.
- allowedPsid_0.SetSubAuthority(
- 0, # The index of the sub authority to set
- ntsecuritycon.SECURITY_LOCAL_SYSTEM_RID)
- # Allow Administrators to access the Named Pipe.
- allowedPsid_1 = win32security.SID()
- allowedPsid_1.Initialize(
- sia, # The identifier authority.
- 2) # The number of sub authorities to allocate.
- allowedPsid_1.SetSubAuthority(
- 0, # The index of the sub authority to set
- ntsecuritycon.SECURITY_BUILTIN_DOMAIN_RID)
- allowedPsid_1.SetSubAuthority(
- 1, # The index of the sub authority to set
- ntsecuritycon.DOMAIN_ALIAS_RID_ADMINS)
-
- allowedPsids.append(allowedPsid_0)
- allowedPsids.append(allowedPsid_1)
-
- # Initialize an ACL.
- acl = win32security.ACL()
- acl.Initialize()
- # Add denied ACL.
- acl.AddAccessDeniedAce(win32security.ACL_REVISION,
- ntsecuritycon.GENERIC_ALL,
- remoteAccessSid)
- # Add allowed ACLs.
- for allowedPsid in allowedPsids:
- acl.AddAccessAllowedAce(win32security.ACL_REVISION,
- ntsecuritycon.GENERIC_ALL,
- allowedPsid)
-
- # Initialize an SD.
- sd = win32security.SECURITY_DESCRIPTOR()
- sd.Initialize()
- # Set DACL.
- sd.SetSecurityDescriptorDacl(True, acl, False)
-
- saAttr.bInheritHandle = 1
- saAttr.SECURITY_DESCRIPTOR = sd
-
- try:
- npipe = win32pipe.CreateNamedPipe(pipename,
- openMode,
- pipeMode,
- nMaxInstances,
- nOutBufferSize,
- nInBufferSize,
- nDefaultTimeOut,
- saAttr)
-
- if npipe == win32file.INVALID_HANDLE_VALUE:
- return None
-
- return npipe
- except pywintypes.error:
- return None
-
-
-def set_pipe_mode(hPipe, mode=-1, maxCollectionCount=None,
- collectDataTimeout=None):
- # Default values if parameters are not passed
- if mode == -1:
- mode = win32pipe.PIPE_READMODE_BYTE
- try:
- win32pipe.SetNamedPipeHandleState(
- hPipe, mode, maxCollectionCount, collectDataTimeout)
- except pywintypes.error:
- raise
-
-
-def connect_named_pipe(pipe_handle, overlapped=None):
- try:
- # If the result of ConnectNamedPipe is ERROR_IO_PENDING or
- # ERROR_PIPE_CONNECTED, then this value is returned.
- # All other error values raise a win32 exception
- error = win32pipe.ConnectNamedPipe(pipe_handle, overlapped)
- return error
- except pywintypes.error as e:
- return e.winerror
-
-
-def get_pipe_name(name):
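- # Strip any path separators and prefix the local named-pipe namespace,
- # e.g. "db.sock" becomes "\\.\pipe\db.sock".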
- name = name.replace('/', '')
- name = name.replace('\\', '')
- name = "\\\\.\\pipe\\" + name
- return name
-
-
-def get_overlapped_result(handle, overlapped=None, bWait=False):
- try:
- return win32file.GetOverlappedResult(handle, overlapped, bWait)
- except pywintypes.error:
- raise
-
-
-def get_new_event(sa=None, bManualReset=True, bInitialState=True,
- objectName=None):
- return win32event.CreateEvent(sa, bManualReset, bInitialState, objectName)
-
-
-pipe_disconnected_errors = [winerror.ERROR_PIPE_NOT_CONNECTED,
- winerror.ERROR_BAD_PIPE,
- winerror.ERROR_NO_DATA,
- winerror.ERROR_BROKEN_PIPE]
diff --git a/python/ovstest/__init__.py b/python/ovstest/__init__.py
deleted file mode 100644
index 218d8921e..000000000
--- a/python/ovstest/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# This file intentionally left blank.
diff --git a/python/ovstest/args.py b/python/ovstest/args.py
deleted file mode 100644
index 975d1880b..000000000
--- a/python/ovstest/args.py
+++ /dev/null
@@ -1,283 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-ovsargs provides argument parsing for the ovs-test utility
-"""
-
-import argparse
-import re
-import socket
-import sys
-
-CONTROL_PORT = 15531
-DATA_PORT = 15532
-
-
-def ip_address(string):
- """Verifies if string is a valid IP address"""
- try:
- socket.inet_aton(string)
- except socket.error:
- raise argparse.ArgumentTypeError("Not a valid IPv4 address")
- return string
-
-
-def ip_optional_mask(string):
- """
- Verifies if string contains a valid IP address and an optional mask in
- CIDR notation.
- """
- token = string.split("/")
- if len(token) > 2:
- raise argparse.ArgumentTypeError("IP address and netmask must be "
- "separated by a single slash")
- elif len(token) == 2:
- try:
- mask = int(token[1])
- except ValueError:
- raise argparse.ArgumentTypeError("Netmask is not a valid integer")
- if mask < 0 or mask > 31:
- raise argparse.ArgumentTypeError("Netmask must be in range 0..31")
- ip_address(token[0])
- return string
-
-
-def port(string):
- """Convert a string into a TCP/UDP Port (integer)"""
- try:
- port_number = int(string)
- if port_number < 1 or port_number > 65535:
- raise argparse.ArgumentTypeError("Port is out of range")
- except ValueError:
- raise argparse.ArgumentTypeError("Port is not an integer")
- return port_number
-
-
-def ip_optional_port(string, default_port, ip_callback):
- """Convert a string into IP and Port pair. If port was absent then use
- default_port as the port. The third argument is a callback that verifies
- whether IP address is given in correct format."""
- value = string.split(':')
- if len(value) == 1:
- return (ip_callback(value[0]), default_port)
- elif len(value) == 2:
- return (ip_callback(value[0]), port(value[1]))
- else:
- raise argparse.ArgumentTypeError("IP address from the optional Port "
- "must be colon-separated")
-
-
-def ip_optional_port_port(string, default_port1, default_port2, ip_callback):
- """Convert a string into IP, Port1, Port2 tuple. If any of ports were
- missing, then default ports will be used. The fourth argument is a
- callback that verifies whether IP address is given in the expected
- format."""
- value = string.split(':')
- if len(value) == 1:
- return (ip_callback(value[0]), default_port1, default_port2)
- elif len(value) == 2:
- return (ip_callback(value[0]), port(value[1]), default_port2)
- elif len(value) == 3:
- return (ip_callback(value[0]), port(value[1]), port(value[2]))
- else:
- raise argparse.ArgumentTypeError("Expected IP address and at most "
- "two colon-separated ports")
-
-
-def vlan_tag(string):
- """
- This function verifies whether given string is a correct VLAN tag.
- """
- try:
- value = int(string)
- except ValueError:
- raise argparse.ArgumentTypeError("VLAN tag is not a valid integer")
- if value < 1 or value > 4094:
- raise argparse.ArgumentTypeError("Not a valid VLAN tag. "
- "VLAN tag should be in the "
- "range 1..4094.")
- return string
-
-
-def server_endpoint(string):
- """Converts a string OuterIP[:OuterPort],InnerIP[/Mask][:InnerPort]
- into a 4-tuple, where:
- 1. First element is OuterIP
- 2. Second element is OuterPort (if omitted will use default value 15531)
- 3. Third element is InnerIP with optional mask
- 4. Fourth element is InnerPort (if omitted will use default value 15532)
- """
- value = string.split(',')
- if len(value) == 2:
- ret1 = ip_optional_port(value[0], CONTROL_PORT, ip_address)
- ret2 = ip_optional_port(value[1], DATA_PORT, ip_optional_mask)
- return (ret1[0], ret1[1], ret2[0], ret2[1])
- else:
- raise argparse.ArgumentTypeError("OuterIP:OuterPort and InnerIP/Mask:"
- "InnerPort must be comma separated")
-
-
-class UniqueServerAction(argparse.Action):
- """
- This custom action class prevents the user from entering multiple ovs-test
- servers with the same OuterIP. If there is a server with the 127.0.0.1 outer
- IP address, then it is inserted at the front of the list.
- """
- def __call__(self, parser, namespace, values, option_string=None):
- outer_ips = set()
- endpoints = []
- for server in values:
- try:
- endpoint = server_endpoint(server)
- except argparse.ArgumentTypeError:
- raise argparse.ArgumentError(self, str(sys.exc_info()[1]))
- if endpoint[0] in outer_ips:
- raise argparse.ArgumentError(self, "Duplicate OuterIPs found")
- else:
- outer_ips.add(endpoint[0])
- if endpoint[0] == "127.0.0.1":
- endpoints.insert(0, endpoint)
- else:
- endpoints.append(endpoint)
- setattr(namespace, self.dest, endpoints)
-
-
-def bandwidth(string):
- """Convert a string (given in bits/second with optional magnitude for
- units) into a long (bytes/second)"""
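- # e.g. "1M" -> 1,000,000 bits/second -> 125,000 bytes/second.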
- if re.match("^[1-9][0-9]*[MK]?$", string) is None:
- raise argparse.ArgumentTypeError("Not a valid target bandwidth")
- bwidth = string.replace("M", "000000")
- bwidth = bwidth.replace("K", "000")
- return int(bwidth) / 8 # Convert from bits to bytes
-
-
-def tunnel_types(string):
- """
- This function converts a string into a list that contains all tunnel types
- that user intended to test.
- """
- return string.split(',')
-
-
-def l3_endpoint_client(string):
- """
- This function parses command line argument string in
- remoteIP,localInnerIP[/mask][:ControlPort[:TestPort]],remoteInnerIP[:
- ControlPort[:TestPort]] format.
- """
- try:
- remote_ip, me, he = string.split(',')
- except ValueError:
- raise argparse.ArgumentTypeError("All 3 IP addresses must be comma "
- "separated.")
- r = (ip_address(remote_ip),
- ip_optional_port_port(me, CONTROL_PORT, DATA_PORT, ip_optional_mask),
- ip_optional_port_port(he, CONTROL_PORT, DATA_PORT, ip_address))
- return r
-
-
-def l3_endpoint_server(string):
- """
- This function parses a command line argument string in
- remoteIP,localInnerIP[/mask][:ControlPort] format.
- """
- try:
- remote_ip, me = string.split(',')
- except ValueError:
- raise argparse.ArgumentTypeError("Both IP addresses must be comma "
- "separated.")
- return (ip_address(remote_ip),
- ip_optional_port(me, CONTROL_PORT, ip_optional_mask))
-
-
-def ovs_initialize_args():
- """
- Initialize argument parsing for ovs-test utility.
- """
- parser = argparse.ArgumentParser(description='Test connectivity '
- 'between two Open vSwitches.')
-
- parser.add_argument('-v', '--version', action='version',
- version='ovs-test (Open vSwitch) @VERSION@')
-
- parser.add_argument("-b", "--bandwidth", action='store',
- dest="targetBandwidth", default="1M", type=bandwidth,
- help='Target bandwidth for UDP tests in bits/second. Use '
- 'postfix M or K to alter unit magnitude.')
- parser.add_argument("-i", "--interval", action='store',
- dest="testInterval", default=5, type=int,
- help='Interval for how long to run each test in seconds.')
-
- parser.add_argument("-t", "--tunnel-modes", action='store',
- dest="tunnelModes", default=(), type=tunnel_types,
- help='Do L3 tests with the given tunnel modes.')
- parser.add_argument("-l", "--vlan-tag", action='store',
- dest="vlanTag", default=None, type=vlan_tag,
- help='Do VLAN tests and use the given VLAN tag.')
- parser.add_argument("-d", "--direct", action='store_true',
- dest="direct", default=None,
- help='Do direct tests between both ovs-test servers.')
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument("-s", "--server", action="store", dest="port",
- type=port,
- help='Run in server mode and wait for the client to '
- 'connect to this port.')
- group.add_argument('-c', "--client", nargs=2,
- dest="servers", action=UniqueServerAction,
- metavar=("SERVER1", "SERVER2"),
- help='Run in client mode and do tests between these '
- 'two ovs-test servers. Each server must be specified in '
- 'the following format - OuterIP:OuterPort,InnerIP[/mask]'
- ':InnerPort. It is possible to start a local instance of '
- 'the ovs-test server in client mode by using 127.0.0.1 as '
- 'the OuterIP.')
- return parser.parse_args()
-
-
-def l3_initialize_args():
- """
- Initialize argument parsing for ovs-l3ping utility.
- """
- parser = argparse.ArgumentParser(description='Test L3 tunnel '
- 'connectivity between two Open vSwitch instances.')
-
- parser.add_argument('-v', '--version', action='version',
- version='ovs-l3ping (Open vSwitch) @VERSION@')
-
- parser.add_argument("-b", "--bandwidth", action='store',
- dest="targetBandwidth", default="1M", type=bandwidth,
- help='Target bandwidth for UDP tests in bits/second. Use '
- 'postfix M or K to alter unit magnitude.')
- parser.add_argument("-i", "--interval", action='store',
- dest="testInterval", default=5, type=int,
- help='Interval for how long to run each test in seconds.')
-
- parser.add_argument("-t", "--tunnel-mode", action='store',
- dest="tunnelMode", required=True,
- help='Do L3 tests with this tunnel type.')
-
- group = parser.add_mutually_exclusive_group(required=True)
- group.add_argument("-s", "--server", action="store", dest="server",
- metavar="TUNNELIP,SERVER",
- type=l3_endpoint_server,
- help='Run in server mode and wait for the client to '
- 'connect.')
- group.add_argument('-c', "--client", action="store", dest="client",
- metavar="TUNNELIP,CLIENT,SERVER",
- type=l3_endpoint_client,
- help='Run in client mode and connect to the server.')
- return parser.parse_args()
diff --git a/python/ovstest/rpcserver.py b/python/ovstest/rpcserver.py
deleted file mode 100644
index ab5b7e89e..000000000
--- a/python/ovstest/rpcserver.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-rpcserver is an XML RPC server that allows an RPC client to initiate tests
-"""
-
-from __future__ import print_function
-
-import sys
-
-import exceptions
-
-import six.moves.xmlrpc_client
-
-import tcp
-
-from twisted.internet import reactor
-from twisted.internet.error import CannotListenError
-from twisted.web import server
-from twisted.web import xmlrpc
-
-import udp
-
-import util
-
-import vswitch
-
-
-class TestArena(xmlrpc.XMLRPC):
- """
- This class contains all the functions that the ovs-test client will call
- remotely. The caller is responsible for using the designated handle IDs
- with the designated methods (e.g. do not mix UDP and TCP handles).
- """
-
- def __init__(self):
- xmlrpc.XMLRPC.__init__(self, allowNone=True)
- self.handle_id = 1
- self.handle_map = {}
- self.bridges = set()
- self.pbridges = set()
- self.ports = set()
- self.request = None
-
- def __acquire_handle(self, value):
- """
- Allocates new handle and assigns value object to it
- """
- handle = self.handle_id
- self.handle_map[handle] = value
- self.handle_id += 1
- return handle
-
- def __get_handle_resources(self, handle):
- """
- Return resources that were assigned to handle
- """
- return self.handle_map[handle]
-
- def __delete_handle(self, handle):
- """
- Releases handle from handle_map
- """
- del self.handle_map[handle]
-
- def cleanup(self):
- """
- Delete all remaining bridges and ports if the ovs-test client did not have
- a chance to remove them. It is necessary to call this function if the
- ovs-test server is abruptly terminated while running the tests.
- """
- for port in self.ports:
- # Remove ports that were added to existing bridges
- vswitch.ovs_vsctl_del_port_from_bridge(port)
-
- for bridge in self.bridges:
- # Remove bridges that were added for L3 tests
- vswitch.ovs_vsctl_del_bridge(bridge)
-
- for pbridge in self.pbridges:
- # Remove bridges that were added for VLAN tests
- vswitch.ovs_vsctl_del_pbridge(pbridge[0], pbridge[1])
-
- def render(self, request):
- """
- This method overrides the original XMLRPC.render method so that it
- would be possible to get the XML RPC client IP address from the
- request object.
- """
- self.request = request
- return xmlrpc.XMLRPC.render(self, request)
-
- def xmlrpc_get_my_address(self):
- """
- Returns the RPC client's IP address.
- """
- return self.request.getClientIP()
-
- def xmlrpc_get_my_address_from(self, his_ip, his_port):
- """
- Returns the ovs-test server IP address that the other ovs-test server
- with the given ip will see.
- """
- server1 = six.moves.xmlrpc_client.Server("http://%s:%u/" %
- (his_ip, his_port))
- return server1.get_my_address()
-
- def xmlrpc_create_udp_listener(self, port):
- """
- Creates a UDP listener that will receive packets from UDP sender
- """
- try:
- listener = udp.UdpListener()
- reactor.listenUDP(port, listener)
- handle_id = self.__acquire_handle(listener)
- except CannotListenError:
- return -1
- return handle_id
-
- def xmlrpc_create_udp_sender(self, host, count, size, duration):
- """
- Send UDP datagrams to UDP listener
- """
- sender = udp.UdpSender(tuple(host), count, size, duration)
- reactor.listenUDP(0, sender)
- handle_id = self.__acquire_handle(sender)
- return handle_id
-
- def xmlrpc_get_udp_listener_results(self, handle):
- """
- Returns number of datagrams that were received
- """
- listener = self.__get_handle_resources(handle)
- return listener.getResults()
-
- def xmlrpc_get_udp_sender_results(self, handle):
- """
- Returns number of datagrams that were sent
- """
- sender = self.__get_handle_resources(handle)
- return sender.getResults()
-
- def xmlrpc_close_udp_listener(self, handle):
- """
- Releases UdpListener and all its resources
- """
- listener = self.__get_handle_resources(handle)
- listener.transport.stopListening()
- self.__delete_handle(handle)
- return 0
-
- def xmlrpc_close_udp_sender(self, handle):
- """
- Releases UdpSender and all its resources
- """
- sender = self.__get_handle_resources(handle)
- sender.transport.stopListening()
- self.__delete_handle(handle)
- return 0
-
- def xmlrpc_create_tcp_listener(self, port):
- """
- Creates a TcpListener that will accept connection from TcpSender
- """
- try:
- listener = tcp.TcpListenerFactory()
- port = reactor.listenTCP(port, listener)
- handle_id = self.__acquire_handle((listener, port))
- return handle_id
- except CannotListenError:
- return -1
-
- def xmlrpc_create_tcp_sender(self, his_ip, his_port, duration):
- """
- Creates a TcpSender that will connect to TcpListener
- """
- sender = tcp.TcpSenderFactory(duration)
- connector = reactor.connectTCP(his_ip, his_port, sender)
- handle_id = self.__acquire_handle((sender, connector))
- return handle_id
-
- def xmlrpc_get_tcp_listener_results(self, handle):
- """
- Returns number of bytes received
- """
- (listener, _) = self.__get_handle_resources(handle)
- return listener.getResults()
-
- def xmlrpc_get_tcp_sender_results(self, handle):
- """
- Returns number of bytes sent
- """
- (sender, _) = self.__get_handle_resources(handle)
- return sender.getResults()
-
- def xmlrpc_close_tcp_listener(self, handle):
- """
- Releases TcpListener and all its resources
- """
- try:
- (_, port) = self.__get_handle_resources(handle)
- port.loseConnection()
- self.__delete_handle(handle)
- except exceptions.KeyError:
- return -1
- return 0
-
- def xmlrpc_close_tcp_sender(self, handle):
- """
- Releases TcpSender and all its resources
- """
- try:
- (_, connector) = self.__get_handle_resources(handle)
- connector.disconnect()
- self.__delete_handle(handle)
- except exceptions.KeyError:
- return -1
- return 0
-
- def xmlrpc_create_test_bridge(self, bridge, iface):
- """
- This function creates a physical bridge from iface. It moves the
- IP configuration from the physical interface to the bridge.
- """
- ret = vswitch.ovs_vsctl_add_bridge(bridge)
- if ret == 0:
- self.pbridges.add((bridge, iface))
- util.interface_up(bridge)
- (ip_addr, mask) = util.interface_get_ip(iface)
- util.interface_assign_ip(bridge, ip_addr, mask)
- util.interface_up(bridge)
- util.move_routes(iface, bridge)
- util.interface_remove_ip(iface, ip_addr, mask)
- ret = vswitch.ovs_vsctl_add_port_to_bridge(bridge, iface)
- if ret == 0:
- self.ports.add(iface)
- else:
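- # Adding the port failed: move the IP configuration back
- # to the physical interface and delete the bridge.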
- util.interface_assign_ip(iface, ip_addr, mask)
- util.interface_up(iface)
- util.move_routes(bridge, iface)
- vswitch.ovs_vsctl_del_bridge(bridge)
-
- return ret
-
- def xmlrpc_del_test_bridge(self, bridge, iface):
- """
- This function deletes the test bridge and moves its IP configuration
- back to the physical interface.
- """
- ret = vswitch.ovs_vsctl_del_pbridge(bridge, iface)
- self.pbridges.discard((bridge, iface))
- return ret
-
- def xmlrpc_get_iface_from_bridge(self, brname):
- """
- Tries to figure out the physical interface behind the bridge.
- """
- return vswitch.ovs_get_physical_interface(brname)
-
- def xmlrpc_create_bridge(self, brname):
- """
- Creates an OVS bridge.
- """
- ret = vswitch.ovs_vsctl_add_bridge(brname)
- if ret == 0:
- self.bridges.add(brname)
- return ret
-
- def xmlrpc_del_bridge(self, brname):
- """
- Deletes an OVS bridge.
- """
- ret = vswitch.ovs_vsctl_del_bridge(brname)
- if ret == 0:
- self.bridges.discard(brname)
- return ret
-
- def xmlrpc_is_ovs_bridge(self, bridge):
- """
- This function verifies whether given interface is an ovs bridge.
- """
- return vswitch.ovs_vsctl_is_ovs_bridge(bridge)
-
- def xmlrpc_add_port_to_bridge(self, bridge, port):
- """
- Adds a port to the OVS bridge.
- """
- ret = vswitch.ovs_vsctl_add_port_to_bridge(bridge, port)
- if ret == 0:
- self.ports.add(port)
- return ret
-
- def xmlrpc_del_port_from_bridge(self, port):
- """
- Removes a port from OVS bridge.
- """
- ret = vswitch.ovs_vsctl_del_port_from_bridge(port)
- if ret == 0:
- self.ports.discard(port)
- return ret
-
- def xmlrpc_ovs_vsctl_set(self, table, record, column, key, value):
- """
- This function allows the caller to alter the OVS database.
- """
- return vswitch.ovs_vsctl_set(table, record, column, key, value)
-
- def xmlrpc_interface_up(self, iface):
- """
- This function brings up given interface.
- """
- return util.interface_up(iface)
-
- def xmlrpc_interface_assign_ip(self, iface, ip_address, mask):
- """
- This function assigns an IP address to the given interface.
- """
- return util.interface_assign_ip(iface, ip_address, mask)
-
- def xmlrpc_interface_remove_ip(self, iface, ip_address, mask):
- """
- This function removes an IP address from the given interface.
- """
- return util.interface_remove_ip(iface, ip_address, mask)
-
- def xmlrpc_get_interface(self, address):
- """
- Finds first interface that has given address
- """
- return util.get_interface(address)
-
- def xmlrpc_get_interface_mtu(self, iface):
- """
- Returns MTU of the given interface
- """
- return util.get_interface_mtu(iface)
-
- def xmlrpc_uname(self):
- """
- Return the version of the running kernel
- """
- return util.uname()
-
- def xmlrpc_get_driver(self, iface):
- """
- Returns the driver name and version of the given interface
- """
- return util.get_driver(iface)
-
- def xmlrpc_get_interface_from_routing_decision(self, ip):
- """
- Returns the interface through which the given IP address is reachable
- """
- return util.get_interface_from_routing_decision(ip)
-
-
-def start_rpc_server(port):
- """
- This function creates a RPC server and adds it to the Twisted Reactor.
- """
- rpc_server = TestArena()
- reactor.listenTCP(port, server.Site(rpc_server))
- try:
- print("Starting RPC server\n")
- sys.stdout.flush()
- # If this server was started from the ovs-test client, then we must flush
- # STDOUT so that the client knows that the server is ready to accept
- # XML RPC connections.
- reactor.run()
- finally:
- rpc_server.cleanup()
diff --git a/python/ovstest/tcp.py b/python/ovstest/tcp.py
deleted file mode 100644
index c495717f2..000000000
--- a/python/ovstest/tcp.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-tcp module contains listener and sender classes for TCP protocol
-"""
-
-import time
-
-from twisted.internet import interfaces
-from twisted.internet.protocol import ClientFactory, Factory, Protocol
-
-from zope.interface import implements
-
-
-class TcpListenerConnection(Protocol):
- """
- This per-connection class is instantiated each time sender connects
- """
- def __init__(self):
- self.stats = 0
-
- def dataReceived(self, data):
- self.stats += len(data)
-
- def connectionLost(self, reason):
- self.factory.stats += self.stats
-
-
-class TcpListenerFactory(Factory):
- """
- This per-listening socket class is used to
- instantiate TcpListenerConnections
- """
- protocol = TcpListenerConnection
-
- def __init__(self):
- self.stats = 0
-
- def getResults(self):
- """ returns the number of bytes received as string"""
- # XML RPC does not support 64bit int (http://bugs.python.org/issue2985)
- # so we have to convert the amount of bytes into a string
- return str(self.stats)
-
-
-class Producer(object):
- implements(interfaces.IPushProducer)
- """
- This producer class generates infinite byte stream for a specified time
- duration
- """
- def __init__(self, proto, duration):
- self.proto = proto
- self.start = time.time()
- self.produced = 0
- self.paused = False
- self.data = "X" * 65535
- self.duration = duration
-
- def pauseProducing(self):
- """This function is called whenever write() to socket would block"""
- self.paused = True
-
- def resumeProducing(self):
- """This function is called whenever socket becomes writable"""
- self.paused = False
- current = time.time()
- while (not self.paused) and (current < self.start + self.duration):
- self.proto.transport.write(self.data)
- self.produced += len(self.data)
- current = time.time()
- if current >= self.start + self.duration:
- self.proto.factory.stats += self.produced
- self.proto.transport.unregisterProducer()
- self.proto.transport.loseConnection()
-
- def stopProducing(self):
- pass
-
-
-class TcpSenderConnection(Protocol):
- """
- TCP connection instance class that sends all traffic at full speed.
- """
-
- def connectionMade(self):
- producer = Producer(self, self.factory.duration)
- self.transport.registerProducer(producer, True)
- producer.resumeProducing()
-
- def dataReceived(self, data):
- self.transport.loseConnection()
-
-
-class TcpSenderFactory(ClientFactory):
- """
- This factory is responsible for instantiating a TcpSenderConnection
- each time the sender initiates a connection
- """
- protocol = TcpSenderConnection
-
- def __init__(self, duration):
- self.duration = duration
- self.stats = 0
-
- def getResults(self):
- """Returns amount of bytes sent to the Listener (as a string)"""
- return str(self.stats)
diff --git a/python/ovstest/tests.py b/python/ovstest/tests.py
deleted file mode 100644
index 6de3cc3af..000000000
--- a/python/ovstest/tests.py
+++ /dev/null
@@ -1,250 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import math
-import time
-
-import ovstest.util as util
-
-DEFAULT_TEST_BRIDGE = "ovstestbr0"
-DEFAULT_TEST_PORT = "ovstestport0"
-DEFAULT_TEST_TUN = "ovstestport1"
-NO_HANDLE = -1
-
-
-def do_udp_tests(receiver, sender, tbwidth, duration, port_sizes):
- """Schedule UDP tests between receiver and sender"""
- server1 = util.rpc_client(receiver[0], receiver[1])
- server2 = util.rpc_client(sender[0], sender[1])
-
- udpformat = '{0:>15} {1:>15} {2:>15} {3:>15} {4:>15}'
-
- print("UDP test from %s:%u to %s:%u with target bandwidth %s" %
- (sender[0], sender[1], receiver[0], receiver[1],
- util.bandwidth_to_string(tbwidth)))
- print(udpformat.format("Datagram Size", "Snt Datagrams", "Rcv Datagrams",
- "Datagram Loss", "Bandwidth"))
-
- for size in port_sizes:
- listen_handle = NO_HANDLE
- send_handle = NO_HANDLE
- try:
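- # Number of datagrams needed to sustain the target bandwidth
- # ('tbwidth' is in bytes/second).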
- packetcnt = (tbwidth * duration) / size
-
- listen_handle = server1.create_udp_listener(receiver[3])
- if listen_handle == NO_HANDLE:
- print("Server could not open UDP listening socket on port"
- " %u. Try to restart the server.\n" % receiver[3])
- return
- send_handle = server2.create_udp_sender(
- (util.ip_from_cidr(receiver[2]),
- receiver[3]), packetcnt, size,
- duration)
-
- # Using sleep here because there is no other synchronization
- # source that would notify us when all sent packets were received
- time.sleep(duration + 1)
-
- rcv_packets = server1.get_udp_listener_results(listen_handle)
- snt_packets = server2.get_udp_sender_results(send_handle)
-
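- # Datagram loss as a percentage, rounded up to two decimal places.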
- loss = math.ceil(((snt_packets - rcv_packets) * 10000.0) /
- snt_packets) / 100
- bwidth = (rcv_packets * size) / duration
-
- print(udpformat.format(size, snt_packets, rcv_packets,
- '%.2f%%' % loss, util.bandwidth_to_string(bwidth)))
- finally:
- if listen_handle != NO_HANDLE:
- server1.close_udp_listener(listen_handle)
- if send_handle != NO_HANDLE:
- server2.close_udp_sender(send_handle)
- print("\n")
-
-
-def do_tcp_tests(receiver, sender, duration):
- """Schedule TCP tests between receiver and sender"""
- server1 = util.rpc_client(receiver[0], receiver[1])
- server2 = util.rpc_client(sender[0], sender[1])
-
- tcpformat = '{0:>15} {1:>15} {2:>15}'
- print("TCP test from %s:%u to %s:%u (full speed)" % (sender[0], sender[1],
- receiver[0], receiver[1]))
- print(tcpformat.format("Snt Bytes", "Rcv Bytes", "Bandwidth"))
-
- listen_handle = NO_HANDLE
- send_handle = NO_HANDLE
- try:
- listen_handle = server1.create_tcp_listener(receiver[3])
- if listen_handle == NO_HANDLE:
- print("Server was unable to open TCP listening socket on port"
- " %u. Try to restart the server.\n" % receiver[3])
- return
- send_handle = server2.create_tcp_sender(util.ip_from_cidr(receiver[2]),
- receiver[3], duration)
-
- time.sleep(duration + 1)
-
- rcv_bytes = int(server1.get_tcp_listener_results(listen_handle))
- snt_bytes = int(server2.get_tcp_sender_results(send_handle))
-
- bwidth = rcv_bytes / duration
-
- print(tcpformat.format(snt_bytes, rcv_bytes,
- util.bandwidth_to_string(bwidth)))
- finally:
- if listen_handle != NO_HANDLE:
- server1.close_tcp_listener(listen_handle)
- if send_handle != NO_HANDLE:
- server2.close_tcp_sender(send_handle)
- print("\n")
-
-
-def do_l3_tests(node1, node2, bandwidth, duration, ps, type):
- """
- Do L3 tunneling tests. Each node is given as a 4-tuple - physical
- interface IP, control port, test IP and test port.
- """
- server1 = util.rpc_client(node1[0], node1[1])
- server2 = util.rpc_client(node2[0], node2[1])
- servers_with_bridges = []
- try:
- server1.create_bridge(DEFAULT_TEST_BRIDGE)
- servers_with_bridges.append(server1)
- server2.create_bridge(DEFAULT_TEST_BRIDGE)
- servers_with_bridges.append(server2)
-
- server1.interface_up(DEFAULT_TEST_BRIDGE)
- server2.interface_up(DEFAULT_TEST_BRIDGE)
-
- server1.interface_assign_ip(DEFAULT_TEST_BRIDGE, node1[2], None)
- server2.interface_assign_ip(DEFAULT_TEST_BRIDGE, node2[2], None)
-
- server1.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_TUN)
- server2.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_TUN)
-
- server1.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "type",
- None, type)
- server2.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "type",
- None, type)
- server1.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "options",
- "remote_ip", node2[0])
- server2.ovs_vsctl_set("Interface", DEFAULT_TEST_TUN, "options",
- "remote_ip", node1[0])
-
- do_udp_tests(node1, node2, bandwidth, duration, ps)
- do_udp_tests(node2, node1, bandwidth, duration, ps)
- do_tcp_tests(node1, node2, duration)
- do_tcp_tests(node2, node1, duration)
-
- finally:
- for server in servers_with_bridges:
- server.del_bridge(DEFAULT_TEST_BRIDGE)
-
-
-def do_vlan_tests(node1, node2, bandwidth, duration, ps, tag):
- """
- Do VLAN tests between node1 and node2. Each node is given
- as a 4-tuple - physical interface IP, control port, test IP and
- test port.
- """
- server1 = util.rpc_client(node1[0], node1[1])
- server2 = util.rpc_client(node2[0], node2[1])
-
- br_name1 = None
- br_name2 = None
-
- servers_with_test_ports = []
-
- try:
- interface_node1 = server1.get_interface(node1[0])
- interface_node2 = server2.get_interface(node2[0])
-
- if server1.is_ovs_bridge(interface_node1):
- br_name1 = interface_node1
- else:
- br_name1 = DEFAULT_TEST_BRIDGE
- server1.create_test_bridge(br_name1, interface_node1)
-
- if server2.is_ovs_bridge(interface_node2):
- br_name2 = interface_node2
- else:
- br_name2 = DEFAULT_TEST_BRIDGE
- server2.create_test_bridge(br_name2, interface_node2)
-
- server1.add_port_to_bridge(br_name1, DEFAULT_TEST_PORT)
- servers_with_test_ports.append(server1)
- server2.add_port_to_bridge(br_name2, DEFAULT_TEST_PORT)
- servers_with_test_ports.append(server2)
-
- server1.ovs_vsctl_set("Port", DEFAULT_TEST_PORT, "tag", None, tag)
- server2.ovs_vsctl_set("Port", DEFAULT_TEST_PORT, "tag", None, tag)
-
- server1.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type", None,
- "internal")
- server2.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type", None,
- "internal")
-
- server1.interface_assign_ip(DEFAULT_TEST_PORT, node1[2], None)
- server2.interface_assign_ip(DEFAULT_TEST_PORT, node2[2], None)
-
- server1.interface_up(DEFAULT_TEST_PORT)
- server2.interface_up(DEFAULT_TEST_PORT)
-
- do_udp_tests(node1, node2, bandwidth, duration, ps)
- do_udp_tests(node2, node1, bandwidth, duration, ps)
- do_tcp_tests(node1, node2, duration)
- do_tcp_tests(node2, node1, duration)
-
- finally:
- for server in servers_with_test_ports:
- server.del_port_from_bridge(DEFAULT_TEST_PORT)
- if br_name1 == DEFAULT_TEST_BRIDGE:
- server1.del_test_bridge(br_name1, interface_node1)
- if br_name2 == DEFAULT_TEST_BRIDGE:
- server2.del_test_bridge(br_name2, interface_node2)
-
-
-def do_direct_tests(node1, node2, bandwidth, duration, ps):
- """
- Do tests between outer IPs without involving Open vSwitch. Each
- node is given as a 4-tuple - physical interface IP, control port,
- test IP and test port. Direct tests use the physical interface
- IP as the test IP address.
- """
- n1 = (node1[0], node1[1], node1[0], node1[3])
- n2 = (node2[0], node2[1], node2[0], node2[3])
-
- do_udp_tests(n1, n2, bandwidth, duration, ps)
- do_udp_tests(n2, n1, bandwidth, duration, ps)
- do_tcp_tests(n1, n2, duration)
- do_tcp_tests(n2, n1, duration)
-
-
-def configure_l3(conf, tunnel_mode):
- """
- This function creates a temporary test bridge and adds an L3 tunnel.
- """
- s = util.start_local_server(conf[1][1])
- server = util.rpc_client("127.0.0.1", conf[1][1])
- server.create_bridge(DEFAULT_TEST_BRIDGE)
- server.add_port_to_bridge(DEFAULT_TEST_BRIDGE, DEFAULT_TEST_PORT)
- server.interface_up(DEFAULT_TEST_BRIDGE)
- server.interface_assign_ip(DEFAULT_TEST_BRIDGE, conf[1][0],
- None)
- server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "type",
- None, tunnel_mode)
- server.ovs_vsctl_set("Interface", DEFAULT_TEST_PORT, "options",
- "remote_ip", conf[0])
- return s
diff --git a/python/ovstest/udp.py b/python/ovstest/udp.py
deleted file mode 100644
index acd28d575..000000000
--- a/python/ovstest/udp.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# Copyright (c) 2011, 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-udp module contains listener and sender classes for UDP protocol
-"""
-
-import array
-import struct
-import time
-
-from twisted.internet.protocol import DatagramProtocol
-from twisted.internet.task import LoopingCall
-
-
-class UdpListener(DatagramProtocol):
- """
- Class that will listen for incoming UDP packets
- """
- def __init__(self):
- self.stats = []
-
- def datagramReceived(self, data, _1_2):
- """This function is called each time datagram is received"""
- try:
- self.stats.append(struct.unpack_from("Q", data, 0))
- except struct.error:
- pass # ignore packets that are less than 8 bytes in size
-
- def getResults(self):
- """Returns number of packets that were actually received"""
- return len(self.stats)
-
-
-class UdpSender(DatagramProtocol):
- """
- Class that will send UDP packets to UDP Listener
- """
- def __init__(self, host, count, size, duration):
- # LoopingCall does not know whether UDP socket is actually writable
- self.looper = None
- self.host = host
- self.count = count
- self.duration = duration
- self.start = time.time()
- self.sent = 0
- self.data = array.array('c', 'X' * size)
-
- def startProtocol(self):
- self.looper = LoopingCall(self.sendData)
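- # Pace the sends so that 'count' datagrams are spread evenly
- # over 'duration' seconds.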
- period = self.duration / float(self.count)
- self.looper.start(period, now=False)
-
- def stopProtocol(self):
- if (self.looper is not None):
- self.looper.stop()
- self.looper = None
-
- def datagramReceived(self, data, host_port):
- pass
-
- def sendData(self):
- """This function is called from LoopingCall"""
- if self.start + self.duration < time.time():
- self.looper.stop()
- self.looper = None
-
- self.sent += 1
- struct.pack_into('Q', self.data, 0, self.sent)
- self.transport.write(self.data, self.host)
-
- def getResults(self):
- """Returns number of packets that were sent"""
- return self.sent
diff --git a/python/ovstest/util.py b/python/ovstest/util.py
deleted file mode 100644
index db2ae989a..000000000
--- a/python/ovstest/util.py
+++ /dev/null
@@ -1,253 +0,0 @@
-# Copyright (c) 2011, 2012, 2017 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-util module contains some helper functions
-"""
-import array
-import fcntl
-
-import os
-import re
-import select
-import signal
-import socket
-import struct
-import subprocess
-
-import exceptions
-
-import six.moves.xmlrpc_client
-from six.moves import range
-
-
-def str_ip(ip_address):
- """
- Converts an IP address from binary format to a string.
- """
- (x1, x2, x3, x4) = struct.unpack("BBBB", ip_address)
- return ("%u.%u.%u.%u") % (x1, x2, x3, x4)
-
-
-def get_interface_mtu(iface):
- """
- Returns MTU of the given interface.
- """
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- indata = iface + ('\0' * (32 - len(iface)))
- try:
- outdata = fcntl.ioctl(s.fileno(), 0x8921, indata) # socket.SIOCGIFMTU
- mtu = struct.unpack("16si12x", outdata)[1]
- except:
- return 0
-
- return mtu
-
-
-def get_interface(address):
- """
- Finds first interface that has given address
- """
- bytes = 256 * 32
- s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- names = array.array('B', '\0' * bytes)
- outbytes = struct.unpack('iL', fcntl.ioctl(
- s.fileno(),
- 0x8912, # SIOCGIFCONF
- struct.pack('iL', bytes, names.buffer_info()[0])
- ))[0]
- namestr = names.tostring()
-
- for i in range(0, outbytes, 40):
- name = namestr[i:i + 16].split('\0', 1)[0]
- if address == str_ip(namestr[i + 20:i + 24]):
- return name
- return None # did not find interface we were looking for
-
-
-def uname():
- os_info = os.uname()
- return os_info[2] # return only the kernel version number
-
-
-def start_process(args):
- try:
- p = subprocess.Popen(args,
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = p.communicate()
- return (p.returncode, out, err)
- except exceptions.OSError:
- return (-1, None, None)
-
-
-def get_driver(iface):
- ret, out, _err = start_process(["ethtool", "-i", iface])
- if ret == 0:
- lines = out.splitlines()
- driver = "%s(%s)" % (lines[0], lines[1]) # driver name + version
- else:
- driver = None
- return driver
-
-
-def interface_up(iface):
- """
- This function brings given iface up.
- """
- ret, _out, _err = start_process(["ip", "link", "set", iface, "up"])
- return ret
-
-
-def interface_assign_ip(iface, ip_addr, mask):
- """
- This function adds an IP address to an interface. If mask is None
- then a mask will be selected automatically. In case of success
- this function returns 0.
- """
- interface_ip_op(iface, ip_addr, mask, "add")
-
-
-def interface_remove_ip(iface, ip_addr, mask):
- """
- This function removes an IP address from an interface. If mask is
- None then a mask will be selected automatically. In case of
- success this function returns 0.
- """
- interface_ip_op(iface, ip_addr, mask, "del")
-
-
-def interface_ip_op(iface, ip_addr, mask, op):
- if mask is not None:
- arg = "%s/%s" % (ip_addr, mask)
- elif '/' in ip_addr:
- arg = ip_addr
- else:
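- # No mask given: fall back to a classful default based on the
- # first octet (class A -> /8, class B -> /16, otherwise /24).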
- (x1, x2, x3, x4) = struct.unpack("BBBB", socket.inet_aton(ip_addr))
- if x1 < 128:
- arg = "%s/8" % ip_addr
- elif x1 < 192:
- arg = "%s/16" % ip_addr
- else:
- arg = "%s/24" % ip_addr
- ret, _out, _err = start_process(["ip", "addr", op, arg, "dev", iface])
- return ret
-
-
-def interface_get_ip(iface):
- """
- This function returns a tuple - the IP and mask that were assigned to the
- interface.
- """
- args = ["ip", "addr", "show", iface]
- ret, out, _err = start_process(args)
-
- if ret == 0:
- ip = re.search(r'inet (\S+)/(\S+)', out)
- if ip is not None:
- return (ip.group(1), ip.group(2))
- else:
- return ret
-
-
-def move_routes(iface1, iface2):
- """
- This function moves routes from iface1 to iface2.
- """
- args = ["ip", "route", "show", "dev", iface1]
- ret, out, _err = start_process(args)
- if ret == 0:
- for route in out.splitlines():
- args = ["ip", "route", "replace", "dev", iface2] + route.split()
- start_process(args)
-
-
-def get_interface_from_routing_decision(ip):
- """
- This function returns the interface through which the given ip address
- is reachable.
- """
- args = ["ip", "route", "get", ip]
- ret, out, _err = start_process(args)
- if ret == 0:
- iface = re.search(r'dev (\S+)', out)
- if iface:
- return iface.group(1)
- return None
-
-
-def rpc_client(ip, port):
- return six.moves.xmlrpc_client.Server("http://%s:%u/" % (ip, port),
- allow_none=True)
-
-
-def sigint_intercept():
- """
- Intercept SIGINT from child (the local ovs-test server process).
- """
- signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-
-def start_local_server(port):
- """
- This function spawns an ovs-test server that listens on the specified port
- and blocks until the spawned ovs-test server is ready to accept XML RPC
- connections.
- """
- p = subprocess.Popen(["ovs-test", "-s", str(port)],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE,
- preexec_fn=sigint_intercept)
- fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(p.stdout.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
-
- while p.poll() is None:
- fd = select.select([p.stdout.fileno()], [], [])[0]
- if fd:
- out = p.stdout.readline()
- if out.startswith("Starting RPC server"):
- break
- if p.poll() is not None:
- raise RuntimeError("Couldn't start local instance of ovs-test server")
- return p
-
-
-def get_datagram_sizes(mtu1, mtu2):
- """
- This function calculates all the "interesting" datagram sizes so that
- we test both the receive and the send side with different packet sizes.
- """
- s1 = set([8, mtu1 - 100, mtu1 - 28, mtu1])
- s2 = set([8, mtu2 - 100, mtu2 - 28, mtu2])
- return sorted(s1.union(s2))
-
-
-def ip_from_cidr(string):
- """
- This function removes the netmask (if present) from the given string and
- returns the IP address.
- """
- token = string.split("/")
- return token[0]
-
-
-def bandwidth_to_string(bwidth):
- """Convert bandwidth from long to string and add units."""
- bwidth = bwidth * 8 # Convert back to bits/second
- if bwidth >= 10000000:
- return str(int(bwidth / 1000000)) + "Mbps"
- elif bwidth > 10000:
- return str(int(bwidth / 1000)) + "Kbps"
- else:
- return str(int(bwidth)) + "bps"
diff --git a/python/ovstest/vswitch.py b/python/ovstest/vswitch.py
deleted file mode 100644
index 9d5b5cffd..000000000
--- a/python/ovstest/vswitch.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (c) 2012 Nicira, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""
-vswitch module allows its callers to interact with OVS DB.
-"""
-import util
-
-
-def ovs_vsctl_add_bridge(bridge):
- """
- This function creates an OVS bridge.
- """
- ret, _out, _err = util.start_process(["ovs-vsctl", "add-br", bridge])
- return ret
-
-
-def ovs_vsctl_del_bridge(bridge):
- """
- This function deletes the OVS bridge.
- """
- ret, _out, _err = util.start_process(["ovs-vsctl", "del-br", bridge])
- return ret
-
-
-def ovs_vsctl_del_pbridge(bridge, iface):
- """
- This function deletes the OVS bridge and assigns the bridge IP address
- back to the iface.
- """
- (ip_addr, mask) = util.interface_get_ip(bridge)
- util.interface_assign_ip(iface, ip_addr, mask)
- util.interface_up(iface)
- util.move_routes(bridge, iface)
- return ovs_vsctl_del_bridge(bridge)
-
-
-def ovs_vsctl_is_ovs_bridge(bridge):
- """
- This function verifies whether the given port is an OVS bridge. If it is
- an OVS bridge, then it returns True.
- """
- ret, _out, _err = util.start_process(["ovs-vsctl", "br-exists", bridge])
- return ret == 0
-
-
-def ovs_vsctl_add_port_to_bridge(bridge, iface):
- """
- This function adds given interface to the bridge.
- """
- ret, _out, _err = util.start_process(["ovs-vsctl", "add-port", bridge,
- iface])
- return ret
-
-
-def ovs_vsctl_del_port_from_bridge(port):
- """
- This function removes given port from a OVS bridge.
- """
- ret, _out, _err = util.start_process(["ovs-vsctl", "del-port", port])
- return ret
-
-
-def ovs_vsctl_set(table, record, column, key, value):
- """
- This function allows the caller to alter the OVS database. If 'column' is
- a map, then the caller should also set 'key'; otherwise 'key' should be
- None.
- """
- if key is None:
- index = column
- else:
- index = "%s:%s" % (column, key)
- index_value = "%s=%s" % (index, value)
- ret, _out, _err = util.start_process(["ovs-vsctl", "set", table, record,
- index_value])
- return ret
-
-
-def ovs_get_physical_interface(bridge):
- """
- This function tries to figure out which physical interface belongs to
- the bridge. If multiple physical interfaces are assigned to this bridge,
- then it returns the first match.
- """
- ret, out, _err = util.start_process(["ovs-vsctl", "list-ifaces", bridge])
-
- if ret == 0:
- ifaces = out.splitlines()
- for iface in ifaces:
- ret, out, _err = util.start_process(["ovs-vsctl", "get",
- "Interface", iface, "type"])
- if ret == 0:
- if ('""' in out) or ('system' in out):
- return iface # this should be the physical interface
- return None
diff --git a/python/setup.py b/python/setup.py
deleted file mode 100644
index b7252800c..000000000
--- a/python/setup.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at:
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from __future__ import print_function
-
-import sys
-
-from distutils.command.build_ext import build_ext
-from distutils.errors import CCompilerError, DistutilsExecError, \
- DistutilsPlatformError
-
-import setuptools
-
-VERSION = "unknown"
-
-try:
- # Try to set the version from the generated ovs/version.py
- exec(open("ovs/version.py").read())
-except IOError:
- print("Ensure version.py is created by running make python/ovs/version.py",
- file=sys.stderr)
- sys.exit(-1)
-
-ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
-if sys.platform == 'win32':
- ext_errors += (IOError, ValueError)
-
-
-class BuildFailed(Exception):
- pass
-
-
-class try_build_ext(build_ext):
- # This class allows C extension building to fail
- # NOTE: build_ext is not a new-style class
-
- def run(self):
- try:
- build_ext.run(self)
- except DistutilsPlatformError:
- raise BuildFailed()
-
- def build_extension(self, ext):
- try:
- build_ext.build_extension(self, ext)
- except ext_errors:
- raise BuildFailed()
-
-
-setup_args = dict(
- name='ovs',
- description='Open vSwitch library',
- version=VERSION,
- url='http://www.openvswitch.org/',
- author='Open vSwitch',
- author_email='dev at openvswitch.org',
- packages=['ovs', 'ovs.compat', 'ovs.compat.sortedcontainers',
- 'ovs.db', 'ovs.unixctl'],
- keywords=['openvswitch', 'ovs', 'OVSDB'],
- license='Apache 2.0',
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Topic :: Database :: Front-Ends',
- 'Topic :: Software Development :: Libraries :: Python Modules',
- 'Topic :: System :: Networking',
- 'License :: OSI Approved :: Apache Software License',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
- ],
- ext_modules=[setuptools.Extension("ovs._json", sources=["ovs/_json.c"],
- libraries=['openvswitch'])],
- cmdclass={'build_ext': try_build_ext},
- install_requires=['sortedcontainers'],
- extras_require={':sys_platform == "win32"': ['pywin32 >= 1.0']},
-)
-
-try:
- setuptools.setup(**setup_args)
-except BuildFailed:
- BUILD_EXT_WARNING = ("WARNING: The C extension could not be compiled, "
- "speedups are not enabled.")
- print("*" * 75)
- print(BUILD_EXT_WARNING)
- print("Failure information, if any, is above.")
- print("Retrying the build without the C extension.")
- print("*" * 75)
-
- del(setup_args['cmdclass'])
- del(setup_args['ext_modules'])
- setuptools.setup(**setup_args)
diff --git a/tests/ovn-controller-vtep.at b/tests/ovn-controller-vtep.at
index e8c731fca..d4744d76e 100644
--- a/tests/ovn-controller-vtep.at
+++ b/tests/ovn-controller-vtep.at
@@ -53,6 +53,8 @@ m4_define([OVN_CONTROLLER_VTEP_START],
dnl Start ovs-vtep.
AT_CHECK([vtep-ctl add-ps br-vtep -- set Physical_Switch br-vtep tunnel_ips=1.2.3.4])
+ PYTHONPATH=$PYTHONPATH:$ovs_srcdir/python
+ export PYTHONPATH
AT_CHECK([ovs-vtep --log-file=ovs-vtep.log --pidfile=ovs-vtep.pid --detach --no-chdir br-vtep \], [0], [], [stderr])
on_exit "kill `cat ovs-vtep.pid`"
AT_CHECK([[sed < stderr '
--
2.21.0