author     Anthon van der Neut <anthon@mnt.org>   2018-08-03 22:14:57 +0200
committer  Anthon van der Neut <anthon@mnt.org>   2018-08-03 22:14:57 +0200
commit     dce10fcff1de54121fb8b440b883ef5d3fe2f96a (patch)
tree       072b4bd247e6f1cd95c08c7b67fea0fc96f0578e
parent     2966a4f215861fa05e0dc7e0cd53350766e794c6 (diff)
download   ruamel.yaml-dce10fcff1de54121fb8b440b883ef5d3fe2f96a.tar.gz
Apply oitnb and mypy 0.620, then make everything work again  (tag: 0.15.48)
-rw-r--r--  CHANGES  3
-rw-r--r--  README.rst  9
-rw-r--r--  __init__.py  70
-rw-r--r--  _doc/_static/pypi.svg  2
-rw-r--r--  _doc/conf.py  35
-rw-r--r--  _test/lib/canonical.py  60
-rw-r--r--  _test/lib/test_all.py  2
-rw-r--r--  _test/lib/test_appliance.py  27
-rw-r--r--  _test/lib/test_build.py  7
-rw-r--r--  _test/lib/test_build_ext.py  7
-rw-r--r--  _test/lib/test_canonical.py  3
-rw-r--r--  _test/lib/test_constructor.py  80
-rw-r--r--  _test/lib/test_emitter.py  42
-rw-r--r--  _test/lib/test_errors.py  26
-rw-r--r--  _test/lib/test_input_output.py  78
-rw-r--r--  _test/lib/test_mark.py  4
-rw-r--r--  _test/lib/test_reader.py  5
-rw-r--r--  _test/lib/test_recursive.py  15
-rw-r--r--  _test/lib/test_representer.py  19
-rw-r--r--  _test/lib/test_resolver.py  28
-rw-r--r--  _test/lib/test_structure.py  36
-rw-r--r--  _test/lib/test_tokens.py  8
-rw-r--r--  _test/lib/test_yaml.py  23
-rw-r--r--  _test/lib/test_yaml_ext.py  110
-rw-r--r--  _test/roundtrip.py  100
-rw-r--r--  _test/test_a_dedent.py  51
-rw-r--r--  _test/test_add_xxx.py  50
-rw-r--r--  _test/test_anchor.py  132
-rw-r--r--  _test/test_api_change.py  49
-rw-r--r--  _test/test_class_register.py  51
-rw-r--r--  _test/test_collections.py  5
-rw-r--r--  _test/test_comment_manipulation.py  359
-rw-r--r--  _test/test_comments.py  401
-rw-r--r--  _test/test_copy.py  56
-rw-r--r--  _test/test_cyaml.py  20
-rw-r--r--  _test/test_datetime.py  113
-rw-r--r--  _test/test_deprecation.py  2
-rw-r--r--  _test/test_documents.py  61
-rw-r--r--  _test/test_fail.py  97
-rw-r--r--  _test/test_float.py  79
-rw-r--r--  _test/test_flowsequencekey.py  9
-rw-r--r--  _test/test_indentation.py  219
-rw-r--r--  _test/test_int.py  48
-rw-r--r--  _test/test_issues.py  7
-rw-r--r--  _test/test_json_numbers.py  15
-rw-r--r--  _test/test_line_col.py  42
-rw-r--r--  _test/test_literal.py  142
-rw-r--r--  _test/test_none.py  17
-rw-r--r--  _test/test_numpy.py  1
-rw-r--r--  _test/test_program_config.py  18
-rw-r--r--  _test/test_string.py  120
-rw-r--r--  _test/test_tag.py  62
-rw-r--r--  _test/test_version.py  57
-rw-r--r--  _test/test_yamlfile.py  104
-rw-r--r--  _test/test_yamlobject.py  2
-rw-r--r--  _test/test_z_check_debug_leftovers.py  12
-rw-r--r--  _test/test_z_data.py  11
-rw-r--r--  comments.py  80
-rw-r--r--  compat.py  23
-rw-r--r--  composer.py  84
-rw-r--r--  configobjwalker.py  2
-rw-r--r--  constructor.py  642
-rw-r--r--  cyaml.py  174
-rw-r--r--  dumper.py  282
-rw-r--r--  emitter.py  404
-rw-r--r--  error.py  128
-rw-r--r--  events.py  74
-rw-r--r--  loader.py  28
-rw-r--r--  main.py  540
-rw-r--r--  nodes.py  64
-rw-r--r--  parser.py  205
-rw-r--r--  reader.py  91
-rw-r--r--  representer.py  315
-rw-r--r--  resolver.py  86
-rw-r--r--  scalarfloat.py  56
-rw-r--r--  scalarint.py  29
-rw-r--r--  scalarstring.py  10
-rw-r--r--  scanner.py  692
-rw-r--r--  serializer.py  103
-rw-r--r--  setup.py  284
-rw-r--r--  timestamp.py  3
-rw-r--r--  tokens.py  20
-rw-r--r--  tox.ini  12
-rw-r--r--  util.py  19
84 files changed, 4821 insertions, 2840 deletions
diff --git a/CHANGES b/CHANGES
index 082da98..3fc9b84 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,6 @@
+[0, 15, 48]: 2018-08-03
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
[0, 15, 47]: 2018-07-31
- fix broken 3.6 manylinux1 (result of an unclean ``build`` (reported by
`Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
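For orientation, the version bump recorded here is also visible at runtime through the package metadata updated in the __init__.py hunk further down; a minimal check, assuming a normal install of this release:

import ruamel.yaml

print(ruamel.yaml.__version__)       # '0.15.48' for this release
print(ruamel.yaml.version_info)      # (0, 15, 48)
print(ruamel.yaml.__with_libyaml__)  # True when the _ruamel_yaml C extension imported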
diff --git a/README.rst b/README.rst
index 78971a4..2f91868 100644
--- a/README.rst
+++ b/README.rst
@@ -4,8 +4,8 @@ ruamel.yaml
``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python.
-:version: 0.15.47
-:updated: 2018-07-31
+:version: 0.15.48
+:updated: 2018-08-03
:documentation: http://yaml.readthedocs.io
:repository: https://bitbucket.org/ruamel/
:pypi: https://pypi.org/project/ruamel.yaml/
@@ -46,12 +46,17 @@ when the status of the API is stable enough to make the transition.
.. image:: https://bitbucket.org/ruamel/oitnb/raw/default/_doc/_static/oitnb.svg
:target: https://pypi.org/project/oitnb/
+.. image:: http://www.mypy-lang.org/static/mypy_badge.svg
+ :target: http://mypy-lang.org/
ChangeLog
=========
.. should insert NEXT: at the beginning of line for next key (with empty line)
+0.15.48 (2018-08-03):
+ - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity
+
0.15.47 (2018-07-31):
- fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by
`Roman Sichnyi <https://bitbucket.org/rsichnyi-gl/>`__)
diff --git a/__init__.py b/__init__.py
index e5363cc..c675a8e 100644
--- a/__init__.py
+++ b/__init__.py
@@ -7,24 +7,33 @@ if False: # MYPY
_package_data = dict(
full_package_name='ruamel.yaml',
- version_info=(0, 15, 47),
- __version__='0.15.47',
+ version_info=(0, 15, 48),
+ __version__='0.15.48',
author='Anthon van der Neut',
author_email='a.van.der.neut@ruamel.eu',
description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA
entry_points=None,
since=2014,
- extras_require={':platform_python_implementation=="CPython" and python_version<="2.7"': [
- 'ruamel.ordereddict',
- ], 'jinja2': ['ruamel.yaml.jinja2>=0.2'], 'docs': ['ryd']},
- ext_modules=[dict(
+ extras_require={
+ ':platform_python_implementation=="CPython" and python_version<="2.7"': [
+ 'ruamel.ordereddict'
+ ],
+ 'jinja2': ['ruamel.yaml.jinja2>=0.2'],
+ 'docs': ['ryd'],
+ },
+ ext_modules=[
+ dict(
name='_ruamel_yaml',
- src=['ext/_ruamel_yaml.c', 'ext/api.c', 'ext/writer.c', 'ext/dumper.c',
- 'ext/loader.c',
- 'ext/reader.c',
- 'ext/scanner.c',
- 'ext/parser.c',
- 'ext/emitter.c',
+ src=[
+ 'ext/_ruamel_yaml.c',
+ 'ext/api.c',
+ 'ext/writer.c',
+ 'ext/dumper.c',
+ 'ext/loader.c',
+ 'ext/reader.c',
+ 'ext/scanner.c',
+ 'ext/parser.c',
+ 'ext/emitter.c',
],
lib=[],
test="""
@@ -34,20 +43,21 @@ _package_data = dict(
return 0;
}
""",
- )],
- # NOQA
- # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n', # NOQA
+ )
+ ],
+ # NOQA
+ # test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n', # NOQA
classifiers=[
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3.4',
- 'Programming Language :: Python :: 3.5',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Programming Language :: Python :: Implementation :: CPython',
- 'Programming Language :: Python :: Implementation :: PyPy',
- 'Programming Language :: Python :: Implementation :: Jython',
- 'Topic :: Software Development :: Libraries :: Python Modules',
- 'Topic :: Text Processing :: Markup',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Programming Language :: Python :: Implementation :: PyPy',
+ 'Programming Language :: Python :: Implementation :: Jython',
+ 'Topic :: Software Development :: Libraries :: Python Modules',
+ 'Topic :: Text Processing :: Markup',
],
keywords='yaml 1.2 parser round-trip preserve quotes order config',
windows_wheels=True,
@@ -55,12 +65,9 @@ _package_data = dict(
many_linux='libyaml-devel',
supported=[(2, 7), (3, 4)], # minimum
tox=dict(
- flake8=dict(
- version='==2.5.5',
- ),
- env='*pn', # also test narrow 2.7.15
- deps='ruamel.std.pathlib',
- fl8excl='_test/lib',
+ env=u'*pn', # also test narrow 2.7.15
+ deps=u'ruamel.std.pathlib',
+ fl8excl=u'_test/lib',
),
rtfd=47359,
) # type: Dict[Any, Any]
@@ -71,6 +78,7 @@ __version__ = _package_data['__version__']
try:
from .cyaml import * # NOQA
+
__with_libyaml__ = True
except (ImportError, ValueError): # for Jython
__with_libyaml__ = False
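The hunk header above shows the ``if False:  # MYPY`` guard that this file (and the rest of the package) uses so that typing imports are only seen by mypy and never executed at runtime; a minimal sketch of the pattern, with the imported names assumed:

if False:  # MYPY
    # only mypy evaluates this block; at runtime it is skipped entirely
    from typing import Dict, Any  # NOQA

_package_data = dict(
    full_package_name='ruamel.yaml',
    version_info=(0, 15, 48),
)  # type: Dict[Any, Any]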
diff --git a/_doc/_static/pypi.svg b/_doc/_static/pypi.svg
index 42d5a40..45c5650 100644
--- a/_doc/_static/pypi.svg
+++ b/_doc/_static/pypi.svg
@@ -1 +1 @@
-<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="86" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="86" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h33v20H0z"/><path fill="#007ec6" d="M33 0h53v20H33z"/><path fill="url(#b)" d="M0 0h86v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="175" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="230">pypi</text><text x="175" y="140" transform="scale(.1)" textLength="230">pypi</text><text x="585" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">0.15.47</text><text x="585" y="140" transform="scale(.1)" textLength="430">0.15.47</text></g> </svg>
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="86" height="20"><linearGradient id="b" x2="0" y2="100%"><stop offset="0" stop-color="#bbb" stop-opacity=".1"/><stop offset="1" stop-opacity=".1"/></linearGradient><clipPath id="a"><rect width="86" height="20" rx="3" fill="#fff"/></clipPath><g clip-path="url(#a)"><path fill="#555" d="M0 0h33v20H0z"/><path fill="#007ec6" d="M33 0h53v20H33z"/><path fill="url(#b)" d="M0 0h86v20H0z"/></g><g fill="#fff" text-anchor="middle" font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="110"> <text x="175" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="230">pypi</text><text x="175" y="140" transform="scale(.1)" textLength="230">pypi</text><text x="585" y="150" fill="#010101" fill-opacity=".3" transform="scale(.1)" textLength="430">0.15.48</text><text x="585" y="140" transform="scale(.1)" textLength="430">0.15.48</text></g> </svg>
diff --git a/_doc/conf.py b/_doc/conf.py
index 19a19a3..632b117 100644
--- a/_doc/conf.py
+++ b/_doc/conf.py
@@ -13,7 +13,7 @@
# serve to show the default.
import sys # NOQA
-import os # NOQA
+import os # NOQA
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
@@ -55,6 +55,7 @@ author = u'Anthon van der Neut'
#
try:
from ruamel.yaml import __version__, version_info # NOQA
+
# The short X.Y version.
version = '.'.join([str(l) for l in version_info[:3]])
# The full version, including alpha/beta/rc tags.
@@ -69,6 +70,7 @@ print('ruamel.yaml version', version)
try:
from ryd.__main__ import RYDCmd
from ruamel.std.pathlib import Path
+
oldargv = sys.argv
for fn in Path('.').glob('*.ryd'):
sys.argv = ['ryd', 'convert', '--no-pdf', str(fn)]
@@ -141,7 +143,7 @@ html_theme = 'default'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
-html_title = "Python YAML package documentation"
+html_title = 'Python YAML package documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
@@ -228,13 +230,10 @@ htmlhelp_basename = 'yamldoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
-
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
-
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
-
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
@@ -243,8 +242,13 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, 'yaml.tex', u'Python YAML package documentation',
- u'Anthon van der Neut', 'manual'),
+ (
+ master_doc,
+ 'yaml.tex',
+ u'Python YAML package documentation',
+ u'Anthon van der Neut',
+ 'manual',
+ )
]
# The name of an image file (relative to this directory) to place at the top of
@@ -272,10 +276,7 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'yaml', u'yaml Documentation',
- [author], 1)
-]
+man_pages = [(master_doc, 'yaml', u'yaml Documentation', [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
@@ -287,9 +288,15 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
- (master_doc, 'yaml', u'yaml Documentation',
- author, 'yaml', 'One line description of project.',
- 'Miscellaneous'),
+ (
+ master_doc,
+ 'yaml',
+ u'yaml Documentation',
+ author,
+ 'yaml',
+ 'One line description of project.',
+ 'Miscellaneous',
+ )
]
# Documents to append as an appendix to all manuals.
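As context for the try/except near the top of this hunk, conf.py derives the Sphinx version strings from the package's version_info tuple; the one-liner in isolation:

version_info = (0, 15, 48)  # shape as set in __init__.py
version = '.'.join([str(l) for l in version_info[:3]])  # short version string for Sphinx
assert version == '0.15.48'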
diff --git a/_test/lib/canonical.py b/_test/lib/canonical.py
index 64f3153..af2c3cf 100644
--- a/_test/lib/canonical.py
+++ b/_test/lib/canonical.py
@@ -11,7 +11,6 @@ class CanonicalError(ruamel.yaml.YAMLError):
class CanonicalScanner:
-
def __init__(self, data):
try:
if PY3:
@@ -20,7 +19,7 @@ class CanonicalScanner:
else:
data = unicode(data, 'utf-8') # NOQA
except UnicodeDecodeError:
- raise CanonicalError("utf-8 stream is expected")
+ raise CanonicalError('utf-8 stream is expected')
self.data = data + u'\0'
self.index = 0
self.tokens = []
@@ -48,7 +47,7 @@ class CanonicalScanner:
self.scan()
token = self.tokens.pop(0)
if choice and not isinstance(token, choice):
- raise CanonicalError("unexpected token " + repr(token))
+ raise CanonicalError('unexpected token ' + repr(token))
return token
def get_token_value(self):
@@ -65,7 +64,7 @@ class CanonicalScanner:
break
elif ch == u'%':
self.tokens.append(self.scan_directive())
- elif ch == u'-' and self.data[self.index:self.index + 3] == u'---':
+ elif ch == u'-' and self.data[self.index : self.index + 3] == u'---':
self.index += 3
self.tokens.append(ruamel.yaml.DocumentStartToken(None, None))
elif ch == u'[':
@@ -96,18 +95,20 @@ class CanonicalScanner:
elif ch == u'"':
self.tokens.append(self.scan_scalar())
else:
- raise CanonicalError("invalid token")
+ raise CanonicalError('invalid token')
self.scanned = True
DIRECTIVE = u'%YAML 1.1'
def scan_directive(self):
- if self.data[self.index:self.index + len(self.DIRECTIVE)] == self.DIRECTIVE and \
- self.data[self.index + len(self.DIRECTIVE)] in u' \n\0':
+ if (
+ self.data[self.index : self.index + len(self.DIRECTIVE)] == self.DIRECTIVE
+ and self.data[self.index + len(self.DIRECTIVE)] in u' \n\0'
+ ):
self.index += len(self.DIRECTIVE)
return ruamel.yaml.DirectiveToken('YAML', (1, 1), None, None)
else:
- raise CanonicalError("invalid directive")
+ raise CanonicalError('invalid directive')
def scan_alias(self):
if self.data[self.index] == u'*':
@@ -118,7 +119,7 @@ class CanonicalScanner:
start = self.index
while self.data[self.index] not in u', \n\0':
self.index += 1
- value = self.data[start:self.index]
+ value = self.data[start : self.index]
return TokenClass(value, None, None)
def scan_tag(self):
@@ -126,7 +127,7 @@ class CanonicalScanner:
start = self.index
while self.data[self.index] not in u' \n\0':
self.index += 1
- value = self.data[start:self.index]
+ value = self.data[start : self.index]
if not value:
value = u'!'
elif value[0] == u'!':
@@ -137,15 +138,11 @@ class CanonicalScanner:
value = u'!' + value
return ruamel.yaml.TagToken(value, None, None)
- QUOTE_CODES = {
- 'x': 2,
- 'u': 4,
- 'U': 8,
- }
+ QUOTE_CODES = {'x': 2, 'u': 4, 'U': 8}
QUOTE_REPLACES = {
u'\\': u'\\',
- u'\"': u'\"',
+ u'"': u'"',
u' ': u' ',
u'a': u'\x07',
u'b': u'\x08',
@@ -160,7 +157,6 @@ class CanonicalScanner:
u'P': u'\u2029',
u'_': u'_',
u'0': u'\x00',
-
}
def scan_scalar(self):
@@ -171,7 +167,7 @@ class CanonicalScanner:
while self.data[self.index] != u'"':
if self.data[self.index] == u'\\':
ignore_spaces = False
- chunks.append(self.data[start:self.index])
+ chunks.append(self.data[start : self.index])
self.index += 1
ch = self.data[self.index]
self.index += 1
@@ -179,16 +175,16 @@ class CanonicalScanner:
ignore_spaces = True
elif ch in self.QUOTE_CODES:
length = self.QUOTE_CODES[ch]
- code = int(self.data[self.index:self.index + length], 16)
+ code = int(self.data[self.index : self.index + length], 16)
chunks.append(unichr(code))
self.index += length
else:
if ch not in self.QUOTE_REPLACES:
- raise CanonicalError("invalid escape code")
+ raise CanonicalError('invalid escape code')
chunks.append(self.QUOTE_REPLACES[ch])
start = self.index
elif self.data[self.index] == u'\n':
- chunks.append(self.data[start:self.index])
+ chunks.append(self.data[start : self.index])
chunks.append(u' ')
self.index += 1
start = self.index
@@ -199,9 +195,9 @@ class CanonicalScanner:
else:
ignore_spaces = False
self.index += 1
- chunks.append(self.data[start:self.index])
+ chunks.append(self.data[start : self.index])
self.index += 1
- return ruamel.yaml.ScalarToken(u''.join(chunks), False, None, None)
+ return ruamel.yaml.ScalarToken("".join(chunks), False, None, None)
def find_token(self):
found = False
@@ -233,7 +229,7 @@ class CanonicalParser:
if self.check_token(ruamel.yaml.DirectiveToken, ruamel.yaml.DocumentStartToken):
self.parse_document()
else:
- raise CanonicalError("document is expected, got " + repr(self.tokens[0]))
+ raise CanonicalError('document is expected, got ' + repr(self.tokens[0]))
self.get_token(ruamel.yaml.StreamEndToken)
self.events.append(ruamel.yaml.StreamEndEvent(None, None))
@@ -259,8 +255,11 @@ class CanonicalParser:
if self.check_token(ruamel.yaml.TagToken):
tag = self.get_token_value()
if self.check_token(ruamel.yaml.ScalarToken):
- self.events.append(ruamel.yaml.ScalarEvent(anchor, tag, (False, False),
- self.get_token_value(), None, None))
+ self.events.append(
+ ruamel.yaml.ScalarEvent(
+ anchor, tag, (False, False), self.get_token_value(), None, None
+ )
+ )
elif self.check_token(ruamel.yaml.FlowSequenceStartToken):
self.events.append(ruamel.yaml.SequenceStartEvent(anchor, tag, None, None))
self.parse_sequence()
@@ -268,8 +267,9 @@ class CanonicalParser:
self.events.append(ruamel.yaml.MappingStartEvent(anchor, tag, None, None))
self.parse_mapping()
else:
- raise CanonicalError("SCALAR, '[', or '{' is expected, got " +
- repr(self.tokens[0]))
+ raise CanonicalError(
+ "SCALAR, '[', or '{' is expected, got " + repr(self.tokens[0])
+ )
# sequence: SEQUENCE-START (node (ENTRY node)*)? ENTRY? SEQUENCE-END
def parse_sequence(self):
@@ -328,9 +328,7 @@ class CanonicalParser:
return self.events[0]
-class CanonicalLoader(CanonicalScanner, CanonicalParser,
- Composer, Constructor, Resolver):
-
+class CanonicalLoader(CanonicalScanner, CanonicalParser, Composer, Constructor, Resolver):
def __init__(self, stream):
if hasattr(stream, 'read'):
stream = stream.read()
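The QUOTE_CODES table collapsed onto one line above drives the escape handling in scan_scalar: \x, \u and \U escapes are followed by 2, 4 and 8 hex digits respectively. A standalone sketch of just that decoding step (not the scanner itself):

QUOTE_CODES = {'x': 2, 'u': 4, 'U': 8}

def decode_hex_escape(ch, rest):
    # ch is the escape letter, rest is the buffer immediately after it
    length = QUOTE_CODES[ch]
    return chr(int(rest[:length], 16)), rest[length:]

assert decode_hex_escape('x', '41...') == ('A', '...')
assert decode_hex_escape('u', '00e9...') == (u'\xe9', '...')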
diff --git a/_test/lib/test_all.py b/_test/lib/test_all.py
index 9e951eb..8099ec8 100644
--- a/_test/lib/test_all.py
+++ b/_test/lib/test_all.py
@@ -7,9 +7,11 @@ import test_appliance
def main(args=None):
collections = []
import test_yaml
+
collections.append(test_yaml)
if ruamel.yaml.__with_libyaml__:
import test_yaml_ext
+
collections.append(test_yaml_ext)
test_appliance.run(collections, args)
diff --git a/_test/lib/test_appliance.py b/_test/lib/test_appliance.py
index a96052d..137c271 100644
--- a/_test/lib/test_appliance.py
+++ b/_test/lib/test_appliance.py
@@ -44,31 +44,34 @@ def find_test_filenames(directory):
def parse_arguments(args):
""""""
- parser = argparse.ArgumentParser(usage=""" run the yaml tests. By default
+ parser = argparse.ArgumentParser(
+ usage=""" run the yaml tests. By default
all functions on all appropriate test_files are run. Functions have
unittest attributes that determine the required extensions to filenames
that need to be available in order to run that test. E.g.\n\n
python test_yaml.py test_constructor_types\n
python test_yaml.py --verbose test_tokens spec-02-05\n\n
The presence of an extension in the .skip attribute of a function
- disables the test for that function.""")
+ disables the test for that function."""
+ )
# ToDo: make into int and test > 0 in functions
parser.add_argument(
- '--verbose', '-v', action="store_true",
+ '--verbose',
+ '-v',
+ action='store_true',
default='YAML_TEST_VERBOSE' in os.environ,
- help="set verbosity output",
+ help='set verbosity output',
)
parser.add_argument(
- '--list-functions', action="store_true",
+ '--list-functions',
+ action='store_true',
help="""list all functions with required file extensions for test files
- """
- )
- parser.add_argument(
- 'function', nargs='?',
- help="""restrict function to run""",
+ """,
)
+ parser.add_argument('function', nargs='?', help="""restrict function to run""")
parser.add_argument(
- 'filenames', nargs='*',
+ 'filenames',
+ nargs='*',
help="""basename of filename set, extensions (.code, .data) have to
be a superset of those in the unittest attribute of the selected
function""",
@@ -194,7 +197,7 @@ def run(collections, args=None):
results = []
for function in test_functions:
if include_functions and function.__name__ not in include_functions:
- continue
+ continue
if function.unittest:
for base, exts in test_filenames:
if include_filenames and base not in include_filenames:
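The help text above alludes to the convention the appliance relies on: each test function carries a ``.unittest`` list of required file extensions and an optional ``.skip`` list. A hedged sketch with a made-up function name:

def test_something(data_filename, canonical_filename, verbose=False):
    ...  # a real test body goes here

test_something.unittest = ['.data', '.canonical']  # fixture extensions that must exist
test_something.skip = ['.empty']                   # extensions that disable the test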
diff --git a/_test/lib/test_build.py b/_test/lib/test_build.py
index 5d19e3a..f7837eb 100644
--- a/_test/lib/test_build.py
+++ b/_test/lib/test_build.py
@@ -3,11 +3,14 @@ if __name__ == '__main__':
import sys
import os
import distutils.util
+
build_lib = 'build/lib'
- build_lib_ext = os.path.join('build', 'lib.%s-%s' % (distutils.util.get_platform(),
- sys.version[0:3]))
+ build_lib_ext = os.path.join(
+ 'build', 'lib.%s-%s' % (distutils.util.get_platform(), sys.version[0:3])
+ )
sys.path.insert(0, build_lib)
sys.path.insert(0, build_lib_ext)
import test_yaml
import test_appliance
+
test_appliance.run(test_yaml)
diff --git a/_test/lib/test_build_ext.py b/_test/lib/test_build_ext.py
index 92d927e..1a58fd2 100644
--- a/_test/lib/test_build_ext.py
+++ b/_test/lib/test_build_ext.py
@@ -4,11 +4,14 @@ if __name__ == '__main__':
import sys
import os
import distutils.util
+
build_lib = 'build/lib'
- build_lib_ext = os.path.join('build', 'lib.%s-%s' % (distutils.util.get_platform(),
- sys.version[0:3]))
+ build_lib_ext = os.path.join(
+ 'build', 'lib.%s-%s' % (distutils.util.get_platform(), sys.version[0:3])
+ )
sys.path.insert(0, build_lib)
sys.path.insert(0, build_lib_ext)
import test_yaml_ext
import test_appliance
+
test_appliance.run(test_yaml_ext)
diff --git a/_test/lib/test_canonical.py b/_test/lib/test_canonical.py
index 682f6ab..48a1764 100644
--- a/_test/lib/test_canonical.py
+++ b/_test/lib/test_canonical.py
@@ -40,7 +40,7 @@ def test_canonical_error(data_filename, canonical_filename, verbose=False):
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_canonical_error.unittest = ['.data', '.canonical']
@@ -48,4 +48,5 @@ test_canonical_error.skip = ['.empty']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_constructor.py b/_test/lib/test_constructor.py
index d10ea4d..a66ff1a 100644
--- a/_test/lib/test_constructor.py
+++ b/_test/lib/test_constructor.py
@@ -6,6 +6,7 @@ import pprint
from ruamel.yaml.compat import PY2
import datetime
+
try:
set
except NameError:
@@ -20,10 +21,7 @@ def execute(code):
def _make_objects():
- global MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3, YAMLobject1, \
- YAMLobject2, AnObject, AnInstance, AState, ACustomState, InitArgs, InitArgsWithState, \
- NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict, \
- FixedOffset, today, execute
+ global MyLoader, MyDumper, MyTestClass1, MyTestClass2, MyTestClass3, YAMLobject1, YAMLobject2, AnObject, AnInstance, AState, ACustomState, InitArgs, InitArgsWithState, NewArgs, NewArgsWithState, Reduce, ReduceWithState, MyInt, MyList, MyDict, FixedOffset, today, execute
class MyLoader(ruamel.yaml.Loader):
pass
@@ -48,27 +46,29 @@ def _make_objects():
return MyTestClass1(**mapping)
def represent1(representer, native):
- return representer.represent_mapping("!tag1", native.__dict__)
+ return representer.represent_mapping('!tag1', native.__dict__)
- ruamel.yaml.add_constructor("!tag1", construct1, Loader=MyLoader)
+ ruamel.yaml.add_constructor('!tag1', construct1, Loader=MyLoader)
ruamel.yaml.add_representer(MyTestClass1, represent1, Dumper=MyDumper)
class MyTestClass2(MyTestClass1, ruamel.yaml.YAMLObject):
ruamel.yaml.loader = MyLoader
ruamel.yaml.dumper = MyDumper
- ruamel.yaml.tag = "!tag2"
+ ruamel.yaml.tag = '!tag2'
def from_yaml(cls, constructor, node):
x = constructor.construct_yaml_int(node)
return cls(x=x)
+
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_scalar(cls.yaml_tag, str(native.x))
+
to_yaml = classmethod(to_yaml)
class MyTestClass3(MyTestClass2):
- ruamel.yaml.tag = "!tag3"
+ ruamel.yaml.tag = '!tag3'
def from_yaml(cls, constructor, node):
mapping = constructor.construct_mapping(node)
@@ -77,10 +77,12 @@ def _make_objects():
del mapping['=']
mapping['x'] = x
return cls(**mapping)
+
from_yaml = classmethod(from_yaml)
def to_yaml(cls, representer, native):
return representer.represent_mapping(cls.yaml_tag, native.__dict__)
+
to_yaml = classmethod(to_yaml)
class YAMLobject1(ruamel.yaml.YAMLObject):
@@ -131,12 +133,17 @@ def _make_objects():
return self
def __cmp__(self, other):
- return cmp((type(self), self.foo, self.bar, self.baz), # NOQA
- (type(other), other.foo, other.bar, other.baz))
+ return cmp(
+ (type(self), self.foo, self.bar, self.baz), # NOQA
+ (type(other), other.foo, other.bar, other.baz),
+ )
def __eq__(self, other):
- return type(self) is type(other) and \
- (self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
+ return type(self) is type(other) and (self.foo, self.bar, self.baz) == (
+ other.foo,
+ other.bar,
+ other.baz,
+ )
class AnInstance:
def __init__(self, foo=None, bar=None, baz=None):
@@ -145,20 +152,21 @@ def _make_objects():
self.baz = baz
def __cmp__(self, other):
- return cmp((type(self), self.foo, self.bar, self.baz), # NOQA
- (type(other), other.foo, other.bar, other.baz))
+ return cmp(
+ (type(self), self.foo, self.bar, self.baz), # NOQA
+ (type(other), other.foo, other.bar, other.baz),
+ )
def __eq__(self, other):
- return type(self) is type(other) and \
- (self.foo, self.bar, self.baz) == (other.foo, other.bar, other.baz)
+ return type(self) is type(other) and (self.foo, self.bar, self.baz) == (
+ other.foo,
+ other.bar,
+ other.baz,
+ )
class AState(AnInstance):
def __getstate__(self):
- return {
- '_foo': self.foo,
- '_bar': self.bar,
- '_baz': self.baz,
- }
+ return {'_foo': self.foo, '_bar': self.bar, '_baz': self.baz}
def __setstate__(self, state):
self.foo = state['_foo']
@@ -259,6 +267,7 @@ try:
from ruamel.ordereddict import ordereddict
except ImportError:
from collections import OrderedDict
+
# to get the right name import ... as ordereddict doesn't do that
class ordereddict(OrderedDict):
@@ -277,7 +286,7 @@ def _serialize_value(data):
for key, value in data.items():
key = _serialize_value(key)
value = _serialize_value(value)
- items.append("%s: %s" % (key, value))
+ items.append('%s: %s' % (key, value))
items.sort()
return '{%s}' % ', '.join(items)
elif isinstance(data, datetime.datetime):
@@ -308,16 +317,16 @@ def test_constructor_types(data_filename, code_filename, verbose=False):
pass
# print('native1', native1)
if verbose:
- print("SERIALIZED NATIVE1:")
+ print('SERIALIZED NATIVE1:')
print(_serialize_value(native1))
- print("SERIALIZED NATIVE2:")
+ print('SERIALIZED NATIVE2:')
print(_serialize_value(native2))
assert _serialize_value(native1) == _serialize_value(native2), (native1, native2)
finally:
if verbose:
- print("NATIVE1:")
+ print('NATIVE1:')
pprint.pprint(native1)
- print("NATIVE2:")
+ print('NATIVE2:')
pprint.pprint(native2)
@@ -327,23 +336,28 @@ test_constructor_types.unittest = ['.data', '.code']
def test_roundtrip_data(code_filename, roundtrip_filename, verbose=False):
_make_objects()
with open(code_filename, 'rb') as fp0:
- value1 = fp0 .read()
+ value1 = fp0.read()
native2 = list(ruamel.yaml.load_all(value1, Loader=MyLoader))
if len(native2) == 1:
native2 = native2[0]
try:
- value2 = ruamel.yaml.dump(native2, Dumper=MyDumper, default_flow_style=False,
- allow_unicode=True, encoding='utf-8')
+ value2 = ruamel.yaml.dump(
+ native2,
+ Dumper=MyDumper,
+ default_flow_style=False,
+ allow_unicode=True,
+ encoding='utf-8',
+ )
# value2 += x
if verbose:
- print("SERIALIZED NATIVE1:")
+ print('SERIALIZED NATIVE1:')
print(value1)
- print("SERIALIZED NATIVE2:")
+ print('SERIALIZED NATIVE2:')
print(value2)
assert value1 == value2, (value1, value2)
finally:
if verbose:
- print("NATIVE2:")
+ print('NATIVE2:')
pprint.pprint(native2)
@@ -353,6 +367,8 @@ test_roundtrip_data.unittest = ['.data', '.roundtrip']
if __name__ == '__main__':
import sys
import test_constructor # NOQA
+
sys.modules['test_constructor'] = sys.modules['__main__']
import test_appliance
+
test_appliance.run(globals())
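The construct1/represent1 registrations reworked above use the module-level add_constructor/add_representer API; a compact, self-contained sketch of the same mechanism with an illustrative Point class (names here are not from the test suite):

import ruamel.yaml

class MyLoader(ruamel.yaml.Loader):
    pass

class MyDumper(ruamel.yaml.Dumper):
    pass

class Point(object):
    def __init__(self, x=0, y=0):
        self.x, self.y = x, y

def construct_point(constructor, node):
    return Point(**constructor.construct_mapping(node))

def represent_point(representer, data):
    return representer.represent_mapping('!point', {'x': data.x, 'y': data.y})

ruamel.yaml.add_constructor('!point', construct_point, Loader=MyLoader)
ruamel.yaml.add_representer(Point, represent_point, Dumper=MyDumper)

p = ruamel.yaml.load('!point {x: 1, y: 2}', Loader=MyLoader)
print(ruamel.yaml.dump(p, Dumper=MyDumper))  # re-emits a !point mapping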
diff --git a/_test/lib/test_emitter.py b/_test/lib/test_emitter.py
index 4b6fb76..fbdbb79 100644
--- a/_test/lib/test_emitter.py
+++ b/_test/lib/test_emitter.py
@@ -23,7 +23,7 @@ def test_emitter_on_data(data_filename, canonical_filename, verbose=False):
events = list(yaml.parse(fp0))
output = yaml.emit(events)
if verbose:
- print("OUTPUT:")
+ print('OUTPUT:')
print(output)
new_events = list(yaml.parse(output))
_compare_events(events, new_events)
@@ -38,7 +38,7 @@ def test_emitter_on_canonical(canonical_filename, verbose=False):
for canonical in [False, True]:
output = yaml.emit(events, canonical=canonical)
if verbose:
- print("OUTPUT (canonical=%s):" % canonical)
+ print('OUTPUT (canonical=%s):' % canonical)
print(output)
new_events = list(yaml.parse(output))
_compare_events(events, new_events)
@@ -52,23 +52,28 @@ def test_emitter_styles(data_filename, canonical_filename, verbose=False):
with open(filename, 'rb') as fp0:
events = list(yaml.parse(fp0))
for flow_style in [False, True]:
- for style in ['|', '>', '"', '\'', '']:
+ for style in ['|', '>', '"', "'", ""]:
styled_events = []
for event in events:
if isinstance(event, yaml.ScalarEvent):
- event = yaml.ScalarEvent(event.anchor, event.tag,
- event.implicit, event.value, style=style)
+ event = yaml.ScalarEvent(
+ event.anchor, event.tag, event.implicit, event.value, style=style
+ )
elif isinstance(event, yaml.SequenceStartEvent):
- event = yaml.SequenceStartEvent(event.anchor, event.tag,
- event.implicit, flow_style=flow_style)
+ event = yaml.SequenceStartEvent(
+ event.anchor, event.tag, event.implicit, flow_style=flow_style
+ )
elif isinstance(event, yaml.MappingStartEvent):
- event = yaml.MappingStartEvent(event.anchor, event.tag,
- event.implicit, flow_style=flow_style)
+ event = yaml.MappingStartEvent(
+ event.anchor, event.tag, event.implicit, flow_style=flow_style
+ )
styled_events.append(event)
output = yaml.emit(styled_events)
if verbose:
- print("OUTPUT (filename=%r, flow_style=%r, style=%r)" %
- (filename, flow_style, style))
+ print(
+ 'OUTPUT (filename=%r, flow_style=%r, style=%r)'
+ % (filename, flow_style, style)
+ )
print(output)
new_events = list(yaml.parse(output))
_compare_events(events, new_events)
@@ -78,15 +83,18 @@ test_emitter_styles.unittest = ['.data', '.canonical']
class EventsLoader(yaml.Loader):
-
def construct_event(self, node):
if isinstance(node, yaml.ScalarNode):
mapping = {}
else:
mapping = self.construct_mapping(node)
class_name = str(node.tag[1:]) + 'Event'
- if class_name in ['AliasEvent', 'ScalarEvent', 'SequenceStartEvent',
- 'MappingStartEvent']:
+ if class_name in [
+ 'AliasEvent',
+ 'ScalarEvent',
+ 'SequenceStartEvent',
+ 'MappingStartEvent',
+ ]:
mapping.setdefault('anchor', None)
if class_name in ['ScalarEvent', 'SequenceStartEvent', 'MappingStartEvent']:
mapping.setdefault('tag', None)
@@ -94,10 +102,11 @@ class EventsLoader(yaml.Loader):
mapping.setdefault('implicit', True)
if class_name == 'ScalarEvent':
mapping.setdefault('implicit', (False, True))
- mapping.setdefault('value', '')
+ mapping.setdefault('value', "")
value = getattr(yaml, class_name)(**mapping)
return value
+
# if Loader is not a composite, add this function
# EventsLoader.add_constructor = yaml.constructor.Constructor.add_constructor
@@ -110,7 +119,7 @@ def test_emitter_events(events_filename, verbose=False):
events = list(yaml.load(fp0, Loader=EventsLoader))
output = yaml.emit(events)
if verbose:
- print("OUTPUT:")
+ print('OUTPUT:')
print(output)
new_events = list(yaml.parse(output))
_compare_events(events, new_events)
@@ -118,4 +127,5 @@ def test_emitter_events(events_filename, verbose=False):
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
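The tests above all go through the same event-level round trip: parse a document into events, emit the events back to text, and re-parse. The core of that loop, outside the test harness:

import ruamel.yaml as yaml

src = u'a: 1\nb: [2, 3]\n'
events = list(yaml.parse(src))   # Stream/Document/Mapping/Sequence/Scalar events
out = yaml.emit(events)          # re-serialize the event stream to YAML text
print(out)
assert list(yaml.parse(out))     # the emitted text parses again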
diff --git a/_test/lib/test_errors.py b/_test/lib/test_errors.py
index cfaa000..b43540c 100644
--- a/_test/lib/test_errors.py
+++ b/_test/lib/test_errors.py
@@ -4,6 +4,7 @@ from __future__ import print_function
import ruamel.yaml as yaml
import test_emitter
import warnings
+
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
@@ -13,9 +14,9 @@ def test_loader_error(error_filename, verbose=False):
list(yaml.load_all(fp0))
except yaml.YAMLError as exc:
if verbose:
- print("%s:" % exc.__class__.__name__, exc)
+ print('%s:' % exc.__class__.__name__, exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_loader_error.unittest = ['.loader-error']
@@ -27,9 +28,9 @@ def test_loader_error_string(error_filename, verbose=False):
list(yaml.load_all(fp0.read()))
except yaml.YAMLError as exc:
if verbose:
- print("%s:" % exc.__class__.__name__, exc)
+ print('%s:' % exc.__class__.__name__, exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_loader_error_string.unittest = ['.loader-error']
@@ -41,9 +42,9 @@ def test_loader_error_single(error_filename, verbose=False):
yaml.load(fp0.read())
except yaml.YAMLError as exc:
if verbose:
- print("%s:" % exc.__class__.__name__, exc)
+ print('%s:' % exc.__class__.__name__, exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_loader_error_single.unittest = ['.single-loader-error']
@@ -51,15 +52,14 @@ test_loader_error_single.unittest = ['.single-loader-error']
def test_emitter_error(error_filename, verbose=False):
with open(error_filename, 'rb') as fp0:
- events = list(yaml.load(fp0,
- Loader=test_emitter.EventsLoader))
+ events = list(yaml.load(fp0, Loader=test_emitter.EventsLoader))
try:
yaml.emit(events)
except yaml.YAMLError as exc:
if verbose:
- print("%s:" % exc.__class__.__name__, exc)
+ print('%s:' % exc.__class__.__name__, exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_emitter_error.unittest = ['.emitter-error']
@@ -70,16 +70,18 @@ def test_dumper_error(error_filename, verbose=False):
code = fp0.read()
try:
import yaml
+
exec(code)
except yaml.YAMLError as exc:
if verbose:
- print("%s:" % exc.__class__.__name__, exc)
+ print('%s:' % exc.__class__.__name__, exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_dumper_error.unittest = ['.dumper-error']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
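Every test in this file follows the same try/except/else shape, failing when no YAMLError is raised; the bare pattern looks like this (the sample document is an arbitrary malformed input):

import ruamel.yaml as yaml

def expect_yaml_error(data, verbose=False):
    try:
        yaml.load(data, Loader=yaml.SafeLoader)
    except yaml.YAMLError as exc:
        if verbose:
            print('%s:' % exc.__class__.__name__, exc)
    else:
        raise AssertionError('expected an exception')

expect_yaml_error('key: [never, closed')  # flow sequence not terminated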
diff --git a/_test/lib/test_input_output.py b/_test/lib/test_input_output.py
index 27f7aed..c36477f 100644
--- a/_test/lib/test_input_output.py
+++ b/_test/lib/test_input_output.py
@@ -9,6 +9,7 @@ import os.path
from ruamel.yaml.compat import PY2, PY3, StringIO, BytesIO
if PY2:
+
def _unicode_open(file, encoding, errors='strict'):
info = codecs.lookup(encoding)
if isinstance(info, tuple):
@@ -21,7 +22,9 @@ if PY2:
srw.encoding = encoding
return srw
+
if PY3:
+
def test_unicode_input(unicode_filename, verbose=False):
with open(unicode_filename, 'rb') as fp:
data = fp.read().decode('utf-8')
@@ -30,64 +33,77 @@ if PY3:
assert output == value, (output, value)
output = yaml.load(StringIO(data))
assert output == value, (output, value)
- for input in [data.encode('utf-8'),
- codecs.BOM_UTF8 + data.encode('utf-8'),
- codecs.BOM_UTF16_BE + data.encode('utf-16-be'),
- codecs.BOM_UTF16_LE + data.encode('utf-16-le')]:
+ for input in [
+ data.encode('utf-8'),
+ codecs.BOM_UTF8 + data.encode('utf-8'),
+ codecs.BOM_UTF16_BE + data.encode('utf-16-be'),
+ codecs.BOM_UTF16_LE + data.encode('utf-16-le'),
+ ]:
if verbose:
- print("INPUT:", repr(input[:10]), "...")
+ print('INPUT:', repr(input[:10]), '...')
output = yaml.load(input)
assert output == value, (output, value)
output = yaml.load(BytesIO(input))
assert output == value, (output, value)
+
+
else:
+
def test_unicode_input(unicode_filename, verbose=False):
with open(unicode_filename, 'rb') as fp:
data = fp.read().decode('utf-8')
value = ' '.join(data.split())
output = yaml.load(_unicode_open(StringIO(data.encode('utf-8')), 'utf-8'))
assert output == value, (output, value)
- for input in [data, data.encode('utf-8'),
- codecs.BOM_UTF8 + data.encode('utf-8'),
- codecs.BOM_UTF16_BE + data.encode('utf-16-be'),
- codecs.BOM_UTF16_LE + data.encode('utf-16-le')]:
+ for input in [
+ data,
+ data.encode('utf-8'),
+ codecs.BOM_UTF8 + data.encode('utf-8'),
+ codecs.BOM_UTF16_BE + data.encode('utf-16-be'),
+ codecs.BOM_UTF16_LE + data.encode('utf-16-le'),
+ ]:
if verbose:
- print("INPUT:", repr(input[:10]), "...")
+ print('INPUT:', repr(input[:10]), '...')
output = yaml.load(input)
assert output == value, (output, value)
output = yaml.load(StringIO(input))
assert output == value, (output, value)
+
test_unicode_input.unittest = ['.unicode']
def test_unicode_input_errors(unicode_filename, verbose=False):
with open(unicode_filename, 'rb') as fp:
data = fp.read().decode('utf-8')
- for input in [data.encode('latin1', 'ignore'),
- data.encode('utf-16-be'), data.encode('utf-16-le'),
- codecs.BOM_UTF8 + data.encode('utf-16-be'),
- codecs.BOM_UTF16_BE + data.encode('utf-16-le'),
- codecs.BOM_UTF16_LE + data.encode('utf-8') + b'!']:
+ for input in [
+ data.encode('latin1', 'ignore'),
+ data.encode('utf-16-be'),
+ data.encode('utf-16-le'),
+ codecs.BOM_UTF8 + data.encode('utf-16-be'),
+ codecs.BOM_UTF16_BE + data.encode('utf-16-le'),
+ codecs.BOM_UTF16_LE + data.encode('utf-8') + b'!',
+ ]:
try:
yaml.load(input)
except yaml.YAMLError as exc:
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
try:
yaml.load(BytesIO(input) if PY3 else StringIO(input))
except yaml.YAMLError as exc:
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
test_unicode_input_errors.unittest = ['.unicode']
if PY3:
+
def test_unicode_output(unicode_filename, verbose=False):
with open(unicode_filename, 'rb') as fp:
data = fp.read().decode('utf-8')
@@ -105,19 +121,20 @@ if PY3:
stream = BytesIO()
if encoding is None:
try:
- yaml.dump(value, stream, encoding=encoding,
- allow_unicode=allow_unicode)
+ yaml.dump(
+ value, stream, encoding=encoding, allow_unicode=allow_unicode
+ )
except TypeError as exc:
if verbose:
print(exc)
data4 = None
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
else:
yaml.dump(value, stream, encoding=encoding, allow_unicode=allow_unicode)
data4 = stream.getvalue()
if verbose:
- print("BYTES:", data4[:50])
+ print('BYTES:', data4[:50])
data4 = data4.decode(encoding)
for copy in [data1, data2, data3, data4]:
if copy is None:
@@ -130,12 +147,15 @@ if PY3:
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
else:
copy[4:].encode('ascii')
assert isinstance(data1, str), (type(data1), encoding)
assert isinstance(data2, str), (type(data2), encoding)
+
+
else:
+
def test_unicode_output(unicode_filename, verbose=False):
with open(unicode_filename, 'rb') as fp:
data = fp.read().decode('utf-8')
@@ -144,8 +164,12 @@ else:
data1 = yaml.dump(value, allow_unicode=allow_unicode)
for encoding in [None, 'utf-8', 'utf-16-be', 'utf-16-le']:
stream = StringIO()
- yaml.dump(value, _unicode_open(stream, 'utf-8'), encoding=encoding,
- allow_unicode=allow_unicode)
+ yaml.dump(
+ value,
+ _unicode_open(stream, 'utf-8'),
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ )
data2 = stream.getvalue()
data3 = yaml.dump(value, encoding=encoding, allow_unicode=allow_unicode)
stream = StringIO()
@@ -159,7 +183,7 @@ else:
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
else:
copy[4:].encode('ascii')
assert isinstance(data1, str), (type(data1), encoding)
@@ -206,8 +230,7 @@ def test_file_output(unicode_filename, verbose=False):
with open(filename, 'rb') as fp0:
data2 = fp0.read()
with open(filename, 'wb') as stream:
- yaml.dump(data, stream, encoding='utf-16-le',
- allow_unicode=True)
+ yaml.dump(data, stream, encoding='utf-16-le', allow_unicode=True)
with open(filename, 'rb') as fp0:
data3 = fp0.read().decode('utf-16-le')[1:].encode('utf-8')
stream = _unicode_open(open(filename, 'wb'), 'utf-8')
@@ -267,4 +290,5 @@ test_unicode_transfer.unittest = ['.unicode']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
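The input lists reformatted above all exercise the same behaviour: byte input is accepted and the encoding is taken from an optional BOM. A condensed sketch of that check:

import codecs
import ruamel.yaml as yaml

data = u'text: unicode ok\n'
for raw in [
    data.encode('utf-8'),
    codecs.BOM_UTF8 + data.encode('utf-8'),
    codecs.BOM_UTF16_BE + data.encode('utf-16-be'),
    codecs.BOM_UTF16_LE + data.encode('utf-16-le'),
]:
    assert yaml.load(raw, Loader=yaml.SafeLoader) == {'text': 'unicode ok'}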
diff --git a/_test/lib/test_mark.py b/_test/lib/test_mark.py
index 4e0728f..0ff2789 100644
--- a/_test/lib/test_mark.py
+++ b/_test/lib/test_mark.py
@@ -19,8 +19,7 @@ def test_marks(marks_filename, verbose=False):
else:
column += 1
index += 1
- mark = yaml.Mark(marks_filename, index, line, column, text_type(input),
- index)
+ mark = yaml.Mark(marks_filename, index, line, column, text_type(input), index)
snippet = mark.get_snippet(indent=2, max_length=79)
if verbose:
print(snippet)
@@ -35,4 +34,5 @@ test_marks.unittest = ['.marks']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
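yaml.Mark, constructed above with (name, index, line, column, buffer, pointer), is what error messages use to point into the source; a small illustration of get_snippet():

import ruamel.yaml as yaml

buf = u'key: [1, 2\nother: 3\n'
mark = yaml.Mark('example.yaml', 10, 0, 10, buf, 10)  # points just past the '2' on the first line
print(mark.get_snippet(indent=2, max_length=79))      # prints that line with a caret at column 10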
diff --git a/_test/lib/test_reader.py b/_test/lib/test_reader.py
index 7ce0dc9..6604f24 100644
--- a/_test/lib/test_reader.py
+++ b/_test/lib/test_reader.py
@@ -1,7 +1,7 @@
from __future__ import absolute_import
from __future__ import print_function
-import codecs # NOQA
+import codecs # NOQA
import io
from ruamel.yaml.compat import PY2
@@ -17,7 +17,7 @@ def _run_reader(data, verbose):
if verbose:
print(exc)
else:
- raise AssertionError("expected an exception")
+ raise AssertionError('expected an exception')
def test_stream_error(error_filename, verbose=False):
@@ -47,4 +47,5 @@ test_stream_error.unittest = ['.stream-error']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_recursive.py b/_test/lib/test_recursive.py
index e7084e7..c87f879 100644
--- a/_test/lib/test_recursive.py
+++ b/_test/lib/test_recursive.py
@@ -11,14 +11,12 @@ class AnInstance:
def __repr__(self):
try:
- return "%s(foo=%r, bar=%r)" % (self.__class__.__name__,
- self.foo, self.bar)
+ return '%s(foo=%r, bar=%r)' % (self.__class__.__name__, self.foo, self.bar)
except RuntimeError:
- return "%s(foo=..., bar=...)" % self.__class__.__name__
+ return '%s(foo=..., bar=...)' % self.__class__.__name__
class AnInstanceWithState(AnInstance):
-
def __getstate__(self):
return {'attributes': [self.foo, self.bar]}
@@ -41,11 +39,11 @@ def test_recursive(recursive_filename, verbose=False):
assert output1 == output2, (output1, output2)
finally:
if verbose:
- print("VALUE1:", value1)
- print("VALUE2:", value2)
- print("OUTPUT1:")
+ print('VALUE1:', value1)
+ print('VALUE2:', value2)
+ print('OUTPUT1:')
print(output1)
- print("OUTPUT2:")
+ print('OUTPUT2:')
print(output2)
@@ -53,4 +51,5 @@ test_recursive.unittest = ['.recursive']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_representer.py b/_test/lib/test_representer.py
index 2c7e2c8..a83d2b2 100644
--- a/_test/lib/test_representer.py
+++ b/_test/lib/test_representer.py
@@ -14,8 +14,12 @@ def test_representer_types(code_filename, verbose=False):
native1 = test_constructor._load_code(fp0.read())
native2 = None
try:
- output = yaml.dump(native1, Dumper=test_constructor.MyDumper,
- allow_unicode=allow_unicode, encoding=encoding)
+ output = yaml.dump(
+ native1,
+ Dumper=test_constructor.MyDumper,
+ allow_unicode=allow_unicode,
+ encoding=encoding,
+ )
native2 = yaml.load(output, Loader=test_constructor.MyLoader)
try:
if native1 == native2:
@@ -25,18 +29,18 @@ def test_representer_types(code_filename, verbose=False):
value1 = test_constructor._serialize_value(native1)
value2 = test_constructor._serialize_value(native2)
if verbose:
- print("SERIALIZED NATIVE1:")
+ print('SERIALIZED NATIVE1:')
print(value1)
- print("SERIALIZED NATIVE2:")
+ print('SERIALIZED NATIVE2:')
print(value2)
assert value1 == value2, (native1, native2)
finally:
if verbose:
- print("NATIVE1:")
+ print('NATIVE1:')
pprint.pprint(native1)
- print("NATIVE2:")
+ print('NATIVE2:')
pprint.pprint(native2)
- print("OUTPUT:")
+ print('OUTPUT:')
print(output)
@@ -44,4 +48,5 @@ test_representer_types.unittest = ['.code']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_resolver.py b/_test/lib/test_resolver.py
index 4d1e6a4..0a04e7a 100644
--- a/_test/lib/test_resolver.py
+++ b/_test/lib/test_resolver.py
@@ -20,9 +20,9 @@ def test_implicit_resolver(data_filename, detect_filename, verbose=False):
assert scalar.tag == correct_tag, (scalar.tag, correct_tag)
finally:
if verbose:
- print("CORRECT TAG:", correct_tag)
+ print('CORRECT TAG:', correct_tag)
if hasattr(node, 'value'):
- print("CHILDREN:")
+ print('CHILDREN:')
pprint.pprint(node.value)
@@ -38,16 +38,19 @@ def _make_path_loader_and_dumper():
class MyDumper(yaml.Dumper):
pass
- yaml.add_path_resolver(u'!root', [],
- Loader=MyLoader, Dumper=MyDumper)
- yaml.add_path_resolver(u'!root/scalar', [], str,
- Loader=MyLoader, Dumper=MyDumper)
- yaml.add_path_resolver(u'!root/key11/key12/*', ['key11', 'key12'],
- Loader=MyLoader, Dumper=MyDumper)
- yaml.add_path_resolver(u'!root/key21/1/*', ['key21', 1],
- Loader=MyLoader, Dumper=MyDumper)
- yaml.add_path_resolver(u'!root/key31/*/*/key14/map', ['key31', None, None, 'key14'], dict,
- Loader=MyLoader, Dumper=MyDumper)
+ yaml.add_path_resolver(u'!root', [], Loader=MyLoader, Dumper=MyDumper)
+ yaml.add_path_resolver(u'!root/scalar', [], str, Loader=MyLoader, Dumper=MyDumper)
+ yaml.add_path_resolver(
+ u'!root/key11/key12/*', ['key11', 'key12'], Loader=MyLoader, Dumper=MyDumper
+ )
+ yaml.add_path_resolver(u'!root/key21/1/*', ['key21', 1], Loader=MyLoader, Dumper=MyDumper)
+ yaml.add_path_resolver(
+ u'!root/key31/*/*/key14/map',
+ ['key31', None, None, 'key14'],
+ dict,
+ Loader=MyLoader,
+ Dumper=MyDumper,
+ )
return MyLoader, MyDumper
@@ -106,4 +109,5 @@ test_path_resolver_dumper.unittest = ['.data', '.path']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_structure.py b/_test/lib/test_structure.py
index 4d04b11..2656bbb 100644
--- a/_test/lib/test_structure.py
+++ b/_test/lib/test_structure.py
@@ -2,7 +2,7 @@ from __future__ import absolute_import
from __future__ import print_function
import ruamel.yaml as yaml
-import canonical # NOQA
+import canonical # NOQA
import pprint
from ruamel.yaml.compat import text_type, PY3
@@ -46,8 +46,12 @@ def test_structure(data_filename, structure_filename, verbose=False):
with open(data_filename, 'rb') as fp:
loader = yaml.Loader(fp)
while loader.check_event():
- if loader.check_event(yaml.StreamStartEvent, yaml.StreamEndEvent,
- yaml.DocumentStartEvent, yaml.DocumentEndEvent):
+ if loader.check_event(
+ yaml.StreamStartEvent,
+ yaml.StreamEndEvent,
+ yaml.DocumentStartEvent,
+ yaml.DocumentEndEvent,
+ ):
loader.get_event()
continue
nodes1.append(_convert_structure(loader))
@@ -56,9 +60,9 @@ def test_structure(data_filename, structure_filename, verbose=False):
assert nodes1 == nodes2, (nodes1, nodes2)
finally:
if verbose:
- print("NODES1:")
+ print('NODES1:')
pprint.pprint(nodes1)
- print("NODES2:")
+ print('NODES2:')
pprint.pprint(nodes2)
@@ -89,9 +93,9 @@ def test_parser(data_filename, canonical_filename, verbose=False):
_compare_events(events1, events2)
finally:
if verbose:
- print("EVENTS1:")
+ print('EVENTS1:')
pprint.pprint(events1)
- print("EVENTS2:")
+ print('EVENTS2:')
pprint.pprint(events2)
@@ -109,9 +113,9 @@ def test_parser_on_canonical(canonical_filename, verbose=False):
_compare_events(events1, events2, full=True)
finally:
if verbose:
- print("EVENTS1:")
+ print('EVENTS1:')
pprint.pprint(events1)
- print("EVENTS2:")
+ print('EVENTS2:')
pprint.pprint(events2)
@@ -146,9 +150,9 @@ def test_composer(data_filename, canonical_filename, verbose=False):
_compare_nodes(node1, node2)
finally:
if verbose:
- print("NODES1:")
+ print('NODES1:')
pprint.pprint(nodes1)
- print("NODES2:")
+ print('NODES2:')
pprint.pprint(nodes2)
@@ -189,8 +193,9 @@ def _make_canonical_loader():
def construct_undefined(self, node):
return self.construct_scalar(node)
- MyCanonicalLoader.add_constructor(u'tag:yaml.org,2002:map',
- MyCanonicalLoader.construct_mapping)
+ MyCanonicalLoader.add_constructor(
+ u'tag:yaml.org,2002:map', MyCanonicalLoader.construct_mapping
+ )
MyCanonicalLoader.add_constructor(None, MyCanonicalLoader.construct_undefined)
@@ -207,9 +212,9 @@ def test_constructor(data_filename, canonical_filename, verbose=False):
assert native1 == native2, (native1, native2)
finally:
if verbose:
- print("NATIVE1:")
+ print('NATIVE1:')
pprint.pprint(native1)
- print("NATIVE2:")
+ print('NATIVE2:')
pprint.pprint(native2)
@@ -217,4 +222,5 @@ test_constructor.unittest = ['.data', '.canonical']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_tokens.py b/_test/lib/test_tokens.py
index e240efe..cdb41ba 100644
--- a/_test/lib/test_tokens.py
+++ b/_test/lib/test_tokens.py
@@ -53,13 +53,12 @@ def test_tokens(data_filename, tokens_filename, verbose=False):
try:
with open(data_filename, 'rb') as fp1:
for token in yaml.scan(fp1):
- if not isinstance(
- token, (yaml.StreamStartToken, yaml.StreamEndToken)):
+ if not isinstance(token, (yaml.StreamStartToken, yaml.StreamEndToken)):
tokens1.append(_replaces[token.__class__])
finally:
if verbose:
- print("TOKENS1:", ' '.join(tokens1))
- print("TOKENS2:", ' '.join(tokens2))
+ print('TOKENS1:', ' '.join(tokens1))
+ print('TOKENS2:', ' '.join(tokens2))
assert len(tokens1) == len(tokens2), (tokens1, tokens2)
for token1, token2 in zip(tokens1, tokens2):
assert token1 == token2, (token1, token2)
@@ -84,4 +83,5 @@ test_scanner.unittest = ['.data', '.canonical']
if __name__ == '__main__':
import test_appliance
+
test_appliance.run(globals())
diff --git a/_test/lib/test_yaml.py b/_test/lib/test_yaml.py
index c650762..cf64a73 100644
--- a/_test/lib/test_yaml.py
+++ b/_test/lib/test_yaml.py
@@ -1,19 +1,20 @@
# coding: utf-8
-from test_mark import * # NOQA
-from test_reader import * # NOQA
-from test_canonical import * # NOQA
-from test_tokens import * # NOQA
-from test_structure import * # NOQA
-from test_errors import * # NOQA
-from test_resolver import * # NOQA
-from test_constructor import * # NOQA
-from test_emitter import * # NOQA
-from test_representer import * # NOQA
-from test_recursive import * # NOQA
+from test_mark import * # NOQA
+from test_reader import * # NOQA
+from test_canonical import * # NOQA
+from test_tokens import * # NOQA
+from test_structure import * # NOQA
+from test_errors import * # NOQA
+from test_resolver import * # NOQA
+from test_constructor import * # NOQA
+from test_emitter import * # NOQA
+from test_representer import * # NOQA
+from test_recursive import * # NOQA
from test_input_output import * # NOQA
if __name__ == '__main__':
import sys
import test_appliance
+
sys.exit(test_appliance.run(globals()))
diff --git a/_test/lib/test_yaml_ext.py b/_test/lib/test_yaml_ext.py
index c4a1493..e36ddd0 100644
--- a/_test/lib/test_yaml_ext.py
+++ b/_test/lib/test_yaml_ext.py
@@ -173,8 +173,10 @@ def test_c_version(verbose=False):
if verbose:
print(_ruamel_yaml.get_version())
print(_ruamel_yaml.get_version_string())
- assert ("%s.%s.%s" % _ruamel_yaml.get_version()) == _ruamel_yaml.get_version_string(), \
- (_ruamel_yaml.get_version(), _ruamel_yaml.get_version_string())
+ assert ('%s.%s.%s' % _ruamel_yaml.get_version()) == _ruamel_yaml.get_version_string(), (
+ _ruamel_yaml.get_version(),
+ _ruamel_yaml.get_version_string(),
+ )
def _compare_scanners(py_data, c_data, verbose):
@@ -190,20 +192,29 @@ def _compare_scanners(py_data, c_data, verbose):
assert py_token.value == c_token.value, (py_token, c_token)
if isinstance(py_token, ruamel.yaml.StreamEndToken):
continue
- py_start = (py_token.start_mark.index, py_token.start_mark.line,
- py_token.start_mark.column)
- py_end = (py_token.end_mark.index, py_token.end_mark.line,
- py_token.end_mark.column)
- c_start = (c_token.start_mark.index, c_token.start_mark.line,
- c_token.start_mark.column)
+ py_start = (
+ py_token.start_mark.index,
+ py_token.start_mark.line,
+ py_token.start_mark.column,
+ )
+ py_end = (
+ py_token.end_mark.index,
+ py_token.end_mark.line,
+ py_token.end_mark.column,
+ )
+ c_start = (
+ c_token.start_mark.index,
+ c_token.start_mark.line,
+ c_token.start_mark.column,
+ )
c_end = (c_token.end_mark.index, c_token.end_mark.line, c_token.end_mark.column)
assert py_start == c_start, (py_start, c_start)
assert py_end == c_end, (py_end, c_end)
finally:
if verbose:
- print("PY_TOKENS:")
+ print('PY_TOKENS:')
pprint.pprint(py_tokens)
- print("C_TOKENS:")
+ print('C_TOKENS:')
pprint.pprint(c_tokens)
@@ -234,16 +245,24 @@ def _compare_parsers(py_data, c_data, verbose):
c_events.append(event)
assert len(py_events) == len(c_events), (len(py_events), len(c_events))
for py_event, c_event in zip(py_events, c_events):
- for attribute in ['__class__', 'anchor', 'tag', 'implicit',
- 'value', 'explicit', 'version', 'tags']:
+ for attribute in [
+ '__class__',
+ 'anchor',
+ 'tag',
+ 'implicit',
+ 'value',
+ 'explicit',
+ 'version',
+ 'tags',
+ ]:
py_value = getattr(py_event, attribute, None)
c_value = getattr(c_event, attribute, None)
assert py_value == c_value, (py_event, c_event, attribute)
finally:
if verbose:
- print("PY_EVENTS:")
+ print('PY_EVENTS:')
pprint.pprint(py_events)
- print("C_EVENTS:")
+ print('C_EVENTS:')
pprint.pprint(c_events)
@@ -277,13 +296,25 @@ def _compare_emitters(data, verbose):
assert len(events) == len(py_events), (len(events), len(py_events))
assert len(events) == len(c_events), (len(events), len(c_events))
for event, py_event, c_event in zip(events, py_events, c_events):
- for attribute in ['__class__', 'anchor', 'tag', 'implicit',
- 'value', 'explicit', 'version', 'tags']:
+ for attribute in [
+ '__class__',
+ 'anchor',
+ 'tag',
+ 'implicit',
+ 'value',
+ 'explicit',
+ 'version',
+ 'tags',
+ ]:
value = getattr(event, attribute, None)
py_value = getattr(py_event, attribute, None)
c_value = getattr(c_event, attribute, None)
- if attribute == 'tag' and value in [None, u'!'] \
- and py_value in [None, u'!'] and c_value in [None, u'!']:
+ if (
+ attribute == 'tag'
+ and value in [None, u'!']
+ and py_value in [None, u'!']
+ and c_value in [None, u'!']
+ ):
continue
if attribute == 'explicit' and (py_value or c_value):
continue
@@ -291,11 +322,11 @@ def _compare_emitters(data, verbose):
assert value == c_value, (event, c_event, attribute)
finally:
if verbose:
- print("EVENTS:")
+ print('EVENTS:')
pprint.pprint(events)
- print("PY_EVENTS:")
+ print('PY_EVENTS:')
pprint.pprint(py_events)
- print("C_EVENTS:")
+ print('C_EVENTS:')
pprint.pprint(c_events)
@@ -317,6 +348,7 @@ def wrap_ext_function(function):
function(*args, **kwds)
finally:
_tear_down()
+
if PY3:
wrapper.__name__ = '%s_ext' % function.__name__
else:
@@ -349,19 +381,33 @@ def wrap_ext(collections):
assert function.unittest_name not in globals()
globals()[function.unittest_name] = function
-import test_tokens # NOQA
-import test_structure # NOQA
-import test_errors # NOQA
-import test_resolver # NOQA
-import test_constructor # NOQA
-import test_emitter # NOQA
-import test_representer # NOQA
-import test_recursive # NOQA
-import test_input_output # NOQA
-wrap_ext([test_tokens, test_structure, test_errors, test_resolver, test_constructor,
- test_emitter, test_representer, test_recursive, test_input_output])
+
+import test_tokens # NOQA
+import test_structure # NOQA
+import test_errors # NOQA
+import test_resolver # NOQA
+import test_constructor # NOQA
+import test_emitter # NOQA
+import test_representer # NOQA
+import test_recursive # NOQA
+import test_input_output # NOQA
+
+wrap_ext(
+ [
+ test_tokens,
+ test_structure,
+ test_errors,
+ test_resolver,
+ test_constructor,
+ test_emitter,
+ test_representer,
+ test_recursive,
+ test_input_output,
+ ]
+)
if __name__ == '__main__':
import sys
import test_appliance
+
sys.exit(test_appliance.run(globals()))
diff --git a/_test/roundtrip.py b/_test/roundtrip.py
index 8548969..86e4862 100644
--- a/_test/roundtrip.py
+++ b/_test/roundtrip.py
@@ -21,12 +21,13 @@ def dedent(data):
except ValueError:
pass
else:
- data = data[position_of_first_newline + 1:]
+ data = data[position_of_first_newline + 1 :]
return textwrap.dedent(data)
def round_trip_load(inp, preserve_quotes=None, version=None):
import ruamel.yaml # NOQA
+
dinp = dedent(inp)
return ruamel.yaml.load(
dinp,
@@ -38,6 +39,7 @@ def round_trip_load(inp, preserve_quotes=None, version=None):
def round_trip_load_all(inp, preserve_quotes=None, version=None):
import ruamel.yaml # NOQA
+
dinp = dedent(inp)
return ruamel.yaml.load_all(
dinp,
@@ -47,21 +49,35 @@ def round_trip_load_all(inp, preserve_quotes=None, version=None):
)
-def round_trip_dump(data, stream=None,
- indent=None, block_seq_indent=None, top_level_colon_align=None,
- prefix_colon=None, explicit_start=None, explicit_end=None, version=None):
+def round_trip_dump(
+ data,
+ stream=None,
+ indent=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+):
import ruamel.yaml # NOQA
- return ruamel.yaml.round_trip_dump(data, stream=stream,
- indent=indent, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align,
- prefix_colon=prefix_colon,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version)
+
+ return ruamel.yaml.round_trip_dump(
+ data,
+ stream=stream,
+ indent=indent,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ )
def diff(inp, outp, file_name='stdin'):
import difflib
+
inl = inp.splitlines(True) # True for keepends
outl = outp.splitlines(True)
diff = difflib.unified_diff(inl, outl, file_name, 'round trip YAML')
@@ -73,11 +89,20 @@ def diff(inp, outp, file_name='stdin'):
sys.stdout.write(line)
-def round_trip(inp, outp=None, extra=None, intermediate=None, indent=None,
- block_seq_indent=None, top_level_colon_align=None, prefix_colon=None,
- preserve_quotes=None,
- explicit_start=None, explicit_end=None,
- version=None):
+def round_trip(
+ inp,
+ outp=None,
+ extra=None,
+ intermediate=None,
+ indent=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ preserve_quotes=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+):
"""
inp: input string to parse
outp: expected output (equals input if not specified)
@@ -94,23 +119,31 @@ def round_trip(inp, outp=None, extra=None, intermediate=None, indent=None,
if data[k] != v:
print('{0!r} <> {1!r}'.format(data[k], v))
raise ValueError
- res = round_trip_dump(data, indent=indent, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align,
- prefix_colon=prefix_colon,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version)
+ res = round_trip_dump(
+ data,
+ indent=indent,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ )
if res != doutp:
- diff(doutp, res, "input string")
- print('\nroundtrip data:\n', res, sep='')
+ diff(doutp, res, 'input string')
+ print('\nroundtrip data:\n', res, sep="")
assert res == doutp
- res = round_trip_dump(data, indent=indent, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align,
- prefix_colon=prefix_colon,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version)
- print('roundtrip second round data:\n', res, sep='')
+ res = round_trip_dump(
+ data,
+ indent=indent,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ )
+ print('roundtrip second round data:\n', res, sep="")
assert res == doutp
return data
@@ -120,6 +153,7 @@ def YAML(**kw):
class MyYAML(ruamel.yaml.YAML):
"""auto dedent string parameters on load"""
+
def load(self, stream):
if isinstance(stream, str):
if stream and stream[0] == '\n':
@@ -137,6 +171,7 @@ def YAML(**kw):
def dump(self, data, **kw):
from ruamel.yaml.compat import StringIO, BytesIO # NOQA
+
assert ('stream' in kw) ^ ('compare' in kw)
if 'stream' in kw:
return ruamel.yaml.YAML.dump(data, **kw)
@@ -156,6 +191,7 @@ def YAML(**kw):
def round_trip(self, stream, **kw):
from ruamel.yaml.compat import StringIO, BytesIO # NOQA
+
assert isinstance(stream, ruamel.yaml.compat.text_type)
lkw = kw.copy()
if stream and stream[0] == '\n':
@@ -167,7 +203,7 @@ def YAML(**kw):
ruamel.yaml.YAML.dump(self, data, **lkw)
res = st.getvalue()
if res != outp:
- diff(outp, res, "input string")
+ diff(outp, res, 'input string')
assert res == outp
return MyYAML(**kw)
diff --git a/_test/test_a_dedent.py b/_test/test_a_dedent.py
index 1362deb..984e1a2 100644
--- a/_test/test_a_dedent.py
+++ b/_test/test_a_dedent.py
@@ -4,39 +4,54 @@ from roundtrip import dedent
class TestDedent:
def test_start_newline(self):
- x = dedent("""
+ x = dedent(
+ """
123
456
- """)
- assert x == "123\n 456\n"
+ """
+ )
+ assert x == '123\n 456\n'
def test_start_space_newline(self):
        # special construct to prevent stripping of following whitespace
- x = dedent(" " """
+ x = dedent(
+ ' '
+ """
123
- """)
- assert x == "123\n"
+ """
+ )
+ assert x == '123\n'
def test_start_no_newline(self):
        # special construct to prevent stripping of following whitespace
- x = dedent("""\
+ x = dedent(
+ """\
123
456
- """)
- assert x == "123\n 456\n"
+ """
+ )
+ assert x == '123\n 456\n'
def test_preserve_no_newline_at_end(self):
- x = dedent("""
- 123""")
- assert x == "123"
+ x = dedent(
+ """
+ 123"""
+ )
+ assert x == '123'
def test_preserve_no_newline_at_all(self):
- x = dedent("""\
- 123""")
- assert x == "123"
+ x = dedent(
+ """\
+ 123"""
+ )
+ assert x == '123'
def test_multiple_dedent(self):
- x = dedent(dedent("""
+ x = dedent(
+ dedent(
+ """
123
- """))
- assert x == "123\n"
+ """
+ )
+ )
+ assert x == '123\n'
diff --git a/_test/test_add_xxx.py b/_test/test_add_xxx.py
index f2e976f..031b89f 100644
--- a/_test/test_add_xxx.py
+++ b/_test/test_add_xxx.py
@@ -1,7 +1,7 @@
# coding: utf-8
import re
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import dedent
@@ -12,7 +12,7 @@ class Dice(tuple):
return tuple.__new__(cls, [a, b])
def __repr__(self):
- return "Dice(%s,%s)" % self
+ return 'Dice(%s,%s)' % self
def dice_constructor(loader, node):
@@ -27,6 +27,7 @@ def dice_representer(dumper, data):
def test_dice_constructor():
import ruamel.yaml # NOQA
+
ruamel.yaml.add_constructor(u'!dice', dice_constructor)
data = ruamel.yaml.load('initial hit points: !dice 8d4', Loader=ruamel.yaml.Loader)
assert str(data) == "{'initial hit points': Dice(8,4)}"
@@ -34,6 +35,7 @@ def test_dice_constructor():
def test_dice_constructor_with_loader():
import ruamel.yaml # NOQA
+
ruamel.yaml.add_constructor(u'!dice', dice_constructor, Loader=ruamel.yaml.Loader)
data = ruamel.yaml.load('initial hit points: !dice 8d4', Loader=ruamel.yaml.Loader)
assert str(data) == "{'initial hit points': Dice(8,4)}"
@@ -41,20 +43,27 @@ def test_dice_constructor_with_loader():
def test_dice_representer():
import ruamel.yaml # NOQA
+
ruamel.yaml.add_representer(Dice, dice_representer)
    # ruamel.yaml 0.15.8+ no longer forces quotes on tagged scalars
- assert ruamel.yaml.dump(dict(gold=Dice(10, 6)), default_flow_style=False) == \
- "gold: !dice 10d6\n"
+ assert (
+ ruamel.yaml.dump(dict(gold=Dice(10, 6)), default_flow_style=False)
+ == 'gold: !dice 10d6\n'
+ )
def test_dice_implicit_resolver():
import ruamel.yaml # NOQA
+
pattern = re.compile(r'^\d+d\d+$')
ruamel.yaml.add_implicit_resolver(u'!dice', pattern)
- assert ruamel.yaml.dump(dict(treasure=Dice(10, 20)), default_flow_style=False) == \
- 'treasure: 10d20\n'
- assert ruamel.yaml.load('damage: 5d10', Loader=ruamel.yaml.Loader) == \
- dict(damage=Dice(5, 10))
+ assert (
+ ruamel.yaml.dump(dict(treasure=Dice(10, 20)), default_flow_style=False)
+ == 'treasure: 10d20\n'
+ )
+ assert ruamel.yaml.load('damage: 5d10', Loader=ruamel.yaml.Loader) == dict(
+ damage=Dice(5, 10)
+ )
class Obj1(dict):
@@ -78,6 +87,7 @@ class YAMLObj1(object):
@classmethod
def from_yaml(cls, loader, suffix, node):
import ruamel.yaml # NOQA
+
obj1 = Obj1(suffix)
if isinstance(node, ruamel.yaml.MappingNode):
obj1.add_node(loader.construct_mapping(node))
@@ -92,22 +102,25 @@ class YAMLObj1(object):
def test_yaml_obj():
import ruamel.yaml # NOQA
+
ruamel.yaml.add_representer(Obj1, YAMLObj1.to_yaml)
ruamel.yaml.add_multi_constructor(YAMLObj1.yaml_tag, YAMLObj1.from_yaml)
x = ruamel.yaml.load('!obj:x.2\na: 1', Loader=ruamel.yaml.Loader)
print(x)
- assert ruamel.yaml.dump(x) == '''!obj:x.2 "{'a': 1}"\n'''
+ assert ruamel.yaml.dump(x) == """!obj:x.2 "{'a': 1}"\n"""
def test_yaml_obj_with_loader_and_dumper():
import ruamel.yaml # NOQA
+
ruamel.yaml.add_representer(Obj1, YAMLObj1.to_yaml, Dumper=ruamel.yaml.Dumper)
- ruamel.yaml.add_multi_constructor(YAMLObj1.yaml_tag, YAMLObj1.from_yaml,
- Loader=ruamel.yaml.Loader)
+ ruamel.yaml.add_multi_constructor(
+ YAMLObj1.yaml_tag, YAMLObj1.from_yaml, Loader=ruamel.yaml.Loader
+ )
x = ruamel.yaml.load('!obj:x.2\na: 1', Loader=ruamel.yaml.Loader)
# x = ruamel.yaml.load('!obj:x.2\na: 1')
print(x)
- assert ruamel.yaml.dump(x) == '''!obj:x.2 "{'a': 1}"\n'''
+ assert ruamel.yaml.dump(x) == """!obj:x.2 "{'a': 1}"\n"""
# ToDo use nullege to search add_multi_representer and add_path_resolver
@@ -134,12 +147,13 @@ def test_issue_127():
@classmethod
def to_yaml(cls, dumper, data):
if isinstance(data.logical_id, ruamel.yaml.scalarstring.ScalarString):
- style = data.logical_id.style # ruamel.yaml>0.15.8
+ style = data.logical_id.style # ruamel.yaml>0.15.8
else:
style = None
return dumper.represent_scalar(cls.yaml_tag, data.logical_id, style=style)
- document = dedent('''\
+ document = dedent(
+ """\
AList:
- !Ref One
- !Ref 'Two'
@@ -149,7 +163,9 @@ def test_issue_127():
CList:
- Five Six
- 'Seven Eight'
- ''')
+ """
+ )
data = ruamel.yaml.round_trip_load(document, preserve_quotes=True)
- assert ruamel.yaml.round_trip_dump(data, indent=4, block_seq_indent=2) == \
- document.replace('\n Two and', ' Two and')
+ assert ruamel.yaml.round_trip_dump(data, indent=4, block_seq_indent=2) == document.replace(
+ '\n Two and', ' Two and'
+ )
diff --git a/_test/test_anchor.py b/_test/test_anchor.py
index ae6469a..cb632d8 100644
--- a/_test/test_anchor.py
+++ b/_test/test_anchor.py
@@ -22,23 +22,30 @@ def compare(d, s):
class TestAnchorsAliases:
def test_anchor_id_renumber(self):
from ruamel.yaml.serializer import Serializer
+
assert Serializer.ANCHOR_TEMPLATE == 'id%03d'
- data = load("""
+ data = load(
+ """
a: &id002
b: 1
c: 2
d: *id002
- """)
- compare(data, """
+ """
+ )
+ compare(
+ data,
+ """
a: &id001
b: 1
c: 2
d: *id001
- """)
+ """,
+ )
def test_template_matcher(self):
"""test if id matches the anchor template"""
from ruamel.yaml.serializer import templated_id
+
assert templated_id(u'id001')
assert templated_id(u'id999')
assert templated_id(u'id1000')
@@ -56,7 +63,9 @@ class TestAnchorsAliases:
def test_anchor_assigned(self):
from ruamel.yaml.comments import CommentedMap
- data = load("""
+
+ data = load(
+ """
a: &id002
b: 1
c: 2
@@ -65,7 +74,8 @@ class TestAnchorsAliases:
b: 1
c: 2
f: *etemplate
- """)
+ """
+ )
d = data['d']
assert isinstance(d, CommentedMap)
assert d.yaml_anchor() is None # got dropped as it matches pattern
@@ -75,7 +85,8 @@ class TestAnchorsAliases:
assert e.yaml_anchor().always_dump is False
def test_anchor_id_retained(self):
- data = load("""
+ data = load(
+ """
a: &id002
b: 1
c: 2
@@ -84,8 +95,11 @@ class TestAnchorsAliases:
b: 1
c: 2
f: *etemplate
- """)
- compare(data, """
+ """
+ )
+ compare(
+ data,
+ """
a: &id001
b: 1
c: 2
@@ -94,26 +108,33 @@ class TestAnchorsAliases:
b: 1
c: 2
f: *etemplate
- """)
+ """,
+ )
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_alias_before_anchor(self):
from ruamel.yaml.composer import ComposerError
+
with pytest.raises(ComposerError):
- data = load("""
+ data = load(
+ """
d: *id002
a: &id002
b: 1
c: 2
- """)
+ """
+ )
data = data
def test_anchor_on_sequence(self):
# as reported by Bjorn Stabell
# https://bitbucket.org/ruamel/yaml/issue/7/anchor-names-not-preserved
from ruamel.yaml.comments import CommentedSeq
- data = load("""
+
+ data = load(
+ """
nut1: &alice
- 1
- 2
@@ -123,13 +144,15 @@ class TestAnchorsAliases:
nut3:
- *blake
- *alice
- """)
+ """
+ )
r = data['nut1']
assert isinstance(r, CommentedSeq)
assert r.yaml_anchor() is not None
assert r.yaml_anchor().value == 'alice'
- merge_yaml = dedent("""
+ merge_yaml = dedent(
+ """
- &CENTER {x: 1, y: 2}
- &LEFT {x: 0, y: 2}
- &BIG {r: 10}
@@ -151,7 +174,8 @@ class TestAnchorsAliases:
- <<: [*BIG, *LEFT, *SMALL]
x: 1
label: center/huge
- """)
+ """
+ )
def test_merge_00(self):
data = load(self.merge_yaml)
@@ -171,14 +195,17 @@ class TestAnchorsAliases:
def test_merge_accessible(self):
from ruamel.yaml.comments import CommentedMap, merge_attrib
- data = load("""
+
+ data = load(
+ """
k: &level_2 { a: 1, b2 }
l: &level_1 { a: 10, c: 3 }
m:
<<: *level_1
c: 30
d: 40
- """)
+ """
+ )
d = data['m']
assert isinstance(d, CommentedMap)
assert hasattr(d, merge_attrib)
@@ -188,7 +215,7 @@ class TestAnchorsAliases:
compare(data, self.merge_yaml)
def test_merge_nested(self):
- yaml = '''
+ yaml = """
a:
<<: &content
1: plugh
@@ -196,11 +223,11 @@ class TestAnchorsAliases:
0: xyzzy
b:
<<: *content
- '''
+ """
data = round_trip(yaml) # NOQA
def test_merge_nested_with_sequence(self):
- yaml = '''
+ yaml = """
a:
<<: &content
<<: &y2
@@ -209,11 +236,12 @@ class TestAnchorsAliases:
0: xyzzy
b:
<<: [*content, *y2]
- '''
+ """
data = round_trip(yaml) # NOQA
def test_add_anchor(self):
from ruamel.yaml.comments import CommentedMap
+
data = CommentedMap()
data_a = CommentedMap()
data['a'] = data_a
@@ -221,31 +249,37 @@ class TestAnchorsAliases:
data['b'] = 2
data.yaml_set_anchor('klm', always_dump=True)
data['a'].yaml_set_anchor('xyz', always_dump=True)
- compare(data, """
+ compare(
+ data,
+ """
&klm
a: &xyz
c: 3
b: 2
- """)
+ """,
+ )
# this is an error in PyYAML
def test_reused_anchor(self):
from ruamel.yaml.error import ReusedAnchorWarning
- yaml = '''
+
+ yaml = """
- &a
x: 1
- <<: *a
- &a
x: 2
- <<: *a
- '''
+ """
with pytest.warns(ReusedAnchorWarning):
data = round_trip(yaml) # NOQA
def test_issue_130(self):
# issue 130 reported by Devid Fee
import ruamel.yaml
- ys = dedent("""\
+
+ ys = dedent(
+ """\
components:
server: &server_component
type: spark.server:ServerComponent
@@ -261,14 +295,17 @@ class TestAnchorsAliases:
<<: *shell_component
components:
server: {<<: *server_service}
- """)
+ """
+ )
data = ruamel.yaml.safe_load(ys)
assert data['services']['shell']['components']['server']['port'] == 8000
def test_issue_130a(self):
# issue 130 reported by Devid Fee
import ruamel.yaml
- ys = dedent("""\
+
+ ys = dedent(
+ """\
components:
server: &server_component
type: spark.server:ServerComponent
@@ -285,14 +322,16 @@ class TestAnchorsAliases:
<<: *shell_component
components:
server: {<<: *server_service}
- """)
+ """
+ )
data = ruamel.yaml.safe_load(ys)
assert data['services']['shell']['components']['server']['port'] == 4000
class TestMergeKeysValues:
- yaml_str = dedent("""\
+ yaml_str = dedent(
+ """\
- &mx
a: x1
b: x2
@@ -306,12 +345,14 @@ class TestMergeKeysValues:
<<: *mx
m: 6
<<: *my
- """)
+ """
+ )
# in the following d always has "expanded" the merges
def test_merge_for(self):
from ruamel.yaml import safe_load
+
d = safe_load(self.yaml_str)
data = round_trip_load(self.yaml_str)
count = 0
@@ -322,6 +363,7 @@ class TestMergeKeysValues:
def test_merge_keys(self):
from ruamel.yaml import safe_load
+
d = safe_load(self.yaml_str)
data = round_trip_load(self.yaml_str)
count = 0
@@ -332,6 +374,7 @@ class TestMergeKeysValues:
def test_merge_values(self):
from ruamel.yaml import safe_load
+
d = safe_load(self.yaml_str)
data = round_trip_load(self.yaml_str)
count = 0
@@ -342,6 +385,7 @@ class TestMergeKeysValues:
def test_merge_items(self):
from ruamel.yaml import safe_load
+
d = safe_load(self.yaml_str)
data = round_trip_load(self.yaml_str)
count = 0
@@ -353,6 +397,7 @@ class TestMergeKeysValues:
def test_len_items_delete(self):
from ruamel.yaml import safe_load
from ruamel.yaml.compat import PY3
+
d = safe_load(self.yaml_str)
data = round_trip_load(self.yaml_str)
x = data[2].items()
@@ -377,13 +422,16 @@ class TestDuplicateKeyThroughAnchor:
from ruamel.yaml import version_info
from ruamel.yaml import safe_load, round_trip_load
from ruamel.yaml.constructor import DuplicateKeyFutureWarning, DuplicateKeyError
- s = dedent("""\
+
+ s = dedent(
+ """\
&anchor foo:
foo: bar
*anchor : duplicate key
baz: bat
*anchor : duplicate key
- """)
+ """
+ )
if version_info < (0, 15, 1):
pass
elif version_info < (0, 16, 0):
@@ -402,27 +450,27 @@ class TestFullCharSetAnchors:
def test_master_of_orion(self):
# https://bitbucket.org/ruamel/yaml/issues/72/not-allowed-in-anchor-names
# submitted by Shalon Wood
- yaml_str = '''
+ yaml_str = """
- collection: &Backend.Civilizations.RacialPerk
items:
- key: perk_population_growth_modifier
- *Backend.Civilizations.RacialPerk
- '''
+ """
data = load(yaml_str) # NOQA
def test_roundtrip_00(self):
- yaml_str = '''
+ yaml_str = """
- &dotted.words.here
a: 1
b: 2
- *dotted.words.here
- '''
+ """
data = round_trip(yaml_str) # NOQA
def test_roundtrip_01(self):
- yaml_str = '''
+ yaml_str = """
- &dotted.words.here[a, b]
- *dotted.words.here
- '''
+ """
data = load(yaml_str) # NOQA
compare(data, yaml_str.replace('[', ' [')) # an extra space is inserted
diff --git a/_test/test_api_change.py b/_test/test_api_change.py
index 59473fe..f191c56 100644
--- a/_test/test_api_change.py
+++ b/_test/test_api_change.py
@@ -16,6 +16,7 @@ class TestNewAPI:
def test_duplicate_keys_00(self):
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
+
yaml = YAML()
with pytest.raises(DuplicateKeyError):
yaml.load('{a: 1, a: 2}')
@@ -23,6 +24,7 @@ class TestNewAPI:
def test_duplicate_keys_01(self):
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
+
yaml = YAML(typ='safe', pure=True)
with pytest.raises(DuplicateKeyError):
yaml.load('{a: 1, a: 2}')
@@ -31,6 +33,7 @@ class TestNewAPI:
def test_duplicate_keys_02(self):
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
+
yaml = YAML(typ='safe')
with pytest.raises(DuplicateKeyError):
yaml.load('{a: 1, a: 2}')
@@ -38,6 +41,7 @@ class TestNewAPI:
def test_issue_135(self):
# reported by Andrzej Ostrowski
from ruamel.yaml import YAML
+
data = {'a': 1, 'b': 2}
yaml = YAML(typ='safe')
# originally on 2.7: with pytest.raises(TypeError):
@@ -46,6 +50,7 @@ class TestNewAPI:
def test_issue_135_temporary_workaround(self):
# never raised error
from ruamel.yaml import YAML
+
data = {'a': 1, 'b': 2}
yaml = YAML(typ='safe', pure=True)
yaml.dump(data, sys.stdout)
@@ -54,16 +59,18 @@ class TestNewAPI:
class TestWrite:
def test_dump_path(self, tmpdir):
from ruamel.yaml import YAML
+
fn = Path(str(tmpdir)) / 'test.yaml'
yaml = YAML()
data = yaml.map()
data['a'] = 1
data['b'] = 2
yaml.dump(data, fn)
- assert fn.read_text() == "a: 1\nb: 2\n"
+ assert fn.read_text() == 'a: 1\nb: 2\n'
def test_dump_file(self, tmpdir):
from ruamel.yaml import YAML
+
fn = Path(str(tmpdir)) / 'test.yaml'
yaml = YAML()
data = yaml.map()
@@ -71,10 +78,11 @@ class TestWrite:
data['b'] = 2
with open(str(fn), 'w') as fp:
yaml.dump(data, fp)
- assert fn.read_text() == "a: 1\nb: 2\n"
+ assert fn.read_text() == 'a: 1\nb: 2\n'
def test_dump_missing_stream(self):
from ruamel.yaml import YAML
+
yaml = YAML()
data = yaml.map()
data['a'] = 1
@@ -84,6 +92,7 @@ class TestWrite:
def test_dump_too_many_args(self, tmpdir):
from ruamel.yaml import YAML
+
fn = Path(str(tmpdir)) / 'test.yaml'
yaml = YAML()
data = yaml.map()
@@ -104,23 +113,25 @@ class TestWrite:
data['a'] = 1
data['b'] = 2
yaml.dump(data, fn, transform=tr)
- assert fn.read_text() == "a: 1\nb: 2\n"
+ assert fn.read_text() == 'a: 1\nb: 2\n'
def test_print(self, capsys):
from ruamel.yaml import YAML
+
yaml = YAML()
data = yaml.map()
data['a'] = 1
data['b'] = 2
yaml.dump(data, sys.stdout)
out, err = capsys.readouterr()
- assert out == "a: 1\nb: 2\n"
+ assert out == 'a: 1\nb: 2\n'
class TestRead:
def test_multi_load(self):
# make sure reader, scanner, parser get reset
from ruamel.yaml import YAML
+
yaml = YAML()
yaml.load('a: 1')
yaml.load('a: 1') # did not work in 0.15.4
@@ -130,14 +141,19 @@ class TestLoadAll:
def test_multi_document_load(self, tmpdir):
"""this went wrong on 3.7 because of StopIteration, PR 37 and Issue 211"""
from ruamel.yaml import YAML
+
fn = Path(str(tmpdir)) / 'test.yaml'
- fn.write_text(textwrap.dedent(u"""\
+ fn.write_text(
+ textwrap.dedent(
+ u"""\
---
- a
---
- b
...
- """))
+ """
+ )
+ )
yaml = YAML()
assert list(yaml.load_all(fn)) == [['a'], ['b']]
@@ -147,15 +163,20 @@ class TestDuplSet:
        # round-trip-loader should raise an exception
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
+
yaml = YAML()
with pytest.raises(DuplicateKeyError):
- yaml.load(textwrap.dedent("""\
+ yaml.load(
+ textwrap.dedent(
+ """\
!!set
? a
? b
? c
? a
- """))
+ """
+ )
+ )
class TestDumpLoadUnicode:
@@ -163,26 +184,29 @@ class TestDumpLoadUnicode:
# and answer by randomir (https://stackoverflow.com/a/45281922/1307905)
def test_write_unicode(self, tmpdir):
from ruamel.yaml import YAML
+
yaml = YAML()
- text_dict = {"text": u"HELLO_WORLD©"}
+ text_dict = {'text': u'HELLO_WORLD©'}
file_name = str(tmpdir) + '/tstFile.yaml'
yaml.dump(text_dict, open(file_name, 'w'))
assert open(file_name, 'rb').read().decode('utf-8') == u'text: HELLO_WORLD©\n'
def test_read_unicode(self, tmpdir):
from ruamel.yaml import YAML
+
yaml = YAML()
file_name = str(tmpdir) + '/tstFile.yaml'
with open(file_name, 'wb') as fp:
fp.write(u'text: HELLO_WORLD©\n'.encode('utf-8'))
text_dict = yaml.load(open(file_name, 'r'))
- assert text_dict["text"] == u"HELLO_WORLD©"
+ assert text_dict['text'] == u'HELLO_WORLD©'
class TestFlowStyle:
def test_flow_style(self, capsys):
# https://stackoverflow.com/questions/45791712/
from ruamel.yaml import YAML
+
yaml = YAML()
yaml.default_flow_style = None
data = yaml.map()
@@ -190,15 +214,16 @@ class TestFlowStyle:
data['a'] = [[1, 2], [3, 4]]
yaml.dump(data, sys.stdout)
out, err = capsys.readouterr()
- assert out == "b: 1\na:\n- [1, 2]\n- [3, 4]\n"
+ assert out == 'b: 1\na:\n- [1, 2]\n- [3, 4]\n'
class TestOldAPI:
- @pytest.mark.skipif(sys.version_info >= (3, 0), reason="ok on Py3")
+ @pytest.mark.skipif(sys.version_info >= (3, 0), reason='ok on Py3')
@pytest.mark.xfail(strict=True)
def test_duplicate_keys_02(self):
# Issue 165 unicode keys in error/warning
from ruamel.yaml import safe_load
from ruamel.yaml.constructor import DuplicateKeyFutureWarning
+
with pytest.warns(DuplicateKeyFutureWarning):
safe_load('type: Doméstica\ntype: International')
diff --git a/_test/test_class_register.py b/_test/test_class_register.py
index 3da9fb7..126d93f 100644
--- a/_test/test_class_register.py
+++ b/_test/test_class_register.py
@@ -22,8 +22,7 @@ class User1(object):
@classmethod
def to_yaml(cls, representer, node):
- return representer.represent_scalar(cls.yaml_tag,
- u'{.name}-{.age}'.format(node, node))
+ return representer.represent_scalar(cls.yaml_tag, u'{.name}-{.age}'.format(node, node))
@classmethod
def from_yaml(cls, constructor, node):
@@ -34,66 +33,66 @@ class TestRegisterClass(object):
def test_register_0_rt(self):
yaml = YAML()
yaml.register_class(User0)
- ys = '''
+ ys = """
- !User0
name: Anthon
age: 18
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys, unordered_lines=True)
def test_register_0_safe(self):
# default_flow_style = None
- yaml = YAML(typ="safe")
+ yaml = YAML(typ='safe')
yaml.register_class(User0)
- ys = '''
+ ys = """
- !User0 {age: 18, name: Anthon}
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys)
def test_register_0_unsafe(self):
# default_flow_style = None
- yaml = YAML(typ="unsafe")
+ yaml = YAML(typ='unsafe')
yaml.register_class(User0)
- ys = '''
+ ys = """
- !User0 {age: 18, name: Anthon}
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys)
def test_register_1_rt(self):
yaml = YAML()
yaml.register_class(User1)
- ys = '''
+ ys = """
- !user Anthon-18
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys)
def test_register_1_safe(self):
- yaml = YAML(typ="safe")
+ yaml = YAML(typ='safe')
yaml.register_class(User1)
- ys = '''
+ ys = """
[!user Anthon-18]
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys)
def test_register_1_unsafe(self):
- yaml = YAML(typ="unsafe")
+ yaml = YAML(typ='unsafe')
yaml.register_class(User1)
- ys = '''
+ ys = """
[!user Anthon-18]
- '''
+ """
d = yaml.load(ys)
yaml.dump(d, compare=ys)
class TestDecorator(object):
-
def test_decorator_implicit(self):
from ruamel.yaml import yaml_object
+
yml = YAML()
@yaml_object(yml)
@@ -102,16 +101,17 @@ class TestDecorator(object):
self.name = name
self.age = age
- ys = '''
+ ys = """
- !User2
name: Anthon
age: 18
- '''
+ """
d = yml.load(ys)
yml.dump(d, compare=ys, unordered_lines=True)
def test_decorator_explicit(self):
from ruamel.yaml import yaml_object
+
yml = YAML()
@yaml_object(yml)
@@ -124,15 +124,16 @@ class TestDecorator(object):
@classmethod
def to_yaml(cls, representer, node):
- return representer.represent_scalar(cls.yaml_tag,
- u'{.name}-{.age}'.format(node, node))
+ return representer.represent_scalar(
+ cls.yaml_tag, u'{.name}-{.age}'.format(node, node)
+ )
@classmethod
def from_yaml(cls, constructor, node):
return cls(*node.value.split('-'))
- ys = '''
+ ys = """
- !USER Anthon-18
- '''
+ """
d = yml.load(ys)
yml.dump(d, compare=ys)
diff --git a/_test/test_collections.py b/_test/test_collections.py
index e70dbd2..e6033bb 100644
--- a/_test/test_collections.py
+++ b/_test/test_collections.py
@@ -7,7 +7,7 @@ This is now so integrated in Python that it can be mapped to !!omap
"""
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NOQA
@@ -16,5 +16,6 @@ from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NO
class TestOrderedDict:
def test_ordereddict(self):
from collections import OrderedDict
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
assert ruamel.yaml.dump(OrderedDict()) == '!!omap []\n'
diff --git a/_test/test_comment_manipulation.py b/_test/test_comment_manipulation.py
index 7586954..92810da 100644
--- a/_test/test_comment_manipulation.py
+++ b/_test/test_comment_manipulation.py
@@ -16,8 +16,11 @@ def compare(data, s, **kw):
def compare_eol(data, s):
- assert round_trip_dump(data).replace('\n', '|\n') == \
- dedent(s).replace('EOL', '').replace('\n', '|\n')
+ assert round_trip_dump(data).replace('\n', '|\n') == dedent(s).replace('EOL', "").replace(
+ '\n', '|\n'
+ )
+
+
# @pytest.mark.xfail
@@ -25,336 +28,451 @@ class TestCommentsManipulation:
# list
def test_seq_set_comment_on_existing_explicit_column(self):
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key=1, column=6)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 1
- b # comment 2
- c
- """)
+ """,
+ )
def test_seq_overwrite_comment_on_existing_explicit_column(self):
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key=0, column=6)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 2
- b
- c
- """)
+ """,
+ )
def test_seq_first_comment_explicit_column(self):
- data = load("""
+ data = load(
+ """
- a
- b
- c
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 1', key=1, column=6)
- compare(data, """
+ compare(
+ data,
+ """
- a
- b # comment 1
- c
- """)
+ """,
+ )
def test_seq_set_comment_on_existing_column_prev(self):
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- d # comment 3
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key=1)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 1
- b # comment 2
- c
- d # comment 3
- """)
+ """,
+ )
def test_seq_set_comment_on_existing_column_next(self):
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- d # comment 3
- """)
+ """
+ )
print(data._yaml_comment)
# print(type(data._yaml_comment._items[0][0].start_mark))
# ruamel.yaml.error.Mark
# print(type(data._yaml_comment._items[0][0].start_mark))
data.yaml_add_eol_comment('comment 2', key=2)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 1
- b
- c # comment 2
- d # comment 3
- """)
+ """,
+ )
def test_seq_set_comment_on_existing_column_further_away(self):
"""
no comment line before or after, take the latest before
the new position
"""
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- d
- e
- f # comment 3
- """)
+ """
+ )
print(data._yaml_comment)
# print(type(data._yaml_comment._items[0][0].start_mark))
# ruamel.yaml.error.Mark
# print(type(data._yaml_comment._items[0][0].start_mark))
data.yaml_add_eol_comment('comment 2', key=3)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 1
- b
- c
- d # comment 2
- e
- f # comment 3
- """)
+ """,
+ )
def test_seq_set_comment_on_existing_explicit_column_with_hash(self):
- data = load("""
+ data = load(
+ """
- a # comment 1
- b
- c
- """)
+ """
+ )
data.yaml_add_eol_comment('# comment 2', key=1, column=6)
- compare(data, """
+ compare(
+ data,
+ """
- a # comment 1
- b # comment 2
- c
- """)
+ """,
+ )
# dict
def test_dict_set_comment_on_existing_explicit_column(self):
- data = load("""
+ data = load(
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4
e: 5
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key='c', column=7)
- compare(data, """
+ compare(
+ data,
+ """
a: 1 # comment 1
b: 2
c: 3 # comment 2
d: 4
e: 5
- """)
+ """,
+ )
def test_dict_overwrite_comment_on_existing_explicit_column(self):
- data = load("""
+ data = load(
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4
e: 5
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key='a', column=7)
- compare(data, """
+ compare(
+ data,
+ """
a: 1 # comment 2
b: 2
c: 3
d: 4
e: 5
- """)
+ """,
+ )
def test_map_set_comment_on_existing_column_prev(self):
- data = load("""
+ data = load(
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4
e: 5 # comment 3
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key='b')
- compare(data, """
+ compare(
+ data,
+ """
a: 1 # comment 1
b: 2 # comment 2
c: 3
d: 4
e: 5 # comment 3
- """)
+ """,
+ )
def test_map_set_comment_on_existing_column_next(self):
- data = load("""
+ data = load(
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4
e: 5 # comment 3
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key='d')
- compare(data, """
+ compare(
+ data,
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4 # comment 2
e: 5 # comment 3
- """)
+ """,
+ )
def test_map_set_comment_on_existing_column_further_away(self):
"""
no comment line before or after, take the latest before
the new position
"""
- data = load("""
+ data = load(
+ """
a: 1 # comment 1
b: 2
c: 3
d: 4
e: 5 # comment 3
- """)
+ """
+ )
data.yaml_add_eol_comment('comment 2', key='c')
print(round_trip_dump(data))
- compare(data, """
+ compare(
+ data,
+ """
a: 1 # comment 1
b: 2
c: 3 # comment 2
d: 4
e: 5 # comment 3
- """)
+ """,
+ )
# @pytest.mark.xfail
def test_before_top_map_rt(self):
- data = load("""
+ data = load(
+ """
a: 1
b: 2
- """)
+ """
+ )
data.yaml_set_start_comment('Hello\nWorld\n')
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
a: 1
b: 2
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_top_map_replace(self):
- data = load("""
+ data = load(
+ """
# abc
# def
a: 1 # 1
b: 2
- """)
+ """
+ )
data.yaml_set_start_comment('Hello\nWorld\n')
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
a: 1 # 1
b: 2
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_top_map_from_scratch(self):
from ruamel.yaml.comments import CommentedMap
+
data = CommentedMap()
data['a'] = 1
data['b'] = 2
data.yaml_set_start_comment('Hello\nWorld\n')
# print(data.ca)
# print(data.ca._items)
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
a: 1
b: 2
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_top_seq_rt(self):
- data = load("""
+ data = load(
+ """
- a
- b
- """)
+ """
+ )
data.yaml_set_start_comment('Hello\nWorld\n')
print(round_trip_dump(data))
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
- a
- b
- """)
+ """,
+ )
def test_before_top_seq_rt_replace(self):
- data = load("""
+ data = load(
+ """
# this
# that
- a
- b
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ )
+ )
data.yaml_set_start_comment('Hello\nWorld\n')
print(round_trip_dump(data))
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
- a
- b
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_top_seq_from_scratch(self):
from ruamel.yaml.comments import CommentedSeq
+
data = CommentedSeq()
data.append('a')
data.append('b')
data.yaml_set_start_comment('Hello\nWorld\n')
print(round_trip_dump(data))
- compare(data, """
+ compare(
+ data,
+ """
# Hello
# World
- a
- b
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
# nested variants
def test_before_nested_map_rt(self):
- data = load("""
+ data = load(
+ """
a: 1
b:
c: 2
d: 3
- """)
+ """
+ )
data['b'].yaml_set_start_comment('Hello\nWorld\n')
- compare(data, """
+ compare(
+ data,
+ """
a: 1
b:
# Hello
# World
c: 2
d: 3
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_nested_map_rt_indent(self):
- data = load("""
+ data = load(
+ """
a: 1
b:
c: 2
d: 3
- """)
+ """
+ )
data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2)
- compare(data, """
+ compare(
+ data,
+ """
a: 1
b:
# Hello
# World
c: 2
d: 3
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
print(data['b'].ca)
def test_before_nested_map_from_scratch(self):
from ruamel.yaml.comments import CommentedMap
+
data = CommentedMap()
datab = CommentedMap()
data['a'] = 1
@@ -362,17 +480,23 @@ class TestCommentsManipulation:
datab['c'] = 2
datab['d'] = 3
data['b'].yaml_set_start_comment('Hello\nWorld\n')
- compare(data, """
+ compare(
+ data,
+ """
a: 1
b:
# Hello
# World
c: 2
d: 3
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_nested_seq_from_scratch(self):
from ruamel.yaml.comments import CommentedMap, CommentedSeq
+
data = CommentedMap()
datab = CommentedSeq()
data['a'] = 1
@@ -380,17 +504,23 @@ class TestCommentsManipulation:
datab.append('c')
datab.append('d')
data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2)
- compare(data, """
+ compare(
+ data,
+ """
a: 1
b:
# Hello
# World
- c
- d
- """.format(comment='#'))
+ """.format(
+ comment='#'
+ ),
+ )
def test_before_nested_seq_from_scratch_block_seq_indent(self):
from ruamel.yaml.comments import CommentedMap, CommentedSeq
+
data = CommentedMap()
datab = CommentedSeq()
data['a'] = 1
@@ -398,18 +528,26 @@ class TestCommentsManipulation:
datab.append('c')
datab.append('d')
data['b'].yaml_set_start_comment('Hello\nWorld\n', indent=2)
- compare(data, """
+ compare(
+ data,
+ """
a: 1
b:
# Hello
# World
- c
- d
- """.format(comment='#'), indent=4, block_seq_indent=2)
+ """.format(
+ comment='#'
+ ),
+ indent=4,
+ block_seq_indent=2,
+ )
def test_map_set_comment_before_and_after_non_first_key_00(self):
# http://stackoverflow.com/a/40705671/1307905
- data = load("""
+ data = load(
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -417,11 +555,15 @@ class TestCommentsManipulation:
test1:
test2:
test3: 3
- """)
- data.yaml_set_comment_before_after_key('test1', 'before test1 (top level)',
- after='before test2')
+ """
+ )
+ data.yaml_set_comment_before_after_key(
+ 'test1', 'before test1 (top level)', after='before test2'
+ )
data['test1']['test2'].yaml_set_start_comment('after test2', indent=4)
- compare(data, """
+ compare(
+ data,
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -432,10 +574,12 @@ class TestCommentsManipulation:
test2:
# after test2
test3: 3
- """)
+ """,
+ )
def test_map_set_comment_before_and_after_non_first_key_01(self):
- data = load("""
+ data = load(
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -443,12 +587,16 @@ class TestCommentsManipulation:
test1:
test2:
test3: 3
- """)
- data.yaml_set_comment_before_after_key('test1', 'before test1 (top level)',
- after='before test2\n\n')
+ """
+ )
+ data.yaml_set_comment_before_after_key(
+ 'test1', 'before test1 (top level)', after='before test2\n\n'
+ )
data['test1']['test2'].yaml_set_start_comment('after test2', indent=4)
        # EOL is needed here as dedenting gets rid of spaces (as well as does Emacs)
- compare_eol(data, """
+ compare_eol(
+ data,
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -460,10 +608,12 @@ class TestCommentsManipulation:
test2:
# after test2
test3: 3
- """)
+ """,
+ )
def test_map_set_comment_before_and_after_non_first_key_02(self):
- data = load("""
+ data = load(
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -471,12 +621,16 @@ class TestCommentsManipulation:
test1:
test2:
test3: 3
- """)
- data.yaml_set_comment_before_after_key('test1', 'xyz\n\nbefore test1 (top level)',
- after='\nbefore test2', after_indent=4)
+ """
+ )
+ data.yaml_set_comment_before_after_key(
+ 'test1', 'xyz\n\nbefore test1 (top level)', after='\nbefore test2', after_indent=4
+ )
data['test1']['test2'].yaml_set_start_comment('after test2', indent=4)
        # EOL is needed here as dedenting gets rid of spaces (as well as does Emacs)
- compare_eol(data, """
+ compare_eol(
+ data,
+ """
xyz:
a: 1 # comment 1
b: 2
@@ -490,4 +644,5 @@ class TestCommentsManipulation:
test2:
# after test2
test3: 3
- """)
+ """,
+ )
diff --git a/_test/test_comments.py b/_test/test_comments.py
index 921b193..7a43a97 100644
--- a/_test/test_comments.py
+++ b/_test/test_comments.py
@@ -27,15 +27,18 @@ class TestComments:
round_trip(x, extra='a\n')
def test_no_comments(self):
- round_trip("""
+ round_trip(
+ """
- europe: 10
- usa:
- ohio: 2
- california: 9
- """)
+ """
+ )
def test_round_trip_ordering(self):
- round_trip("""
+ round_trip(
+ """
a: 1
b: 2
c: 3
@@ -44,25 +47,32 @@ class TestComments:
d: 4
e: 5
f: 6
- """)
+ """
+ )
def test_complex(self):
- round_trip("""
+ round_trip(
+ """
- europe: 10 # top
- usa:
- ohio: 2
- california: 9 # o
- """)
+ """
+ )
def test_dropped(self):
- round_trip("""
+ round_trip(
+ """
# comment
scalar
...
- """, "scalar\n...\n")
+ """,
+ 'scalar\n...\n',
+ )
def test_main_mapping_begin_end(self):
- round_trip("""
+ round_trip(
+ """
# C start a
# C start b
abc: 1
@@ -70,7 +80,8 @@ class TestComments:
klm: 3
# C end a
# C end b
- """)
+ """
+ )
def test_reindent(self):
x = """\
@@ -80,14 +91,17 @@ class TestComments:
"""
d = round_trip_load(x)
y = round_trip_dump(d, indent=4)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
a:
b: # comment 1
c: 1 # comment 2
- """)
+ """
+ )
def test_main_mapping_begin_end_items_post(self):
- round_trip("""
+ round_trip(
+ """
# C start a
# C start b
abc: 1 # abc comment
@@ -95,10 +109,12 @@ class TestComments:
klm: 3 # klm comment
# C end a
# C end b
- """)
+ """
+ )
def test_main_sequence_begin_end(self):
- round_trip("""
+ round_trip(
+ """
# C start a
# C start b
- abc
@@ -106,10 +122,12 @@ class TestComments:
- klm
# C end a
# C end b
- """)
+ """
+ )
def test_main_sequence_begin_end_items_post(self):
- round_trip("""
+ round_trip(
+ """
# C start a
# C start b
- abc # abc comment
@@ -117,10 +135,12 @@ class TestComments:
- klm # klm comment
# C end a
# C end b
- """)
+ """
+ )
def test_main_mapping_begin_end_complex(self):
- round_trip("""
+ round_trip(
+ """
# C start a
# C start b
abc: 1
@@ -130,10 +150,12 @@ class TestComments:
3b: beta # it is all greek to me
# C end a
# C end b
- """)
+ """
+ )
- def test_09(self): # 2.9 from the examples in the spec
- round_trip("""
+ def test_09(self): # 2.9 from the examples in the spec
+ round_trip(
+ """
hr: # 1998 hr ranking
- Mark McGwire
- Sammy Sosa
@@ -141,10 +163,14 @@ class TestComments:
# 1998 rbi ranking
- Sammy Sosa
- Ken Griffey
- """, indent=4, block_seq_indent=2)
+ """,
+ indent=4,
+ block_seq_indent=2,
+ )
def test_09a(self):
- round_trip("""
+ round_trip(
+ """
hr: # 1998 hr ranking
- Mark McGwire
- Sammy Sosa
@@ -152,36 +178,44 @@ class TestComments:
# 1998 rbi ranking
- Sammy Sosa
- Ken Griffey
- """)
+ """
+ )
def test_simple_map_middle_comment(self):
- round_trip("""
+ round_trip(
+ """
abc: 1
# C 3a
# C 3b
ghi: 2
- """)
+ """
+ )
def test_map_in_map_0(self):
- round_trip("""
+ round_trip(
+ """
map1: # comment 1
# comment 2
map2:
key1: val1
- """)
+ """
+ )
def test_map_in_map_1(self):
# comment is moved from value to key
- round_trip("""
+ round_trip(
+ """
map1:
# comment 1
map2:
key1: val1
- """)
+ """
+ )
def test_application_arguments(self):
        # application configuration
- round_trip("""
+ round_trip(
+ """
args:
username: anthon
passwd: secret
@@ -190,7 +224,8 @@ class TestComments:
session-name: test
loop:
wait: 10
- """)
+ """
+ )
def test_substitute(self):
x = """
@@ -210,7 +245,8 @@ class TestComments:
assert round_trip_dump(data) == dedent(x)
def test_set_comment(self):
- round_trip("""
+ round_trip(
+ """
!!set
# the beginning
? a
@@ -218,29 +254,35 @@ class TestComments:
? b # You see? Promised you.
? c
# this is the end
- """)
+ """
+ )
def test_omap_comment_roundtrip(self):
- round_trip("""
+ round_trip(
+ """
!!omap
- a: 1
- b: 2 # two
- c: 3 # three
- d: 4
- """)
+ """
+ )
def test_omap_comment_roundtrip_pre_comment(self):
- round_trip("""
+ round_trip(
+ """
!!omap
- a: 1
- b: 2 # two
- c: 3 # three
# last one
- d: 4
- """)
+ """
+ )
def test_non_ascii(self):
- round_trip("""
+ round_trip(
+ """
verbosity: 1 # 0 is minimal output, -1 none
base_url: http://gopher.net
special_indices: [1, 5, 8]
@@ -259,34 +301,43 @@ class TestComments:
<<: *asia_europe
Spain: Madrid
Italy: Rome
- """)
+ """
+ )
def test_dump_utf8(self):
import ruamel.yaml # NOQA
- x = dedent("""\
+
+ x = dedent(
+ """\
ab:
- x # comment
- y # more comment
- """)
+ """
+ )
data = round_trip_load(x)
dumper = ruamel.yaml.RoundTripDumper
for utf in [True, False]:
- y = ruamel.yaml.dump(data, default_flow_style=False, Dumper=dumper,
- allow_unicode=utf)
+ y = ruamel.yaml.dump(
+ data, default_flow_style=False, Dumper=dumper, allow_unicode=utf
+ )
assert y == x
def test_dump_unicode_utf8(self):
import ruamel.yaml # NOQA
- x = dedent(u"""\
+
+ x = dedent(
+ u"""\
ab:
- x # comment
- y # more comment
- """)
+ """
+ )
data = round_trip_load(x)
dumper = ruamel.yaml.RoundTripDumper
for utf in [True, False]:
- y = ruamel.yaml.dump(data, default_flow_style=False, Dumper=dumper,
- allow_unicode=utf)
+ y = ruamel.yaml.dump(
+ data, default_flow_style=False, Dumper=dumper, allow_unicode=utf
+ )
assert y == x
def test_mlget_00(self):
@@ -327,7 +378,8 @@ class TestInsertPopList:
d = round_trip_load(self.ins)
d['ab'].insert(0, 'xyz')
y = round_trip_dump(d, indent=2)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- xyz
- a # a
@@ -338,13 +390,15 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_insert_1(self):
d = round_trip_load(self.ins)
d['ab'].insert(4, 'xyz')
y = round_trip_dump(d, indent=2)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- a # a
- b # b
@@ -355,13 +409,15 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_insert_2(self):
d = round_trip_load(self.ins)
d['ab'].insert(1, 'xyz')
y = round_trip_dump(d, indent=2)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- a # a
- xyz
@@ -372,14 +428,16 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_pop_0(self):
d = round_trip_load(self.ins)
d['ab'].pop(0)
y = round_trip_dump(d, indent=2)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- b # b
- c
@@ -388,14 +446,16 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_pop_1(self):
d = round_trip_load(self.ins)
d['ab'].pop(1)
y = round_trip_dump(d, indent=2)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- a # a
- c
@@ -404,14 +464,16 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_pop_2(self):
d = round_trip_load(self.ins)
d['ab'].pop(2)
y = round_trip_dump(d, indent=2)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- a # a
- b # b
@@ -420,14 +482,16 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
def test_pop_3(self):
d = round_trip_load(self.ins)
d['ab'].pop(3)
y = round_trip_dump(d, indent=2)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
ab:
- a # a
- b # b
@@ -435,7 +499,8 @@ class TestInsertPopList:
de:
- 1
- 2
- """)
+ """
+ )
# inspired by demux' question on stackoverflow
@@ -451,45 +516,52 @@ class TestInsertInMapping:
def test_insert_at_pos_1(self):
d = round_trip_load(self.ins)
- d.insert(1, 'last name', 'Vandelay', comment="new key")
+ d.insert(1, 'last name', 'Vandelay', comment='new key')
y = round_trip_dump(d)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
first_name: Art
last name: Vandelay # new key
occupation: Architect # This is an occupation comment
about: Art Vandelay is a fictional character that George invents...
- """)
+ """
+ )
def test_insert_at_pos_0(self):
d = round_trip_load(self.ins)
- d.insert(0, 'last name', 'Vandelay', comment="new key")
+ d.insert(0, 'last name', 'Vandelay', comment='new key')
y = round_trip_dump(d)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
last name: Vandelay # new key
first_name: Art
occupation: Architect # This is an occupation comment
about: Art Vandelay is a fictional character that George invents...
- """)
+ """
+ )
def test_insert_at_pos_3(self):
# much more simple if done with appending.
d = round_trip_load(self.ins)
- d.insert(3, 'last name', 'Vandelay', comment="new key")
+ d.insert(3, 'last name', 'Vandelay', comment='new key')
y = round_trip_dump(d)
print(y)
- assert y == dedent("""\
+ assert y == dedent(
+ """\
first_name: Art
occupation: Architect # This is an occupation comment
about: Art Vandelay is a fictional character that George invents...
last name: Vandelay # new key
- """)
+ """
+ )
class TestCommentedMapMerge:
def test_in_operator(self):
- data = round_trip_load("""
+ data = round_trip_load(
+ """
x: &base
a: 1
b: 2
@@ -498,31 +570,36 @@ class TestCommentedMapMerge:
<<: *base
k: 4
l: 5
- """)
+ """
+ )
assert data['x']['a'] == 1
assert 'a' in data['x']
assert data['y']['a'] == 1
assert 'a' in data['y']
def test_issue_60(self):
- data = round_trip_load("""
+ data = round_trip_load(
+ """
x: &base
a: 1
y:
<<: *base
- """)
+ """
+ )
assert data['x']['a'] == 1
assert data['y']['a'] == 1
assert str(data['y']) == """ordereddict([('a', 1)])"""
def test_issue_60_1(self):
- data = round_trip_load("""
+ data = round_trip_load(
+ """
x: &base
a: 1
y:
<<: *base
b: 2
- """)
+ """
+ )
assert data['x']['a'] == 1
assert data['y']['a'] == 1
assert str(data['y']) == """ordereddict([('b', 2), ('a', 1)])"""
@@ -531,7 +608,8 @@ class TestCommentedMapMerge:
class TestEmptyLines:
# prompted by issue 46 from Alex Harvey
def test_issue_46(self):
- yaml_str = dedent("""\
+ yaml_str = dedent(
+ """\
---
# Please add key/value pairs in alphabetical order
@@ -540,13 +618,15 @@ class TestEmptyLines:
jenkins_ad_credentials:
bind_name: 'CN=svc-AAA-BBB-T,OU=Example,DC=COM,DC=EXAMPLE,DC=Local'
bind_pass: 'xxxxyyyy{'
- """)
+ """
+ )
d = round_trip_load(yaml_str, preserve_quotes=True)
y = round_trip_dump(d, explicit_start=True)
assert yaml_str == y
def test_multispace_map(self):
- round_trip("""
+ round_trip(
+ """
a: 1x
b: 2x
@@ -558,11 +638,13 @@ class TestEmptyLines:
d: 4x
- """)
+ """
+ )
@pytest.mark.xfail(strict=True)
def test_multispace_map_initial(self):
- round_trip("""
+ round_trip(
+ """
a: 1x
@@ -575,27 +657,33 @@ class TestEmptyLines:
d: 4x
- """)
+ """
+ )
def test_embedded_map(self):
- round_trip("""
+ round_trip(
+ """
- a: 1y
b: 2y
c: 3y
- """)
+ """
+ )
def test_toplevel_seq(self):
- round_trip("""\
+ round_trip(
+ """\
- 1
- 2
- 3
- """)
+ """
+ )
def test_embedded_seq(self):
- round_trip("""
+ round_trip(
+ """
a:
b:
- 1
@@ -604,7 +692,8 @@ class TestEmptyLines:
- 3
- """)
+ """
+ )
def test_line_with_only_spaces(self):
# issue 54
@@ -629,12 +718,14 @@ class TestEmptyLines:
assert stripped == y
def test_issue_54_not_ok(self):
- yaml_str = dedent("""\
+ yaml_str = dedent(
+ """\
toplevel:
# some comment
sublevel: 300
- """)
+ """
+ )
d = round_trip_load(yaml_str)
print(d.ca)
y = round_trip_dump(d, indent=4)
@@ -642,50 +733,61 @@ class TestEmptyLines:
assert yaml_str == y
def test_issue_54_ok(self):
- yaml_str = dedent("""\
+ yaml_str = dedent(
+ """\
toplevel:
# some comment
sublevel: 300
- """)
+ """
+ )
d = round_trip_load(yaml_str)
y = round_trip_dump(d, indent=4)
assert yaml_str == y
def test_issue_93(self):
- round_trip("""\
+ round_trip(
+ """\
a:
b:
- c1: cat # a1
# my comment on catfish
- c2: catfish # a2
- """)
+ """
+ )
def test_issue_93_00(self):
- round_trip("""\
+ round_trip(
+ """\
a:
- - c1: cat # a1
# my comment on catfish
- c2: catfish # a2
- """)
+ """
+ )
def test_issue_93_01(self):
- round_trip("""\
+ round_trip(
+ """\
- - c1: cat # a1
# my comment on catfish
- c2: catfish # a2
- """)
+ """
+ )
def test_issue_93_02(self):
# never failed as there is no indent
- round_trip("""\
+ round_trip(
+ """\
- c1: cat
# my comment on catfish
- c2: catfish
- """)
+ """
+ )
def test_issue_96(self):
# inserted extra line on trailing spaces
- round_trip("""\
+ round_trip(
+ """\
a:
b:
c: c_val
@@ -693,14 +795,15 @@ class TestEmptyLines:
e:
g: g_val
- """)
+ """
+ )
class TestUnicodeComments:
-
- @pytest.mark.skipif(sys.version_info < (2, 7), reason="wide unicode")
+ @pytest.mark.skipif(sys.version_info < (2, 7), reason='wide unicode')
def test_issue_55(self): # reported by Haraguroicha Hsu
- round_trip("""\
+ round_trip(
+ """\
name: TEST
description: test using
author: Harguroicha
@@ -715,87 +818,111 @@ class TestUnicodeComments:
- :no: 05338777 # 〇〇啓
- :no: 05273867 # 〇
- :no: 05205786 # 〇𤦌
- """)
+ """
+ )
class TestEmptyValueBeforeComments:
def test_issue_25a(self):
- round_trip("""\
+ round_trip(
+ """\
- a: b
c: d
d: # foo
- e: f
- """)
+ """
+ )
def test_issue_25a1(self):
- round_trip("""\
+ round_trip(
+ """\
- a: b
c: d
d: # foo
e: f
- """)
+ """
+ )
def test_issue_25b(self):
- round_trip("""\
+ round_trip(
+ """\
var1: #empty
var2: something #notempty
- """)
+ """
+ )
def test_issue_25c(self):
- round_trip("""\
+ round_trip(
+ """\
params:
a: 1 # comment a
b: # comment b
c: 3 # comment c
- """)
+ """
+ )
def test_issue_25c1(self):
- round_trip("""\
+ round_trip(
+ """\
params:
a: 1 # comment a
b: # comment b
# extra
c: 3 # comment c
- """)
+ """
+ )
def test_issue_25_00(self):
- round_trip("""\
+ round_trip(
+ """\
params:
a: 1 # comment a
b: # comment b
- """)
+ """
+ )
def test_issue_25_01(self):
- round_trip("""\
+ round_trip(
+ """\
a: # comment 1
# comment 2
- b: # comment 3
c: 1 # comment 4
- """)
+ """
+ )
def test_issue_25_02(self):
- round_trip("""\
+ round_trip(
+ """\
a: # comment 1
# comment 2
- b: 2 # comment 3
- """)
+ """
+ )
def test_issue_25_03(self):
- round_trip("""\
+ round_trip(
+ """\
a: # comment 1
# comment 2
- b: 2 # comment 3
- """, indent=4, block_seq_indent=2)
+ """,
+ indent=4,
+ block_seq_indent=2,
+ )
def test_issue_25_04(self):
- round_trip("""\
+ round_trip(
+ """\
a: # comment 1
# comment 2
b: 1 # comment 3
- """)
+ """
+ )
def test_flow_seq_within_seq(self):
- round_trip("""\
+ round_trip(
+ """\
# comment 1
- a
- b
@@ -807,7 +934,8 @@ class TestEmptyValueBeforeComments:
- f
# comment 4
- []
- """)
+ """
+ )
test_block_scalar_commented_line_template = """\
@@ -824,8 +952,17 @@ class TestBlockScalarWithComments:
# issue 99 reported by Colm O'Connor
def test_scalar_with_comments(self):
import ruamel.yaml # NOQA
- for x in ['', '\n', '\n# Another comment\n', '\n\n', '\n\n# abc\n#xyz\n',
- '\n\n# abc\n#xyz\n', '# abc\n\n#xyz\n', '\n\n # abc\n #xyz\n']:
+
+ for x in [
+ "",
+ '\n',
+ '\n# Another comment\n',
+ '\n\n',
+ '\n\n# abc\n#xyz\n',
+ '\n\n# abc\n#xyz\n',
+ '# abc\n\n#xyz\n',
+ '\n\n # abc\n #xyz\n',
+ ]:
commented_line = test_block_scalar_commented_line_template.format(x)
data = ruamel.yaml.round_trip_load(commented_line)
diff --git a/_test/test_copy.py b/_test/test_copy.py
index 9a6deb0..a912259 100644
--- a/_test/test_copy.py
+++ b/_test/test_copy.py
@@ -6,16 +6,18 @@ Testing copy and deepcopy, instigated by Issue 84 (Peter Amstutz)
import copy
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import dedent, round_trip_load, round_trip_dump
class TestDeepCopy:
def test_preserve_flow_style_simple(self):
- x = dedent("""\
+ x = dedent(
+ """\
{foo: bar, baz: quux}
- """)
+ """
+ )
data = round_trip_load(x)
data_copy = copy.deepcopy(data)
y = round_trip_dump(data_copy)
@@ -25,9 +27,11 @@ class TestDeepCopy:
assert data.fa.flow_style() == data_copy.fa.flow_style()
def test_deepcopy_flow_style_nested_dict(self):
- x = dedent("""\
+ x = dedent(
+ """\
a: {foo: bar, baz: quux}
- """)
+ """
+ )
data = round_trip_load(x)
assert data['a'].fa.flow_style() is True
data_copy = copy.deepcopy(data)
@@ -40,16 +44,20 @@ class TestDeepCopy:
print('x [{}]'.format(x))
print('y [{}]'.format(y))
- assert y == dedent("""\
+ assert y == dedent(
+ """\
a:
foo: bar
baz: quux
- """)
+ """
+ )
def test_deepcopy_flow_style_nested_list(self):
- x = dedent("""\
+ x = dedent(
+ """\
a: [1, 2, 3]
- """)
+ """
+ )
data = round_trip_load(x)
assert data['a'].fa.flow_style() is True
data_copy = copy.deepcopy(data)
@@ -62,19 +70,23 @@ class TestDeepCopy:
print('x [{}]'.format(x))
print('y [{}]'.format(y))
- assert y == dedent("""\
+ assert y == dedent(
+ """\
a:
- 1
- 2
- 3
- """)
+ """
+ )
class TestCopy:
def test_copy_flow_style_nested_dict(self):
- x = dedent("""\
+ x = dedent(
+ """\
a: {foo: bar, baz: quux}
- """)
+ """
+ )
data = round_trip_load(x)
assert data['a'].fa.flow_style() is True
data_copy = copy.copy(data)
@@ -87,16 +99,20 @@ class TestCopy:
z = round_trip_dump(data)
assert y == z
- assert y == dedent("""\
+ assert y == dedent(
+ """\
a:
foo: bar
baz: quux
- """)
+ """
+ )
def test_copy_flow_style_nested_list(self):
- x = dedent("""\
+ x = dedent(
+ """\
a: [1, 2, 3]
- """)
+ """
+ )
data = round_trip_load(x)
assert data['a'].fa.flow_style() is True
data_copy = copy.copy(data)
@@ -109,9 +125,11 @@ class TestCopy:
print('x [{}]'.format(x))
print('y [{}]'.format(y))
- assert y == dedent("""\
+ assert y == dedent(
+ """\
a:
- 1
- 2
- 3
- """)
+ """
+ )
diff --git a/_test/test_cyaml.py b/_test/test_cyaml.py
index 4c5c5db..2d67509 100644
--- a/_test/test_cyaml.py
+++ b/_test/test_cyaml.py
@@ -4,18 +4,26 @@ import platform
import pytest
-@pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+@pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+)
def test_load_cyaml():
import ruamel.yaml
+
assert ruamel.yaml.__with_libyaml__
from ruamel.yaml.cyaml import CLoader
- ruamel.yaml.load("abc: 1", Loader=CLoader)
+
+ ruamel.yaml.load('abc: 1', Loader=CLoader)
def test_dump_cyaml():
import ruamel.yaml
+
data = {'a': 1, 'b': 2}
- res = ruamel.yaml.dump(data, Dumper=ruamel.yaml.cyaml.CSafeDumper,
- default_flow_style=False, allow_unicode=True)
- assert res == "a: 1\nb: 2\n"
+ res = ruamel.yaml.dump(
+ data,
+ Dumper=ruamel.yaml.cyaml.CSafeDumper,
+ default_flow_style=False,
+ allow_unicode=True,
+ )
+ assert res == 'a: 1\nb: 2\n'
diff --git a/_test/test_datetime.py b/_test/test_datetime.py
index b48d529..fe94c02 100644
--- a/_test/test_datetime.py
+++ b/_test/test_datetime.py
@@ -20,110 +20,147 @@ Please note that a fraction can only be included if not equal to 0
"""
import copy
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NOQA
class TestDateTime:
def test_date_only(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02
- """, """
+ """,
+ """
- 2011-10-02
- """)
+ """,
+ )
def test_zero_fraction(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02 16:45:00.0
- """, """
+ """,
+ """
- 2011-10-02 16:45:00
- """)
+ """,
+ )
def test_long_fraction(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02 16:45:00.1234 # expand with zeros
- 2011-10-02 16:45:00.123456
- 2011-10-02 16:45:00.12345612 # round to microseconds
- 2011-10-02 16:45:00.1234565 # round up
- 2011-10-02 16:45:00.12345678 # round up
- """, """
+ """,
+ """
- 2011-10-02 16:45:00.123400 # expand with zeros
- 2011-10-02 16:45:00.123456
- 2011-10-02 16:45:00.123456 # round to microseconds
- 2011-10-02 16:45:00.123457 # round up
- 2011-10-02 16:45:00.123457 # round up
- """)
+ """,
+ )
def test_canonical(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T16:45:00.1Z
- """, """
+ """,
+ """
- 2011-10-02T16:45:00.100000Z
- """)
+ """,
+ )
def test_spaced_timezone(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T11:45:00 -5
- """, """
+ """,
+ """
- 2011-10-02T11:45:00-5
- """)
+ """,
+ )
def test_normal_timezone(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T11:45:00-5
- 2011-10-02 11:45:00-5
- 2011-10-02T11:45:00-05:00
- 2011-10-02 11:45:00-05:00
- """)
+ """
+ )
def test_no_timezone(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02 6:45:00
- """, """
+ """,
+ """
- 2011-10-02 06:45:00
- """)
+ """,
+ )
def test_explicit_T(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T16:45:00
- """, """
+ """,
+ """
- 2011-10-02T16:45:00
- """)
+ """,
+ )
def test_explicit_t(self): # to upper
- round_trip("""
+ round_trip(
+ """
- 2011-10-02t16:45:00
- """, """
+ """,
+ """
- 2011-10-02T16:45:00
- """)
+ """,
+ )
def test_no_T_multi_space(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02 16:45:00
- """, """
+ """,
+ """
- 2011-10-02 16:45:00
- """)
+ """,
+ )
def test_iso(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T15:45:00+01:00
- """)
+ """
+ )
def test_zero_tz(self):
- round_trip("""
+ round_trip(
+ """
- 2011-10-02T15:45:00+0
- """)
+ """
+ )
def test_issue_45(self):
- round_trip("""
+ round_trip(
+ """
dt: 2016-08-19T22:45:47Z
- """)
+ """
+ )
def test_deepcopy_datestring(self):
# reported by Quuxplusone, http://stackoverflow.com/a/41577841/1307905
- x = dedent("""\
+ x = dedent(
+ """\
foo: 2016-10-12T12:34:56
- """)
+ """
+ )
data = copy.deepcopy(round_trip_load(x))
assert round_trip_dump(data) == x
diff --git a/_test/test_deprecation.py b/_test/test_deprecation.py
index f1a5419..35cb095 100644
--- a/_test/test_deprecation.py
+++ b/_test/test_deprecation.py
@@ -6,7 +6,7 @@ import sys
import pytest # NOQA
-@pytest.mark.skipif(sys.version_info < (3, 7), reason="collections not available?")
+@pytest.mark.skipif(sys.version_info < (3, 7), reason='collections not available?')
def test_collections_deprecation():
with pytest.warns(DeprecationWarning):
from collections import Hashable # NOQA
diff --git a/_test/test_documents.py b/_test/test_documents.py
index 6da293a..476f70c 100644
--- a/_test/test_documents.py
+++ b/_test/test_documents.py
@@ -1,70 +1,99 @@
# coding: utf-8
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import round_trip, round_trip_load_all
class TestDocument:
def test_single_doc_begin_end(self):
- round_trip("""\
+ round_trip(
+ """\
---
- a
- b
...
- """, explicit_start=True, explicit_end=True)
+ """,
+ explicit_start=True,
+ explicit_end=True,
+ )
def test_multi_doc_begin_end(self):
from ruamel import yaml
- docs = list(round_trip_load_all("""\
+
+ docs = list(
+ round_trip_load_all(
+ """\
---
- a
...
---
- b
...
- """))
+ """
+ )
+ )
assert docs == [['a'], ['b']]
- out = yaml.dump_all(docs, Dumper=yaml.RoundTripDumper, explicit_start=True,
- explicit_end=True)
- assert out == "---\n- a\n...\n---\n- b\n...\n"
+ out = yaml.dump_all(
+ docs, Dumper=yaml.RoundTripDumper, explicit_start=True, explicit_end=True
+ )
+ assert out == '---\n- a\n...\n---\n- b\n...\n'
def test_multi_doc_no_start(self):
- docs = list(round_trip_load_all("""\
+ docs = list(
+ round_trip_load_all(
+ """\
- a
...
---
- b
...
- """))
+ """
+ )
+ )
assert docs == [['a'], ['b']]
def test_multi_doc_no_end(self):
- docs = list(round_trip_load_all("""\
+ docs = list(
+ round_trip_load_all(
+ """\
- a
---
- b
- """))
+ """
+ )
+ )
assert docs == [['a'], ['b']]
def test_multi_doc_ends_only(self):
# this is ok in 1.2
- docs = list(round_trip_load_all("""\
+ docs = list(
+ round_trip_load_all(
+ """\
- a
...
- b
...
- """, version=(1, 2)))
+ """,
+ version=(1, 2),
+ )
+ )
assert docs == [['a'], ['b']]
def test_multi_doc_ends_only_1_1(self):
from ruamel import yaml
+
# this is not ok in 1.1
with pytest.raises(yaml.parser.ParserError):
- docs = list(round_trip_load_all("""\
+ docs = list(
+ round_trip_load_all(
+ """\
- a
...
- b
...
- """, version=(1, 1)))
+ """,
+ version=(1, 1),
+ )
+ )
assert docs == [['a'], ['b']] # not True, but not reached
diff --git a/_test/test_fail.py b/_test/test_fail.py
index fb85e5f..9c318a0 100644
--- a/_test/test_fail.py
+++ b/_test/test_fail.py
@@ -15,7 +15,8 @@ class TestCommentFailures:
@pytest.mark.xfail(strict=True)
def test_set_comment_before_tag(self):
# no comments before tags
- round_trip("""
+ round_trip(
+ """
# the beginning
!!set
# or this one?
@@ -24,11 +25,15 @@ class TestCommentFailures:
? b # You see? Promised you.
? c
# this is the end
- """)
+ """
+ )
def test_set_comment_before_tag_no_fail(self):
# no comments before tags
- assert round_trip_dump(round_trip_load("""
+ assert (
+ round_trip_dump(
+ round_trip_load(
+ """
# the beginning
!!set
# or this one?
@@ -37,7 +42,11 @@ class TestCommentFailures:
? b # You see? Promised you.
? c
# this is the end
- """)) == dedent("""
+ """
+ )
+ )
+ == dedent(
+ """
!!set
# or this one?
? a
@@ -45,15 +54,19 @@ class TestCommentFailures:
? b # You see? Promised you.
? c
# this is the end
- """)
+ """
+ )
+ )
@pytest.mark.xfail(strict=True)
def test_comment_dash_line(self):
- round_trip("""
+ round_trip(
+ """
- # abc
a: 1
b: 2
- """)
+ """
+ )
def test_comment_dash_line_fail(self):
x = """
@@ -63,18 +76,20 @@ class TestCommentFailures:
"""
data = round_trip_load(x)
# this is not nice
- assert round_trip_dump(data) == dedent("""
+ assert round_trip_dump(data) == dedent(
+ """
# abc
- a: 1
b: 2
- """)
+ """
+ )
class TestIndentFailures:
-
@pytest.mark.xfail(strict=True)
def test_indent_not_retained(self):
- round_trip("""
+ round_trip(
+ """
verbosity: 1 # 0 is minimal output, -1 none
base_url: http://gopher.net
special_indices: [1, 5, 8]
@@ -95,10 +110,14 @@ class TestIndentFailures:
Italy: Rome
Antarctica:
- too cold
- """)
+ """
+ )
def test_indent_not_retained_no_fail(self):
- assert round_trip_dump(round_trip_load("""
+ assert (
+ round_trip_dump(
+ round_trip_load(
+ """
verbosity: 1 # 0 is minimal output, -1 none
base_url: http://gopher.net
special_indices: [1, 5, 8]
@@ -119,7 +138,12 @@ class TestIndentFailures:
Italy: Rome
Antarctica:
- too cold
- """), indent=4) == dedent("""
+ """
+ ),
+ indent=4,
+ )
+ == dedent(
+ """
verbosity: 1 # 0 is minimal output, -1 none
base_url: http://gopher.net
special_indices: [1, 5, 8]
@@ -140,46 +164,67 @@ class TestIndentFailures:
Italy: Rome
Antarctica:
- too cold
- """)
+ """
+ )
+ )
def Xtest_indent_top_level_no_fail(self):
- round_trip("""
+ round_trip(
+ """
- a:
- b
- """, indent=4)
+ """,
+ indent=4,
+ )
class TestTagFailures:
@pytest.mark.xfail(strict=True)
def test_standard_short_tag(self):
- round_trip("""\
+ round_trip(
+ """\
!!map
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_standard_short_tag_no_fail(self):
- assert round_trip_dump(round_trip_load("""
+ assert (
+ round_trip_dump(
+ round_trip_load(
+ """
!!map
name: Anthon
location: Germany
language: python
- """)) == dedent("""
+ """
+ )
+ )
+ == dedent(
+ """
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
+ )
class TestFlowValues:
def test_flow_value_with_colon(self):
- round_trip("""\
+ round_trip(
+ """\
{a: bcd:efg}
- """)
+ """
+ )
# @pytest.mark.xfail(strict=True)
def test_flow_value_with_colon_quoted(self):
- round_trip("""\
+ round_trip(
+ """\
{a: 'bcd:efg'}
- """, preserve_quotes=True)
+ """,
+ preserve_quotes=True,
+ )
diff --git a/_test/test_float.py b/_test/test_float.py
index 2bfe098..b115eb3 100644
--- a/_test/test_float.py
+++ b/_test/test_float.py
@@ -11,7 +11,8 @@ from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NO
class TestFloat:
def test_round_trip_non_exp(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 1.0
- 1.00
- 23.100
@@ -21,7 +22,8 @@ class TestFloat:
- 42.
- -42.
- +42.
- """)
+ """
+ )
print(data)
assert 0.999 < data[0] < 1.001
assert 0.999 < data[1] < 1.001
@@ -34,7 +36,8 @@ class TestFloat:
assert 41.999 < data[8] < 42.001
def test_round_trip_zeros_0(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 0.
- +0.
- -0.
@@ -44,7 +47,8 @@ class TestFloat:
- 0.00
- +0.00
- -0.00
- """)
+ """
+ )
print(data)
for d in data:
assert -0.00001 < d < 0.00001
@@ -52,114 +56,137 @@ class TestFloat:
# @pytest.mark.xfail(strict=True)
def test_round_trip_zeros_1(self):
# not sure if this should be supported, but it is
- data = round_trip("""\
+ data = round_trip(
+ """\
- 00.0
- +00.0
- -00.0
- """)
+ """
+ )
print(data)
for d in data:
assert -0.00001 < d < 0.00001
def Xtest_round_trip_non_exp_trailing_dot(self):
- data = round_trip("""\
- """)
+ data = round_trip(
+ """\
+ """
+ )
print(data)
def test_round_trip_exp_00(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 42e56
- 42E56
- 42.0E56
- +42.0e56
- 42.0E+056
- +42.00e+056
- """)
+ """
+ )
print(data)
for d in data:
assert 41.99e56 < d < 42.01e56
# @pytest.mark.xfail(strict=True)
def test_round_trip_exp_00f(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 42.E56
- """)
+ """
+ )
print(data)
for d in data:
assert 41.99e56 < d < 42.01e56
def test_round_trip_exp_01(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- -42e56
- -42E56
- -42.0e56
- -42.0E+056
- """)
+ """
+ )
print(data)
for d in data:
assert -41.99e56 > d > -42.01e56
def test_round_trip_exp_02(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 42e-56
- 42E-56
- 42.0E-56
- +42.0e-56
- 42.0E-056
- +42.0e-056
- """)
+ """
+ )
print(data)
for d in data:
assert 41.99e-56 < d < 42.01e-56
def test_round_trip_exp_03(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- -42e-56
- -42E-56
- -42.0e-56
- -42.0E-056
- """)
+ """
+ )
print(data)
for d in data:
assert -41.99e-56 > d > -42.01e-56
def test_round_trip_exp_04(self):
- round_trip("""\
+ round_trip(
+ """\
- 1.2e+34
- 1.23e+034
- 1.230e+34
- 1.023e+34
- -1.023e+34
- 250e6
- """)
+ """
+ )
def test_round_trip_exp_05(self):
- data = round_trip("""\
+ data = round_trip(
+ """\
- 3.0517578123e-56
- 3.0517578123E-56
- 3.0517578123e-056
- 3.0517578123E-056
- """)
+ """
+ )
print(data)
for d in data:
assert 3.0517578122e-56 < d < 3.0517578124e-56
def test_yaml_1_1_no_dot(self):
from ruamel.yaml.error import MantissaNoDotYAML1_1Warning
+
with pytest.warns(MantissaNoDotYAML1_1Warning):
- round_trip_load("""\
+ round_trip_load(
+ """\
%YAML 1.1
---
- 1e6
- """)
+ """
+ )
class TestCalculations(object):
def test_mul_00(self):
# issue 149 reported by jan.brezina@tul.cz
- d = round_trip_load("""\
+ d = round_trip_load(
+ """\
- 0.1
- """)
+ """
+ )
d[0] *= -1
x = round_trip_dump(d)
assert x == '- -0.1\n'
diff --git a/_test/test_flowsequencekey.py b/_test/test_flowsequencekey.py
index 8490430..f8f122e 100644
--- a/_test/test_flowsequencekey.py
+++ b/_test/test_flowsequencekey.py
@@ -12,7 +12,8 @@ from roundtrip import round_trip # , dedent, round_trip_load, round_trip_dump
class TestFlowStyleSequenceKey:
def test_so_39595807(self):
- round_trip("""
+ round_trip(
+ """
%YAML 1.2
---
[2, 3, 4]:
@@ -21,4 +22,8 @@ class TestFlowStyleSequenceKey:
- 2
b: Hello World!
c: 'Voilà!'
- """, preserve_quotes=True, explicit_start=True, version=(1, 2))
+ """,
+ preserve_quotes=True,
+ explicit_start=True,
+ version=(1, 2),
+ )
diff --git a/_test/test_indentation.py b/_test/test_indentation.py
index 528e3b6..dcac772 100644
--- a/_test/test_indentation.py
+++ b/_test/test_indentation.py
@@ -12,10 +12,14 @@ from roundtrip import round_trip, round_trip_load, round_trip_dump, dedent, YAML
def rt(s):
import ruamel.yaml
- return ruamel.yaml.dump(
- ruamel.yaml.load(s, Loader=ruamel.yaml.RoundTripLoader),
- Dumper=ruamel.yaml.RoundTripDumper,
- ).strip() + '\n'
+
+ return (
+ ruamel.yaml.dump(
+ ruamel.yaml.load(s, Loader=ruamel.yaml.RoundTripLoader),
+ Dumper=ruamel.yaml.RoundTripDumper,
+ ).strip()
+ + '\n'
+ )
class TestIndent:
@@ -25,41 +29,50 @@ class TestIndent:
assert s == output
def test_roundtrip_mapping_of_inline_lists(self):
- s = dedent("""\
+ s = dedent(
+ """\
a: [a, b, c]
j: [k, l, m]
- """)
+ """
+ )
output = rt(s)
assert s == output
def test_roundtrip_mapping_of_inline_lists_comments(self):
- s = dedent("""\
+ s = dedent(
+ """\
# comment A
a: [a, b, c]
# comment B
j: [k, l, m]
- """)
+ """
+ )
output = rt(s)
assert s == output
def test_roundtrip_mapping_of_inline_sequence_eol_comments(self):
- s = dedent("""\
+ s = dedent(
+ """\
# comment A
a: [a, b, c] # comment B
j: [k, l, m] # comment C
- """)
+ """
+ )
output = rt(s)
assert s == output
# first test by explicitly setting flow style
def test_added_inline_list(self):
import ruamel.yaml
- s1 = dedent("""
+
+ s1 = dedent(
+ """
a:
- b
- c
- d
- """)
+ """
+ )
s = 'a: [b, c, d]\n'
data = ruamel.yaml.load(s1, Loader=ruamel.yaml.RoundTripLoader)
val = data['a']
@@ -72,58 +85,79 @@ class TestIndent:
def test_roundtrip_flow_mapping(self):
import ruamel.yaml
- s = dedent("""\
+
+ s = dedent(
+ """\
- {a: 1, b: hallo}
- {j: fka, k: 42}
- """)
+ """
+ )
data = ruamel.yaml.load(s, Loader=ruamel.yaml.RoundTripLoader)
output = ruamel.yaml.dump(data, Dumper=ruamel.yaml.RoundTripDumper)
assert s == output
def test_roundtrip_sequence_of_inline_mappings_eol_comments(self):
- s = dedent("""\
+ s = dedent(
+ """\
# comment A
- {a: 1, b: hallo} # comment B
- {j: fka, k: 42} # comment C
- """)
+ """
+ )
output = rt(s)
assert s == output
def test_indent_top_level(self):
- round_trip("""
+ round_trip(
+ """
- a:
- b
- """, indent=4)
+ """,
+ indent=4,
+ )
def test_set_indent_5_block_list_indent_1(self):
- round_trip("""
+ round_trip(
+ """
a:
- b: c
- 1
- d:
- 2
- """, indent=5, block_seq_indent=1)
+ """,
+ indent=5,
+ block_seq_indent=1,
+ )
def test_set_indent_4_block_list_indent_2(self):
- round_trip("""
+ round_trip(
+ """
a:
- b: c
- 1
- d:
- 2
- """, indent=4, block_seq_indent=2)
+ """,
+ indent=4,
+ block_seq_indent=2,
+ )
def test_set_indent_3_block_list_indent_0(self):
- round_trip("""
+ round_trip(
+ """
a:
- b: c
- 1
- d:
- 2
- """, indent=3, block_seq_indent=0)
+ """,
+ indent=3,
+ block_seq_indent=0,
+ )
def Xtest_set_indent_3_block_list_indent_2(self):
- round_trip("""
+ round_trip(
+ """
a:
-
b: c
@@ -133,19 +167,27 @@ class TestIndent:
d:
-
2
- """, indent=3, block_seq_indent=2)
+ """,
+ indent=3,
+ block_seq_indent=2,
+ )
def test_set_indent_3_block_list_indent_2(self):
- round_trip("""
+ round_trip(
+ """
a:
- b: c
- 1
- d:
- 2
- """, indent=3, block_seq_indent=2)
+ """,
+ indent=3,
+ block_seq_indent=2,
+ )
def Xtest_set_indent_2_block_list_indent_2(self):
- round_trip("""
+ round_trip(
+ """
a:
-
b: c
@@ -155,42 +197,55 @@ class TestIndent:
d:
-
2
- """, indent=2, block_seq_indent=2)
+ """,
+ indent=2,
+ block_seq_indent=2,
+ )
# this is how it should be: block_seq_indent stretches the indent
def test_set_indent_2_block_list_indent_2(self):
- round_trip("""
+ round_trip(
+ """
a:
- b: c
- 1
- d:
- 2
- """, indent=2, block_seq_indent=2)
+ """,
+ indent=2,
+ block_seq_indent=2,
+ )
# have to set indent!
def test_roundtrip_four_space_indents(self):
- s = (
- 'a:\n'
- '- foo\n'
- '- bar\n'
- )
+ s = 'a:\n' '- foo\n' '- bar\n'
round_trip(s, indent=4)
def test_roundtrip_four_space_indents_no_fail(self):
- assert round_trip_dump(round_trip_load("""
+ assert (
+ round_trip_dump(
+ round_trip_load(
+ """
a:
- foo
- bar
- """)) == dedent("""
+ """
+ )
+ )
+ == dedent(
+ """
a:
- foo
- bar
- """)
+ """
+ )
+ )
class TestYpkgIndent:
def test_00(self):
- round_trip("""
+ round_trip(
+ """
name : nano
version : 2.3.2
release : 1
@@ -205,41 +260,67 @@ class TestYpkgIndent:
GNU nano is an easy-to-use text editor originally designed
as a replacement for Pico, the ncurses-based editor from the non-free mailer
package Pine (itself now available under the Apache License as Alpine).
- """, indent=4, block_seq_indent=2, top_level_colon_align=True, prefix_colon=' ')
+ """,
+ indent=4,
+ block_seq_indent=2,
+ top_level_colon_align=True,
+ prefix_colon=' ',
+ )
def guess(s):
from ruamel.yaml.util import load_yaml_guess_indent
+
x, y, z = load_yaml_guess_indent(dedent(s))
return y, z
class TestGuessIndent:
def test_guess_20(self):
- assert guess("""\
+ assert (
+ guess(
+ """\
a:
- 1
- """) == (2, 0)
+ """
+ )
+ == (2, 0)
+ )
def test_guess_42(self):
- assert guess("""\
+ assert (
+ guess(
+ """\
a:
- 1
- """) == (4, 2)
+ """
+ )
+ == (4, 2)
+ )
def test_guess_42a(self):
# block seq indent prevails over nested key indent level
- assert guess("""\
+ assert (
+ guess(
+ """\
b:
a:
- 1
- """) == (4, 2)
+ """
+ )
+ == (4, 2)
+ )
def test_guess_3None(self):
- assert guess("""\
+ assert (
+ guess(
+ """\
b:
a: 1
- """) == (3, None)
+ """
+ )
+ == (3, None)
+ )
class TestSeparateMapSeqIndents:
@@ -250,58 +331,70 @@ class TestSeparateMapSeqIndents:
yaml = YAML()
yaml.indent = 6
yaml.block_seq_indent = 3
- yaml.round_trip("""
+ yaml.round_trip(
+ """
a:
- 1
- [1, 2]
- """)
+ """
+ )
def test_01(self):
yaml = YAML()
yaml.indent(sequence=6)
yaml.indent(offset=3)
- yaml.round_trip("""
+ yaml.round_trip(
+ """
a:
- 1
- {b: 3}
- """)
+ """
+ )
def test_02(self):
yaml = YAML()
yaml.indent(mapping=5, sequence=6, offset=3)
- yaml.round_trip("""
+ yaml.round_trip(
+ """
a:
b:
- 1
- [1, 2]
- """)
+ """
+ )
def test_03(self):
- round_trip("""
+ round_trip(
+ """
a:
b:
c:
- 1
- [1, 2]
- """, indent=4)
+ """,
+ indent=4,
+ )
def test_04(self):
yaml = YAML()
yaml.indent(mapping=5, sequence=6)
- yaml.round_trip("""
+ yaml.round_trip(
+ """
a:
b:
- 1
- [1, 2]
- {d: 3.14}
- """)
+ """
+ )
def test_issue_51(self):
yaml = YAML()
# yaml.map_indent = 2 # the default
yaml.indent(sequence=4, offset=2)
yaml.preserve_quotes = True
- yaml.round_trip("""
+ yaml.round_trip(
+ """
role::startup::author::rsyslog_inputs:
imfile:
- ruleset: 'AEM-slinglog'
@@ -312,6 +405,8 @@ class TestSeparateMapSeqIndents:
File: '/opt/aem/author/crx-quickstart/logs/stdout.log'
startmsg.regex: '^[-+T.:[:digit:]]*'
tag: 'stdout'
- """)
+ """
+ )
+
# ############ indentation
diff --git a/_test/test_int.py b/_test/test_int.py
index 080eb54..daf4fda 100644
--- a/_test/test_int.py
+++ b/_test/test_int.py
@@ -12,23 +12,27 @@ from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump
class TestBinHexOct:
# @pytest.mark.xfail(strict=True)
def test_round_trip_hex_oct(self):
- round_trip("""\
+ round_trip(
+ """\
- 42
- 0b101010
- 0x2a
- 0x2A
- 0o52
- """)
+ """
+ )
def test_calculate(self):
# make sure type, leading zero(s) and underscore are preserved
- s = dedent("""\
+ s = dedent(
+ """\
- 42
- 0b101010
- 0x_2a
- 0x2A
- 0o00_52
- """)
+ """
+ )
x = round_trip_load(s)
for idx, elem in enumerate(x):
# x[idx] = type(elem)(elem - 21)
@@ -51,7 +55,8 @@ class TestBinHexOct:
# the old octal representation.
def test_leading_zero_hex_oct_bin(self):
- round_trip("""\
+ round_trip(
+ """\
- 0b0101010
- 0b00101010
- 0x02a
@@ -60,26 +65,32 @@ class TestBinHexOct:
- 0x002A
- 0o052
- 0o0052
- """)
+ """
+ )
def test_leading_zero_int(self):
- round_trip("""\
+ round_trip(
+ """\
- 042
- 0042
- """)
+ """
+ )
def test_leading_zero_YAML_1_1(self):
- d = round_trip_load("""\
+ d = round_trip_load(
+ """\
%YAML 1.1
---
- 042
- 0o42
- """)
+ """
+ )
assert d[0] == 0o42
assert d[1] == '0o42'
def test_underscore(self):
- round_trip("""\
+ round_trip(
+ """\
- 0b10000_10010010
- 0b0_0000_1001_0010
- 0x2_87_57_b2_
@@ -87,23 +98,28 @@ class TestBinHexOct:
- 0x_0_2_8_7_5_7_B_2
- 0o2416_53662
- 42_42_
- """)
+ """
+ )
def test_leading_underscore(self):
- d = round_trip_load("""\
+ d = round_trip_load(
+ """\
- 0x_2_8_7_5_7_B_2
- _42_42_
- 42_42_
- """)
+ """
+ )
assert d[0] == 42424242
assert d[1] == '_42_42_'
assert d[2] == 4242
def test_big(self):
# bitbucket issue 144 reported by ccatterina
- d = round_trip_load("""\
+ d = round_trip_load(
+ """\
- 2_147_483_647
- 9_223_372_036_854_775_808
- """)
+ """
+ )
assert d[0] == 2147483647
assert d[1] == 9223372036854775808
diff --git a/_test/test_issues.py b/_test/test_issues.py
index 0e01875..559dcba 100644
--- a/_test/test_issues.py
+++ b/_test/test_issues.py
@@ -12,7 +12,9 @@ from roundtrip import round_trip, round_trip_load, round_trip_dump, dedent # NO
class TestIssue61:
def test_issue_61(self):
import ruamel.yaml
- s = dedent("""
+
+ s = dedent(
+ """
def1: &ANCHOR1
key1: value1
def: &ANCHOR
@@ -20,7 +22,8 @@ class TestIssue61:
key: value
comb:
<<: *ANCHOR
- """)
+ """
+ )
data = ruamel.yaml.round_trip_load(s)
assert str(data['comb']) == str(data['def'])
assert str(data['comb']) == "ordereddict([('key', 'value'), ('key1', 'value1')])"
diff --git a/_test/test_json_numbers.py b/_test/test_json_numbers.py
index a580277..56b7b6f 100644
--- a/_test/test_json_numbers.py
+++ b/_test/test_json_numbers.py
@@ -9,6 +9,7 @@ import json
def load(s, typ=float):
import ruamel.yaml
+
x = '{"low": %s }' % (s)
print('input: [%s]' % (s), repr(x))
# just to check it is loadable json
@@ -27,23 +28,29 @@ class TestJSONNumbers:
#
# which is not a superset of the JSON numbers
def test_json_number_float(self):
- for x in (y.split('#')[0].strip() for y in """
+ for x in (
+ y.split('#')[0].strip()
+ for y in """
1.0 # should fail on YAML spec on 1-9 allowed as single digit
-1.0
1e-06
3.1e-5
3.1e+5
3.1e5 # should fail on YAML spec: no +- after e
- """.splitlines()):
+ """.splitlines()
+ ):
if not x:
continue
res = load(x)
assert isinstance(res, float)
def test_json_number_int(self):
- for x in (y.split('#')[0].strip() for y in """
+ for x in (
+ y.split('#')[0].strip()
+ for y in """
42
- """.splitlines()):
+ """.splitlines()
+ ):
if not x:
continue
res = load(x, int)
diff --git a/_test/test_line_col.py b/_test/test_line_col.py
index febe9c2..4f7ad5d 100644
--- a/_test/test_line_col.py
+++ b/_test/test_line_col.py
@@ -11,80 +11,94 @@ def load(s):
class TestLineCol:
def test_item_00(self):
- data = load("""
+ data = load(
+ """
- a
- e
- [b, d]
- c
- """)
+ """
+ )
assert data[2].lc.line == 2
assert data[2].lc.col == 2
def test_item_01(self):
- data = load("""
+ data = load(
+ """
- a
- e
- {x: 3}
- c
- """)
+ """
+ )
assert data[2].lc.line == 2
assert data[2].lc.col == 2
def test_item_02(self):
- data = load("""
+ data = load(
+ """
- a
- e
- !!set {x, y}
- c
- """)
+ """
+ )
assert data[2].lc.line == 2
assert data[2].lc.col == 2
def test_item_03(self):
- data = load("""
+ data = load(
+ """
- a
- e
- !!omap
- x: 1
- y: 3
- c
- """)
+ """
+ )
assert data[2].lc.line == 2
assert data[2].lc.col == 2
def test_item_04(self):
- data = load("""
+ data = load(
+ """
# testing line and column based on SO
# http://stackoverflow.com/questions/13319067/
- key1: item 1
key2: item 2
- key3: another item 1
key4: another item 2
- """)
+ """
+ )
assert data[0].lc.line == 2
assert data[0].lc.col == 2
assert data[1].lc.line == 4
assert data[1].lc.col == 2
def test_pos_mapping(self):
- data = load("""
+ data = load(
+ """
a: 1
b: 2
c: 3
# comment
klm: 42
d: 4
- """)
+ """
+ )
assert data.lc.key('klm') == (4, 0)
assert data.lc.value('klm') == (4, 5)
def test_pos_sequence(self):
- data = load("""
+ data = load(
+ """
- a
- b
- c
# next one!
- klm
- d
- """)
+ """
+ )
assert data.lc.item(3) == (4, 2)
diff --git a/_test/test_literal.py b/_test/test_literal.py
index 0499a16..74dca3d 100644
--- a/_test/test_literal.py
+++ b/_test/test_literal.py
@@ -31,126 +31,178 @@ class TestNoIndent:
def test_top_literal_scalar_indent_example_9_5(self):
yaml = YAML()
s = '%!PS-Adobe-2.0'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_no_indent(self):
yaml = YAML()
s = 'testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_no_indent_1_1(self):
yaml = YAML()
s = 'testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
%YAML 1.1
--- |
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_no_indent_1_1_old_style(self):
from textwrap import dedent
from ruamel.yaml import safe_load
+
s = 'testing123'
- d = safe_load(dedent("""
+ d = safe_load(
+ dedent(
+ """
%YAML 1.1
--- |
{}
- """.format(s)))
+ """.format(
+ s
+ )
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_no_indent_1_1_raise(self):
from ruamel.yaml.parser import ParserError
+
yaml = YAML()
yaml.top_level_block_style_scalar_no_indent_error_1_1 = True
s = 'testing123'
with pytest.raises(ParserError):
- yaml.load("""
+ yaml.load(
+ """
%YAML 1.1
--- |
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
def test_top_literal_scalar_indent_offset_one(self):
yaml = YAML()
s = 'testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |1
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_indent_offset_four(self):
yaml = YAML()
s = 'testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |4
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_literal_scalar_indent_offset_two_leading_space(self):
yaml = YAML()
s = ' testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |4
{s}
{s}
- """.format(s=s))
+ """.format(
+ s=s
+ )
+ )
print(d)
assert d == (s + '\n') * 2
def test_top_literal_scalar_no_indent_special(self):
yaml = YAML()
s = '%!PS-Adobe-2.0'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- |
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_folding_scalar_indent(self):
yaml = YAML()
s = '%!PS-Adobe-2.0'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- >
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_folding_scalar_no_indent(self):
yaml = YAML()
s = 'testing123'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- >
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
def test_top_folding_scalar_no_indent_special(self):
yaml = YAML()
s = '%!PS-Adobe-2.0'
- d = yaml.load("""
+ d = yaml.load(
+ """
--- >
{}
- """.format(s))
+ """.format(
+ s
+ )
+ )
print(d)
assert d == s + '\n'
@@ -158,12 +210,18 @@ class TestNoIndent:
yaml = YAML(typ='safe', pure=True)
s1 = 'abc'
s2 = 'klm'
- for idx, d1 in enumerate(yaml.load_all("""
+ for idx, d1 in enumerate(
+ yaml.load_all(
+ """
--- |-
{}
--- |
{}
- """.format(s1, s2))):
+ """.format(
+ s1, s2
+ )
+ )
+ ):
print('d1:', d1)
assert ['abc', 'klm\n'][idx] == d1
@@ -176,7 +234,9 @@ class Test_RoundTripLiteral:
ys = """
--- |
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -188,7 +248,9 @@ class Test_RoundTripLiteral:
ys = """
--- |
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -200,7 +262,9 @@ class Test_RoundTripLiteral:
ys = """
---
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -212,7 +276,9 @@ class Test_RoundTripLiteral:
ys = """
---
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -224,7 +290,9 @@ class Test_RoundTripLiteral:
ys = """
---
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -238,7 +306,9 @@ class Test_RoundTripLiteral:
ys = """
---
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -249,7 +319,9 @@ class Test_RoundTripLiteral:
ys = """
--- |-
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
@@ -259,6 +331,8 @@ class Test_RoundTripLiteral:
ys = """
- |
{}
- """.format(s)
+ """.format(
+ s
+ )
d = yaml.load(ys)
yaml.dump(d, compare=ys)
diff --git a/_test/test_none.py b/_test/test_none.py
index 681f1e0..e313edc 100644
--- a/_test/test_none.py
+++ b/_test/test_none.py
@@ -1,12 +1,13 @@
# coding: utf-8
-import pytest # NOQA
+import pytest # NOQA
class TestNone:
def test_dump00(self):
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
data = None
s = ruamel.yaml.round_trip_dump(data)
assert s == 'null\n...\n'
@@ -14,7 +15,8 @@ class TestNone:
assert d == data
def test_dump01(self):
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
data = None
s = ruamel.yaml.round_trip_dump(data, explicit_end=True)
assert s == 'null\n...\n'
@@ -22,7 +24,8 @@ class TestNone:
assert d == data
def test_dump02(self):
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
data = None
s = ruamel.yaml.round_trip_dump(data, explicit_end=False)
assert s == 'null\n...\n'
@@ -30,7 +33,8 @@ class TestNone:
assert d == data
def test_dump03(self):
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
data = None
s = ruamel.yaml.round_trip_dump(data, explicit_start=True)
assert s == '---\n...\n'
@@ -38,7 +42,8 @@ class TestNone:
assert d == data
def test_dump04(self):
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
+
data = None
s = ruamel.yaml.round_trip_dump(data, explicit_start=True, explicit_end=False)
assert s == '---\n...\n'
diff --git a/_test/test_numpy.py b/_test/test_numpy.py
index e3a7718..2747fc4 100644
--- a/_test/test_numpy.py
+++ b/_test/test_numpy.py
@@ -10,6 +10,7 @@ except: # NOQA
def Xtest_numpy():
import ruamel.yaml
+
if numpy is None:
return
data = numpy.arange(10)
diff --git a/_test/test_program_config.py b/_test/test_program_config.py
index dcd8351..4d7cbd5 100644
--- a/_test/test_program_config.py
+++ b/_test/test_program_config.py
@@ -8,7 +8,8 @@ from roundtrip import round_trip
class TestProgramConfig:
def test_application_arguments(self):
# application configur
- round_trip("""
+ round_trip(
+ """
args:
username: anthon
passwd: secret
@@ -17,11 +18,13 @@ class TestProgramConfig:
session-name: test
loop:
wait: 10
- """)
+ """
+ )
def test_single(self):
# application configuration
- round_trip("""
+ round_trip(
+ """
# default arguments for the program
args: # needed to prevent comment wrapping
# this should be your username
@@ -36,11 +39,13 @@ class TestProgramConfig:
# experiment with the following
wait: 10
# no more argument info to pass
- """)
+ """
+ )
def test_multi(self):
# application configuration
- round_trip("""
+ round_trip(
+ """
# default arguments for the program
args: # needed to prevent comment wrapping
# this should be your username
@@ -55,4 +60,5 @@ class TestProgramConfig:
# experiment with the following
wait: 10
# no more argument info to pass
- """)
+ """
+ )
diff --git a/_test/test_string.py b/_test/test_string.py
index f095095..351ca0e 100644
--- a/_test/test_string.py
+++ b/_test/test_string.py
@@ -24,26 +24,35 @@ from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NO
class TestPreservedScalarString:
def test_basic_string(self):
- round_trip("""
+ round_trip(
+ """
a: abcdefg
- """, )
+ """
+ )
def test_quoted_integer_string(self):
- round_trip("""
+ round_trip(
+ """
a: '12345'
- """)
+ """
+ )
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_preserve_string(self):
- round_trip("""
+ round_trip(
+ """
a: |
abc
def
- """, intermediate=dict(a='abc\ndef\n'))
+ """,
+ intermediate=dict(a='abc\ndef\n'),
+ )
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_preserve_string_strip(self):
s = """
a: |-
@@ -53,105 +62,140 @@ class TestPreservedScalarString:
"""
round_trip(s, intermediate=dict(a='abc\ndef'))
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_preserve_string_keep(self):
- # with pytest.raises(AssertionError) as excinfo:
- round_trip("""
+ # with pytest.raises(AssertionError) as excinfo:
+ round_trip(
+ """
a: |+
ghi
jkl
b: x
- """, intermediate=dict(a='ghi\njkl\n\n\n', b='x'))
+ """,
+ intermediate=dict(a='ghi\njkl\n\n\n', b='x'),
+ )
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_preserve_string_keep_at_end(self):
# at EOF you have to specify the ... to get proper "closure"
# of the multiline scalar
- round_trip("""
+ round_trip(
+ """
a: |+
ghi
jkl
...
- """, intermediate=dict(a='ghi\njkl\n\n'))
+ """,
+ intermediate=dict(a='ghi\njkl\n\n'),
+ )
def test_fold_string(self):
with pytest.raises(AssertionError) as excinfo: # NOQA
- round_trip("""
+ round_trip(
+ """
a: >
abc
def
- """, intermediate=dict(a='abc def\n'))
+ """,
+ intermediate=dict(a='abc def\n'),
+ )
def test_fold_string_strip(self):
with pytest.raises(AssertionError) as excinfo: # NOQA
- round_trip("""
+ round_trip(
+ """
a: >-
abc
def
- """, intermediate=dict(a='abc def'))
+ """,
+ intermediate=dict(a='abc def'),
+ )
def test_fold_string_keep(self):
with pytest.raises(AssertionError) as excinfo: # NOQA
- round_trip("""
+ round_trip(
+ """
a: >+
abc
def
- """, intermediate=dict(a='abc def\n\n'))
+ """,
+ intermediate=dict(a='abc def\n\n'),
+ )
class TestQuotedScalarString:
def test_single_quoted_string(self):
- round_trip("""
+ round_trip(
+ """
a: 'abc'
- """, preserve_quotes=True)
+ """,
+ preserve_quotes=True,
+ )
def test_double_quoted_string(self):
- round_trip("""
+ round_trip(
+ """
a: "abc"
- """, preserve_quotes=True)
+ """,
+ preserve_quotes=True,
+ )
def test_non_preserved_double_quoted_string(self):
- round_trip("""
+ round_trip(
+ """
a: "abc"
- """, outp="""
+ """,
+ outp="""
a: abc
- """)
+ """,
+ )
class TestReplace:
"""inspired by issue 110 from sandres23"""
+
def test_replace_preserved_scalar_string(self):
import ruamel
- s = dedent("""\
+
+ s = dedent(
+ """\
foo: |
foo
foo
bar
foo
- """)
+ """
+ )
data = round_trip_load(s, preserve_quotes=True)
so = data['foo'].replace('foo', 'bar', 2)
assert isinstance(so, ruamel.yaml.scalarstring.PreservedScalarString)
- assert so == dedent("""
+ assert so == dedent(
+ """
bar
bar
bar
foo
- """)
+ """
+ )
def test_replace_double_quoted_scalar_string(self):
import ruamel
- s = dedent("""\
+
+ s = dedent(
+ """\
foo: "foo foo bar foo"
- """)
+ """
+ )
data = round_trip_load(s, preserve_quotes=True)
so = data['foo'].replace('foo', 'bar', 2)
assert isinstance(so, ruamel.yaml.scalarstring.DoubleQuotedScalarString)
diff --git a/_test/test_tag.py b/_test/test_tag.py
index c4fabc0..824a0c3 100644
--- a/_test/test_tag.py
+++ b/_test/test_tag.py
@@ -1,6 +1,6 @@
# coding: utf-8
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import round_trip, round_trip_load
@@ -25,92 +25,112 @@ def register_xxx(**kw):
class TestIndentFailures:
def test_tag(self):
- round_trip("""\
+ round_trip(
+ """\
!!python/object:__main__.Developer
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_full_tag(self):
- round_trip("""\
+ round_trip(
+ """\
!!tag:yaml.org,2002:python/object:__main__.Developer
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_standard_tag(self):
- round_trip("""\
+ round_trip(
+ """\
!!tag:yaml.org,2002:python/object:map
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_Y1(self):
- round_trip("""\
+ round_trip(
+ """\
!yyy
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_Y2(self):
- round_trip("""\
+ round_trip(
+ """\
!!yyy
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
class TestRoundTripCustom:
def test_X1(self):
register_xxx()
- round_trip("""\
+ round_trip(
+ """\
!xxx
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
@pytest.mark.xfail(strict=True)
def test_X_pre_tag_comment(self):
register_xxx()
- round_trip("""\
+ round_trip(
+ """\
-
# hello
!xxx
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
@pytest.mark.xfail(strict=True)
def test_X_post_tag_comment(self):
register_xxx()
- round_trip("""\
+ round_trip(
+ """\
- !xxx
# hello
name: Anthon
location: Germany
language: python
- """)
+ """
+ )
def test_scalar_00(self):
# https://stackoverflow.com/a/45967047/1307905
- round_trip("""\
+ round_trip(
+ """\
Outputs:
Vpc:
Value: !Ref: vpc # first tag
Export:
Name: !Sub "${AWS::StackName}-Vpc" # second tag
- """)
+ """
+ )
class TestIssue201:
def test_encoded_unicode_tag(self):
- round_trip_load("""
+ round_trip_load(
+ """
s: !!python/%75nicode 'abc'
- """)
+ """
+ )
diff --git a/_test/test_version.py b/_test/test_version.py
index 0855a5c..9aa4133 100644
--- a/_test/test_version.py
+++ b/_test/test_version.py
@@ -1,18 +1,20 @@
# coding: utf-8
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import dedent, round_trip, round_trip_load
def load(s, version=None):
import ruamel.yaml # NOQA
+
return ruamel.yaml.round_trip_load(dedent(s), version)
class TestVersions:
def test_explicit_1_2(self):
- r = load("""\
+ r = load(
+ """\
%YAML 1.2
---
- 12:34:56
@@ -24,7 +26,8 @@ class TestVersions:
- yes
- no
- true
- """)
+ """
+ )
assert r[0] == '12:34:56'
assert r[1] == 12
assert r[2] == 12345678
@@ -36,7 +39,8 @@ class TestVersions:
assert r[8] is True
def test_explicit_1_1(self):
- r = load("""\
+ r = load(
+ """\
%YAML 1.1
---
- 12:34:56
@@ -48,7 +52,8 @@ class TestVersions:
- yes
- no
- true
- """)
+ """
+ )
assert r[0] == 45296
assert r[1] == 10
assert r[2] == '012345678'
@@ -60,7 +65,8 @@ class TestVersions:
assert r[8] is True
def test_implicit_1_2(self):
- r = load("""\
+ r = load(
+ """\
- 12:34:56
- 12:34:56.78
- 012
@@ -71,7 +77,8 @@ class TestVersions:
- yes
- no
- true
- """)
+ """
+ )
assert r[0] == '12:34:56'
assert r[1] == '12:34:56.78'
assert r[2] == 12
@@ -84,7 +91,8 @@ class TestVersions:
assert r[9] is True
def test_load_version_1_1(self):
- r = load("""\
+ r = load(
+ """\
- 12:34:56
- 12:34:56.78
- 012
@@ -95,7 +103,9 @@ class TestVersions:
- yes
- no
- true
- """, version="1.1")
+ """,
+ version='1.1',
+ )
assert r[0] == 45296
assert r[1] == 45296.78
assert r[2] == 10
@@ -112,7 +122,9 @@ class TestIssue62:
# bitbucket issue 62, issue_62
def test_00(self):
import ruamel.yaml # NOQA
- s = dedent("""\
+
+ s = dedent(
+ """\
{}# Outside flow collection:
- ::vector
- ": - ()"
@@ -121,14 +133,17 @@ class TestIssue62:
- http://example.com/foo#bar
# Inside flow collection:
- [::vector, ": - ()", "Down, down and away!", -456, http://example.com/foo#bar]
- """)
+ """
+ )
with pytest.raises(ruamel.yaml.parser.ParserError):
round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
- round_trip(s.format(''), preserve_quotes=True)
+ round_trip(s.format(""), preserve_quotes=True)
def test_00_single_comment(self):
import ruamel.yaml # NOQA
- s = dedent("""\
+
+ s = dedent(
+ """\
{}# Outside flow collection:
- ::vector
- ": - ()"
@@ -136,22 +151,26 @@ class TestIssue62:
- -123
- http://example.com/foo#bar
- [::vector, ": - ()", "Down, down and away!", -456, http://example.com/foo#bar]
- """)
+ """
+ )
with pytest.raises(ruamel.yaml.parser.ParserError):
round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
- round_trip(s.format(''), preserve_quotes=True)
+ round_trip(s.format(""), preserve_quotes=True)
# round_trip(s.format('%YAML 1.2\n---\n'), preserve_quotes=True, version=(1, 2))
def test_01(self):
import ruamel.yaml # NOQA
- s = dedent("""\
+
+ s = dedent(
+ """\
{}[random plain value that contains a ? character]
- """)
+ """
+ )
with pytest.raises(ruamel.yaml.parser.ParserError):
round_trip(s.format('%YAML 1.1\n---\n'), preserve_quotes=True)
- round_trip(s.format(''), preserve_quotes=True)
+ round_trip(s.format(""), preserve_quotes=True)
# note the flow seq on the --- line!
- round_trip(s.format('%YAML 1.2\n--- '), preserve_quotes=True, version="1.2")
+ round_trip(s.format('%YAML 1.2\n--- '), preserve_quotes=True, version='1.2')
def test_so_45681626(self):
# was not properly parsing
diff --git a/_test/test_yamlfile.py b/_test/test_yamlfile.py
index d1ba63c..b5897f0 100644
--- a/_test/test_yamlfile.py
+++ b/_test/test_yamlfile.py
@@ -6,7 +6,7 @@ various test cases for YAML files
"""
import sys
-import pytest # NOQA
+import pytest # NOQA
import platform
from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NOQA
@@ -14,66 +14,77 @@ from roundtrip import round_trip, dedent, round_trip_load, round_trip_dump # NO
class TestYAML:
def test_backslash(self):
- round_trip("""
+ round_trip(
+ """
handlers:
static_files: applications/\\1/static/\\2
- """)
+ """
+ )
def test_omap_out(self):
# ordereddict mapped to !!omap
from ruamel.yaml.compat import ordereddict
import ruamel.yaml # NOQA
+
x = ordereddict([('a', 1), ('b', 2)])
res = ruamel.yaml.dump(x, default_flow_style=False)
- assert res == dedent("""
+ assert res == dedent(
+ """
!!omap
- a: 1
- b: 2
- """)
+ """
+ )
def test_omap_roundtrip(self):
- round_trip("""
+ round_trip(
+ """
!!omap
- a: 1
- b: 2
- c: 3
- d: 4
- """)
+ """
+ )
- @pytest.mark.skipif(sys.version_info < (2, 7), reason="collections not available")
+ @pytest.mark.skipif(sys.version_info < (2, 7), reason='collections not available')
def test_dump_collections_ordereddict(self):
from collections import OrderedDict
import ruamel.yaml # NOQA
+
# OrderedDict mapped to !!omap
x = OrderedDict([('a', 1), ('b', 2)])
- res = ruamel.yaml.dump(x,
- Dumper=ruamel.yaml.RoundTripDumper,
- default_flow_style=False)
- assert res == dedent("""
+ res = ruamel.yaml.dump(x, Dumper=ruamel.yaml.RoundTripDumper, default_flow_style=False)
+ assert res == dedent(
+ """
!!omap
- a: 1
- b: 2
- """)
+ """
+ )
- @pytest.mark.skipif(sys.version_info >= (3, 0) or
- platform.python_implementation() != "CPython",
- reason="ruamel.yaml not available")
+ @pytest.mark.skipif(
+ sys.version_info >= (3, 0) or platform.python_implementation() != 'CPython',
+ reason='ruamel.yaml not available',
+ )
def test_dump_ruamel_ordereddict(self):
from ruamel.ordereddict import ordereddict
import ruamel.yaml # NOQA
+
# OrderedDict mapped to !!omap
x = ordereddict([('a', 1), ('b', 2)])
- res = ruamel.yaml.dump(x,
- Dumper=ruamel.yaml.RoundTripDumper,
- default_flow_style=False)
- assert res == dedent("""
+ res = ruamel.yaml.dump(x, Dumper=ruamel.yaml.RoundTripDumper, default_flow_style=False)
+ assert res == dedent(
+ """
!!omap
- a: 1
- b: 2
- """)
+ """
+ )
def test_CommentedSet(self):
from ruamel.yaml.constructor import CommentedSet
+
s = CommentedSet(['a', 'b', 'c'])
s.remove('b')
s.add('d')
@@ -86,46 +97,56 @@ class TestYAML:
def test_set_out(self):
# preferable would be the shorter format without the ': null'
import ruamel.yaml # NOQA
+
x = set(['a', 'b', 'c'])
res = ruamel.yaml.dump(x, default_flow_style=False)
- assert res == dedent("""
+ assert res == dedent(
+ """
!!set
a: null
b: null
c: null
- """)
+ """
+ )
# @pytest.mark.xfail
# ordering is not preserved in a set
def test_set_compact(self):
# this format is read and also should be written by default
- round_trip("""
+ round_trip(
+ """
!!set
? a
? b
? c
- """)
+ """
+ )
def test_blank_line_after_comment(self):
- round_trip("""
+ round_trip(
+ """
# Comment with spaces after it.
a: 1
- """)
+ """
+ )
def test_blank_line_between_seq_items(self):
- round_trip("""
+ round_trip(
+ """
# Seq with spaces in between items.
b:
- bar
- baz
- """)
+ """
+ )
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_blank_line_after_literal_chip(self):
s = """
c:
@@ -147,8 +168,9 @@ class TestYAML:
assert d['c'][0].split('it.')[1] == '\n'
assert d['c'][1].split('line.')[1] == '\n'
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_blank_line_after_literal_keep(self):
""" have to insert an eof marker in YAML to test this"""
s = """
@@ -172,8 +194,9 @@ class TestYAML:
assert d['c'][0].split('it.')[1] == '\n\n'
assert d['c'][1].split('line.')[1] == '\n\n\n'
- @pytest.mark.skipif(platform.python_implementation() == 'Jython',
- reason="Jython throws RepresenterError")
+ @pytest.mark.skipif(
+ platform.python_implementation() == 'Jython', reason='Jython throws RepresenterError'
+ )
def test_blank_line_after_literal_strip(self):
s = """
c:
@@ -192,16 +215,19 @@ class TestYAML:
d = round_trip_load(dedent(s))
print(d)
round_trip(s)
- assert d['c'][0].split('it.')[1] == ''
- assert d['c'][1].split('line.')[1] == ''
+ assert d['c'][0].split('it.')[1] == ""
+ assert d['c'][1].split('line.')[1] == ""
def test_load_all_perserve_quotes(self):
import ruamel.yaml # NOQA
- s = dedent("""\
+
+ s = dedent(
+ """\
a: 'hello'
---
b: "goodbye"
- """)
+ """
+ )
data = []
for x in ruamel.yaml.round_trip_load_all(s, preserve_quotes=True):
data.append(x)
diff --git a/_test/test_yamlobject.py b/_test/test_yamlobject.py
index 4147bc3..1d730b5 100644
--- a/_test/test_yamlobject.py
+++ b/_test/test_yamlobject.py
@@ -2,7 +2,7 @@
from __future__ import print_function
-import pytest # NOQA
+import pytest # NOQA
from roundtrip import save_and_run # NOQA
diff --git a/_test/test_z_check_debug_leftovers.py b/_test/test_z_check_debug_leftovers.py
index f5be5df..37d6970 100644
--- a/_test/test_z_check_debug_leftovers.py
+++ b/_test/test_z_check_debug_leftovers.py
@@ -10,25 +10,29 @@ class TestLeftOverDebug:
# idea here is to capture round_trip_output via pytest stdout capture
# if there is are any leftover debug statements they should show up
def test_00(self, capsys):
- s = dedent("""
+ s = dedent(
+ """
a: 1
b: []
c: [a, 1]
d: {f: 3.14, g: 42}
- """)
+ """
+ )
d = round_trip_load(s)
round_trip_dump(d, sys.stdout)
out, err = capsys.readouterr()
assert out == s
def test_01(self, capsys):
- s = dedent("""
+ s = dedent(
+ """
- 1
- []
- [a, 1]
- {f: 3.14, g: 42}
- - 123
- """)
+ """
+ )
d = round_trip_load(s)
round_trip_dump(d, sys.stdout)
out, err = capsys.readouterr()
diff --git a/_test/test_z_data.py b/_test/test_z_data.py
index 9ea921b..5a142cf 100644
--- a/_test/test_z_data.py
+++ b/_test/test_z_data.py
@@ -4,12 +4,12 @@ from __future__ import print_function
import sys
import os
-import pytest # NOQA
+import pytest # NOQA
import platform # NOQA
sys.path.insert(0, os.path.dirname(__file__) + '/lib')
-import warnings # NOQA
+import warnings # NOQA
args = []
@@ -17,8 +17,10 @@ args = []
def test_data():
import test_appliance # NOQA
+
collections = []
import test_yaml
+
collections.append(test_yaml)
test_appliance.run(collections, args)
@@ -26,12 +28,15 @@ def test_data():
# @pytest.mark.skipif(not ruamel.yaml.__with_libyaml__,
# reason="no libyaml")
+
def test_data_ext():
collections = []
- import ruamel.yaml # NOQA
+ import ruamel.yaml # NOQA
import test_appliance # NOQA
+
warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning)
if ruamel.yaml.__with_libyaml__:
import test_yaml_ext
+
collections.append(test_yaml_ext)
test_appliance.run(collections, args)
diff --git a/comments.py b/comments.py
index 16fbbb8..32ce8ce 100644
--- a/comments.py
+++ b/comments.py
@@ -23,9 +23,11 @@ else:
if False: # MYPY
from typing import Any, Dict, Optional, List, Union # NOQA
+# fmt: off
__all__ = ["CommentedSeq", "CommentedKeySeq",
"CommentedMap", "CommentedOrderedMap",
"CommentedSet", 'comment_attrib', 'merge_attrib']
+# fmt: on
comment_attrib = '_yaml_comment'
format_attrib = '_yaml_format'
@@ -38,7 +40,7 @@ tag_attrib = '_yaml_tag'
class Comment(object):
# sys.getsize tested the Comment objects, __slots__ makes them bigger
# and adding self.end did not matter
- __slots__ = 'comment', '_items', '_end', '_start',
+ __slots__ = 'comment', '_items', '_end', '_start'
attrib = comment_attrib
def __init__(self):
@@ -56,9 +58,8 @@ class Comment(object):
if bool(self._end):
end = ',\n end=' + str(self._end)
else:
- end = ''
- return "Comment(comment={0},\n items={1}{2})".format(
- self.comment, self._items, end)
+ end = ""
+ return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end)
@property
def items(self):
@@ -93,7 +94,7 @@ def NoComment():
class Format(object):
- __slots__ = '_flow_style',
+ __slots__ = ('_flow_style',)
attrib = format_attrib
def __init__(self):
@@ -163,7 +164,7 @@ class LineCol(object):
class Anchor(object):
- __slots__ = 'value', 'always_dump',
+ __slots__ = 'value', 'always_dump'
attrib = anchor_attrib
def __init__(self):
@@ -174,7 +175,8 @@ class Anchor(object):
class Tag(object):
"""store tag information for roundtripping"""
- __slots__ = 'value',
+
+ __slots__ = ('value',)
attrib = tag_attrib
def __init__(self):
@@ -231,6 +233,7 @@ class CommentedBase(object):
"""
from .error import CommentMark
from .tokens import CommentToken
+
pre_comments = self._yaml_get_pre_comment()
if comment[-1] == '\n':
comment = comment[:-1] # strip final newline if there
@@ -238,8 +241,9 @@ class CommentedBase(object):
for com in comment.split('\n'):
pre_comments.append(CommentToken('# ' + com + '\n', start_mark, None))
- def yaml_set_comment_before_after_key(self, key, before=None, indent=0,
- after=None, after_indent=None):
+ def yaml_set_comment_before_after_key(
+ self, key, before=None, indent=0, after=None, after_indent=None
+ ):
# type: (Any, Any, Any, Any, Any) -> None
"""
expects comment (before/after) to be without `#` and possible have multiple lines
@@ -250,7 +254,7 @@ class CommentedBase(object):
def comment_token(s, mark):
# type: (Any, Any) -> Any
# handle empty lines as having no comment
- return CommentToken(('# ' if s else '') + s + '\n', mark, None)
+ return CommentToken(('# ' if s else "") + s + '\n', mark, None)
if after_indent is None:
after_indent = indent + 2
@@ -261,7 +265,7 @@ class CommentedBase(object):
start_mark = CommentMark(indent)
c = self.ca.items.setdefault(key, [None, [], None, None])
if before == '\n':
- c[1].append(comment_token('', start_mark))
+ c[1].append(comment_token("", start_mark))
elif before:
for com in before.split('\n'):
c[1].append(comment_token(com, start_mark))
@@ -291,6 +295,7 @@ class CommentedBase(object):
"""
from .tokens import CommentToken
from .error import CommentMark
+
if column is None:
column = self._yaml_get_column(key)
if comment[0] != '#':
@@ -354,6 +359,7 @@ class CommentedBase(object):
def copy_attributes(self, t, deep=False):
# type: (Any, bool) -> None
+ # fmt: off
for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib,
Tag.attrib, merge_attrib]:
if hasattr(self, a):
@@ -361,6 +367,7 @@ class CommentedBase(object):
setattr(t, a, copy.deepcopy(getattr(self, a)))
else:
setattr(t, a, getattr(self, a))
+ # fmt: on
def _yaml_add_eol_comment(self, comment, key):
# type: (Any, Any) -> None
@@ -376,7 +383,7 @@ class CommentedBase(object):
class CommentedSeq(list, CommentedBase):
- __slots__ = Comment.attrib,
+ __slots__ = (Comment.attrib,)
def _yaml_add_comment(self, comment, key=NoComment):
# type: (Any, Optional[Any]) -> None
@@ -404,7 +411,7 @@ class CommentedSeq(list, CommentedBase):
def pop(self, idx=None):
# type: (Any) -> Any
- res = list.pop(self, idx) # type: ignore
+ res = list.pop(self, idx)
self.ca.items.pop(idx, None) # might not be there -> default value
for list_index in sorted(self.ca.items):
if list_index < idx:
@@ -455,15 +462,18 @@ class CommentedSeq(list, CommentedBase):
# type: (Any, Any) -> None
# try to preserve the scalarstring type if setting an existing key to a new value
if idx < len(self):
- if isinstance(value, string_types) and \
- not isinstance(value, ScalarString) and \
- isinstance(self[idx], ScalarString):
+ if (
+ isinstance(value, string_types)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[idx], ScalarString)
+ ):
value = type(self[idx])(value)
list.__setitem__(self, idx, value)
class CommentedKeySeq(tuple, CommentedBase):
"""This primarily exists to be able to roundtrip keys that are sequences"""
+
def _yaml_add_comment(self, comment, key=NoComment):
# type: (Any, Optional[Any]) -> None
if key is not NoComment:
@@ -511,7 +521,7 @@ class CommentedKeySeq(tuple, CommentedBase):
class CommentedMapView(Sized):
- __slots__ = '_mapping',
+ __slots__ = ('_mapping',)
def __init__(self, mapping):
# type: (Any) -> None
@@ -597,7 +607,7 @@ class CommentedMapValuesView(CommentedMapView):
class CommentedMap(ordereddict, CommentedBase):
- __slots__ = Comment.attrib,
+ __slots__ = (Comment.attrib,)
def _yaml_add_comment(self, comment, key=NoComment, value=NoComment):
# type: (Any, Optional[Any], Optional[Any]) -> None
@@ -714,9 +724,11 @@ class CommentedMap(ordereddict, CommentedBase):
# type: (Any, Any) -> None
# try to preserve the scalarstring type if setting an existing key to a new value
if key in self:
- if isinstance(value, string_types) and \
- not isinstance(value, ScalarString) and \
- isinstance(self[key], ScalarString):
+ if (
+ isinstance(value, string_types)
+ and not isinstance(value, ScalarString)
+ and isinstance(self[key], ScalarString)
+ ):
value = type(self[key])(value)
ordereddict.__setitem__(self, key, value)
@@ -802,6 +814,7 @@ class CommentedMap(ordereddict, CommentedBase):
done.append(merged[1])
if PY2:
+
def keys(self):
# type: () -> Any
return list(self._keys())
@@ -813,7 +826,9 @@ class CommentedMap(ordereddict, CommentedBase):
def viewkeys(self):
# type: () -> Any
return CommentedMapKeysView(self)
+
else:
+
def keys(self):
# type: () -> Any
return CommentedMapKeysView(self)
@@ -835,6 +850,7 @@ class CommentedMap(ordereddict, CommentedBase):
done.append(merged[1])
if PY2:
+
def values(self):
# type: () -> Any
return list(self._values())
@@ -846,7 +862,9 @@ class CommentedMap(ordereddict, CommentedBase):
def viewvalues(self):
# type: () -> Any
return CommentedMapValuesView(self)
+
else:
+
def values(self):
# type: () -> Any
return CommentedMapValuesView(self)
@@ -868,6 +886,7 @@ class CommentedMap(ordereddict, CommentedBase):
done.append(merged[1])
if PY2:
+
def items(self):
# type: () -> Any
return list(self._items())
@@ -879,7 +898,9 @@ class CommentedMap(ordereddict, CommentedBase):
def viewitems(self):
# type: () -> Any
return CommentedMapItemsView(self)
+
else:
+
def items(self):
# type: () -> Any
return CommentedMapItemsView(self)
@@ -906,11 +927,11 @@ class CommentedMap(ordereddict, CommentedBase):
class CommentedOrderedMap(CommentedMap):
- __slots__ = Comment.attrib,
+ __slots__ = (Comment.attrib,)
class CommentedSet(MutableSet, CommentedMap):
- __slots__ = Comment.attrib, 'odict',
+ __slots__ = Comment.attrib, 'odict'
def __init__(self, values=None):
# type: (Any) -> None
@@ -949,11 +970,17 @@ class CommentedSet(MutableSet, CommentedMap):
class TaggedScalar(CommentedBase):
# the value and style attributes are set during roundtrip construction
+ def __init__(self):
+ # type: () -> None
+ self.value = None
+ self.style = None
+
def __str__(self):
+ # type: () -> Any
return self.value
-def dump_comments(d, name='', sep='.', out=sys.stdout):
+def dump_comments(d, name="", sep='.', out=sys.stdout):
# type: (Any, str, str, Any) -> None
"""
recursively dump comments, all but the toplevel preceded by the path
@@ -970,5 +997,6 @@ def dump_comments(d, name='', sep='.', out=sys.stdout):
print(name)
print(d.ca, file=out) # type: ignore
for idx, k in enumerate(d):
- dump_comments(k, name=(name + sep + str(idx)) if name else str(idx),
- sep=sep, out=out)
+ dump_comments(
+ k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out
+ )
diff --git a/compat.py b/compat.py
index e7ad8db..a962103 100644
--- a/compat.py
+++ b/compat.py
@@ -9,7 +9,7 @@ import os
import types
if False: # MYPY
- from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
+ from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA
_DEFAULT_YAML_VERSION = (1, 2)
@@ -22,8 +22,9 @@ except: # NOQA
from ordereddict import OrderedDict # type: ignore
# to get the right name import ... as ordereddict doesn't do that
- class ordereddict(OrderedDict): # type: ignore
+ class ordereddict(OrderedDict): # type: ignore
if not hasattr(OrderedDict, 'insert'):
+
def insert(self, pos, key, value):
# type: (int, Any, Any) -> None
if pos >= len(self):
@@ -44,6 +45,7 @@ PY3 = sys.version_info[0] == 3
if PY3:
+
def utf8(s):
# type: (str) -> str
return s
@@ -56,7 +58,9 @@ if PY3:
# type: (str) -> str
return s
+
else:
+
def utf8(s):
# type: (unicode) -> str
return s.encode('utf-8')
@@ -67,7 +71,8 @@ else:
def to_unicode(s):
# type: (str) -> unicode
- return unicode(s) # NOQA
+ return unicode(s) # NOQA
+
if PY3:
string_types = str
@@ -79,6 +84,7 @@ if PY3:
MAXSIZE = sys.maxsize
unichr = chr
import io
+
StringIO = io.StringIO
BytesIO = io.BytesIO
# have unlimited precision
@@ -86,17 +92,19 @@ if PY3:
from collections.abc import Hashable # NOQA
else:
- string_types = basestring # NOQA
+ string_types = basestring # NOQA
integer_types = (int, long) # NOQA
class_types = (type, types.ClassType)
- text_type = unicode # NOQA
+ text_type = unicode # NOQA
binary_type = str
# to allow importing
unichr = unichr # type: ignore
from StringIO import StringIO as _StringIO
+
StringIO = _StringIO
import cStringIO
+
BytesIO = cStringIO.StringIO
# have unlimited precision
no_limit_int = long # NOQA not available on Python 3
@@ -120,7 +128,7 @@ UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2
def with_metaclass(meta, *bases):
# type: (Any, Any) -> Any
"""Create a base class with a metaclass."""
- return meta("NewBase", bases, {})
+ return meta('NewBase', bases, {})
DBG_TOKEN = 1
@@ -138,6 +146,7 @@ if 'RUAMELDEBUG' in os.environ:
if bool(_debug):
+
class ObjectCounter(object):
def __init__(self):
# type: () -> None
@@ -176,6 +185,7 @@ def nprint(*args, **kw):
if bool(dbg):
print(*args, **kw)
+
# char checkers following production rules
@@ -202,6 +212,7 @@ def check_anchorname_char(ch):
def version_tnf(t1, t2=None):
# type: (Any, Any) -> Any
from ruamel.yaml import version_info # NOQA
+
if version_info < t1:
return True
if t2 is not None and version_info < t2:
diff --git a/composer.py b/composer.py
index 516a3ae..80adb68 100644
--- a/composer.py
+++ b/composer.py
@@ -9,12 +9,16 @@ from ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning
from ruamel.yaml.compat import utf8
from ruamel.yaml.events import (
- StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
- SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
-)
-from ruamel.yaml.nodes import (
- MappingNode, ScalarNode, SequenceNode,
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
@@ -38,16 +42,16 @@ class Composer(object):
def parser(self):
# type: () -> Any
if hasattr(self.loader, 'typ'):
- self.loader.parser # type: ignore
- return self.loader._parser # type: ignore
+ self.loader.parser
+ return self.loader._parser
@property
def resolver(self):
# type: () -> Any
# assert self.loader._resolver is not None
if hasattr(self.loader, 'typ'):
- self.loader.resolver # type: ignore
- return self.loader._resolver # type: ignore
+ self.loader.resolver
+ return self.loader._resolver
def check_node(self):
# type: () -> Any
@@ -78,9 +82,11 @@ class Composer(object):
if not self.parser.check_event(StreamEndEvent):
event = self.parser.get_event()
raise ComposerError(
- "expected a single document in the stream",
- document.start_mark, "but found another document",
- event.start_mark)
+ 'expected a single document in the stream',
+ document.start_mark,
+ 'but found another document',
+ event.start_mark,
+ )
# Drop the STREAM-END event.
self.parser.get_event()
@@ -108,8 +114,8 @@ class Composer(object):
alias = event.anchor
if alias not in self.anchors:
raise ComposerError(
- None, None, "found undefined alias %r"
- % utf8(alias), event.start_mark)
+ None, None, 'found undefined alias %r' % utf8(alias), event.start_mark
+ )
return self.anchors[alias]
event = self.parser.peek_event()
anchor = event.anchor
@@ -119,9 +125,10 @@ class Composer(object):
# "found duplicate anchor %r; first occurrence"
# % utf8(anchor), self.anchors[anchor].start_mark,
# "second occurrence", event.start_mark)
- ws = "\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence "\
- "{}".format(
- (anchor), self.anchors[anchor].start_mark, event.start_mark)
+ ws = (
+ '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence '
+ '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark)
+ )
warnings.warn(ws, ReusedAnchorWarning)
self.resolver.descend_resolver(parent, index)
if self.parser.check_event(ScalarEvent):
@@ -139,9 +146,14 @@ class Composer(object):
tag = event.tag
if tag is None or tag == u'!':
tag = self.resolver.resolve(ScalarNode, event.value, event.implicit)
- node = ScalarNode(tag, event.value,
- event.start_mark, event.end_mark, style=event.style,
- comment=event.comment)
+ node = ScalarNode(
+ tag,
+ event.value,
+ event.start_mark,
+ event.end_mark,
+ style=event.style,
+ comment=event.comment,
+ )
if anchor is not None:
self.anchors[anchor] = node
return node
@@ -152,10 +164,15 @@ class Composer(object):
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolver.resolve(SequenceNode, None, start_event.implicit)
- node = SequenceNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style,
- comment=start_event.comment, anchor=anchor)
+ node = SequenceNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
if anchor is not None:
self.anchors[anchor] = node
index = 0
@@ -165,8 +182,10 @@ class Composer(object):
end_event = self.parser.get_event()
if node.flow_style is True and end_event.comment is not None:
if node.comment is not None:
- print('Warning: unexpected end_event comment in sequence '
- 'node {}'.format(node.flow_style))
+ print(
+ 'Warning: unexpected end_event comment in sequence '
+ 'node {}'.format(node.flow_style)
+ )
node.comment = end_event.comment
node.end_mark = end_event.end_mark
self.check_end_doc_comment(end_event, node)
@@ -178,10 +197,15 @@ class Composer(object):
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolver.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, [],
- start_event.start_mark, None,
- flow_style=start_event.flow_style,
- comment=start_event.comment, anchor=anchor)
+ node = MappingNode(
+ tag,
+ [],
+ start_event.start_mark,
+ None,
+ flow_style=start_event.flow_style,
+ comment=start_event.comment,
+ anchor=anchor,
+ )
if anchor is not None:
self.anchors[anchor] = node
while not self.parser.check_event(MappingEndEvent):
diff --git a/configobjwalker.py b/configobjwalker.py
index c0ab608..cbc6148 100644
--- a/configobjwalker.py
+++ b/configobjwalker.py
@@ -10,5 +10,5 @@ if False: # MYPY
def configobj_walker(cfg):
# type: (Any) -> Any
- warnings.warn("configobj_walker has moved to ruamel.yaml.util, please update your code")
+ warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code')
return new_configobj_walker(cfg)
diff --git a/constructor.py b/constructor.py
index a99f682..6a26da3 100644
--- a/constructor.py
+++ b/constructor.py
@@ -10,6 +10,7 @@ import sys
import types
import warnings
+# fmt: off
from ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning,
MantissaNoDotYAML1_1Warning)
from ruamel.yaml.nodes import * # NOQA
@@ -28,11 +29,12 @@ from ruamel.yaml.timestamp import TimeStamp
from ruamel.yaml.util import RegExp
if False: # MYPY
- from typing import Any, Dict, List, Set, Generator # NOQA
+ from typing import Any, Dict, List, Set, Generator, Union # NOQA
__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
'ConstructorError', 'RoundTripConstructor']
+# fmt: on
class ConstructorError(MarkedYAMLError):
@@ -49,11 +51,11 @@ class DuplicateKeyError(MarkedYAMLFutureWarning):
class BaseConstructor(object):
- yaml_constructors = {} # type: Dict[Any, Any]
- yaml_multi_constructors = {} # type: Dict[Any, Any]
+ yaml_constructors = {} # type: Dict[Any, Any]
+ yaml_multi_constructors = {} # type: Dict[Any, Any]
def __init__(self, preserve_quotes=None, loader=None):
- # type: (bool, Any) -> None
+ # type: (Union[None, bool], Any) -> None
self.loader = loader
if self.loader is not None and getattr(self.loader, '_constructor', None) is None:
self.loader._constructor = self
@@ -69,12 +71,12 @@ class BaseConstructor(object):
def composer(self):
# type: () -> Any
if hasattr(self.loader, 'typ'):
- return self.loader.composer # type: ignore
+ return self.loader.composer
try:
- return self.loader._composer # type: ignore
+ return self.loader._composer
except AttributeError:
print('slt', type(self))
- print('slc', self.loader._composer) # type: ignore
+ print('slc', self.loader._composer)
print(dir(self))
raise
@@ -82,8 +84,8 @@ class BaseConstructor(object):
def resolver(self):
# type: () -> Any
if hasattr(self.loader, 'typ'):
- return self.loader.resolver # type: ignore
- return self.loader._resolver # type: ignore
+ return self.loader.resolver
+ return self.loader._resolver
def check_data(self):
# type: () -> Any
@@ -130,8 +132,8 @@ class BaseConstructor(object):
self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(
- None, None,
- "found unconstructable recursive node", node.start_mark)
+ None, None, 'found unconstructable recursive node', node.start_mark
+ )
self.recursive_objects[node] = None
constructor = None # type: Any
tag_suffix = None
@@ -140,7 +142,7 @@ class BaseConstructor(object):
else:
for tag_prefix in self.yaml_multi_constructors:
if node.tag.startswith(tag_prefix):
- tag_suffix = node.tag[len(tag_prefix):]
+ tag_suffix = node.tag[len(tag_prefix) :]
constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
@@ -154,16 +156,16 @@ class BaseConstructor(object):
elif isinstance(node, SequenceNode):
constructor = self.__class__.construct_sequence # type: ignore
elif isinstance(node, MappingNode):
- constructor = self.__class__.construct_mapping # type: ignore
+ constructor = self.__class__.construct_mapping # type: ignore
if tag_suffix is None:
data = constructor(self, node)
else:
data = constructor(self, tag_suffix, node)
if isinstance(data, types.GeneratorType):
generator = data
- data = next(generator) # type: ignore
+ data = next(generator)
if self.deep_construct:
- for _dummy in generator: # type: ignore
+ for _dummy in generator:
pass
else:
self.state_generators.append(generator)
@@ -177,9 +179,8 @@ class BaseConstructor(object):
# type: (Any) -> Any
if not isinstance(node, ScalarNode):
raise ConstructorError(
- None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
+ )
return node.value
def construct_sequence(self, node, deep=False):
@@ -189,11 +190,9 @@ class BaseConstructor(object):
"""
if not isinstance(node, SequenceNode):
raise ConstructorError(
- None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
- return [self.construct_object(child, deep=deep)
- for child in node.value]
+ None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
+ )
+ return [self.construct_object(child, deep=deep) for child in node.value]
def construct_mapping(self, node, deep=False):
# type: (Any, bool) -> Any
@@ -202,9 +201,8 @@ class BaseConstructor(object):
"""
if not isinstance(node, MappingNode):
raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
total_mapping = {}
if getattr(node, 'merge', None) is not None:
todo = [(node.merge, False), (node.value, False)]
@@ -224,14 +222,19 @@ class BaseConstructor(object):
hash(key)
except TypeError as exc:
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
else:
if not isinstance(key, Hashable):
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
value = self.construct_object(value_node, deep=deep)
if check:
@@ -245,10 +248,11 @@ class BaseConstructor(object):
if key in mapping:
if not self.allow_duplicate_keys:
args = [
- "while constructing a mapping", node.start_mark,
+ 'while constructing a mapping',
+ node.start_mark,
'found duplicate key "{}" with value "{}" '
- '(original value: "{}")'.format(
- key, value, mapping.get(key)), key_node.start_mark,
+ '(original value: "{}")'.format(key, value, mapping.get(key)),
+ key_node.start_mark,
"""
To suppress this check see:
http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys
@@ -267,9 +271,8 @@ class BaseConstructor(object):
# type: (Any, bool) -> Any
if not isinstance(node, MappingNode):
raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
pairs = []
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
@@ -322,10 +325,11 @@ class SafeConstructor(BaseConstructor):
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError(
- "while constructing a mapping",
+ 'while constructing a mapping',
node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
+ 'expected a mapping for merging, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
self.flatten_mapping(subnode)
submerge.append(subnode.value)
submerge.reverse()
@@ -333,10 +337,12 @@ class SafeConstructor(BaseConstructor):
merge.extend(value)
else:
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, "
- "but found %s"
- % value_node.id, value_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping or list of mappings for merging, '
+ 'but found %s' % value_node.id,
+ value_node.start_mark,
+ )
elif key_node.tag == u'tag:yaml.org,2002:value':
key_node.tag = u'tag:yaml.org,2002:str'
index += 1
@@ -378,7 +384,7 @@ class SafeConstructor(BaseConstructor):
def construct_yaml_int(self, node):
# type: (Any) -> int
value_s = to_str(self.construct_scalar(node))
- value_s = value_s.replace('_', '')
+ value_s = value_s.replace('_', "")
sign = +1
if value_s[0] == '-':
sign = -1
@@ -409,12 +415,12 @@ class SafeConstructor(BaseConstructor):
inf_value = 1e300
while inf_value != inf_value * inf_value:
inf_value *= inf_value
- nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99).
+ nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
# type: (Any) -> float
value_so = to_str(self.construct_scalar(node))
- value_s = value_so.replace('_', '').lower()
+ value_s = value_so.replace('_', "").lower()
sign = +1
if value_s[0] == '-':
sign = -1
@@ -442,15 +448,18 @@ class SafeConstructor(BaseConstructor):
return sign * float(value_s)
if PY3:
+
def construct_yaml_binary(self, node):
# type: (Any) -> Any
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(
- None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
+ None,
+ None,
+ 'failed to convert base64 data into ascii: %s' % exc,
+ node.start_mark,
+ )
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
@@ -458,9 +467,11 @@ class SafeConstructor(BaseConstructor):
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
+
else:
+
def construct_yaml_binary(self, node):
# type: (Any) -> Any
value = self.construct_scalar(node)
@@ -468,11 +479,11 @@ class SafeConstructor(BaseConstructor):
return to_str(value).decode('base64')
except (binascii.Error, UnicodeEncodeError) as exc:
raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
timestamp_regexp = RegExp(
- u'''^(?P<year>[0-9][0-9][0-9][0-9])
+ u"""^(?P<year>[0-9][0-9][0-9][0-9])
-(?P<month>[0-9][0-9]?)
-(?P<day>[0-9][0-9]?)
(?:((?P<t>[Tt])|[ \\t]+) # explicitly not retaining extra spaces
@@ -481,7 +492,9 @@ class SafeConstructor(BaseConstructor):
:(?P<second>[0-9][0-9])
(?:\\.(?P<fraction>[0-9]*))?
(?:[ \\t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$""",
+ re.X,
+ )
def construct_yaml_timestamp(self, node, values=None):
# type: (Any, Any) -> Any
@@ -492,9 +505,11 @@ class SafeConstructor(BaseConstructor):
match = None
if match is None:
raise ConstructorError(
- None, None,
+ None,
+ None,
'failed to construct timestamp from "{}"'.format(node.value),
- node.start_mark)
+ node.start_mark,
+ )
values = match.groupdict()
year = int(values['year'])
month = int(values['month'])
@@ -520,8 +535,7 @@ class SafeConstructor(BaseConstructor):
delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
if values['tz_sign'] == '-':
delta = -delta
- data = datetime.datetime(year, month, day, hour, minute, second,
- fraction)
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
if delta:
data -= delta
return data
@@ -533,21 +547,26 @@ class SafeConstructor(BaseConstructor):
yield omap
if not isinstance(node, SequenceNode):
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
if len(subnode.value) != 1:
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
assert key not in omap
@@ -561,21 +580,26 @@ class SafeConstructor(BaseConstructor):
yield pairs
if not isinstance(node, SequenceNode):
raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
if len(subnode.value) != 1:
raise ConstructorError(
- "while constructing pairs", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
+ 'while constructing pairs',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
value = self.construct_object(value_node)
@@ -583,7 +607,7 @@ class SafeConstructor(BaseConstructor):
def construct_yaml_set(self, node):
# type: (Any) -> Any
- data = set() # type: Set[Any]
+ data = set() # type: Set[Any]
yield data
value = self.construct_mapping(node)
data.update(value)
@@ -625,70 +649,54 @@ class SafeConstructor(BaseConstructor):
def construct_undefined(self, node):
# type: (Any) -> None
raise ConstructorError(
- None, None,
- "could not determine a constructor for the tag %r" %
- utf8(node.tag),
- node.start_mark)
+ None,
+ None,
+ 'could not determine a constructor for the tag %r' % utf8(node.tag),
+ node.start_mark,
+ )
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- SafeConstructor.construct_yaml_null)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- SafeConstructor.construct_yaml_bool)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- SafeConstructor.construct_yaml_int)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int)
SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- SafeConstructor.construct_yaml_float)
+ u'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float
+)
SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- SafeConstructor.construct_yaml_binary)
+ u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary
+)
SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
+ u'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp
+)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- SafeConstructor.construct_yaml_omap)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap)
SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- SafeConstructor.construct_yaml_pairs)
+ u'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs
+)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- SafeConstructor.construct_yaml_set)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- SafeConstructor.construct_yaml_str)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- SafeConstructor.construct_yaml_seq)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq)
-SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- SafeConstructor.construct_yaml_map)
+SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map)
-SafeConstructor.add_constructor(
- None, SafeConstructor.construct_undefined)
+SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined)
if PY2:
+
class classobj:
pass
class Constructor(SafeConstructor):
-
def construct_python_str(self, node):
# type: (Any) -> Any
return utf8(self.construct_scalar(node))
@@ -698,15 +706,18 @@ class Constructor(SafeConstructor):
return self.construct_scalar(node)
if PY3:
+
def construct_python_bytes(self, node):
# type: (Any) -> Any
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(
- None, None,
- "failed to convert base64 data into ascii: %s" % exc,
- node.start_mark)
+ None,
+ None,
+ 'failed to convert base64 data into ascii: %s' % exc,
+ node.start_mark,
+ )
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
@@ -714,8 +725,8 @@ class Constructor(SafeConstructor):
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(
- None, None,
- "failed to decode base64 data: %s" % exc, node.start_mark)
+ None, None, 'failed to decode base64 data: %s' % exc, node.start_mark
+ )
def construct_python_long(self, node):
# type: (Any) -> int
@@ -736,22 +747,31 @@ class Constructor(SafeConstructor):
# type: (Any, Any) -> Any
if not name:
raise ConstructorError(
- "while constructing a Python module", mark,
- "expected non-empty name appended to the tag", mark)
+ 'while constructing a Python module',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
try:
__import__(name)
except ImportError as exc:
raise ConstructorError(
- "while constructing a Python module", mark,
- "cannot find module %r (%s)" % (utf8(name), exc), mark)
+ 'while constructing a Python module',
+ mark,
+ 'cannot find module %r (%s)' % (utf8(name), exc),
+ mark,
+ )
return sys.modules[name]
def find_python_name(self, name, mark):
# type: (Any, Any) -> Any
if not name:
raise ConstructorError(
- "while constructing a Python object", mark,
- "expected non-empty name appended to the tag", mark)
+ 'while constructing a Python object',
+ mark,
+ 'expected non-empty name appended to the tag',
+ mark,
+ )
if u'.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
@@ -761,14 +781,19 @@ class Constructor(SafeConstructor):
__import__(module_name)
except ImportError as exc:
raise ConstructorError(
- "while constructing a Python object", mark,
- "cannot find module %r (%s)" % (utf8(module_name), exc), mark)
+ 'while constructing a Python object',
+ mark,
+ 'cannot find module %r (%s)' % (utf8(module_name), exc),
+ mark,
+ )
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError(
- "while constructing a Python object", mark,
- "cannot find %r in the module %r" % (utf8(object_name),
- module.__name__), mark)
+ 'while constructing a Python object',
+ mark,
+ 'cannot find %r in the module %r' % (utf8(object_name), module.__name__),
+ mark,
+ )
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
@@ -776,9 +801,11 @@ class Constructor(SafeConstructor):
value = self.construct_scalar(node)
if value:
raise ConstructorError(
- "while constructing a Python name", node.start_mark,
- "expected the empty value, but found %r" % utf8(value),
- node.start_mark)
+ 'while constructing a Python name',
+ node.start_mark,
+ 'expected the empty value, but found %r' % utf8(value),
+ node.start_mark,
+ )
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
@@ -786,13 +813,14 @@ class Constructor(SafeConstructor):
value = self.construct_scalar(node)
if value:
raise ConstructorError(
- "while constructing a Python module", node.start_mark,
- "expected the empty value, but found %r" % utf8(value),
- node.start_mark)
+ 'while constructing a Python module',
+ node.start_mark,
+ 'expected the empty value, but found %r' % utf8(value),
+ node.start_mark,
+ )
return self.find_python_module(suffix, node.start_mark)
- def make_python_instance(self, suffix, node,
- args=None, kwds=None, newobj=False):
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
# type: (Any, Any, Any, Any, bool) -> Any
if not args:
args = []
@@ -805,8 +833,7 @@ class Constructor(SafeConstructor):
else:
return cls(*args, **kwds)
else:
- if newobj and isinstance(cls, type(classobj)) \
- and not args and not kwds:
+ if newobj and isinstance(cls, type(classobj)) and not args and not kwds:
instance = classobj()
instance.__class__ = cls
return instance
@@ -881,74 +908,62 @@ class Constructor(SafeConstructor):
return self.construct_python_object_apply(suffix, node, newobj=True)
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/none',
- Constructor.construct_yaml_null)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null)
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bool',
- Constructor.construct_yaml_bool)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool)
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/str',
- Constructor.construct_python_str)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/str', Constructor.construct_python_str)
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/unicode',
- Constructor.construct_python_unicode)
+ u'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode
+)
if PY3:
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/bytes',
- Constructor.construct_python_bytes)
+ u'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes
+ )
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/int',
- Constructor.construct_yaml_int)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int)
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/long',
- Constructor.construct_python_long)
+ u'tag:yaml.org,2002:python/long', Constructor.construct_python_long
+)
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/float',
- Constructor.construct_yaml_float)
+ u'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float
+)
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/complex',
- Constructor.construct_python_complex)
+ u'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex
+)
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/list',
- Constructor.construct_yaml_seq)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq)
Constructor.add_constructor(
- u'tag:yaml.org,2002:python/tuple',
- Constructor.construct_python_tuple)
+ u'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple
+)
-Constructor.add_constructor(
- u'tag:yaml.org,2002:python/dict',
- Constructor.construct_yaml_map)
+Constructor.add_constructor(u'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map)
Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/name:',
- Constructor.construct_python_name)
+ u'tag:yaml.org,2002:python/name:', Constructor.construct_python_name
+)
Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/module:',
- Constructor.construct_python_module)
+ u'tag:yaml.org,2002:python/module:', Constructor.construct_python_module
+)
Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object:',
- Constructor.construct_python_object)
+ u'tag:yaml.org,2002:python/object:', Constructor.construct_python_object
+)
Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/apply:',
- Constructor.construct_python_object_apply)
+ u'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply
+)
Constructor.add_multi_constructor(
- u'tag:yaml.org,2002:python/object/new:',
- Constructor.construct_python_object_new)
+ u'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new
+)
class RoundTripConstructor(SafeConstructor):
@@ -960,9 +975,8 @@ class RoundTripConstructor(SafeConstructor):
# type: (Any) -> Any
if not isinstance(node, ScalarNode):
raise ConstructorError(
- None, None,
- "expected a scalar node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark
+ )
if node.style == '|' and isinstance(node.value, text_type):
return PreservedScalarString(node.value)
@@ -984,7 +998,7 @@ class RoundTripConstructor(SafeConstructor):
underscore = None
except IndexError:
underscore = None
- value_s = value_su.replace('_', '')
+ value_s = value_su.replace('_', "")
sign = +1
if value_s[0] == '-':
sign = -1
@@ -998,8 +1012,9 @@ class RoundTripConstructor(SafeConstructor):
if underscore is not None:
underscore[1] = value_su[2] == '_'
underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
- return BinaryInt(sign * int(value_s[2:], 2), width=width, # type: ignore
- underscore=underscore)
+ return BinaryInt( # type: ignore
+ sign * int(value_s[2:], 2), width=width, underscore=underscore
+ )
elif value_s.startswith('0x'):
# default to lower-case if no a-fA-F in string
if self.resolver.processing_version > (1, 1) and value_s[2] == '0':
@@ -1021,8 +1036,9 @@ class RoundTripConstructor(SafeConstructor):
if underscore is not None:
underscore[1] = value_su[2] == '_'
underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_'
- return OctalInt(sign * int(value_s[2:], 8), width=width, # type: ignore
- underscore=underscore)
+ return OctalInt( # type: ignore
+ sign * int(value_s[2:], 8), width=width, underscore=underscore
+ )
elif self.resolver.processing_version != (1, 2) and value_s[0] == '0':
return sign * int(value_s, 8)
elif self.resolver.processing_version != (1, 2) and ':' in value_s:
@@ -1039,13 +1055,15 @@ class RoundTripConstructor(SafeConstructor):
if underscore is not None:
# cannot have a leading underscore
underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
- return ScalarInt(sign * int(value_s), width=len(value_s), # type: ignore
- underscore=underscore)
+ return ScalarInt( # type: ignore
+ sign * int(value_s), width=len(value_s), underscore=underscore
+ )
elif underscore:
# cannot have a leading underscore
underscore[2] = len(value_su) > 1 and value_su[-1] == '_'
- return ScalarInt(sign * int(value_s), width=None, # type: ignore
- underscore=underscore)
+ return ScalarInt( # type: ignore
+ sign * int(value_s), width=None, underscore=underscore
+ )
else:
return sign * int(value_s)
@@ -1060,10 +1078,11 @@ class RoundTripConstructor(SafeConstructor):
lead0 += 1
idx += 1
return lead0
+
# underscore = None
m_sign = False # type: Any
value_so = to_str(self.construct_scalar(node))
- value_s = value_so.replace('_', '').lower()
+ value_s = value_so.replace('_', "").lower()
sign = +1
if value_s[0] == '-':
sign = -1
@@ -1102,14 +1121,22 @@ class RoundTripConstructor(SafeConstructor):
e_width = len(exponent)
e_sign = exponent[0] in '+-'
# print('sf', width, prec, m_sign, exp, e_width, e_sign)
- return ScalarFloat(sign * float(value_s), # type: ignore
- width=width, prec=prec, m_sign=m_sign,
- m_lead0=lead0, exp=exp, e_width=e_width, e_sign=e_sign)
+ return ScalarFloat( # type: ignore
+ sign * float(value_s),
+ width=width,
+ prec=prec,
+ m_sign=m_sign,
+ m_lead0=lead0,
+ exp=exp,
+ e_width=e_width,
+ e_sign=e_sign,
+ )
width = len(value_so)
prec = value_so.index('.') # you can use index, this would not be float without dot
lead0 = leading_zeros(value_so)
- return ScalarFloat(sign * float(value_s), # type: ignore
- width=width, prec=prec, m_sign=m_sign, m_lead0=lead0)
+ return ScalarFloat( # type: ignore
+ sign * float(value_s), width=width, prec=prec, m_sign=m_sign, m_lead0=lead0
+ )
def construct_yaml_str(self, node):
# type: (Any) -> Any
@@ -1130,9 +1157,8 @@ class RoundTripConstructor(SafeConstructor):
# type: (Any, Any, bool) -> Any
if not isinstance(node, SequenceNode):
raise ConstructorError(
- None, None,
- "expected a sequence node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark
+ )
ret_val = []
if node.comment:
seqtyp._yaml_add_comment(node.comment[:2])
@@ -1140,6 +1166,7 @@ class RoundTripConstructor(SafeConstructor):
seqtyp.yaml_end_comment_extend(node.comment[2], clear=True)
if node.anchor:
from ruamel.yaml.serializer import templated_id
+
if not templated_id(node.anchor):
seqtyp.yaml_set_anchor(node.anchor)
for idx, child in enumerate(node.value):
@@ -1147,7 +1174,8 @@ class RoundTripConstructor(SafeConstructor):
if child.comment:
seqtyp._yaml_add_comment(child.comment, key=idx)
seqtyp._yaml_set_idx_line_col(
- idx, [child.start_mark.line, child.start_mark.column])
+ idx, [child.start_mark.line, child.start_mark.column]
+ )
return ret_val
def flatten_mapping(self, node):
@@ -1178,8 +1206,7 @@ class RoundTripConstructor(SafeConstructor):
if key_node.tag == u'tag:yaml.org,2002:merge':
del node.value[index]
if isinstance(value_node, MappingNode):
- merge_map_list.append(
- (index, constructed(value_node)))
+ merge_map_list.append((index, constructed(value_node)))
# self.flatten_mapping(value_node)
# merge.extend(value_node.value)
elif isinstance(value_node, SequenceNode):
@@ -1187,12 +1214,12 @@ class RoundTripConstructor(SafeConstructor):
for subnode in value_node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError(
- "while constructing a mapping",
+ 'while constructing a mapping',
node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- merge_map_list.append(
- (index, constructed(subnode)))
+ 'expected a mapping for merging, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
+ merge_map_list.append((index, constructed(subnode)))
# self.flatten_mapping(subnode)
# submerge.append(subnode.value)
# submerge.reverse()
@@ -1200,10 +1227,12 @@ class RoundTripConstructor(SafeConstructor):
# merge.extend(value)
else:
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, "
- "but found %s"
- % value_node.id, value_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'expected a mapping or list of mappings for merging, '
+ 'but found %s' % value_node.id,
+ value_node.start_mark,
+ )
elif key_node.tag == u'tag:yaml.org,2002:value':
key_node.tag = u'tag:yaml.org,2002:str'
index += 1
@@ -1221,17 +1250,17 @@ class RoundTripConstructor(SafeConstructor):
# type: (Any, Any, bool) -> Any
if not isinstance(node, MappingNode):
raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
merge_map = self.flatten_mapping(node)
# mapping = {}
if node.comment:
- maptyp._yaml_add_comment(node.comment[:2]) # type: ignore
+ maptyp._yaml_add_comment(node.comment[:2])
if len(node.comment) > 2:
- maptyp.yaml_end_comment_extend(node.comment[2], clear=True) # type: ignore
+ maptyp.yaml_end_comment_extend(node.comment[2], clear=True)
if node.anchor:
from ruamel.yaml.serializer import templated_id
+
if not templated_id(node.anchor):
maptyp.yaml_set_anchor(node.anchor)
last_key, last_value = None, self._sentinel
@@ -1253,53 +1282,63 @@ class RoundTripConstructor(SafeConstructor):
hash(key)
except TypeError as exc:
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
else:
if not isinstance(key, Hashable):
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
value = self.construct_object(value_node, deep=deep)
self.check_mapping_key(node, key_node, maptyp, key, value)
- if key_node.comment and len(key_node.comment) > 4 and \
- key_node.comment[4]:
+ if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]:
if last_value is None:
key_node.comment[0] = key_node.comment.pop(4)
maptyp._yaml_add_comment(key_node.comment, value=last_key)
else:
key_node.comment[2] = key_node.comment.pop(4)
- maptyp._yaml_add_comment(key_node.comment, key=key) # type: ignore
+ maptyp._yaml_add_comment(key_node.comment, key=key)
key_node.comment = None
if key_node.comment:
- maptyp._yaml_add_comment(key_node.comment, key=key) # type: ignore
+ maptyp._yaml_add_comment(key_node.comment, key=key)
if value_node.comment:
- maptyp._yaml_add_comment(value_node.comment, value=key) # type: ignore
- maptyp._yaml_set_kv_line_col( # type: ignore
- key, [key_node.start_mark.line, key_node.start_mark.column,
- value_node.start_mark.line, value_node.start_mark.column])
- maptyp[key] = value # type: ignore
+ maptyp._yaml_add_comment(value_node.comment, value=key)
+ maptyp._yaml_set_kv_line_col(
+ key,
+ [
+ key_node.start_mark.line,
+ key_node.start_mark.column,
+ value_node.start_mark.line,
+ value_node.start_mark.column,
+ ],
+ )
+ maptyp[key] = value
last_key, last_value = key, value # could use indexing
# do this last, or <<: before a key will prevent insertion in instances
# of collections.OrderedDict (as they have no __contains__
if merge_map:
- maptyp.add_yaml_merge(merge_map) # type: ignore
+ maptyp.add_yaml_merge(merge_map)
def construct_setting(self, node, typ, deep=False):
# type: (Any, Any, bool) -> Any
if not isinstance(node, MappingNode):
raise ConstructorError(
- None, None,
- "expected a mapping node, but found %s" % node.id,
- node.start_mark)
+ None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark
+ )
if node.comment:
typ._yaml_add_comment(node.comment[:2])
if len(node.comment) > 2:
typ.yaml_end_comment_extend(node.comment[2], clear=True)
if node.anchor:
from ruamel.yaml.serializer import templated_id
+
if not templated_id(node.anchor):
typ.yaml_set_anchor(node.anchor)
for key_node, value_node in node.value:
@@ -1314,14 +1353,19 @@ class RoundTripConstructor(SafeConstructor):
hash(key)
except TypeError as exc:
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" %
- exc, key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unacceptable key (%s)' % exc,
+ key_node.start_mark,
+ )
else:
if not isinstance(key, Hashable):
raise ConstructorError(
- "while constructing a mapping", node.start_mark,
- "found unhashable key", key_node.start_mark)
+ 'while constructing a mapping',
+ node.start_mark,
+ 'found unhashable key',
+ key_node.start_mark,
+ )
# construct but should be null
value = self.construct_object(value_node, deep=deep) # NOQA
self.check_mapping_key(node, key_node, typ, key, value)
@@ -1382,21 +1426,26 @@ class RoundTripConstructor(SafeConstructor):
omap.yaml_end_comment_extend(node.comment[2], clear=True)
if not isinstance(node, SequenceNode):
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a sequence, but found %s" % node.id, node.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a sequence, but found %s' % node.id,
+ node.start_mark,
+ )
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a mapping of length 1, but found %s" %
- subnode.id,
- subnode.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a mapping of length 1, but found %s' % subnode.id,
+ subnode.start_mark,
+ )
if len(subnode.value) != 1:
raise ConstructorError(
- "while constructing an ordered map", node.start_mark,
- "expected a single mapping item, but found %d items" %
- len(subnode.value),
- subnode.start_mark)
+ 'while constructing an ordered map',
+ node.start_mark,
+ 'expected a single mapping item, but found %d items' % len(subnode.value),
+ subnode.start_mark,
+ )
key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
assert key not in omap
@@ -1431,19 +1480,20 @@ class RoundTripConstructor(SafeConstructor):
self.construct_mapping(node, data)
return
elif isinstance(node, ScalarNode):
- data = TaggedScalar()
- data.value = self.construct_scalar(node)
- data.style = node.style
- data.yaml_set_tag(node.tag)
- yield data
+ data2 = TaggedScalar()
+ data2.value = self.construct_scalar(node)
+ data2.style = node.style
+ data2.yaml_set_tag(node.tag)
+ yield data2
return
except: # NOQA
pass
raise ConstructorError(
- None, None,
- "could not determine a constructor for the tag %r" %
- utf8(node.tag),
- node.start_mark)
+ None,
+ None,
+ 'could not determine a constructor for the tag %r' % utf8(node.tag),
+ node.start_mark,
+ )
def construct_yaml_timestamp(self, node, values=None):
# type: (Any, Any) -> Any
@@ -1453,9 +1503,11 @@ class RoundTripConstructor(SafeConstructor):
match = None
if match is None:
raise ConstructorError(
- None, None,
+ None,
+ None,
'failed to construct timestamp from "{}"'.format(node.value),
- node.start_mark)
+ node.start_mark,
+ )
values = match.groupdict()
if not values['hour']:
return SafeConstructor.construct_yaml_timestamp(self, node, values)
@@ -1489,8 +1541,7 @@ class RoundTripConstructor(SafeConstructor):
if delta:
dt = datetime.datetime(year, month, day, hour, minute)
dt -= delta
- data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute,
- second, fraction)
+ data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction)
data._yaml['delta'] = delta
tz = values['tz_sign'] + values['tz_hour']
if values['tz_minute']:
@@ -1507,52 +1558,51 @@ class RoundTripConstructor(SafeConstructor):
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:null',
- RoundTripConstructor.construct_yaml_null)
+ u'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:bool',
- RoundTripConstructor.construct_yaml_bool)
+ u'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:int',
- RoundTripConstructor.construct_yaml_int)
+ u'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:float',
- RoundTripConstructor.construct_yaml_float)
+ u'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:binary',
- RoundTripConstructor.construct_yaml_binary)
+ u'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- RoundTripConstructor.construct_yaml_timestamp)
+ u'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:omap',
- RoundTripConstructor.construct_yaml_omap)
+ u'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:pairs',
- RoundTripConstructor.construct_yaml_pairs)
+ u'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:set',
- RoundTripConstructor.construct_yaml_set)
+ u'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:str',
- RoundTripConstructor.construct_yaml_str)
+ u'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:seq',
- RoundTripConstructor.construct_yaml_seq)
+ u'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq
+)
RoundTripConstructor.add_constructor(
- u'tag:yaml.org,2002:map',
- RoundTripConstructor.construct_yaml_map)
+ u'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map
+)
-RoundTripConstructor.add_constructor(
- None, RoundTripConstructor.construct_undefined)
+RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined)
diff --git a/cyaml.py b/cyaml.py
index 3e5b90a..ed2c1da 100644
--- a/cyaml.py
+++ b/cyaml.py
@@ -2,7 +2,7 @@
from __future__ import absolute_import
-from _ruamel_yaml import CParser, CEmitter # type: ignore
+from _ruamel_yaml import CParser, CEmitter # type: ignore
from ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor
from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter
@@ -12,16 +12,16 @@ if False: # MYPY
from typing import Any, Union # NOQA
from ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA
-__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
- 'CBaseDumper', 'CSafeDumper', 'CDumper']
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper']
# this includes some hacks to solve the usage of resolver by lower level
# parts of the parser
+
class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
CParser.__init__(self, stream)
self._parser = self._composer = self
BaseConstructor.__init__(self, loader=self)
@@ -33,7 +33,7 @@ class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore
class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
CParser.__init__(self, stream)
self._parser = self._composer = self
SafeConstructor.__init__(self, loader=self)
@@ -45,7 +45,7 @@ class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore
class CLoader(CParser, Constructor, Resolver): # type: ignore
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
CParser.__init__(self, stream)
self._parser = self._composer = self
Constructor.__init__(self, loader=self)
@@ -56,64 +56,130 @@ class CLoader(CParser, Constructor, Resolver): # type: ignore
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
self._emitter = self._serializer = self._representer = self
- BaseRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style, dumper=self)
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
BaseResolver.__init__(self, loadumper=self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
self._emitter = self._serializer = self._representer = self
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
self._emitter = self._serializer = self._representer = self # type: ignore
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
+ SafeRepresenter.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
Resolver.__init__(self)
class CDumper(CEmitter, Representer, Resolver): # type: ignore
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- CEmitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
self._emitter = self._serializer = self._representer = self
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style)
+ Representer.__init__(
+ self, default_style=default_style, default_flow_style=default_flow_style
+ )
Resolver.__init__(self)
diff --git a/dumper.py b/dumper.py
index b43260c..5f23c45 100644
--- a/dumper.py
+++ b/dumper.py
@@ -4,114 +4,218 @@ from __future__ import absolute_import
from ruamel.yaml.emitter import Emitter
from ruamel.yaml.serializer import Serializer
-from ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter, \
- RoundTripRepresenter
+from ruamel.yaml.representer import (
+ Representer,
+ SafeRepresenter,
+ BaseRepresenter,
+ RoundTripRepresenter,
+)
from ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver
if False: # MYPY
- from typing import Any, Dict, List, Union # NOQA
+ from typing import Any, Dict, List, Union # NOQA
from ruamel.yaml.compat import StreamType, VersionType # NOQA
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent,
- dumper=self)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags,
- dumper=self)
- BaseRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style,
- dumper=self)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (Any, StreamType, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ BaseRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
BaseResolver.__init__(self, loadumper=self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent,
- dumper=self)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags,
- dumper=self)
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style,
- dumper=self)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
Resolver.__init__(self, loadumper=self)
class Dumper(Emitter, Serializer, Representer, Resolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent,
- dumper=self)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags,
- dumper=self)
- Representer.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style,
- dumper=self)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ Representer.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
Resolver.__init__(self, loadumper=self)
class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver):
- def __init__(self, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- Emitter.__init__(self, stream, canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align,
- prefix_colon=prefix_colon,
- dumper=self)
- Serializer.__init__(self, encoding=encoding,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags,
- dumper=self)
- RoundTripRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style,
- dumper=self)
+ def __init__(
+ self,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ Emitter.__init__(
+ self,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ dumper=self,
+ )
+ Serializer.__init__(
+ self,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ dumper=self,
+ )
+ RoundTripRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=self,
+ )
VersionedResolver.__init__(self, loader=self)
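Apart from layout, the dumper type comments widen `bool` to `Union[None, bool]` for keywords that default to None, presumably for conformity with the newer mypy's handling of Optional parameters; `Union[None, X]` and `Optional[X]` name the same type. A minimal sketch of the comment-style annotation (the function is hypothetical, not part of ruamel.yaml):

    from typing import Optional, Union

    def set_canonical(flag=None):
        # type: (Union[None, bool]) -> None
        # Union[None, bool] is the same type as Optional[bool]; spelling out the
        # None arm documents that the keyword legitimately defaults to None.
        if flag is not None:
            print('canonical:', flag)

    assert Optional[bool] == Union[None, bool]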
diff --git a/emitter.py b/emitter.py
index ea28c33..626a69e 100644
--- a/emitter.py
+++ b/emitter.py
@@ -12,9 +12,12 @@ from __future__ import print_function
import sys
from ruamel.yaml.error import YAMLError, YAMLStreamError
-from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.events import * # NOQA
+
+# fmt: off
from ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \
check_anchorname_char
+# fmt: on
if False: # MYPY
from typing import Any, Dict, List, Union, Text, Tuple # NOQA
@@ -28,10 +31,17 @@ class EmitterError(YAMLError):
class ScalarAnalysis(object):
- def __init__(self, scalar, empty, multiline,
- allow_flow_plain, allow_block_plain,
- allow_single_quoted, allow_double_quoted,
- allow_block):
+ def __init__(
+ self,
+ scalar,
+ empty,
+ multiline,
+ allow_flow_plain,
+ allow_block_plain,
+ allow_single_quoted,
+ allow_double_quoted,
+ allow_block,
+ ):
# type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None
self.scalar = scalar
self.empty = empty
@@ -81,17 +91,29 @@ class Indents(object):
class Emitter(object):
+ # fmt: off
DEFAULT_TAG_PREFIXES = {
u'!': u'!',
u'tag:yaml.org,2002:': u'!!',
}
+ # fmt: on
MAX_SIMPLE_KEY_LENGTH = 128
- def __init__(self, stream, canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None, dumper=None):
- # type: (StreamType, Any, int, int, bool, Any, int, bool, Any, Any) -> None
+ def __init__(
+ self,
+ stream,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ dumper=None,
+ ):
+ # type: (StreamType, Any, Union[None, int], Union[None, int], Union[None, bool], Any, Union[None, int], Union[None, bool], Any, Any) -> None # NOQA
self.dumper = dumper
if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None:
self.dumper._emitter = self
@@ -195,8 +217,8 @@ class Emitter(object):
# type: () -> Any
try:
if hasattr(self.dumper, 'typ'):
- return self.dumper.serializer # type: ignore
- return self.dumper._serializer # type: ignore
+ return self.dumper.serializer
+ return self.dumper._serializer
except AttributeError:
return self # cyaml
@@ -244,10 +266,10 @@ class Emitter(object):
level = -1
if level < 0:
return False
- return (len(self.events) < count + 1)
+ return len(self.events) < count + 1
def increase_indent(self, flow=False, sequence=None, indentless=False):
- # type: (bool, bool, bool) -> None
+ # type: (bool, Union[None, bool], bool) -> None
self.indents.append(self.indent, sequence)
if self.indent is None: # top level
if flow:
@@ -258,8 +280,9 @@ class Emitter(object):
else:
self.indent = 0
elif not indentless:
- self.indent += (self.best_sequence_indent if self.indents.last_seq() else
- self.best_map_indent)
+ self.indent += (
+ self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent
+ )
# if self.indents.last_seq():
# if self.indent == 0: # top level block sequence
# self.indent = self.best_sequence_indent - self.sequence_dash_offset
@@ -276,22 +299,19 @@ class Emitter(object):
# type: () -> None
if isinstance(self.event, StreamStartEvent):
if PY2:
- if self.event.encoding \
- and not getattr(self.stream, 'encoding', None):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
else:
- if self.event.encoding \
- and not hasattr(self.stream, 'encoding'):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
else:
- raise EmitterError("expected StreamStartEvent, but got %s" %
- self.event)
+ raise EmitterError('expected StreamStartEvent, but got %s' % self.event)
def expect_nothing(self):
# type: () -> None
- raise EmitterError("expected nothing, but got %s" % self.event)
+ raise EmitterError('expected nothing, but got %s' % self.event)
# Document handlers.
@@ -317,12 +337,14 @@ class Emitter(object):
handle_text = self.prepare_tag_handle(handle)
prefix_text = self.prepare_tag_prefix(prefix)
self.write_tag_directive(handle_text, prefix_text)
- implicit = (first and
- not self.event.explicit and
- not self.canonical and
- not self.event.version and
- not self.event.tags and
- not self.check_empty_document())
+ implicit = (
+ first
+ and not self.event.explicit
+ and not self.canonical
+ and not self.event.version
+ and not self.event.tags
+ and not self.check_empty_document()
+ )
if not implicit:
self.write_indent()
self.write_indicator(u'---', True)
@@ -336,8 +358,7 @@ class Emitter(object):
self.write_stream_end()
self.state = self.expect_nothing
else:
- raise EmitterError("expected DocumentStartEvent, but got %s" %
- self.event)
+ raise EmitterError('expected DocumentStartEvent, but got %s' % self.event)
def expect_document_end(self):
# type: () -> None
@@ -349,8 +370,7 @@ class Emitter(object):
self.flush_stream()
self.state = self.expect_document_start
else:
- raise EmitterError("expected DocumentEndEvent, but got %s" %
- self.event)
+ raise EmitterError('expected DocumentEndEvent, but got %s' % self.event)
def expect_document_root(self):
# type: () -> None
@@ -359,11 +379,10 @@ class Emitter(object):
# Node handlers.
- def expect_node(self, root=False, sequence=False, mapping=False,
- simple_key=False):
+ def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False):
# type: (bool, bool, bool, bool) -> None
self.root_context = root
- self.sequence_context = sequence # not used in PyYAML
+ self.sequence_context = sequence # not used in PyYAML
self.mapping_context = mapping
self.simple_key_context = simple_key
if isinstance(self.event, AliasEvent):
@@ -382,8 +401,12 @@ class Emitter(object):
if self.write_pre_comment(self.event):
self.indention = False
self.no_newline = True
- if self.flow_level or self.canonical or self.event.flow_style or \
- self.check_empty_sequence():
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_sequence()
+ ):
self.expect_flow_sequence()
else:
self.expect_block_sequence()
@@ -392,18 +415,22 @@ class Emitter(object):
self.write_post_comment(self.event)
if self.event.comment and self.event.comment[1]:
self.write_pre_comment(self.event)
- if self.flow_level or self.canonical or self.event.flow_style \
- or self.check_empty_mapping():
+ if (
+ self.flow_level
+ or self.canonical
+ or self.event.flow_style
+ or self.check_empty_mapping()
+ ):
self.expect_flow_mapping()
else:
self.expect_block_mapping()
else:
- raise EmitterError("expected NodeEvent, but got %s" % self.event)
+ raise EmitterError('expected NodeEvent, but got %s' % self.event)
def expect_alias(self):
# type: () -> None
if self.event.anchor is None:
- raise EmitterError("anchor is not specified for alias")
+ raise EmitterError('anchor is not specified for alias')
self.process_anchor(u'*')
self.state = self.states.pop()
@@ -540,7 +567,7 @@ class Emitter(object):
def expect_block_sequence(self):
# type: () -> None
- indentless = (self.mapping_context and not self.indention)
+ indentless = self.mapping_context and not self.indention
self.increase_indent(flow=False, sequence=True, indentless=indentless)
self.state = self.expect_first_block_sequence_item
@@ -627,21 +654,32 @@ class Emitter(object):
def check_empty_sequence(self):
# type: () -> bool
- return (isinstance(self.event, SequenceStartEvent) and bool(self.events) and
- isinstance(self.events[0], SequenceEndEvent))
+ return (
+ isinstance(self.event, SequenceStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], SequenceEndEvent)
+ )
def check_empty_mapping(self):
# type: () -> bool
- return (isinstance(self.event, MappingStartEvent) and bool(self.events) and
- isinstance(self.events[0], MappingEndEvent))
+ return (
+ isinstance(self.event, MappingStartEvent)
+ and bool(self.events)
+ and isinstance(self.events[0], MappingEndEvent)
+ )
def check_empty_document(self):
# type: () -> bool
if not isinstance(self.event, DocumentStartEvent) or not self.events:
return False
event = self.events[0]
- return (isinstance(event, ScalarEvent) and event.anchor is None and
- event.tag is None and event.implicit and event.value == u'')
+ return (
+ isinstance(event, ScalarEvent)
+ and event.anchor is None
+ and event.tag is None
+ and event.implicit
+ and event.value == ""
+ )
def check_simple_key(self):
# type: () -> bool
@@ -650,8 +688,10 @@ class Emitter(object):
if self.prepared_anchor is None:
self.prepared_anchor = self.prepare_anchor(self.event.anchor)
length += len(self.prepared_anchor)
- if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
- and self.event.tag is not None:
+ if (
+ isinstance(self.event, (ScalarEvent, CollectionStartEvent))
+ and self.event.tag is not None
+ ):
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(self.event.tag)
length += len(self.prepared_tag)
@@ -659,13 +699,17 @@ class Emitter(object):
if self.analysis is None:
self.analysis = self.analyze_scalar(self.event.value)
length += len(self.analysis.scalar)
- return (length < self.MAX_SIMPLE_KEY_LENGTH and (
- isinstance(self.event, AliasEvent) or
- (isinstance(self.event, SequenceStartEvent) and
- self.event.flow_style is True) or
- (isinstance(self.event, ScalarEvent) and
- not self.analysis.empty and not self.analysis.multiline) or
- self.check_empty_sequence() or self.check_empty_mapping()))
+ return length < self.MAX_SIMPLE_KEY_LENGTH and (
+ isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True)
+ or (
+ isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty
+ and not self.analysis.multiline
+ )
+ or self.check_empty_sequence()
+ or self.check_empty_mapping()
+ )
# Anchor, Tag, and Scalar processors.
@@ -686,9 +730,10 @@ class Emitter(object):
if isinstance(self.event, ScalarEvent):
if self.style is None:
self.style = self.choose_scalar_style()
- if ((not self.canonical or tag is None) and
- ((self.style == '' and self.event.implicit[0]) or
- (self.style != '' and self.event.implicit[1]))):
+ if (not self.canonical or tag is None) and (
+ (self.style == "" and self.event.implicit[0])
+ or (self.style != "" and self.event.implicit[1])
+ ):
self.prepared_tag = None
return
if self.event.implicit[0] and tag is None:
@@ -699,13 +744,16 @@ class Emitter(object):
self.prepared_tag = None
return
if tag is None:
- raise EmitterError("tag is not specified")
+ raise EmitterError('tag is not specified')
if self.prepared_tag is None:
self.prepared_tag = self.prepare_tag(tag)
if self.prepared_tag:
self.write_indicator(self.prepared_tag, True)
- if self.sequence_context and not self.flow_level and \
- isinstance(self.event, ScalarEvent):
+ if (
+ self.sequence_context
+ and not self.flow_level
+ and isinstance(self.event, ScalarEvent)
+ ):
self.no_newline = True
self.prepared_tag = None
@@ -715,25 +763,33 @@ class Emitter(object):
self.analysis = self.analyze_scalar(self.event.value)
if self.event.style == '"' or self.canonical:
return '"'
- if (not self.event.style or self.event.style == '?') and \
- (self.event.implicit[0] or not self.event.implicit[2]):
- if (not (self.simple_key_context and
- (self.analysis.empty or self.analysis.multiline)) and
- (self.flow_level and self.analysis.allow_flow_plain or
- (not self.flow_level and self.analysis.allow_block_plain))):
- return ''
+ if (not self.event.style or self.event.style == '?') and (
+ self.event.implicit[0] or not self.event.implicit[2]
+ ):
+ if not (
+ self.simple_key_context and (self.analysis.empty or self.analysis.multiline)
+ ) and (
+ self.flow_level
+ and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain)
+ ):
+ return ""
self.analysis.allow_block = True
if self.event.style and self.event.style in '|>':
- if (not self.flow_level and not self.simple_key_context and
- self.analysis.allow_block):
+ if (
+ not self.flow_level
+ and not self.simple_key_context
+ and self.analysis.allow_block
+ ):
return self.event.style
if not self.event.style and self.analysis.allow_double_quoted:
if "'" in self.event.value or '\n' in self.event.value:
return '"'
- if not self.event.style or self.event.style == '\'':
- if (self.analysis.allow_single_quoted and
- not (self.simple_key_context and self.analysis.multiline)):
- return '\''
+ if not self.event.style or self.event.style == "'":
+ if self.analysis.allow_single_quoted and not (
+ self.simple_key_context and self.analysis.multiline
+ ):
+ return "'"
return '"'
def process_scalar(self):
@@ -742,7 +798,7 @@ class Emitter(object):
self.analysis = self.analyze_scalar(self.event.value)
if self.style is None:
self.style = self.choose_scalar_style()
- split = (not self.simple_key_context)
+ split = not self.simple_key_context
# if self.analysis.multiline and split \
# and (not self.style or self.style in '\'\"'):
# self.write_indent()
@@ -750,7 +806,7 @@ class Emitter(object):
self.write_indent()
if self.style == '"':
self.write_double_quoted(self.analysis.scalar, split)
- elif self.style == '\'':
+ elif self.style == "'":
self.write_single_quoted(self.analysis.scalar, split)
elif self.style == '>':
self.write_folded(self.analysis.scalar)
@@ -769,36 +825,40 @@ class Emitter(object):
# type: (Any) -> Any
major, minor = version
if major != 1:
- raise EmitterError("unsupported YAML version: %d.%d" %
- (major, minor))
+ raise EmitterError('unsupported YAML version: %d.%d' % (major, minor))
return u'%d.%d' % (major, minor)
def prepare_tag_handle(self, handle):
# type: (Any) -> Any
if not handle:
- raise EmitterError("tag handle must not be empty")
+ raise EmitterError('tag handle must not be empty')
if handle[0] != u'!' or handle[-1] != u'!':
- raise EmitterError("tag handle must start and end with '!': %r"
- % (utf8(handle)))
+ raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle)))
for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or
- u'a' <= ch <= u'z' or ch in u'-_'):
- raise EmitterError("invalid character %r in the tag handle: %r"
- % (utf8(ch), utf8(handle)))
+ if not (
+ u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_'
+ ):
+ raise EmitterError(
+ 'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle))
+ )
return handle
def prepare_tag_prefix(self, prefix):
# type: (Any) -> Any
if not prefix:
- raise EmitterError("tag prefix must not be empty")
+ raise EmitterError('tag prefix must not be empty')
chunks = [] # type: List[Any]
start = end = 0
if prefix[0] == u'!':
end = 1
while end < len(prefix):
ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ if (
+ u'0' <= ch <= u'9'
+ or u'A' <= ch <= u'Z'
+ or u'a' <= ch <= u'z'
+ or ch in u"-;/?!:@&=+$,_.~*'()[]"
+ ):
end += 1
else:
if start < end:
@@ -809,29 +869,32 @@ class Emitter(object):
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(prefix[start:end])
- return u''.join(chunks)
+ return "".join(chunks)
def prepare_tag(self, tag):
# type: (Any) -> Any
if not tag:
- raise EmitterError("tag must not be empty")
+ raise EmitterError('tag must not be empty')
if tag == u'!':
return tag
handle = None
suffix = tag
prefixes = sorted(self.tag_prefixes.keys())
for prefix in prefixes:
- if tag.startswith(prefix) \
- and (prefix == u'!' or len(prefix) < len(tag)):
+ if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
- suffix = tag[len(prefix):]
+ suffix = tag[len(prefix) :]
chunks = [] # type: List[Any]
start = end = 0
while end < len(suffix):
ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.~*\'()[]' \
- or (ch == u'!' and handle != u'!'):
+ if (
+ u'0' <= ch <= u'9'
+ or u'A' <= ch <= u'Z'
+ or u'a' <= ch <= u'z'
+ or ch in u"-;/?:@&=+$,_.~*'()[]"
+ or (ch == u'!' and handle != u'!')
+ ):
end += 1
else:
if start < end:
@@ -842,7 +905,7 @@ class Emitter(object):
chunks.append(u'%%%02X' % ord(ch))
if start < end:
chunks.append(suffix[start:end])
- suffix_text = u''.join(chunks)
+ suffix_text = "".join(chunks)
if handle:
return u'%s%s' % (handle, suffix_text)
else:
@@ -851,11 +914,12 @@ class Emitter(object):
def prepare_anchor(self, anchor):
# type: (Any) -> Any
if not anchor:
- raise EmitterError("anchor must not be empty")
+ raise EmitterError('anchor must not be empty')
for ch in anchor:
if not check_anchorname_char(ch):
- raise EmitterError("invalid character %r in the anchor: %r"
- % (utf8(ch), utf8(anchor)))
+ raise EmitterError(
+ 'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor))
+ )
return anchor
def analyze_scalar(self, scalar):
@@ -863,10 +927,15 @@ class Emitter(object):
# Empty scalar is a special case.
if not scalar:
return ScalarAnalysis(
- scalar=scalar, empty=True, multiline=False,
- allow_flow_plain=False, allow_block_plain=True,
- allow_single_quoted=True, allow_double_quoted=True,
- allow_block=False)
+ scalar=scalar,
+ empty=True,
+ multiline=False,
+ allow_flow_plain=False,
+ allow_block_plain=True,
+ allow_single_quoted=True,
+ allow_double_quoted=True,
+ allow_block=False,
+ )
# Indicators and special characters.
block_indicators = False
@@ -891,8 +960,7 @@ class Emitter(object):
preceeded_by_whitespace = True
# Last character or followed by a whitespace.
- followed_by_whitespace = (len(scalar) == 1 or
- scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029'
# The previous character is a space.
previous_space = False
@@ -907,7 +975,7 @@ class Emitter(object):
# Check for indicators.
if index == 0:
# Leading indicators are special characters.
- if ch in u'#,[]{}&*!|>\'\"%@`':
+ if ch in u'#,[]{}&*!|>\'"%@`':
flow_indicators = True
block_indicators = True
if ch in u'?:': # ToDo
@@ -936,10 +1004,12 @@ class Emitter(object):
if ch in u'\n\x85\u2028\u2029':
line_breaks = True
if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
- if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF' or
- u'\uE000' <= ch <= u'\uFFFD' or
- (self.unicode_supplementary and
- (u'\U00010000' <= ch <= u'\U0010FFFF'))) and ch != u'\uFEFF':
+ if (
+ ch == u'\x85'
+ or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'
+ or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF'))
+ ) and ch != u'\uFEFF':
# unicode_characters = True
if not self.allow_unicode:
special_characters = True
@@ -971,10 +1041,10 @@ class Emitter(object):
# Prepare for the next character.
index += 1
- preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ preceeded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029'
followed_by_whitespace = (
- index + 1 >= len(scalar) or
- scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029')
+ index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029'
+ )
# Let's decide what styles are allowed.
allow_flow_plain = True
@@ -984,7 +1054,7 @@ class Emitter(object):
allow_block = True
# Leading and trailing whitespaces are bad for plain scalars.
- if (leading_space or leading_break or trailing_space or trailing_break):
+ if leading_space or leading_break or trailing_space or trailing_break:
allow_flow_plain = allow_block_plain = False
# We do not permit trailing spaces for block scalars.
@@ -1018,13 +1088,16 @@ class Emitter(object):
if block_indicators:
allow_block_plain = False
- return ScalarAnalysis(scalar=scalar,
- empty=False, multiline=line_breaks,
- allow_flow_plain=allow_flow_plain,
- allow_block_plain=allow_block_plain,
- allow_single_quoted=allow_single_quoted,
- allow_double_quoted=allow_double_quoted,
- allow_block=allow_block)
+ return ScalarAnalysis(
+ scalar=scalar,
+ empty=False,
+ multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block,
+ )
# Writers.
@@ -1043,8 +1116,7 @@ class Emitter(object):
# type: () -> None
self.flush_stream()
- def write_indicator(self, indicator, need_whitespace,
- whitespace=False, indention=False):
+ def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False):
# type: (Any, Any, bool, bool) -> None
if self.whitespace or not need_whitespace:
data = indicator
@@ -1061,8 +1133,11 @@ class Emitter(object):
def write_indent(self):
# type: () -> None
indent = self.indent or 0
- if not self.indention or self.column > indent \
- or (self.column == indent and not self.whitespace):
+ if (
+ not self.indention
+ or self.column > indent
+ or (self.column == indent and not self.whitespace)
+ ):
if bool(self.no_newline):
self.no_newline = False
else:
@@ -1112,7 +1187,7 @@ class Emitter(object):
self.write_line_break()
if self.requested_indent != 0:
self.write_indent()
- self.write_indicator(u'\'', True)
+ self.write_indicator(u"'", True)
spaces = False
breaks = False
start = end = 0
@@ -1122,8 +1197,13 @@ class Emitter(object):
ch = text[end]
if spaces:
if ch is None or ch != u' ':
- if start + 1 == end and self.column > self.best_width and split \
- and start != 0 and end != len(text):
+ if (
+ start + 1 == end
+ and self.column > self.best_width
+ and split
+ and start != 0
+ and end != len(text)
+ ):
self.write_indent()
else:
data = text[start:end]
@@ -1144,7 +1224,7 @@ class Emitter(object):
self.write_indent()
start = end
else:
- if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'":
if start < end:
data = text[start:end]
self.column += len(data)
@@ -1152,18 +1232,18 @@ class Emitter(object):
data = data.encode(self.encoding)
self.stream.write(data)
start = end
- if ch == u'\'':
- data = u'\'\''
+ if ch == u"'":
+ data = u"''"
self.column += 2
if bool(self.encoding):
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = ch == u' '
+ breaks = ch in u'\n\x85\u2028\u2029'
end += 1
- self.write_indicator(u'\'', False)
+ self.write_indicator(u"'", False)
ESCAPE_REPLACEMENTS = {
u'\0': u'0',
@@ -1175,7 +1255,7 @@ class Emitter(object):
u'\x0C': u'f',
u'\x0D': u'r',
u'\x1B': u'e',
- u'\"': u'\"',
+ u'"': u'"',
u'\\': u'\\',
u'\x85': u'N',
u'\xA0': u'_',
@@ -1196,10 +1276,17 @@ class Emitter(object):
ch = None
if end < len(text):
ch = text[end]
- if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
- or not (u'\x20' <= ch <= u'\x7E' or
- (self.allow_unicode and
- (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD'))):
+ if (
+ ch is None
+ or ch in u'"\\\x85\u2028\u2029\uFEFF'
+ or not (
+ u'\x20' <= ch <= u'\x7E'
+ or (
+ self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD')
+ )
+ )
+ ):
if start < end:
data = text[start:end]
self.column += len(data)
@@ -1221,8 +1308,12 @@ class Emitter(object):
data = data.encode(self.encoding)
self.stream.write(data)
start = end + 1
- if 0 < end < len(text) - 1 and (ch == u' ' or start >= end) \
- and self.column + (end - start) > self.best_width and split:
+ if (
+ 0 < end < len(text) - 1
+ and (ch == u' ' or start >= end)
+ and self.column + (end - start) > self.best_width
+ and split
+ ):
data = text[start:end] + u'\\'
if start < end:
start = end
@@ -1244,7 +1335,7 @@ class Emitter(object):
def determine_block_hints(self, text):
# type: (Any) -> Any
- hints = u''
+ hints = ""
if text:
if text[0] in u' \n\x85\u2028\u2029':
hints += text_type(self.best_sequence_indent)
@@ -1271,10 +1362,14 @@ class Emitter(object):
ch = text[end]
if breaks:
if ch is None or ch not in u'\n\x85\u2028\u2029':
- if not leading_space and ch is not None and ch != u' ' \
- and text[start] == u'\n':
+ if (
+ not leading_space
+ and ch is not None
+ and ch != u' '
+ and text[start] == u'\n'
+ ):
self.write_line_break()
- leading_space = (ch == u' ')
+ leading_space = ch == u' '
for br in text[start:end]:
if br == u'\n':
self.write_line_break()
@@ -1305,8 +1400,8 @@ class Emitter(object):
self.write_line_break()
start = end
if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
- spaces = (ch == u' ')
+ breaks = ch in u'\n\x85\u2028\u2029'
+ spaces = ch == u' '
end += 1
def write_literal(self, text):
@@ -1342,7 +1437,7 @@ class Emitter(object):
self.write_line_break()
start = end
if ch is not None:
- breaks = (ch in u'\n\x85\u2028\u2029')
+ breaks = ch in u'\n\x85\u2028\u2029'
end += 1
def write_plain(self, text, split=True):
@@ -1410,8 +1505,8 @@ class Emitter(object):
raise
start = end
if ch is not None:
- spaces = (ch == u' ')
- breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = ch == u' '
+ breaks = ch in u'\n\x85\u2028\u2029'
end += 1
def write_comment(self, comment):
@@ -1453,8 +1548,7 @@ class Emitter(object):
try:
start_events = (MappingStartEvent, SequenceStartEvent)
for comment in comments:
- if isinstance(event, start_events) and \
- getattr(comment, 'pre_done', None):
+ if isinstance(event, start_events) and getattr(comment, 'pre_done', None):
continue
if self.column != 0:
self.write_line_break()
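emitter.py also gains paired `# fmt: off` / `# fmt: on` markers around the compat import and the hand-aligned DEFAULT_TAG_PREFIXES table; black-style formatters leave everything between the two comments untouched, so deliberate manual alignment survives future formatting runs. A minimal sketch with a hypothetical table (the data is illustrative only):

    # fmt: off
    ESCAPE_CHARS = {
        '\n': 'n',      # alignment inside this block is preserved by the formatter
        '\t': 't',
        '\0': '0',
    }
    # fmt: on

    def escape(ch):
        # code outside the markers is reformatted as usual
        return '\\' + ESCAPE_CHARS[ch] if ch in ESCAPE_CHARS else ch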
diff --git a/error.py b/error.py
index f712a60..ab1072d 100644
--- a/error.py
+++ b/error.py
@@ -12,14 +12,20 @@ if False: # MYPY
__all__ = [
- 'FileMark', 'StringMark', 'CommentMark', 'YAMLError', 'MarkedYAMLError',
- 'ReusedAnchorWarning', 'UnsafeLoaderWarning', 'MarkedYAMLWarning',
+ 'FileMark',
+ 'StringMark',
+ 'CommentMark',
+ 'YAMLError',
+ 'MarkedYAMLError',
+ 'ReusedAnchorWarning',
+ 'UnsafeLoaderWarning',
+ 'MarkedYAMLWarning',
'MarkedYAMLFutureWarning',
]
class StreamMark(object):
- __slots__ = 'name', 'index', 'line', 'column',
+ __slots__ = 'name', 'index', 'line', 'column'
def __init__(self, name, index, line, column):
# type: (Any, int, int, int) -> None
@@ -30,8 +36,7 @@ class StreamMark(object):
def __str__(self):
# type: () -> Any
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line + 1, self.column + 1)
+ where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
return where
@@ -40,7 +45,7 @@ class FileMark(StreamMark):
class StringMark(StreamMark):
- __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer',
+ __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer'
def __init__(self, name, index, line, column, buffer, pointer):
# type: (Any, int, int, int, Any, Any) -> None
@@ -52,19 +57,17 @@ class StringMark(StreamMark):
# type: (int, int) -> Any
if self.buffer is None: # always False
return None
- head = ''
+ head = ""
start = self.pointer
- while (start > 0 and
- self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029'):
+ while start > 0 and self.buffer[start - 1] not in u'\0\r\n\x85\u2028\u2029':
start -= 1
if self.pointer - start > max_length / 2 - 1:
head = ' ... '
start += 5
break
- tail = ''
+ tail = ""
end = self.pointer
- while (end < len(self.buffer) and
- self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
end += 1
if end - self.pointer > max_length / 2 - 1:
tail = ' ... '
@@ -73,21 +76,27 @@ class StringMark(StreamMark):
snippet = utf8(self.buffer[start:end])
caret = '^'
caret = '^ (line: {})'.format(self.line + 1)
- return ' ' * indent + head + snippet + tail + '\n' \
- + ' ' * (indent + self.pointer - start + len(head)) + caret
+ return (
+ ' ' * indent
+ + head
+ + snippet
+ + tail
+ + '\n'
+ + ' ' * (indent + self.pointer - start + len(head))
+ + caret
+ )
def __str__(self):
# type: () -> Any
snippet = self.get_snippet()
- where = " in \"%s\", line %d, column %d" \
- % (self.name, self.line + 1, self.column + 1)
+ where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1)
if snippet is not None:
- where += ":\n" + snippet
+ where += ':\n' + snippet
return where
class CommentMark(object):
- __slots__ = 'column',
+ __slots__ = ('column',)
def __init__(self, column):
# type: (Any) -> None
@@ -99,8 +108,15 @@ class YAMLError(Exception):
class MarkedYAMLError(YAMLError):
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None, warn=None):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
@@ -114,18 +130,20 @@ class MarkedYAMLError(YAMLError):
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None or
- self.context_mark.name != self.problem_mark.name or
- self.context_mark.line != self.problem_mark.line or
- self.context_mark.column != self.problem_mark.column):
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
- note = textwrap.dedent(self.note) # type: ignore
+ note = textwrap.dedent(self.note)
lines.append(note)
return '\n'.join(lines)
@@ -139,8 +157,15 @@ class YAMLWarning(Warning):
class MarkedYAMLWarning(YAMLWarning):
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None, warn=None):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
@@ -154,21 +179,23 @@ class MarkedYAMLWarning(YAMLWarning):
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None or
- self.context_mark.name != self.problem_mark.name or
- self.context_mark.line != self.problem_mark.line or
- self.context_mark.column != self.problem_mark.column):
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
- note = textwrap.dedent(self.note) # type: ignore
+ note = textwrap.dedent(self.note)
lines.append(note)
if self.warn is not None and self.warn:
- warn = textwrap.dedent(self.warn) # type: ignore
+ warn = textwrap.dedent(self.warn)
lines.append(warn)
return '\n'.join(lines)
@@ -215,7 +242,9 @@ or alternatively include the following in your code:
import warnings
warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning)
-""".format(self.flt, line, col)
+""".format(
+ self.flt, line, col
+ )
warnings.simplefilter('once', MantissaNoDotYAML1_1Warning)
@@ -226,8 +255,15 @@ class YAMLFutureWarning(Warning):
class MarkedYAMLFutureWarning(YAMLFutureWarning):
- def __init__(self, context=None, context_mark=None,
- problem=None, problem_mark=None, note=None, warn=None):
+ def __init__(
+ self,
+ context=None,
+ context_mark=None,
+ problem=None,
+ problem_mark=None,
+ note=None,
+ warn=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
@@ -242,20 +278,22 @@ class MarkedYAMLFutureWarning(YAMLFutureWarning):
if self.context is not None:
lines.append(self.context)
- if self.context_mark is not None \
- and (self.problem is None or self.problem_mark is None or
- self.context_mark.name != self.problem_mark.name or
- self.context_mark.line != self.problem_mark.line or
- self.context_mark.column != self.problem_mark.column):
+ if self.context_mark is not None and (
+ self.problem is None
+ or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column
+ ):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
- note = textwrap.dedent(self.note) # type: ignore
+ note = textwrap.dedent(self.note)
lines.append(note)
if self.warn is not None and self.warn:
- warn = textwrap.dedent(self.warn) # type: ignore
+ warn = textwrap.dedent(self.warn)
lines.append(warn)
return '\n'.join(lines)
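The `__slots__` edits in error.py (and in events.py below) only add parentheses: the old `'column',` was already a one-element tuple, and `('column',)` is the identical value written so the trailing comma cannot be overlooked. A minimal sketch with a hypothetical class showing that slot behaviour is unchanged:

    class Mark(object):                  # illustrative stand-in, not ruamel.yaml's class
        __slots__ = ('column',)          # same tuple as the bare "__slots__ = 'column',"

        def __init__(self, column):
            self.column = column

    m = Mark(3)
    try:
        m.line = 1                       # rejected: 'line' is not listed in __slots__
    except AttributeError as exc:
        print(exc)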
diff --git a/events.py b/events.py
index 515274a..672042d 100644
--- a/events.py
+++ b/events.py
@@ -12,7 +12,7 @@ def CommentCheck():
class Event(object):
- __slots__ = 'start_mark', 'end_mark', 'comment',
+ __slots__ = 'start_mark', 'end_mark', 'comment'
def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck):
# type: (Any, Any, Any) -> None
@@ -25,18 +25,19 @@ class Event(object):
def __repr__(self):
# type: () -> Any
- attributes = [key for key in ['anchor', 'tag', 'implicit', 'value',
- 'flow_style', 'style']
- if hasattr(self, key)]
- arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
- for key in attributes])
+ attributes = [
+ key
+ for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style']
+ if hasattr(self, key)
+ ]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes])
if self.comment not in [None, CommentCheck]:
arguments += ', comment={!r}'.format(self.comment)
return '%s(%s)' % (self.__class__.__name__, arguments)
class NodeEvent(Event):
- __slots__ = 'anchor',
+ __slots__ = ('anchor',)
def __init__(self, anchor, start_mark=None, end_mark=None, comment=None):
# type: (Any, Any, Any, Any) -> None
@@ -45,10 +46,18 @@ class NodeEvent(Event):
class CollectionStartEvent(NodeEvent):
- __slots__ = 'tag', 'implicit', 'flow_style',
-
- def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
- flow_style=None, comment=None):
+ __slots__ = 'tag', 'implicit', 'flow_style'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any, Any) -> None
NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
self.tag = tag
@@ -62,11 +71,11 @@ class CollectionEndEvent(Event):
# Implementations.
+
class StreamStartEvent(Event):
- __slots__ = 'encoding',
+ __slots__ = ('encoding',)
- def __init__(self, start_mark=None, end_mark=None, encoding=None,
- comment=None):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.encoding = encoding
@@ -77,10 +86,17 @@ class StreamEndEvent(Event):
class DocumentStartEvent(Event):
- __slots__ = 'explicit', 'version', 'tags',
-
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, version=None, tags=None, comment=None):
+ __slots__ = 'explicit', 'version', 'tags'
+
+ def __init__(
+ self,
+ start_mark=None,
+ end_mark=None,
+ explicit=None,
+ version=None,
+ tags=None,
+ comment=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.explicit = explicit
@@ -89,10 +105,9 @@ class DocumentStartEvent(Event):
class DocumentEndEvent(Event):
- __slots__ = 'explicit',
+ __slots__ = ('explicit',)
- def __init__(self, start_mark=None, end_mark=None,
- explicit=None, comment=None):
+ def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None):
# type: (Any, Any, Any, Any) -> None
Event.__init__(self, start_mark, end_mark, comment)
self.explicit = explicit
@@ -103,10 +118,19 @@ class AliasEvent(NodeEvent):
class ScalarEvent(NodeEvent):
- __slots__ = 'tag', 'implicit', 'value', 'style',
-
- def __init__(self, anchor, tag, implicit, value,
- start_mark=None, end_mark=None, style=None, comment=None):
+ __slots__ = 'tag', 'implicit', 'value', 'style'
+
+ def __init__(
+ self,
+ anchor,
+ tag,
+ implicit,
+ value,
+ start_mark=None,
+ end_mark=None,
+ style=None,
+ comment=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None
NodeEvent.__init__(self, anchor, start_mark, end_mark, comment)
self.tag = tag
diff --git a/loader.py b/loader.py
index 725c326..d8cbe31 100644
--- a/loader.py
+++ b/loader.py
@@ -7,12 +7,16 @@ from ruamel.yaml.reader import Reader
from ruamel.yaml.scanner import Scanner, RoundTripScanner
from ruamel.yaml.parser import Parser, RoundTripParser
from ruamel.yaml.composer import Composer
-from ruamel.yaml.constructor import BaseConstructor, SafeConstructor, Constructor, \
- RoundTripConstructor
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
from ruamel.yaml.resolver import VersionedResolver
if False: # MYPY
- from typing import Any, Dict, List # NOQA
+ from typing import Any, Dict, List, Union # NOQA
from ruamel.yaml.compat import StreamTextType, VersionType # NOQA
__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
@@ -20,7 +24,7 @@ __all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
Reader.__init__(self, stream, loader=self)
Scanner.__init__(self, loader=self)
Parser.__init__(self, loader=self)
@@ -31,7 +35,7 @@ class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedRe
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
Reader.__init__(self, stream, loader=self)
Scanner.__init__(self, loader=self)
Parser.__init__(self, loader=self)
@@ -42,7 +46,7 @@ class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedRe
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
Reader.__init__(self, stream, loader=self)
Scanner.__init__(self, loader=self)
Parser.__init__(self, loader=self)
@@ -51,10 +55,16 @@ class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
VersionedResolver.__init__(self, version, loader=self)
-class RoundTripLoader(Reader, RoundTripScanner, RoundTripParser, Composer,
- RoundTripConstructor, VersionedResolver):
+class RoundTripLoader(
+ Reader,
+ RoundTripScanner,
+ RoundTripParser,
+ Composer,
+ RoundTripConstructor,
+ VersionedResolver,
+):
def __init__(self, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None
# self.reader = Reader.__init__(self, stream)
Reader.__init__(self, stream, loader=self)
RoundTripScanner.__init__(self, loader=self)
diff --git a/main.py b/main.py
index b2bd808..93c89ca 100644
--- a/main.py
+++ b/main.py
@@ -10,28 +10,36 @@ from importlib import import_module
import ruamel.yaml
-from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA
-from ruamel.yaml.tokens import * # NOQA
-from ruamel.yaml.events import * # NOQA
-from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
from ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA
from ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA
from ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3
from ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA
-from ruamel.yaml.representer import (BaseRepresenter, SafeRepresenter, Representer,
- RoundTripRepresenter)
-from ruamel.yaml.constructor import (BaseConstructor, SafeConstructor, Constructor,
- RoundTripConstructor)
+from ruamel.yaml.representer import (
+ BaseRepresenter,
+ SafeRepresenter,
+ Representer,
+ RoundTripRepresenter,
+)
+from ruamel.yaml.constructor import (
+ BaseConstructor,
+ SafeConstructor,
+ Constructor,
+ RoundTripConstructor,
+)
from ruamel.yaml.loader import Loader as UnsafeLoader
if False: # MYPY
- from typing import List, Set, Dict, Union, Any # NOQA
+ from typing import List, Set, Dict, Union, Any # NOQA
from ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA
try:
- from _ruamel_yaml import CParser, CEmitter # type: ignore
+ from _ruamel_yaml import CParser, CEmitter # type: ignore
except: # NOQA
CParser = CEmitter = None
@@ -43,6 +51,7 @@ enforce = object()
# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a
# subset of abbreviations, which should be all caps according to PEP8
+
class YAML(object):
def __init__(self, _kw=enforce, typ=None, pure=False, plug_ins=None):
# type: (Any, Any, Any, Any) -> None
@@ -56,8 +65,10 @@ class YAML(object):
plug_ins: a list of plug-in files
"""
if _kw is not enforce:
- raise TypeError("{}.__init__() takes no positional argument but at least "
- "one was given ({!r})".format(self.__class__.__name__, _kw))
+ raise TypeError(
+ '{}.__init__() takes no positional argument but at least '
+ 'one was given ({!r})'.format(self.__class__.__name__, _kw)
+ )
self.typ = 'rt' if typ is None else typ
self.pure = pure
@@ -65,26 +76,27 @@ class YAML(object):
for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins():
file_name = pu.replace(os.sep, '.')
self.plug_ins.append(import_module(file_name))
- self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
+ self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any
self.allow_unicode = True
- self.Reader = None # type: Any
- self.Scanner = None # type: Any
- self.Serializer = None # type: Any
+ self.Reader = None # type: Any
+ self.Scanner = None # type: Any
+ self.Serializer = None # type: Any
self.default_flow_style = None # type: Any
if self.typ == 'rt':
self.default_flow_style = False
# no optimized rt-dumper yet
- self.Emitter = ruamel.yaml.emitter.Emitter # type: Any
- self.Serializer = ruamel.yaml.serializer.Serializer # type: Any
+ self.Emitter = ruamel.yaml.emitter.Emitter # type: Any
+ self.Serializer = ruamel.yaml.serializer.Serializer # type: Any
self.Representer = ruamel.yaml.representer.RoundTripRepresenter # type: Any
- self.Scanner = ruamel.yaml.scanner.RoundTripScanner # type: Any
+ self.Scanner = ruamel.yaml.scanner.RoundTripScanner # type: Any
# no optimized rt-parser yet
- self.Parser = ruamel.yaml.parser.RoundTripParser # type: Any
- self.Composer = ruamel.yaml.composer.Composer # type: Any
+ self.Parser = ruamel.yaml.parser.RoundTripParser # type: Any
+ self.Composer = ruamel.yaml.composer.Composer # type: Any
self.Constructor = ruamel.yaml.constructor.RoundTripConstructor # type: Any
elif self.typ == 'safe':
- self.Emitter = ruamel.yaml.emitter.Emitter if pure or CEmitter is None \
- else CEmitter
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
self.Representer = ruamel.yaml.representer.SafeRepresenter
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
@@ -96,8 +108,9 @@ class YAML(object):
self.Composer = ruamel.yaml.composer.Composer
self.Constructor = ruamel.yaml.constructor.BaseConstructor
elif self.typ == 'unsafe':
- self.Emitter = ruamel.yaml.emitter.Emitter if pure or CEmitter is None \
- else CEmitter
+ self.Emitter = (
+ ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter
+ )
self.Representer = ruamel.yaml.representer.Representer
self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser
self.Composer = ruamel.yaml.composer.Composer
@@ -109,7 +122,8 @@ class YAML(object):
break
else:
raise NotImplementedError(
- 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ))
+ 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ)
+ )
self.stream = None
self.canonical = None
self.old_indent = None
@@ -193,8 +207,7 @@ class YAML(object):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
- setattr(self, attr, self.Resolver(
- version=self.version, loader=self))
+ setattr(self, attr, self.Resolver(version=self.version, loader=self))
return getattr(self, attr)
@property
@@ -204,11 +217,15 @@ class YAML(object):
if not hasattr(self, attr):
if self.Emitter is not CEmitter:
_emitter = self.Emitter(
- None, canonical=self.canonical,
- indent=self.old_indent, width=self.width,
- allow_unicode=self.allow_unicode, line_break=self.line_break,
+ None,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
prefix_colon=self.prefix_colon,
- dumper=self)
+ dumper=self,
+ )
setattr(self, attr, _emitter)
if self.map_indent is not None:
_emitter.best_map_indent = self.map_indent
@@ -229,10 +246,18 @@ class YAML(object):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
- setattr(self, attr, self.Serializer(
- encoding=self.encoding,
- explicit_start=self.explicit_start, explicit_end=self.explicit_end,
- version=self.version, tags=self.tags, dumper=self))
+ setattr(
+ self,
+ attr,
+ self.Serializer(
+ encoding=self.encoding,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ dumper=self,
+ ),
+ )
return getattr(self, attr)
@property
@@ -240,10 +265,15 @@ class YAML(object):
# type: () -> Any
attr = '_' + sys._getframe().f_code.co_name
if not hasattr(self, attr):
- setattr(self, attr, self.Representer(
- default_style=self.default_style,
- default_flow_style=self.default_flow_style,
- dumper=self))
+ setattr(
+ self,
+ attr,
+ self.Representer(
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ dumper=self,
+ ),
+ )
return getattr(self, attr)
# separate output resolver?
@@ -278,8 +308,10 @@ class YAML(object):
def load_all(self, stream, _kw=enforce): # , skip=None):
# type: (StreamTextType, Any) -> Any
if _kw is not enforce:
- raise TypeError("{}.__init__() takes no positional argument but at least "
- "one was given ({!r})".format(self.__class__.__name__, _kw))
+ raise TypeError(
+ '{}.__init__() takes no positional argument but at least '
+ 'one was given ({!r})'.format(self.__class__.__name__, _kw)
+ )
if not hasattr(stream, 'read') and hasattr(stream, 'open'):
# pathlib.Path() instance
with stream.open('r') as fp: # type: ignore
@@ -338,12 +370,13 @@ class YAML(object):
class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore
def __init__(selfx, stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> None
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> None # NOQA
CParser.__init__(selfx, stream)
selfx._parser = selfx._composer = selfx
self.Constructor.__init__(selfx, loader=selfx)
selfx.allow_duplicate_keys = self.allow_duplicate_keys
rslvr.__init__(selfx, loadumper=selfx)
+
self._stream = stream
loader = XLoader(stream)
return loader, loader
@@ -360,11 +393,13 @@ class YAML(object):
"""
if not hasattr(stream, 'write') and hasattr(stream, 'open'):
# pathlib.Path() instance
- with stream.open('w') as fp: # type: ignore
+ with stream.open('w') as fp: # type: ignore
return self.dump_all(documents, fp, _kw, transform=transform)
if _kw is not enforce:
- raise TypeError("{}.dump(_all) takes two positional argument but at least "
- "three were given ({!r})".format(self.__class__.__name__, _kw))
+ raise TypeError(
+ '{}.dump(_all) takes two positional argument but at least '
+ 'three were given ({!r})'.format(self.__class__.__name__, _kw)
+ )
# The stream should have the methods `write` and possibly `flush`.
if self.top_level_colon_align is True:
tlca = max([len(str(x)) for x in documents[0]]) # type: Any
@@ -376,8 +411,9 @@ class YAML(object):
stream = StringIO()
else:
stream = BytesIO()
- serializer, representer, emitter = \
- self.get_serializer_representer_emitter(stream, tlca)
+ serializer, representer, emitter = self.get_serializer_representer_emitter(
+ stream, tlca
+ )
try:
self.serializer.open()
for data in documents:
@@ -393,8 +429,8 @@ class YAML(object):
except AttributeError:
raise
# self.dumper.dispose() # cyaml
- delattr(self, "_serializer")
- delattr(self, "_emitter")
+ delattr(self, '_serializer')
+ delattr(self, '_emitter')
if transform:
val = stream.getvalue() # type: ignore
if self.encoding:
@@ -422,36 +458,68 @@ class YAML(object):
return self.serializer, self.representer, self.emitter
# C routines
- rslvr = ruamel.yaml.resolver.BaseResolver if self.typ == 'base' \
+ rslvr = (
+ ruamel.yaml.resolver.BaseResolver
+ if self.typ == 'base'
else ruamel.yaml.resolver.Resolver
+ )
class XDumper(CEmitter, self.Representer, rslvr): # type: ignore
- def __init__(selfx, stream,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
- CEmitter.__init__(selfx, stream, canonical=canonical,
- indent=indent, width=width, encoding=encoding,
- allow_unicode=allow_unicode, line_break=line_break,
- explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags)
+ def __init__(
+ selfx,
+ stream,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+ ):
+ # type: (StreamType, Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> None # NOQA
+ CEmitter.__init__(
+ selfx,
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ encoding=encoding,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ )
selfx._emitter = selfx._serializer = selfx._representer = selfx
- self.Representer.__init__(selfx, default_style=default_style,
- default_flow_style=default_flow_style)
+ self.Representer.__init__(
+ selfx, default_style=default_style, default_flow_style=default_flow_style
+ )
rslvr.__init__(selfx)
+
self._stream = stream
- dumper = XDumper(stream, default_style=self.default_style,
- default_flow_style=self.default_flow_style,
- canonical=self.canonical, indent=self.old_indent, width=self.width,
- allow_unicode=self.allow_unicode, line_break=self.line_break,
- explicit_start=self.explicit_start,
- explicit_end=self.explicit_end,
- version=self.version, tags=self.tags)
+ dumper = XDumper(
+ stream,
+ default_style=self.default_style,
+ default_flow_style=self.default_flow_style,
+ canonical=self.canonical,
+ indent=self.old_indent,
+ width=self.width,
+ allow_unicode=self.allow_unicode,
+ line_break=self.line_break,
+ explicit_start=self.explicit_start,
+ explicit_end=self.explicit_end,
+ version=self.version,
+ tags=self.tags,
+ )
self._emitter = self._serializer = dumper
return dumper, dumper, dumper
@@ -460,6 +528,7 @@ class YAML(object):
# type: (Any) -> Any
if self.typ == 'rt':
from ruamel.yaml.comments import CommentedMap
+
return CommentedMap(**kw)
else:
return dict(**kw)
@@ -468,6 +537,7 @@ class YAML(object):
# type: (Any) -> Any
if self.typ == 'rt':
from ruamel.yaml.comments import CommentedSeq
+
return CommentedSeq(*args)
else:
return list(*args)
@@ -477,7 +547,7 @@ class YAML(object):
# type: () -> Any
bd = os.path.dirname(__file__)
gpbd = os.path.dirname(os.path.dirname(bd))
- res = [x.replace(gpbd, '')[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
+ res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')]
return res
def register_class(self, cls):
@@ -492,15 +562,18 @@ class YAML(object):
try:
self.representer.add_representer(cls, cls.to_yaml)
except AttributeError:
+
def t_y(representer, data):
# type: (Any, Any) -> Any
return representer.represent_yaml_object(
- tag, data, cls, flow_style=representer.default_flow_style)
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
self.representer.add_representer(cls, t_y)
try:
self.constructor.add_constructor(tag, cls.from_yaml)
except AttributeError:
+
def f_y(constructor, node):
# type: (Any, Any) -> Any
return constructor.construct_yaml_object(node, cls)
@@ -547,29 +620,35 @@ def yaml_object(yml):
     If to_yaml and/or from_yaml methods are available, these are called for dumping and
     loading respectively; default routines (dumping a mapping of the attributes) are used otherwise.
"""
+
def yo_deco(cls):
# type: (Any) -> Any
tag = getattr(cls, 'yaml_tag', '!' + cls.__name__)
try:
yml.representer.add_representer(cls, cls.to_yaml)
except AttributeError:
+
def t_y(representer, data):
# type: (Any, Any) -> Any
return representer.represent_yaml_object(
- tag, data, cls, flow_style=representer.default_flow_style)
+ tag, data, cls, flow_style=representer.default_flow_style
+ )
yml.representer.add_representer(cls, t_y)
try:
yml.constructor.add_constructor(tag, cls.from_yaml)
except AttributeError:
+
def f_y(constructor, node):
# type: (Any, Any) -> Any
return constructor.construct_yaml_object(node, cls)
yml.constructor.add_constructor(tag, f_y)
return cls
+
return yo_deco
+
########################################################################################
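# Usage sketch for yaml_object() / YAML.register_class() above: when a class defines no
# to_yaml/from_yaml, the fallback t_y/f_y routines dump an instance as a mapping of its
# attributes under the class tag. Class name, tag and attribute values are illustrative.
import sys
from ruamel.yaml import YAML, yaml_object

yml = YAML()

@yaml_object(yml)
class Monster(object):
    yaml_tag = u'!Monster'

    def __init__(self, name, hp):
        self.name = name
        self.hp = hp

yml.dump([Monster(u'Cave troll', 18)], sys.stdout)   # emits a '!Monster' tagged mapping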
@@ -627,7 +706,7 @@ def compose_all(stream, Loader=Loader):
def load(stream, Loader=None, version=None, preserve_quotes=None):
- # type: (StreamTextType, Any, VersionType, Any) -> Any
+ # type: (StreamTextType, Any, Union[None, VersionType], Any) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
@@ -637,13 +716,13 @@ def load(stream, Loader=None, version=None, preserve_quotes=None):
Loader = UnsafeLoader
loader = Loader(stream, version, preserve_quotes=preserve_quotes)
try:
- return loader._constructor.get_single_data() # type: ignore
+ return loader._constructor.get_single_data()
finally:
- loader._parser.dispose() # type: ignore
+ loader._parser.dispose()
def load_all(stream, Loader=None, version=None, preserve_quotes=None):
- # type: (StreamTextType, Any, VersionType, bool) -> Any
+ # type: (Union[None, StreamTextType], Any, Union[None, VersionType], Union[None, bool]) -> Any # NOQA
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
@@ -653,14 +732,14 @@ def load_all(stream, Loader=None, version=None, preserve_quotes=None):
Loader = UnsafeLoader
loader = Loader(stream, version, preserve_quotes=preserve_quotes)
try:
- while loader._constructor.check_data(): # type: ignore
- yield loader._constructor.get_data() # type: ignore
+ while loader._constructor.check_data():
+ yield loader._constructor.get_data()
finally:
- loader._parser.dispose() # type: ignore
+ loader._parser.dispose()
def safe_load(stream, version=None):
- # type: (StreamTextType, VersionType) -> Any
+ # type: (StreamTextType, Union[None, VersionType]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
@@ -670,7 +749,7 @@ def safe_load(stream, version=None):
def safe_load_all(stream, version=None):
- # type: (StreamTextType, VersionType) -> Any
+ # type: (StreamTextType, Union[None, VersionType]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
@@ -680,7 +759,7 @@ def safe_load_all(stream, version=None):
def round_trip_load(stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> Any
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> Any
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
@@ -690,7 +769,7 @@ def round_trip_load(stream, version=None, preserve_quotes=None):
def round_trip_load_all(stream, version=None, preserve_quotes=None):
- # type: (StreamTextType, VersionType, bool) -> Any
+ # type: (StreamTextType, Union[None, VersionType], Union[None, bool]) -> Any
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
@@ -699,10 +778,17 @@ def round_trip_load_all(stream, version=None, preserve_quotes=None):
return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes)
-def emit(events, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None):
- # type: (Any, StreamType, Any, bool, Union[int, None], int, bool, Any) -> Any
+def emit(
+ events,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+):
+ # type: (Any, Union[None, StreamType], Any, Union[None, bool], Union[int, None], Union[None, int], Union[None, bool], Any) -> Any # NOQA
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
@@ -711,8 +797,14 @@ def emit(events, stream=None, Dumper=Dumper,
if stream is None:
stream = StringIO()
getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break)
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ )
try:
for event in events:
dumper.emit(event)
@@ -721,7 +813,7 @@ def emit(events, stream=None, Dumper=Dumper,
dumper._emitter.dispose()
except AttributeError:
raise
- dumper.dispose() # cyaml
+ dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
@@ -729,12 +821,22 @@ def emit(events, stream=None, Dumper=Dumper,
enc = None if PY3 else 'utf-8'
-def serialize_all(nodes, stream=None, Dumper=Dumper,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None):
- # type: (Any, StreamType, Any, Any, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], VersionType, Any) -> Any # NOQA
+def serialize_all(
+ nodes,
+ stream=None,
+ Dumper=Dumper,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+):
+ # type: (Any, Union[None, StreamType], Any, Any, Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Union[None, VersionType], Any) -> Any # NOQA
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
@@ -746,10 +848,19 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
else:
stream = BytesIO()
getvalue = stream.getvalue
- dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, version=version, tags=tags,
- explicit_start=explicit_start, explicit_end=explicit_end)
+ dumper = Dumper(
+ stream,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ version=version,
+ tags=tags,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ )
try:
dumper._serializer.open()
for node in nodes:
@@ -760,13 +871,13 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
dumper._emitter.dispose()
except AttributeError:
raise
- dumper.dispose() # cyaml
+ dumper.dispose() # cyaml
if getvalue is not None:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
- # type: (Any, StreamType, Any, Any) -> Any
+ # type: (Any, Union[None, StreamType], Any, Any) -> Any
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
@@ -774,14 +885,27 @@ def serialize(node, stream=None, Dumper=Dumper, **kwds):
return serialize_all([node], stream, Dumper=Dumper, **kwds)
-def dump_all(documents, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> Union[None, str] # NOQA
+def dump_all(
+ documents,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Union[None, StreamType], Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Any, Any, Any, Any, Any) -> Union[None, str] # NOQA
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
@@ -795,15 +919,24 @@ def dump_all(documents, stream=None, Dumper=Dumper,
else:
stream = BytesIO()
getvalue = stream.getvalue
- dumper = Dumper(stream, default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical, indent=indent, width=width,
- allow_unicode=allow_unicode, line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end, version=version,
- tags=tags, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon,
- )
+ dumper = Dumper(
+ stream,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
try:
dumper._serializer.open()
for data in documents:
@@ -824,13 +957,25 @@ def dump_all(documents, stream=None, Dumper=Dumper,
return None
-def dump(data, stream=None, Dumper=Dumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None):
- # type: (Any, StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], VersionType, Any, Any) -> Union[None, str] # NOQA
+def dump(
+ data,
+ stream=None,
+ Dumper=Dumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+):
+ # type: (Any, Union[None, StreamType], Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Union[None, VersionType], Any, Any) -> Union[None, str] # NOQA
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
@@ -838,20 +983,28 @@ def dump(data, stream=None, Dumper=Dumper,
default_style ∈ None, '', '"', "'", '|', '>'
"""
- return dump_all([data], stream, Dumper=Dumper,
- default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode,
- line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags, block_seq_indent=block_seq_indent)
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ )
def safe_dump_all(documents, stream=None, **kwds):
- # type: (Any, StreamType, Any) -> Union[None, str]
+ # type: (Any, Union[None, StreamType], Any) -> Union[None, str]
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
@@ -861,7 +1014,7 @@ def safe_dump_all(documents, stream=None, **kwds):
def safe_dump(data, stream=None, **kwds):
- # type: (Any, StreamType, Any) -> Union[None, str]
+ # type: (Any, Union[None, StreamType], Any) -> Union[None, str]
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
@@ -870,33 +1023,57 @@ def safe_dump(data, stream=None, **kwds):
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
-def round_trip_dump(data, stream=None, Dumper=RoundTripDumper,
- default_style=None, default_flow_style=None,
- canonical=None, indent=None, width=None,
- allow_unicode=None, line_break=None,
- encoding=enc, explicit_start=None, explicit_end=None,
- version=None, tags=None, block_seq_indent=None,
- top_level_colon_align=None, prefix_colon=None):
- # type: (Any, StreamType, Any, Any, Any, bool, Union[None, int], Union[None, int], bool, Any, Any, Union[None, bool], Union[None, bool], VersionType, Any, Any, Any, Any) -> Union[None, str] # NOQA
+def round_trip_dump(
+ data,
+ stream=None,
+ Dumper=RoundTripDumper,
+ default_style=None,
+ default_flow_style=None,
+ canonical=None,
+ indent=None,
+ width=None,
+ allow_unicode=None,
+ line_break=None,
+ encoding=enc,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ block_seq_indent=None,
+ top_level_colon_align=None,
+ prefix_colon=None,
+):
+ # type: (Any, Union[None, StreamType], Any, Any, Any, Union[None, bool], Union[None, int], Union[None, int], Union[None, bool], Any, Any, Union[None, bool], Union[None, bool], Union[None, VersionType], Any, Any, Any, Any) -> Union[None, str] # NOQA
allow_unicode = True if allow_unicode is None else allow_unicode
- return dump_all([data], stream, Dumper=Dumper,
- default_style=default_style,
- default_flow_style=default_flow_style,
- canonical=canonical,
- indent=indent, width=width,
- allow_unicode=allow_unicode,
- line_break=line_break,
- encoding=encoding, explicit_start=explicit_start,
- explicit_end=explicit_end,
- version=version, tags=tags, block_seq_indent=block_seq_indent,
- top_level_colon_align=top_level_colon_align, prefix_colon=prefix_colon)
+ return dump_all(
+ [data],
+ stream,
+ Dumper=Dumper,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical,
+ indent=indent,
+ width=width,
+ allow_unicode=allow_unicode,
+ line_break=line_break,
+ encoding=encoding,
+ explicit_start=explicit_start,
+ explicit_end=explicit_end,
+ version=version,
+ tags=tags,
+ block_seq_indent=block_seq_indent,
+ top_level_colon_align=top_level_colon_align,
+ prefix_colon=prefix_colon,
+ )
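# Usage sketch for round_trip_load()/round_trip_dump(): comments, key order and layout
# survive a load-modify-dump cycle. The input document is illustrative.
import sys
import ruamel.yaml

yaml_str = u"""\
a: 1    # kept on round trip
b:
  - 2
  - 3
"""
data = ruamel.yaml.round_trip_load(yaml_str)
data['a'] = 4
ruamel.yaml.round_trip_dump(data, sys.stdout)   # comment and block structure preserved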
# Loader/Dumper are no longer composites, to get to the associated
# Resolver()/Representer(), etc., you need to instantiate the class
-def add_implicit_resolver(tag, regexp, first=None, Loader=None, Dumper=None,
- resolver=Resolver):
+
+def add_implicit_resolver(
+ tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver
+):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add an implicit scalar detector.
@@ -910,24 +1087,25 @@ def add_implicit_resolver(tag, regexp, first=None, Loader=None, Dumper=None,
if Loader:
if hasattr(Loader, 'add_implicit_resolver'):
Loader.add_implicit_resolver(tag, regexp, first)
- elif issubclass(Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader,
- RoundTripLoader)):
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_implicit_resolver'):
Dumper.add_implicit_resolver(tag, regexp, first)
- elif issubclass(Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper,
- RoundTripDumper)):
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
Resolver.add_implicit_resolver(tag, regexp, first)
else:
raise NotImplementedError
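# Sketch of add_implicit_resolver(), paired with add_constructor() defined further down.
# The tag name, the pattern and the constructed value are made up; registering on the
# old-style SafeLoader means safe_load() picks both up.
import re
import ruamel.yaml

pattern = re.compile(r'^\$\{\w+\}$')
ruamel.yaml.add_implicit_resolver(u'!var', pattern, Loader=ruamel.yaml.SafeLoader)

def var_constructor(constructor, node):
    # illustrative: just wrap the matched scalar
    return u'<resolved {}>'.format(node.value)

ruamel.yaml.add_constructor(u'!var', var_constructor, Loader=ruamel.yaml.SafeLoader)

print(ruamel.yaml.safe_load(u'home: ${HOME}'))   # {'home': '<resolved ${HOME}>'}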
# this code currently not tested
-def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None,
- resolver=Resolver):
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver):
# type: (Any, Any, Any, Any, Any, Any) -> None
"""
Add a path based resolver for the given tag.
@@ -941,16 +1119,18 @@ def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None,
if Loader:
if hasattr(Loader, 'add_path_resolver'):
Loader.add_path_resolver(tag, path, kind)
- elif issubclass(Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader,
- RoundTripLoader)):
+ elif issubclass(
+ Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader)
+ ):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
if Dumper:
if hasattr(Dumper, 'add_path_resolver'):
Dumper.add_path_resolver(tag, path, kind)
- elif issubclass(Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper,
- RoundTripDumper)):
+ elif issubclass(
+ Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper)
+ ):
Resolver.add_path_resolver(tag, path, kind)
else:
raise NotImplementedError
@@ -981,8 +1161,7 @@ def add_constructor(tag, object_constructor, Loader=None, constructor=Constructo
raise NotImplementedError
-def add_multi_constructor(tag_prefix, multi_constructor, Loader=None,
- constructor=Constructor):
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor):
# type: (Any, Any, Any, Any) -> None
"""
Add a multi-constructor for the given tag prefix.
@@ -1065,12 +1244,13 @@ class YAMLObjectMetaclass(type):
"""
The metaclass for YAMLObject.
"""
+
def __init__(cls, name, bases, kwds):
# type: (Any, Any, Any) -> None
super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore
- cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
+ cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore
class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
@@ -1078,6 +1258,7 @@ class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
An object that can dump itself to a YAML stream
and load itself from a YAML stream.
"""
+
__slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_constructor = Constructor
@@ -1100,5 +1281,6 @@ class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore
"""
Convert a Python object to a representation node.
"""
- return representer.represent_yaml_object(cls.yaml_tag, data, cls,
- flow_style=cls.yaml_flow_style)
+ return representer.represent_yaml_object(
+ cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style
+ )
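# Usage sketch for YAMLObject: subclassing registers from_yaml/to_yaml on the (unsafe)
# Constructor/Representer via the metaclass, so the old-style Loader/Dumper handle the
# tag. Class name, tag and values are illustrative.
import sys
import ruamel.yaml

class Monster(ruamel.yaml.YAMLObject):
    yaml_tag = u'!Monster'

    def __init__(self, name, hp):
        self.name = name
        self.hp = hp

m = ruamel.yaml.load(u'!Monster {name: Dragon, hp: 16}', Loader=ruamel.yaml.Loader)
print('{} {}'.format(m.name, m.hp))                        # Dragon 16
ruamel.yaml.dump([m], sys.stdout, Dumper=ruamel.yaml.Dumper)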
diff --git a/nodes.py b/nodes.py
index 781df94..df90be7 100644
--- a/nodes.py
+++ b/nodes.py
@@ -5,11 +5,11 @@ from __future__ import print_function
from .compat import string_types
if False: # MYPY
- from typing import Dict, Any, Text # NOQA
+ from typing import Dict, Any, Text # NOQA
class Node(object):
- __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor',
+ __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor'
def __init__(self, tag, value, start_mark, end_mark, comment=None):
# type: (Any, Any, Any, Any, Any) -> None
@@ -36,23 +36,22 @@ class Node(object):
# else:
# value = repr(value)
value = repr(value)
- return '%s(tag=%r, value=%s)' % (self.__class__.__name__,
- self.tag, value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
def dump(self, indent=0):
# type: (int) -> None
if isinstance(self.value, string_types):
- print('{}{}(tag={!r}, value={!r})'.format(
- ' ' * indent, self.__class__.__name__, self.tag, self.value))
+ print(
+ '{}{}(tag={!r}, value={!r})'.format(
+ ' ' * indent, self.__class__.__name__, self.tag, self.value
+ )
+ )
if self.comment:
- print(' {}comment: {})'.format(
- ' ' * indent, self.comment))
+ print(' {}comment: {})'.format(' ' * indent, self.comment))
return
- print('{}{}(tag={!r})'.format(
- ' ' * indent, self.__class__.__name__, self.tag))
+ print('{}{}(tag={!r})'.format(' ' * indent, self.__class__.__name__, self.tag))
if self.comment:
- print(' {}comment: {})'.format(
- ' ' * indent, self.comment))
+ print(' {}comment: {})'.format(' ' * indent, self.comment))
for v in self.value:
if isinstance(v, tuple):
for v1 in v:
@@ -72,21 +71,29 @@ class ScalarNode(Node):
| -> literal style
> -> folding style
"""
- __slots__ = 'style',
+
+ __slots__ = ('style',)
id = 'scalar'
- def __init__(self, tag, value, start_mark=None, end_mark=None, style=None,
- comment=None):
+ def __init__(self, tag, value, start_mark=None, end_mark=None, style=None, comment=None):
# type: (Any, Any, Any, Any, Any, Any) -> None
Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
self.style = style
class CollectionNode(Node):
- __slots__ = 'flow_style', 'anchor',
-
- def __init__(self, tag, value, start_mark=None, end_mark=None,
- flow_style=None, comment=None, anchor=None):
+ __slots__ = 'flow_style', 'anchor'
+
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any, Any) -> None
Node.__init__(self, tag, value, start_mark, end_mark, comment=comment)
self.flow_style = flow_style
@@ -99,12 +106,21 @@ class SequenceNode(CollectionNode):
class MappingNode(CollectionNode):
- __slots__ = ('merge', )
+ __slots__ = ('merge',)
id = 'mapping'
- def __init__(self, tag, value, start_mark=None, end_mark=None,
- flow_style=None, comment=None, anchor=None):
+ def __init__(
+ self,
+ tag,
+ value,
+ start_mark=None,
+ end_mark=None,
+ flow_style=None,
+ comment=None,
+ anchor=None,
+ ):
# type: (Any, Any, Any, Any, Any, Any, Any) -> None
- CollectionNode.__init__(self, tag, value, start_mark, end_mark,
- flow_style, comment, anchor)
+ CollectionNode.__init__(
+ self, tag, value, start_mark, end_mark, flow_style, comment, anchor
+ )
self.merge = None
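# Sketch of the node classes above: compose() builds the representation tree; a
# MappingNode's value is a list of (key_node, value_node) tuples, and serialize()
# turns a tree back into YAML text. The input is illustrative.
import ruamel.yaml

root = ruamel.yaml.compose(u'a: [1, 2]')
print('{} {}'.format(type(root).__name__, root.tag))       # MappingNode tag:yaml.org,2002:map
key_node, value_node = root.value[0]
print('{} {}'.format(key_node.value, type(value_node).__name__))   # a SequenceNode
root.dump()                                 # prints the tree via Node.dump() above
print(ruamel.yaml.serialize(root))          # re-emits 'a: [1, 2]'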
diff --git a/parser.py b/parser.py
index 83510bb..9c6041d 100644
--- a/parser.py
+++ b/parser.py
@@ -77,10 +77,10 @@ from __future__ import absolute_import
from ruamel.yaml.error import MarkedYAMLError
-from ruamel.yaml.tokens import * # NOQA
-from ruamel.yaml.events import * # NOQA
-from ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA
-from ruamel.yaml.compat import utf8, nprint # NOQA
+from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.events import * # NOQA
+from ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA
+from ruamel.yaml.compat import utf8, nprint # NOQA
if False: # MYPY
from typing import Any, Dict, Optional, List # NOQA
@@ -96,10 +96,7 @@ class Parser(object):
# Since writing a recursive-descendant parser is a straightforward task, we
# do not give many comments here.
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
+ DEFAULT_TAGS = {u'!': u'!', u'!!': u'tag:yaml.org,2002:'}
def __init__(self, loader):
# type: (Any) -> None
@@ -115,7 +112,7 @@ class Parser(object):
self.yaml_version = None
self.tag_handles = {} # type: Dict[Any, Any]
self.states = [] # type: List[Any]
- self.marks = [] # type: List[Any]
+ self.marks = [] # type: List[Any]
self.state = self.parse_stream_start # type: Any
def dispose(self):
@@ -178,8 +175,7 @@ class Parser(object):
# Parse the stream start.
token = self.scanner.get_token()
token.move_comment(self.scanner.peek_token())
- event = StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
+ event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
@@ -189,13 +185,11 @@ class Parser(object):
def parse_implicit_document_start(self):
# type: () -> Any
# Parse an implicit document.
- if not self.scanner.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
+ if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.scanner.peek_token()
start_mark = end_mark = token.start_mark
- event = DocumentStartEvent(start_mark, end_mark,
- explicit=False)
+ event = DocumentStartEvent(start_mark, end_mark, explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
@@ -217,22 +211,23 @@ class Parser(object):
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.scanner.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.scanner.peek_token().id,
- self.scanner.peek_token().start_mark)
+ raise ParserError(
+ None,
+ None,
+ "expected '<document start>', but found %r" % self.scanner.peek_token().id,
+ self.scanner.peek_token().start_mark,
+ )
token = self.scanner.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(
- start_mark, end_mark,
- explicit=True, version=version, tags=tags) # type: Any
+ start_mark, end_mark, explicit=True, version=version, tags=tags
+ ) # type: Any
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.scanner.get_token()
- event = StreamEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
+ event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment)
assert not self.states
assert not self.marks
self.state = None
@@ -261,8 +256,8 @@ class Parser(object):
def parse_document_content(self):
# type: () -> Any
if self.scanner.check_token(
- DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
+ DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken
+ ):
event = self.process_empty_scalar(self.scanner.peek_token().start_mark)
self.state = self.states.pop()
return event
@@ -278,22 +273,23 @@ class Parser(object):
if token.name == u'YAML':
if self.yaml_version is not None:
raise ParserError(
- None, None,
- "found duplicate YAML directive", token.start_mark)
+ None, None, 'found duplicate YAML directive', token.start_mark
+ )
major, minor = token.value
if major != 1:
raise ParserError(
- None, None,
- "found incompatible YAML document (version 1.* is "
- "required)",
- token.start_mark)
+ None,
+ None,
+ 'found incompatible YAML document (version 1.* is ' 'required)',
+ token.start_mark,
+ )
self.yaml_version = token.value
elif token.name == u'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % utf8(handle),
- token.start_mark)
+ raise ParserError(
+ None, None, 'duplicate tag handle %r' % utf8(handle), token.start_mark
+ )
self.tag_handles[handle] = prefix
if bool(self.tag_handles):
value = self.yaml_version, self.tag_handles.copy() # type: Any
@@ -372,9 +368,11 @@ class Parser(object):
if handle is not None:
if handle not in self.tag_handles:
raise ParserError(
- "while parsing a node", start_mark,
- "found undefined tag handle %r" % utf8(handle),
- tag_mark)
+ 'while parsing a node',
+ start_mark,
+ 'found undefined tag handle %r' % utf8(handle),
+ tag_mark,
+ )
tag = self.transform_tag(handle, suffix)
else:
tag = suffix
@@ -386,7 +384,7 @@ class Parser(object):
if start_mark is None:
start_mark = end_mark = self.scanner.peek_token().start_mark
event = None
- implicit = (tag is None or tag == u'!')
+ implicit = tag is None or tag == u'!'
if indentless_sequence and self.scanner.check_token(BlockEntryToken):
comment = None
pt = self.scanner.peek_token()
@@ -394,8 +392,9 @@ class Parser(object):
comment = [pt.comment[0], []]
pt.comment[0] = None
end_mark = self.scanner.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit, start_mark, end_mark,
- flow_style=False, comment=comment)
+ event = SequenceStartEvent(
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
self.state = self.parse_indentless_sequence_entry
return event
@@ -411,23 +410,34 @@ class Parser(object):
implicit = (False, False)
# nprint('se', token.value, token.comment)
event = ScalarEvent(
- anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style,
- comment=token.comment
+ anchor,
+ tag,
+ implicit,
+ token.value,
+ start_mark,
+ end_mark,
+ style=token.style,
+ comment=token.comment,
)
self.state = self.states.pop()
elif self.scanner.check_token(FlowSequenceStartToken):
pt = self.scanner.peek_token()
end_mark = pt.end_mark
event = SequenceStartEvent(
- anchor, tag, implicit,
- start_mark, end_mark, flow_style=True, comment=pt.comment)
+ anchor,
+ tag,
+ implicit,
+ start_mark,
+ end_mark,
+ flow_style=True,
+ comment=pt.comment,
+ )
self.state = self.parse_flow_sequence_first_entry
elif self.scanner.check_token(FlowMappingStartToken):
end_mark = self.scanner.peek_token().end_mark
event = MappingStartEvent(
- anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
+ anchor, tag, implicit, start_mark, end_mark, flow_style=True
+ )
self.state = self.parse_flow_mapping_first_key
elif block and self.scanner.check_token(BlockSequenceStartToken):
end_mark = self.scanner.peek_token().start_mark
@@ -440,23 +450,20 @@ class Parser(object):
comment = pt.split_comment()
# nprint('pt1', comment)
event = SequenceStartEvent(
- anchor, tag, implicit, start_mark, end_mark,
- flow_style=False,
- comment=comment,
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
)
self.state = self.parse_block_sequence_first_entry
elif block and self.scanner.check_token(BlockMappingStartToken):
end_mark = self.scanner.peek_token().start_mark
comment = self.scanner.peek_token().comment
event = MappingStartEvent(
- anchor, tag, implicit, start_mark, end_mark,
- flow_style=False, comment=comment)
+ anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment
+ )
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
+ event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
@@ -465,9 +472,11 @@ class Parser(object):
node = 'flow'
token = self.scanner.peek_token()
raise ParserError(
- "while parsing a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
+ 'while parsing a %s node' % node,
+ start_mark,
+ 'expected the node content, but found %r' % token.id,
+ token.start_mark,
+ )
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)*
@@ -495,12 +504,13 @@ class Parser(object):
if not self.scanner.check_token(BlockEndToken):
token = self.scanner.peek_token()
raise ParserError(
- "while parsing a block collection", self.marks[-1],
- "expected <block end>, but found %r" %
- token.id, token.start_mark)
+ 'while parsing a block collection',
+ self.marks[-1],
+ 'expected <block end>, but found %r' % token.id,
+ token.start_mark,
+ )
token = self.scanner.get_token() # BlockEndToken
- event = SequenceEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
self.state = self.states.pop()
self.marks.pop()
return event
@@ -517,16 +527,16 @@ class Parser(object):
if self.scanner.check_token(BlockEntryToken):
token = self.scanner.get_token()
token.move_comment(self.scanner.peek_token())
- if not self.scanner.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
+ if not self.scanner.check_token(
+ BlockEntryToken, KeyToken, ValueToken, BlockEndToken
+ ):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.scanner.peek_token()
- event = SequenceEndEvent(token.start_mark, token.start_mark,
- comment=token.comment)
+ event = SequenceEndEvent(token.start_mark, token.start_mark, comment=token.comment)
self.state = self.states.pop()
return event
@@ -556,13 +566,14 @@ class Parser(object):
if not self.scanner.check_token(BlockEndToken):
token = self.scanner.peek_token()
raise ParserError(
- "while parsing a block mapping", self.marks[-1],
- "expected <block end>, but found %r" % token.id,
- token.start_mark)
+ 'while parsing a block mapping',
+ self.marks[-1],
+ 'expected <block end>, but found %r' % token.id,
+ token.start_mark,
+ )
token = self.scanner.get_token()
token.move_comment(self.scanner.peek_token())
- event = MappingEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
self.state = self.states.pop()
self.marks.pop()
return event
@@ -620,23 +631,24 @@ class Parser(object):
else:
token = self.scanner.peek_token()
raise ParserError(
- "while parsing a flow sequence", self.marks[-1],
+ 'while parsing a flow sequence',
+ self.marks[-1],
"expected ',' or ']', but got %r" % token.id,
- token.start_mark)
+ token.start_mark,
+ )
if self.scanner.check_token(KeyToken):
token = self.scanner.peek_token()
- event = MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True) # type: Any
+ event = MappingStartEvent(
+ None, None, True, token.start_mark, token.end_mark, flow_style=True
+ ) # type: Any
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.scanner.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.scanner.get_token()
- event = SequenceEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
+ event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment)
self.state = self.states.pop()
self.marks.pop()
return event
@@ -644,8 +656,7 @@ class Parser(object):
def parse_flow_sequence_entry_mapping_key(self):
# type: () -> Any
token = self.scanner.get_token()
- if not self.scanner.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
+ if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
@@ -694,13 +705,16 @@ class Parser(object):
else:
token = self.scanner.peek_token()
raise ParserError(
- "while parsing a flow mapping", self.marks[-1],
+ 'while parsing a flow mapping',
+ self.marks[-1],
"expected ',' or '}', but got %r" % token.id,
- token.start_mark)
+ token.start_mark,
+ )
if self.scanner.check_token(KeyToken):
token = self.scanner.get_token()
- if not self.scanner.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
+ if not self.scanner.check_token(
+ ValueToken, FlowEntryToken, FlowMappingEndToken
+ ):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
@@ -710,8 +724,7 @@ class Parser(object):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.scanner.get_token()
- event = MappingEndEvent(token.start_mark, token.end_mark,
- comment=token.comment)
+ event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment)
self.state = self.states.pop()
self.marks.pop()
return event
@@ -738,16 +751,28 @@ class Parser(object):
def process_empty_scalar(self, mark, comment=None):
# type: (Any, Any) -> Any
- return ScalarEvent(None, None, (True, False), u'', mark, mark, comment=comment)
+ return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment)
class RoundTripParser(Parser):
"""roundtrip is a safe loader, that wants to see the unmangled tag"""
+
def transform_tag(self, handle, suffix):
# type: (Any, Any) -> Any
# return self.tag_handles[handle]+suffix
- if handle == '!!' and suffix in (u'null', u'bool', u'int', u'float', u'binary',
- u'timestamp', u'omap', u'pairs', u'set', u'str',
- u'seq', u'map'):
+ if handle == '!!' and suffix in (
+ u'null',
+ u'bool',
+ u'int',
+ u'float',
+ u'binary',
+ u'timestamp',
+ u'omap',
+ u'pairs',
+ u'set',
+ u'str',
+ u'seq',
+ u'map',
+ ):
return Parser.transform_tag(self, handle, suffix)
return handle + suffix
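# Sketch of the event stream Parser produces; the module-level parse() and emit()
# helpers drive the Parser/Emitter directly. The document is illustrative.
import ruamel.yaml

events = list(ruamel.yaml.parse(u'- a\n- b\n'))
for event in events:
    print(type(event).__name__)
# StreamStartEvent, DocumentStartEvent, SequenceStartEvent, ScalarEvent,
# ScalarEvent, SequenceEndEvent, DocumentEndEvent, StreamEndEvent
print(ruamel.yaml.emit(events))             # round-trips the events back to '- a\n- b\n'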
diff --git a/reader.py b/reader.py
index 96bb96b..4d0ddb4 100644
--- a/reader.py
+++ b/reader.py
@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import absolute_import
+
# This module contains abstractions for the input stream. You don't have to
 # look further, there is no pretty code.
#
@@ -34,7 +35,6 @@ __all__ = ['Reader', 'ReaderError']
class ReaderError(YAMLError):
-
def __init__(self, name, position, character, encoding, reason):
# type: (Any, Any, Any, Any, Any) -> None
self.name = name
@@ -46,15 +46,20 @@ class ReaderError(YAMLError):
def __str__(self):
# type: () -> str
if isinstance(self.character, binary_type):
- return "'%s' codec can't decode byte #x%02x: %s\n" \
- " in \"%s\", position %d" \
- % (self.encoding, ord(self.character), self.reason,
- self.name, self.position)
+ return "'%s' codec can't decode byte #x%02x: %s\n" ' in "%s", position %d' % (
+ self.encoding,
+ ord(self.character),
+ self.reason,
+ self.name,
+ self.position,
+ )
else:
- return "unacceptable character #x%04x: %s\n" \
- " in \"%s\", position %d" \
- % (self.character, self.reason,
- self.name, self.position)
+ return 'unacceptable character #x%04x: %s\n' ' in "%s", position %d' % (
+ self.character,
+ self.reason,
+ self.name,
+ self.position,
+ )
class Reader(object):
@@ -81,10 +86,10 @@ class Reader(object):
def reset_reader(self):
# type: () -> None
- self.name = None # type: Any
+ self.name = None # type: Any
self.stream_pointer = 0
self.eof = True
- self.buffer = u''
+ self.buffer = ""
self.pointer = 0
self.raw_buffer = None # type: Any
self.raw_decode = None
@@ -108,18 +113,18 @@ class Reader(object):
return
self._stream = None
if isinstance(val, text_type):
- self.name = "<unicode string>"
+ self.name = '<unicode string>'
self.check_printable(val)
- self.buffer = val + u'\0'
+ self.buffer = val + u'\0' # type: ignore
elif isinstance(val, binary_type):
- self.name = "<byte string>"
+ self.name = '<byte string>'
self.raw_buffer = val
self.determine_encoding()
else:
if not hasattr(val, 'read'):
raise YAMLStreamError('stream argument needs to have a read() method')
self._stream = val
- self.name = getattr(self.stream, 'name', "<file>")
+ self.name = getattr(self.stream, 'name', '<file>')
self.eof = False
self.raw_buffer = None
self.determine_encoding()
@@ -136,7 +141,7 @@ class Reader(object):
# type: (int) -> Any
if self.pointer + length >= len(self.buffer):
self.update(length)
- return self.buffer[self.pointer:self.pointer + length]
+ return self.buffer[self.pointer : self.pointer + length]
def forward(self, length=1):
# type: (int) -> None
@@ -146,8 +151,9 @@ class Reader(object):
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
- if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ if ch in u'\n\x85\u2028\u2029' or (
+ ch == u'\r' and self.buffer[self.pointer] != u'\n'
+ ):
self.line += 1
self.column = 0
elif ch != u'\uFEFF':
@@ -157,15 +163,15 @@ class Reader(object):
def get_mark(self):
# type: () -> Any
if self.stream is None:
- return StringMark(self.name, self.index, self.line, self.column,
- self.buffer, self.pointer)
+ return StringMark(
+ self.name, self.index, self.line, self.column, self.buffer, self.pointer
+ )
else:
return FileMark(self.name, self.index, self.line, self.column)
def determine_encoding(self):
# type: () -> None
- while not self.eof and (self.raw_buffer is None or
- len(self.raw_buffer) < 2):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
if isinstance(self.raw_buffer, binary_type):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
@@ -181,10 +187,7 @@ class Reader(object):
if UNICODE_SIZE == 2:
NON_PRINTABLE = RegExp(
- u'[^\x09\x0A\x0D\x20-\x7E\x85'
- u'\xA0-\uD7FF'
- u'\uE000-\uFFFD'
- u']'
+ u'[^\x09\x0A\x0D\x20-\x7E\x85' u'\xA0-\uD7FF' u'\uE000-\uFFFD' u']'
)
else:
NON_PRINTABLE = RegExp(
@@ -195,13 +198,13 @@ class Reader(object):
u']'
)
- _printable_ascii = ('\x09\x0A\x0D' + ''.join(map(chr, range(0x20, 0x7F)))).encode('ascii')
+ _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii')
@classmethod
- def _get_non_printable_ascii(cls, data):
+ def _get_non_printable_ascii(cls, data): # type: ignore
# type: (Text, bytes) -> Union[None, Tuple[int, Text]]
ascii_bytes = data.encode('ascii')
- non_printables = ascii_bytes.translate(None, cls._printable_ascii)
+ non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore
if not non_printables:
return None
non_printable = non_printables[:1]
@@ -219,7 +222,7 @@ class Reader(object):
def _get_non_printable(cls, data):
# type: (Text) -> Union[None, Tuple[int, Text]]
try:
- return cls._get_non_printable_ascii(data)
+ return cls._get_non_printable_ascii(data) # type: ignore
except UnicodeEncodeError:
return cls._get_non_printable_regex(data)
@@ -229,37 +232,38 @@ class Reader(object):
if non_printable_match is not None:
start, character = non_printable_match
position = self.index + (len(self.buffer) - self.pointer) + start
- raise ReaderError(self.name, position, ord(character),
- 'unicode', "special characters are not allowed")
+ raise ReaderError(
+ self.name,
+ position,
+ ord(character),
+ 'unicode',
+ 'special characters are not allowed',
+ )
def update(self, length):
# type: (int) -> None
if self.raw_buffer is None:
return
- self.buffer = self.buffer[self.pointer:]
+ self.buffer = self.buffer[self.pointer :]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
if self.raw_decode is not None:
try:
- data, converted = self.raw_decode(self.raw_buffer,
- 'strict', self.eof)
+ data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
except UnicodeDecodeError as exc:
if PY3:
character = self.raw_buffer[exc.start]
else:
character = exc.object[exc.start]
if self.stream is not None:
- position = self.stream_pointer - \
- len(self.raw_buffer) + exc.start
+ position = self.stream_pointer - len(self.raw_buffer) + exc.start
elif self.stream is not None:
- position = self.stream_pointer - \
- len(self.raw_buffer) + exc.start
+ position = self.stream_pointer - len(self.raw_buffer) + exc.start
else:
position = exc.start
- raise ReaderError(self.name, position, character,
- exc.encoding, exc.reason)
+ raise ReaderError(self.name, position, character, exc.encoding, exc.reason)
else:
data = self.raw_buffer
converted = len(data)
@@ -267,12 +271,12 @@ class Reader(object):
self.buffer += data
self.raw_buffer = self.raw_buffer[converted:]
if self.eof:
- self.buffer += u'\0'
+ self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=None):
- # type: (int) -> None
+ # type: (Union[None, int]) -> None
if size is None:
size = 4096 if PY3 else 1024
data = self.stream.read(size)
@@ -284,6 +288,7 @@ class Reader(object):
if not data:
self.eof = True
+
# try:
# import psyco
# psyco.bind(Reader)
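# Sketch of what Reader accepts: unicode text, byte strings (encoding taken from a BOM,
# UTF-8 otherwise) and file-like objects. The documents are illustrative.
import codecs
import ruamel.yaml

print(ruamel.yaml.safe_load(u'name: caf\xe9'))                    # unicode text
print(ruamel.yaml.safe_load(u'name: caf\xe9'.encode('utf-8')))    # UTF-8 byte string
print(ruamel.yaml.safe_load(codecs.BOM_UTF16_LE + u'n: 1'.encode('utf-16-le')))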
diff --git a/representer.py b/representer.py
index 43f2c06..7b821fe 100644
--- a/representer.py
+++ b/representer.py
@@ -3,11 +3,14 @@
from __future__ import print_function, absolute_import, division
-from ruamel.yaml.error import * # NOQA
-from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import * # NOQA
from ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3, ordereddict
-from ruamel.yaml.scalarstring import (PreservedScalarString, SingleQuotedScalarString,
- DoubleQuotedScalarString)
+from ruamel.yaml.scalarstring import (
+ PreservedScalarString,
+ SingleQuotedScalarString,
+ DoubleQuotedScalarString,
+)
from ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt
from ruamel.yaml.scalarfloat import ScalarFloat
from ruamel.yaml.timestamp import TimeStamp
@@ -15,6 +18,7 @@ from ruamel.yaml.timestamp import TimeStamp
import datetime
import sys
import types
+
if PY3:
import copyreg
import base64
@@ -22,11 +26,12 @@ else:
import copy_reg as copyreg # type: ignore
if False: # MYPY
- from typing import Dict, List, Any, Union, Text # NOQA
-
+ from typing import Dict, List, Any, Union, Text # NOQA
+# fmt: off
__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
'RepresenterError', 'RoundTripRepresenter']
+# fmt: on
class RepresenterError(YAMLError):
@@ -34,6 +39,7 @@ class RepresenterError(YAMLError):
if PY2:
+
def get_classobj_bases(cls):
# type: (Any) -> Any
bases = [cls]
@@ -44,7 +50,7 @@ if PY2:
class BaseRepresenter(object):
- yaml_representers = {} # type: Dict[Any, Any]
+ yaml_representers = {} # type: Dict[Any, Any]
yaml_multi_representers = {} # type: Dict[Any, Any]
def __init__(self, default_style=None, default_flow_style=None, dumper=None):
@@ -63,8 +69,8 @@ class BaseRepresenter(object):
# type: () -> Any
try:
if hasattr(self.dumper, 'typ'):
- return self.dumper.serializer # type: ignore
- return self.dumper._serializer # type: ignore
+ return self.dumper.serializer
+ return self.dumper._serializer
except AttributeError:
return self # cyaml
@@ -95,8 +101,7 @@ class BaseRepresenter(object):
if PY2:
# if type(data) is types.InstanceType:
if isinstance(data, types.InstanceType):
- data_types = get_classobj_bases(data.__class__) + \
- list(data_types)
+ data_types = get_classobj_bases(data.__class__) + list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
else:
@@ -206,8 +211,7 @@ class BaseRepresenter(object):
node_value = self.represent_data(item_value)
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
@@ -223,7 +227,6 @@ class BaseRepresenter(object):
class SafeRepresenter(BaseRepresenter):
-
def ignore_aliases(self, data):
# type: (Any) -> bool
# https://docs.python.org/3/reference/expressions.html#parenthesized-forms :
@@ -240,6 +243,7 @@ class SafeRepresenter(BaseRepresenter):
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
if PY3:
+
def represent_str(self, data):
# type: (Any) -> Any
return self.represent_scalar(u'tag:yaml.org,2002:str', data)
@@ -250,9 +254,10 @@ class SafeRepresenter(BaseRepresenter):
data = base64.encodebytes(data).decode('ascii')
else:
data = base64.encodestring(data).decode('ascii')
- return self.represent_scalar(u'tag:yaml.org,2002:binary', data,
- style='|')
+ return self.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|')
+
else:
+
def represent_str(self, data):
# type: (Any) -> Any
tag = None
@@ -277,7 +282,7 @@ class SafeRepresenter(BaseRepresenter):
def represent_bool(self, data):
# type: (Any) -> Any
try:
- value = self.dumper.boolean_representation[bool(data)] # type: ignore
+ value = self.dumper.boolean_representation[bool(data)]
except AttributeError:
if data:
value = u'true'
@@ -290,10 +295,10 @@ class SafeRepresenter(BaseRepresenter):
return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
if PY2:
+
def represent_long(self, data):
# type: (Any) -> Any
- return self.represent_scalar(u'tag:yaml.org,2002:int',
- text_type(data))
+ return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data))
inf_value = 1e300
while repr(inf_value) != repr(inf_value * inf_value):
@@ -330,12 +335,13 @@ class SafeRepresenter(BaseRepresenter):
# pairs = False
# break
# if not pairs:
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- # value = []
- # for item_key, item_value in data:
- # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- # [(item_key, item_value)]))
- # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+
+ # value = []
+ # for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ # return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
# type: (Any) -> Any
@@ -372,67 +378,54 @@ class SafeRepresenter(BaseRepresenter):
def represent_undefined(self, data):
# type: (Any) -> None
- raise RepresenterError("cannot represent an object: %s" % data)
+ raise RepresenterError('cannot represent an object: %s' % data)
-SafeRepresenter.add_representer(type(None),
- SafeRepresenter.represent_none)
+SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none)
-SafeRepresenter.add_representer(str,
- SafeRepresenter.represent_str)
+SafeRepresenter.add_representer(str, SafeRepresenter.represent_str)
if PY2:
- SafeRepresenter.add_representer(unicode,
- SafeRepresenter.represent_unicode)
+ SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode)
else:
- SafeRepresenter.add_representer(bytes,
- SafeRepresenter.represent_binary)
+ SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary)
-SafeRepresenter.add_representer(bool,
- SafeRepresenter.represent_bool)
+SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool)
-SafeRepresenter.add_representer(int,
- SafeRepresenter.represent_int)
+SafeRepresenter.add_representer(int, SafeRepresenter.represent_int)
if PY2:
- SafeRepresenter.add_representer(long,
- SafeRepresenter.represent_long)
+ SafeRepresenter.add_representer(long, SafeRepresenter.represent_long)
-SafeRepresenter.add_representer(float,
- SafeRepresenter.represent_float)
+SafeRepresenter.add_representer(float, SafeRepresenter.represent_float)
-SafeRepresenter.add_representer(list,
- SafeRepresenter.represent_list)
+SafeRepresenter.add_representer(list, SafeRepresenter.represent_list)
-SafeRepresenter.add_representer(tuple,
- SafeRepresenter.represent_list)
+SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list)
-SafeRepresenter.add_representer(dict,
- SafeRepresenter.represent_dict)
+SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict)
-SafeRepresenter.add_representer(set,
- SafeRepresenter.represent_set)
+SafeRepresenter.add_representer(set, SafeRepresenter.represent_set)
-SafeRepresenter.add_representer(ordereddict,
- SafeRepresenter.represent_ordereddict)
+SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict)
if sys.version_info >= (2, 7):
import collections
- SafeRepresenter.add_representer(collections.OrderedDict,
- SafeRepresenter.represent_ordereddict)
-SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
+ SafeRepresenter.add_representer(
+ collections.OrderedDict, SafeRepresenter.represent_ordereddict
+ )
+
+SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date)
-SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
+SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime)
-SafeRepresenter.add_representer(None,
- SafeRepresenter.represent_undefined)
+SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined)
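# Sketch of registering a custom representer, mirroring the module-level registrations
# above but on a YAML() instance; the complex-to-string formatting is an illustrative
# choice, not something this patch adds.
import sys
from ruamel.yaml import YAML

yaml = YAML()

def repr_complex(representer, data):
    # illustrative: dump complex numbers as their repr() string
    return representer.represent_scalar(u'tag:yaml.org,2002:str', u'%r' % (data,))

yaml.representer.add_representer(complex, repr_complex)
yaml.dump({'z': 3 + 4j}, sys.stdout)            # z: (3+4j)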
class Representer(SafeRepresenter):
if PY2:
+
def represent_str(self, data):
# type: (Any) -> Any
tag = None
@@ -486,15 +479,14 @@ class Representer(SafeRepresenter):
def represent_name(self, data):
# type: (Any) -> Any
name = u'%s.%s' % (data.__module__, data.__name__)
- return self.represent_scalar(u'tag:yaml.org,2002:python/name:' +
- name, u'')
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:' + name, "")
def represent_module(self, data):
# type: (Any) -> Any
- return self.represent_scalar(
- u'tag:yaml.org,2002:python/module:' + data.__name__, u'')
+ return self.represent_scalar(u'tag:yaml.org,2002:python/module:' + data.__name__, "")
if PY2:
+
def represent_instance(self, data):
# type: (Any) -> Any
# For instances of classic classes, we use __getinitargs__ and
@@ -526,17 +518,19 @@ class Representer(SafeRepresenter):
state = data.__dict__
if args is None and isinstance(state, dict):
return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:' + class_name, state)
+ u'tag:yaml.org,2002:python/object:' + class_name, state
+ )
if isinstance(state, dict) and not state:
return self.represent_sequence(
- u'tag:yaml.org,2002:python/object/new:' +
- class_name, args)
+ u'tag:yaml.org,2002:python/object/new:' + class_name, args
+ )
value = {}
if bool(args):
value['args'] = args
value['state'] = state # type: ignore
return self.represent_mapping(
- u'tag:yaml.org,2002:python/object/new:' + class_name, value)
+ u'tag:yaml.org,2002:python/object/new:' + class_name, value
+ )
def represent_object(self, data):
# type: (Any) -> Any
@@ -564,7 +558,7 @@ class Representer(SafeRepresenter):
elif hasattr(data, '__reduce__'):
reduce = data.__reduce__()
else:
- raise RepresenterError("cannot represent object: %r" % data)
+ raise RepresenterError('cannot represent object: %r' % data)
reduce = (list(reduce) + [None] * 5)[:5]
function, args, state, listitems, dictitems = reduce
args = list(args)
@@ -583,12 +577,11 @@ class Representer(SafeRepresenter):
tag = u'tag:yaml.org,2002:python/object/apply:'
newobj = False
function_name = u'%s.%s' % (function.__module__, function.__name__)
- if not args and not listitems and not dictitems \
- and isinstance(state, dict) and newobj:
+ if not args and not listitems and not dictitems and isinstance(state, dict) and newobj:
return self.represent_mapping(
- u'tag:yaml.org,2002:python/object:' + function_name, state)
- if not listitems and not dictitems \
- and isinstance(state, dict) and not state:
+ u'tag:yaml.org,2002:python/object:' + function_name, state
+ )
+ if not listitems and not dictitems and isinstance(state, dict) and not state:
return self.represent_sequence(tag + function_name, args)
value = {}
if args:
@@ -603,47 +596,43 @@ class Representer(SafeRepresenter):
if PY2:
- Representer.add_representer(str,
- Representer.represent_str)
+ Representer.add_representer(str, Representer.represent_str)
- Representer.add_representer(unicode,
- Representer.represent_unicode)
+ Representer.add_representer(unicode, Representer.represent_unicode)
- Representer.add_representer(long,
- Representer.represent_long)
+ Representer.add_representer(long, Representer.represent_long)
-Representer.add_representer(complex,
- Representer.represent_complex)
+Representer.add_representer(complex, Representer.represent_complex)
-Representer.add_representer(tuple,
- Representer.represent_tuple)
+Representer.add_representer(tuple, Representer.represent_tuple)
-Representer.add_representer(type,
- Representer.represent_name)
+Representer.add_representer(type, Representer.represent_name)
if PY2:
- Representer.add_representer(types.ClassType,
- Representer.represent_name)
+ Representer.add_representer(types.ClassType, Representer.represent_name)
-Representer.add_representer(types.FunctionType,
- Representer.represent_name)
+Representer.add_representer(types.FunctionType, Representer.represent_name)
-Representer.add_representer(types.BuiltinFunctionType,
- Representer.represent_name)
+Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name)
-Representer.add_representer(types.ModuleType,
- Representer.represent_module)
+Representer.add_representer(types.ModuleType, Representer.represent_module)
if PY2:
- Representer.add_multi_representer(types.InstanceType,
- Representer.represent_instance)
+ Representer.add_multi_representer(types.InstanceType, Representer.represent_instance)
-Representer.add_multi_representer(object,
- Representer.represent_object)
+Representer.add_multi_representer(object, Representer.represent_object)
-from ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSeq,
- CommentedKeySeq, CommentedSet, comment_attrib, merge_attrib, TaggedScalar) # NOQA
+from ruamel.yaml.comments import (
+ CommentedMap,
+ CommentedOrderedMap,
+ CommentedSeq,
+ CommentedKeySeq,
+ CommentedSet,
+ comment_attrib,
+ merge_attrib,
+ TaggedScalar,
+) # NOQA
class RoundTripRepresenter(SafeRepresenter):
@@ -654,16 +643,19 @@ class RoundTripRepresenter(SafeRepresenter):
# type: (Any, Any, Any) -> None
if not hasattr(dumper, 'typ') and default_flow_style is None:
default_flow_style = False
- SafeRepresenter.__init__(self, default_style=default_style,
- default_flow_style=default_flow_style,
- dumper=dumper)
+ SafeRepresenter.__init__(
+ self,
+ default_style=default_style,
+ default_flow_style=default_flow_style,
+ dumper=dumper,
+ )
def represent_none(self, data):
# type: (Any) -> Any
if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start:
# this will be open ended (although it is not yet)
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
- return self.represent_scalar(u'tag:yaml.org,2002:null', u'')
+ return self.represent_scalar(u'tag:yaml.org,2002:null', "")
def represent_preserved_scalarstring(self, data):
# type: (Any) -> Any
@@ -702,7 +694,7 @@ class RoundTripRepresenter(SafeRepresenter):
while pos > 0:
sl.insert(pos, '_')
pos -= underscore[0]
- s = ''.join(sl)
+ s = "".join(sl)
if underscore[1]:
s = '_' + s
if underscore[2]:
@@ -715,7 +707,7 @@ class RoundTripRepresenter(SafeRepresenter):
s = '{:0{}d}'.format(data, data._width)
else:
s = format(data, 'd')
- return self.insert_underscore('', s, data._underscore)
+ return self.insert_underscore("", s, data._underscore)
def represent_binary_int(self, data):
# type: (Any) -> Any
@@ -767,17 +759,18 @@ class RoundTripRepresenter(SafeRepresenter):
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
if data._exp is None and data._prec > 0 and data._prec == data._width - 1:
# no exponent, but trailing dot
- value = u'{}{:d}.'.format(data._m_sign if data._m_sign else u'', abs(int(data)))
+ value = u'{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data)))
elif data._exp is None:
# no exponent, "normal" dot
prec = data._prec
if prec < 1:
prec = 1
# print('dw2', data._width, prec)
- ms = data._m_sign if data._m_sign else u''
+ ms = data._m_sign if data._m_sign else ""
# -1 for the dot
- value = u'{}{:0{}.{}f}'.format(ms, abs(data), data._width - len(ms),
- data._width - prec - 1)
+ value = u'{}{:0{}.{}f}'.format(
+ ms, abs(data), data._width - len(ms), data._width - prec - 1
+ )
while len(value) < data._width:
value += u'0'
else:
@@ -795,32 +788,34 @@ class RoundTripRepresenter(SafeRepresenter):
m2 += u'0'
if data._m_sign and data > 0:
m1 = '+' + m1
- esgn = u'+' if data._e_sign else u''
- if data._prec < 0: # mantissa without dot
+ esgn = u'+' if data._e_sign else ""
+ if data._prec < 0: # mantissa without dot
if m2 != u'0':
e -= len(m2)
else:
- m2 = u''
+ m2 = ""
while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width:
m2 += u'0'
e -= 1
value = m1 + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
- elif data._prec == 0: # mantissa with trailing dot
+ elif data._prec == 0: # mantissa with trailing dot
e -= len(m2)
- value = m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(
- e, esgn, data._e_width)
+ value = (
+ m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+ )
else:
if data._m_lead0 > 0:
m2 = u'0' * (data._m_lead0 - 1) + m1 + m2
m1 = u'0'
- m2 = m2[:-data._m_lead0] # these should be zeros
+ m2 = m2[: -data._m_lead0] # these should be zeros
e += data._m_lead0
while len(m1) < data._prec:
m1 += m2[0]
m2 = m2[1:]
e -= 1
- value = m1 + u'.' + m2 + data._exp + u'{:{}0{}d}'.format(
- e, esgn, data._e_width)
+ value = (
+ m1 + u'.' + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width)
+ )
if value is None:
value = to_unicode(repr(data)).lower()
@@ -881,8 +876,7 @@ class RoundTripRepresenter(SafeRepresenter):
# type: (Any) -> Any
if isinstance(data, CommentedKeySeq):
self.alias_key = None
- return self.represent_sequence(u'tag:yaml.org,2002:seq', data,
- flow_style=True)
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True)
return SafeRepresenter.represent_key(self, data)
def represent_mapping(self, tag, mapping, flow_style=None):
@@ -938,8 +932,7 @@ class RoundTripRepresenter(SafeRepresenter):
node_value.comment = item_comment[2:]
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
@@ -955,8 +948,7 @@ class RoundTripRepresenter(SafeRepresenter):
else:
arg = self.represent_data(merge_list)
arg.flow_style = True
- value.insert(0,
- (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
+ value.insert(0, (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg))
return node
def represent_omap(self, tag, omap, flow_style=None):
@@ -1059,11 +1051,10 @@ class RoundTripRepresenter(SafeRepresenter):
if item_comment:
assert getattr(node_key, 'comment', None) is None
node_key.comment = item_comment[:2]
- node_key.style = node_value.style = "?"
+ node_key.style = node_value.style = '?'
if not (isinstance(node_key, ScalarNode) and not node_key.style):
best_style = False
- if not (isinstance(node_value, ScalarNode) and not
- node_value.style):
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
best_style = False
value.append((node_key, node_value))
best_style = best_style
@@ -1099,6 +1090,7 @@ class RoundTripRepresenter(SafeRepresenter):
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', to_unicode(value))
def represent_tagged_scalar(self, data):
+ # type: (Any) -> Any
try:
tag = data.tag.value
except AttributeError:
@@ -1106,64 +1098,51 @@ class RoundTripRepresenter(SafeRepresenter):
return self.represent_scalar(tag, data.value, style=data.style)
-RoundTripRepresenter.add_representer(type(None),
- RoundTripRepresenter.represent_none)
+RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none)
RoundTripRepresenter.add_representer(
- PreservedScalarString,
- RoundTripRepresenter.represent_preserved_scalarstring)
+ PreservedScalarString, RoundTripRepresenter.represent_preserved_scalarstring
+)
RoundTripRepresenter.add_representer(
- SingleQuotedScalarString,
- RoundTripRepresenter.represent_single_quoted_scalarstring)
+ SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring
+)
RoundTripRepresenter.add_representer(
- DoubleQuotedScalarString,
- RoundTripRepresenter.represent_double_quoted_scalarstring)
+ DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring
+)
-RoundTripRepresenter.add_representer(
- ScalarInt,
- RoundTripRepresenter.represent_scalar_int)
+RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int)
-RoundTripRepresenter.add_representer(
- BinaryInt,
- RoundTripRepresenter.represent_binary_int)
+RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int)
-RoundTripRepresenter.add_representer(
- OctalInt,
- RoundTripRepresenter.represent_octal_int)
+RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int)
-RoundTripRepresenter.add_representer(
- HexInt,
- RoundTripRepresenter.represent_hex_int)
+RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int)
-RoundTripRepresenter.add_representer(
- HexCapsInt,
- RoundTripRepresenter.represent_hex_caps_int)
+RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int)
-RoundTripRepresenter.add_representer(
- ScalarFloat,
- RoundTripRepresenter.represent_scalar_float)
+RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float)
-RoundTripRepresenter.add_representer(CommentedSeq,
- RoundTripRepresenter.represent_list)
+RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list)
-RoundTripRepresenter.add_representer(CommentedMap,
- RoundTripRepresenter.represent_dict)
+RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict)
-RoundTripRepresenter.add_representer(CommentedOrderedMap,
- RoundTripRepresenter.represent_ordereddict)
+RoundTripRepresenter.add_representer(
+ CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict
+)
if sys.version_info >= (2, 7):
import collections
- RoundTripRepresenter.add_representer(collections.OrderedDict,
- RoundTripRepresenter.represent_ordereddict)
-RoundTripRepresenter.add_representer(CommentedSet,
- RoundTripRepresenter.represent_set)
+ RoundTripRepresenter.add_representer(
+ collections.OrderedDict, RoundTripRepresenter.represent_ordereddict
+ )
+
+RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set)
-RoundTripRepresenter.add_representer(TaggedScalar,
- RoundTripRepresenter.represent_tagged_scalar)
+RoundTripRepresenter.add_representer(
+ TaggedScalar, RoundTripRepresenter.represent_tagged_scalar
+)
-RoundTripRepresenter.add_representer(TimeStamp,
- RoundTripRepresenter.represent_datetime)
+RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime)
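
The add_representer registrations reformatted above are the dispatch table that maps a Python type to the method producing its YAML node. For illustration, a minimal sketch of hooking a custom type into that mechanism through the public YAML() API; the Point class, represent_point function and !Point tag are made up for this example and are not part of the commit:

    import sys
    import ruamel.yaml

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def represent_point(representer, data):
        # emit the instance as a mapping carrying a custom tag
        return representer.represent_mapping(u'!Point', {'x': data.x, 'y': data.y})

    yaml = ruamel.yaml.YAML()   # round-trip mode, backed by RoundTripRepresenter
    yaml.representer.add_representer(Point, represent_point)
    yaml.dump({'p': Point(1, 2)}, sys.stdout)

Loading such a document back would additionally need a constructor registered for the !Point tag; only the dump side is sketched here.
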
diff --git a/resolver.py b/resolver.py
index 905bebe..6b8c56e 100644
--- a/resolver.py
+++ b/resolver.py
@@ -5,17 +5,18 @@ from __future__ import absolute_import
import re
if False: # MYPY
- from typing import Any, Dict, List, Union # NOQA
+ from typing import Any, Dict, List, Union, Text # NOQA
from ruamel.yaml.compat import VersionType # NOQA
from ruamel.yaml.compat import string_types, _DEFAULT_YAML_VERSION # NOQA
-from ruamel.yaml.error import * # NOQA
-from ruamel.yaml.nodes import * # NOQA
+from ruamel.yaml.error import * # NOQA
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA
from ruamel.yaml.util import RegExp # NOQA
__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver']
+# fmt: off
# resolvers consist of
# - a list of applicable versions
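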
# - a tag
@@ -95,6 +96,7 @@ implicit_resolvers = [
RegExp(u'^(?:!|&|\\*)$'),
list(u'!&*')),
]
+# fmt: on
class ResolverError(YAMLError):
@@ -108,7 +110,7 @@ class BaseResolver(object):
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {} # type: Dict[Any, Any]
- yaml_path_resolvers = {} # type: Dict[Any, Any]
+ yaml_path_resolvers = {} # type: Dict[Any, Any]
def __init__(self, loadumper=None):
# type: (Any, Any) -> None
@@ -133,26 +135,26 @@ class BaseResolver(object):
# type: (Any, Any, Any) -> None
if 'yaml_implicit_resolvers' not in cls.__dict__:
# deepcopy doesn't work here
- cls.yaml_implicit_resolvers = dict((k, cls.yaml_implicit_resolvers[k][:])
- for k in cls.yaml_implicit_resolvers)
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
if first is None:
first = [None]
for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append(
- (tag, regexp))
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
@classmethod
def add_implicit_resolver(cls, tag, regexp, first):
# type: (Any, Any, Any) -> None
if 'yaml_implicit_resolvers' not in cls.__dict__:
# deepcopy doesn't work here
- cls.yaml_implicit_resolvers = dict((k, cls.yaml_implicit_resolvers[k][:])
- for k in cls.yaml_implicit_resolvers)
+ cls.yaml_implicit_resolvers = dict(
+ (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers
+ )
if first is None:
first = [None]
for ch in first:
- cls.yaml_implicit_resolvers.setdefault(ch, []).append(
- (tag, regexp))
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
implicit_resolvers.append(([(1, 2), (1, 1)], tag, regexp, first))
# @classmethod
@@ -179,12 +181,12 @@ class BaseResolver(object):
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
- node_check, index_check = element # type: ignore
+ node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
- raise ResolverError("Invalid path element: %s" % element)
+ raise ResolverError('Invalid path element: %s' % element)
else:
node_check = None
index_check = element
@@ -194,13 +196,14 @@ class BaseResolver(object):
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
- elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
- and not isinstance(node_check, string_types) \
- and node_check is not None:
- raise ResolverError("Invalid node checker: %s" % node_check)
- if not isinstance(index_check, (string_types, int)) \
- and index_check is not None:
- raise ResolverError("Invalid index checker: %s" % index_check)
+ elif (
+ node_check not in [ScalarNode, SequenceNode, MappingNode]
+ and not isinstance(node_check, string_types)
+ and node_check is not None
+ ):
+ raise ResolverError('Invalid node checker: %s' % node_check)
+ if not isinstance(index_check, (string_types, int)) and index_check is not None:
+ raise ResolverError('Invalid index checker: %s' % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
@@ -208,9 +211,8 @@ class BaseResolver(object):
kind = SequenceNode
elif kind is dict:
kind = MappingNode
- elif kind not in [ScalarNode, SequenceNode, MappingNode] \
- and kind is not None:
- raise ResolverError("Invalid node kind: %s" % kind)
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None:
+ raise ResolverError('Invalid node kind: %s' % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
def descend_resolver(self, current_node, current_index):
@@ -224,13 +226,11 @@ class BaseResolver(object):
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
- if self.check_resolver_prefix(depth, path, kind,
- current_node, current_index):
+ if self.check_resolver_prefix(depth, path, kind, current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
- exact_paths[kind] = self.yaml_path_resolvers[path,
- kind]
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
@@ -247,8 +247,7 @@ class BaseResolver(object):
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
- def check_resolver_prefix(self, depth, path, kind,
- current_node, current_index):
+ def check_resolver_prefix(self, depth, path, kind, current_node, current_index):
# type: (int, Text, Any, Any, Any) -> bool
node_check, index_check = path[depth - 1]
if isinstance(node_check, string_types):
@@ -259,15 +258,14 @@ class BaseResolver(object):
return False
if index_check is True and current_index is not None:
return False
- if (index_check is False or index_check is None) \
- and current_index is None:
+ if (index_check is False or index_check is None) and current_index is None:
return False
if isinstance(index_check, string_types):
- if not (isinstance(current_index, ScalarNode) and
- index_check == current_index.value):
+ if not (
+ isinstance(current_index, ScalarNode) and index_check == current_index.value
+ ):
return False
- elif isinstance(index_check, int) and not isinstance(index_check,
- bool):
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return False
return True
@@ -275,8 +273,8 @@ class BaseResolver(object):
def resolve(self, kind, value, implicit):
# type: (Any, Any, Any) -> Any
if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ if value == "":
+ resolvers = self.yaml_implicit_resolvers.get("", [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
@@ -307,6 +305,7 @@ class Resolver(BaseResolver):
pass
+# fmt: off
Resolver.add_implicit_resolver_base(
u'tag:yaml.org,2002:bool',
RegExp(u'''^(?:yes|Yes|YES|no|No|NO
@@ -366,6 +365,7 @@ Resolver.add_implicit_resolver_base(
u'tag:yaml.org,2002:yaml',
RegExp(u'^(?:!|&|\\*)$'),
list(u'!&*'))
+# fmt: on
class VersionedResolver(BaseResolver):
@@ -377,7 +377,7 @@ class VersionedResolver(BaseResolver):
"""
def __init__(self, version=None, loader=None):
- # type: (VersionType, Any) -> None
+ # type: (Union[None, VersionType], Any) -> None
BaseResolver.__init__(self, loader)
self._loader_version = self.get_loader_version(version)
self._version_implicit_resolver = {} # type: Dict[Any, Any]
@@ -415,8 +415,8 @@ class VersionedResolver(BaseResolver):
def resolve(self, kind, value, implicit):
# type: (Any, Any, Any) -> Any
if kind is ScalarNode and implicit[0]:
- if value == u'':
- resolvers = self.versioned_resolver.get(u'', [])
+ if value == "":
+ resolvers = self.versioned_resolver.get("", [])
else:
resolvers = self.versioned_resolver.get(value[0], [])
resolvers += self.versioned_resolver.get(None, [])
@@ -444,9 +444,9 @@ class VersionedResolver(BaseResolver):
version = self.parser.yaml_version
except AttributeError:
if hasattr(self.loadumper, 'typ'):
- version = self.loadumper.version # type: ignore
+ version = self.loadumper.version
else:
- version = self.loadumper._serializer.use_version # type: ignore # dumping
+ version = self.loadumper._serializer.use_version # dumping
if version is None:
version = self._loader_version
if version is None:
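
The add_implicit_resolver classmethod touched above is what classifies untagged plain scalars: per possible first character it stores (tag, regexp) pairs that the resolver tries in turn. A hedged sketch of registering an extra implicit tag through that hook; the !unit tag and its pattern are invented, and a constructor/representer for the tag would still be needed before such scalars could actually be loaded or dumped:

    import re
    import ruamel.yaml

    yaml = ruamel.yaml.YAML()
    # hypothetical: classify plain scalars such as 12pt or 3em as !unit
    yaml.resolver.add_implicit_resolver(
        u'!unit', re.compile(u'^[0-9]+(?:pt|em)$'), list(u'0123456789')
    )
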
diff --git a/scalarfloat.py b/scalarfloat.py
index fca8be2..83e0125 100644
--- a/scalarfloat.py
+++ b/scalarfloat.py
@@ -8,21 +8,21 @@ from .compat import no_limit_int # NOQA
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
-__all__ = ["ScalarFloat", "ExponentialFloat", "ExponentialCapsFloat"]
+__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
class ScalarFloat(float):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
- width = kw.pop('width', None) # type: ignore
- prec = kw.pop('prec', None) # type: ignore
- m_sign = kw.pop('m_sign', None) # type: ignore
- m_lead0 = kw.pop('m_lead0', 0) # type: ignore
- exp = kw.pop('exp', None) # type: ignore
- e_width = kw.pop('e_width', None) # type: ignore
- e_sign = kw.pop('e_sign', None) # type: ignore
+ width = kw.pop('width', None) # type: ignore
+ prec = kw.pop('prec', None) # type: ignore
+ m_sign = kw.pop('m_sign', None) # type: ignore
+ m_lead0 = kw.pop('m_lead0', 0) # type: ignore
+ exp = kw.pop('exp', None) # type: ignore
+ e_width = kw.pop('e_width', None) # type: ignore
+ e_sign = kw.pop('e_sign', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
- v = float.__new__(cls, *args, **kw) # type: ignore
+ v = float.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._prec = prec
v._m_sign = m_sign
@@ -37,48 +37,58 @@ class ScalarFloat(float):
# type: (Any) -> Any
return float(self) + a
x = type(self)(self + a)
- x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
return float(self) // a
x = type(self)(self // a)
- x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
return float(self) * a
x = type(self)(self * a)
- x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
- x._prec = self._prec # check for others
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
+ x._prec = self._prec # check for others
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
return float(self) ** a
x = type(self)(self ** a)
- x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
return float(self) - a
x = type(self)(self - a)
- x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._width = self._width
+ x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA
return x
def dump(self, out=sys.stdout):
# type: (Any) -> Any
- print('ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
- self, self._width, self._prec, self._m_sign, self._m_lead0, # type: ignore
- self._exp, self._e_width, self._e_sign), file=out) # type: ignore
+ print(
+ 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}|{}, w:{}, s:{})'.format(
+ self,
+ self._width, # type: ignore
+ self._prec, # type: ignore
+ self._m_sign, # type: ignore
+ self._m_lead0, # type: ignore
+ self._exp, # type: ignore
+ self._e_width, # type: ignore
+ self._e_sign, # type: ignore
+ ),
+ file=out,
+ )
class ExponentialFloat(ScalarFloat):
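
ScalarFloat stores the textual details (_width, _prec, _m_sign, _exp, _e_width, _e_sign, _underscore) that the round-trip representer uses to rebuild the original notation, and the in-place operators above copy them onto their results. A small sketch of the expected effect (expected output, not verified here):

    import sys
    import ruamel.yaml

    yaml = ruamel.yaml.YAML()          # round-trip mode
    data = yaml.load(u"pi: 3.14000\n")
    print(type(data['pi']).__name__)   # ScalarFloat
    yaml.dump(data, sys.stdout)        # expected to keep the width: pi: 3.14000
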
diff --git a/scalarint.py b/scalarint.py
index d609236..2bd605d 100644
--- a/scalarint.py
+++ b/scalarint.py
@@ -5,7 +5,7 @@ from __future__ import print_function, absolute_import, division, unicode_litera
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
-__all__ = ["ScalarInt", "BinaryInt", "OctalInt", "HexInt", "HexCapsInt"]
+__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt']
from .compat import no_limit_int # NOQA
@@ -13,9 +13,9 @@ from .compat import no_limit_int # NOQA
class ScalarInt(no_limit_int):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
- width = kw.pop('width', None) # type: ignore
+ width = kw.pop('width', None) # type: ignore
underscore = kw.pop('underscore', None) # type: ignore
- v = no_limit_int.__new__(cls, *args, **kw) # type: ignore
+ v = no_limit_int.__new__(cls, *args, **kw) # type: ignore
v._width = width
v._underscore = underscore
return v
@@ -24,35 +24,45 @@ class ScalarInt(no_limit_int):
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
- x._underscore = self._underscore[:] if self._underscore is not None else None # type: ignore # NOQA
+ x._underscore = ( # type: ignore
+ self._underscore[:] if self._underscore is not None else None # type: ignore
+ ) # NOQA
return x
@@ -71,8 +81,10 @@ class OctalInt(ScalarInt):
# mixed casing of A-F is not supported; when loading, the first non-digit
# determines the case
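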
+
class HexInt(ScalarInt):
"""uses lower case (a-f)"""
+
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
@@ -80,6 +92,7 @@ class HexInt(ScalarInt):
class HexCapsInt(ScalarInt):
"""uses upper case (A-F)"""
+
def __new__(cls, value, width=None, underscore=None):
# type: (Any, Any, Any) -> Any
return ScalarInt.__new__(cls, value, width=width, underscore=underscore)
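
ScalarInt and the Binary/Octal/Hex subclasses keep the same kind of presentation state (_width, _underscore), and the reformatted in-place operators above copy it to their results so arithmetic does not lose the original notation. A sketch of the expected behaviour with an invented document:

    import sys
    import ruamel.yaml

    yaml = ruamel.yaml.YAML()    # round-trip mode
    data = yaml.load(u"flags: 0x00ff\n")
    data['flags'] += 1           # ScalarInt.__iadd__ copies _width/_underscore
    yaml.dump(data, sys.stdout)  # expected: flags: 0x0100
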
diff --git a/scalarstring.py b/scalarstring.py
index 571230f..4b1a317 100644
--- a/scalarstring.py
+++ b/scalarstring.py
@@ -7,8 +7,12 @@ from ruamel.yaml.compat import text_type
if False: # MYPY
from typing import Text, Any, Dict, List # NOQA
-__all__ = ["ScalarString", "PreservedScalarString", "SingleQuotedScalarString",
- "DoubleQuotedScalarString"]
+__all__ = [
+ 'ScalarString',
+ 'PreservedScalarString',
+ 'SingleQuotedScalarString',
+ 'DoubleQuotedScalarString',
+]
class ScalarString(text_type):
@@ -26,7 +30,7 @@ class ScalarString(text_type):
class PreservedScalarString(ScalarString):
__slots__ = ()
- style = "|"
+ style = '|'
def __new__(cls, value):
# type: (Text) -> Any
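
The classes exported from scalarstring.py are thin text_type wrappers whose only purpose is to pin a scalar style on output (literal block, single or double quoted). A minimal usage sketch with made-up sample data:

    import sys
    import ruamel.yaml
    from ruamel.yaml.scalarstring import (
        PreservedScalarString, SingleQuotedScalarString, DoubleQuotedScalarString)

    yaml = ruamel.yaml.YAML()
    yaml.dump({
        'block': PreservedScalarString(u'line one\nline two\n'),   # dumped as |
        'single': SingleQuotedScalarString(u'keep quotes'),
        'double': DoubleQuotedScalarString(u'keep "these" too'),
    }, sys.stdout)
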
diff --git a/scanner.py b/scanner.py
index 0aab137..92fbf52 100644
--- a/scanner.py
+++ b/scanner.py
@@ -31,7 +31,7 @@ from __future__ import print_function, absolute_import, division, unicode_litera
#
from ruamel.yaml.error import MarkedYAMLError
-from ruamel.yaml.tokens import * # NOQA
+from ruamel.yaml.tokens import * # NOQA
from ruamel.yaml.compat import utf8, unichr, PY3, check_anchorname_char, nprint # NOQA
if False: # MYPY
@@ -41,9 +41,9 @@ if False: # MYPY
__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError']
-_THE_END = u'\0\r\n\x85\u2028\u2029'
-_THE_END_SPACE_TAB = u'\0 \t\r\n\x85\u2028\u2029'
-_SPACE_TAB = u' \t'
+_THE_END = '\0\r\n\x85\u2028\u2029'
+_THE_END_SPACE_TAB = '\0 \t\r\n\x85\u2028\u2029'
+_SPACE_TAB = ' \t'
class ScannerError(MarkedYAMLError):
@@ -64,7 +64,6 @@ class SimpleKey(object):
class Scanner(object):
-
def __init__(self, loader=None):
# type: (Any) -> None
"""Initialize the scanner."""
@@ -141,8 +140,8 @@ class Scanner(object):
def reader(self):
# type: () -> Any
if hasattr(self.loader, 'typ'):
- self.loader.reader # type: ignore
- return self.loader._reader # type: ignore
+ self.loader.reader
+ return self.loader._reader
@property
def scanner_processing_version(self): # prefix until un-composited
@@ -219,19 +218,19 @@ class Scanner(object):
ch = self.reader.peek()
# Is it the end of stream?
- if ch == u'\0':
+ if ch == '\0':
return self.fetch_stream_end()
# Is it a directive?
- if ch == u'%' and self.check_directive():
+ if ch == '%' and self.check_directive():
return self.fetch_directive()
# Is it the document start?
- if ch == u'-' and self.check_document_start():
+ if ch == '-' and self.check_document_start():
return self.fetch_document_start()
# Is it the document end?
- if ch == u'.' and self.check_document_end():
+ if ch == '.' and self.check_document_end():
return self.fetch_document_end()
# TODO: support for BOM within a stream.
@@ -241,63 +240,63 @@ class Scanner(object):
# Note: the order of the following checks is NOT significant.
# Is it the flow sequence start indicator?
- if ch == u'[':
+ if ch == '[':
return self.fetch_flow_sequence_start()
# Is it the flow mapping start indicator?
- if ch == u'{':
+ if ch == '{':
return self.fetch_flow_mapping_start()
# Is it the flow sequence end indicator?
- if ch == u']':
+ if ch == ']':
return self.fetch_flow_sequence_end()
# Is it the flow mapping end indicator?
- if ch == u'}':
+ if ch == '}':
return self.fetch_flow_mapping_end()
# Is it the flow entry indicator?
- if ch == u',':
+ if ch == ',':
return self.fetch_flow_entry()
# Is it the block entry indicator?
- if ch == u'-' and self.check_block_entry():
+ if ch == '-' and self.check_block_entry():
return self.fetch_block_entry()
# Is it the key indicator?
- if ch == u'?' and self.check_key():
+ if ch == '?' and self.check_key():
return self.fetch_key()
# Is it the value indicator?
- if ch == u':' and self.check_value():
+ if ch == ':' and self.check_value():
return self.fetch_value()
# Is it an alias?
- if ch == u'*':
+ if ch == '*':
return self.fetch_alias()
# Is it an anchor?
- if ch == u'&':
+ if ch == '&':
return self.fetch_anchor()
# Is it a tag?
- if ch == u'!':
+ if ch == '!':
return self.fetch_tag()
# Is it a literal scalar?
- if ch == u'|' and not self.flow_level:
+ if ch == '|' and not self.flow_level:
return self.fetch_literal()
# Is it a folded scalar?
- if ch == u'>' and not self.flow_level:
+ if ch == '>' and not self.flow_level:
return self.fetch_folded()
# Is it a single quoted scalar?
- if ch == u'\'':
+ if ch == "'":
return self.fetch_single()
# Is it a double quoted scalar?
- if ch == u'\"':
+ if ch == '"':
return self.fetch_double()
# It must be a plain scalar then.
@@ -305,9 +304,12 @@ class Scanner(object):
return self.fetch_plain()
# No? It's an error. Let's produce a nice error message.
- raise ScannerError("while scanning for the next token", None,
- "found character %r that cannot start any token"
- % utf8(ch), self.reader.get_mark())
+ raise ScannerError(
+ 'while scanning for the next token',
+ None,
+ 'found character %r that cannot start any token' % utf8(ch),
+ self.reader.get_mark(),
+ )
# Simple keys treatment.
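
The reworked ScannerError at the end of fetch_more_tokens is what surfaces when a character that cannot start any token (for instance the reserved '@') appears where a node is expected. A quick way to provoke it through the public safe loader; the sample document is invented:

    import ruamel.yaml
    from ruamel.yaml.scanner import ScannerError

    yaml = ruamel.yaml.YAML(typ='safe')
    try:
        yaml.load(u"key: @value\n")
    except ScannerError as exc:
        print(exc)   # ... found character '@' that cannot start any token
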
@@ -337,12 +339,14 @@ class Scanner(object):
# height (may cause problems if indentation is broken though).
for level in list(self.possible_simple_keys):
key = self.possible_simple_keys[level]
- if key.line != self.reader.line \
- or self.reader.index - key.index > 1024:
+ if key.line != self.reader.line or self.reader.index - key.index > 1024:
if key.required:
raise ScannerError(
- "while scanning a simple key", key.mark,
- "could not find expected ':'", self.reader.get_mark())
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
del self.possible_simple_keys[level]
def save_possible_simple_key(self):
@@ -360,9 +364,13 @@ class Scanner(object):
self.remove_possible_simple_key()
token_number = self.tokens_taken + len(self.tokens)
key = SimpleKey(
- token_number, required,
- self.reader.index, self.reader.line, self.reader.column,
- self.reader.get_mark())
+ token_number,
+ required,
+ self.reader.index,
+ self.reader.line,
+ self.reader.column,
+ self.reader.get_mark(),
+ )
self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
@@ -373,8 +381,11 @@ class Scanner(object):
if key.required:
raise ScannerError(
- "while scanning a simple key", key.mark,
- "could not find expected ':'", self.reader.get_mark())
+ 'while scanning a simple key',
+ key.mark,
+ "could not find expected ':'",
+ self.reader.get_mark(),
+ )
del self.possible_simple_keys[self.flow_level]
@@ -423,8 +434,7 @@ class Scanner(object):
# Read the token.
mark = self.reader.get_mark()
# Add STREAM-START.
- self.tokens.append(StreamStartToken(mark, mark,
- encoding=self.reader.encoding))
+ self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding))
def fetch_stream_end(self):
# type: () -> None
@@ -539,9 +549,9 @@ class Scanner(object):
if not self.flow_level:
# Are we allowed to start a new entry?
if not self.allow_simple_key:
- raise ScannerError(None, None,
- "sequence entries are not allowed here",
- self.reader.get_mark())
+ raise ScannerError(
+ None, None, 'sequence entries are not allowed here', self.reader.get_mark()
+ )
# We may need to add BLOCK-SEQUENCE-START.
if self.add_indent(self.reader.column):
mark = self.reader.get_mark()
@@ -568,9 +578,9 @@ class Scanner(object):
# Are we allowed to start a key (not necessarily a simple one)?
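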
if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping keys are not allowed here",
- self.reader.get_mark())
+ raise ScannerError(
+ None, None, 'mapping keys are not allowed here', self.reader.get_mark()
+ )
# We may need to add BLOCK-MAPPING-START.
if self.add_indent(self.reader.column):
@@ -596,8 +606,9 @@ class Scanner(object):
# Add KEY.
key = self.possible_simple_keys[self.flow_level]
del self.possible_simple_keys[self.flow_level]
- self.tokens.insert(key.token_number - self.tokens_taken,
- KeyToken(key.mark, key.mark))
+ self.tokens.insert(
+ key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark)
+ )
# If this key starts a new block mapping, we need to add
# BLOCK-MAPPING-START.
@@ -605,7 +616,8 @@ class Scanner(object):
if self.add_indent(key.column):
self.tokens.insert(
key.token_number - self.tokens_taken,
- BlockMappingStartToken(key.mark, key.mark))
+ BlockMappingStartToken(key.mark, key.mark),
+ )
# There cannot be two simple keys one after another.
self.allow_simple_key = False
@@ -621,9 +633,12 @@ class Scanner(object):
# We are allowed to start a complex value if and only if
# we can start a simple key.
if not self.allow_simple_key:
- raise ScannerError(None, None,
- "mapping values are not allowed here",
- self.reader.get_mark())
+ raise ScannerError(
+ None,
+ None,
+ 'mapping values are not allowed here',
+ self.reader.get_mark(),
+ )
# If this value starts a new block mapping, we need to add
# BLOCK-MAPPING-START. It will be detected as an error later by
@@ -691,7 +706,7 @@ class Scanner(object):
def fetch_single(self):
# type: () -> None
- self.fetch_flow_scalar(style='\'')
+ self.fetch_flow_scalar(style="'")
def fetch_double(self):
# type: () -> None
@@ -731,8 +746,7 @@ class Scanner(object):
# type: () -> Any
# DOCUMENT-START: ^ '---' (' '|'\n')
if self.reader.column == 0:
- if self.reader.prefix(3) == u'---' \
- and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB:
return True
return None
@@ -740,8 +754,7 @@ class Scanner(object):
# type: () -> Any
# DOCUMENT-END: ^ '...' (' '|'\n')
if self.reader.column == 0:
- if self.reader.prefix(3) == u'...' \
- and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB:
return True
return None
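
check_document_start/check_document_end only trigger on '---' and '...' at column 0, which is what delimits documents in a multi-document stream. Seen from the API side, with invented sample documents and the output as expected:

    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')
    docs = list(yaml.load_all(u"---\na: 1\n...\n---\nb: 2\n"))
    print(docs)   # expected: [{'a': 1}, {'b': 2}]
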
@@ -786,11 +799,12 @@ class Scanner(object):
# independent.
ch = self.reader.peek()
if self.scanner_processing_version == (1, 1):
- return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' or \
- (self.reader.peek(1) not in _THE_END_SPACE_TAB and
- (ch == u'-' or (not self.flow_level and ch in u'?:')))
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or (
+ self.reader.peek(1) not in _THE_END_SPACE_TAB
+ and (ch == '-' or (not self.flow_level and ch in '?:'))
+ )
# YAML 1.2
- if ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`':
+ if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`':
# ################### ^ ???
return True
ch1 = self.reader.peek(1)
@@ -799,8 +813,9 @@ class Scanner(object):
if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB:
return True
- return (self.reader.peek(1) not in _THE_END_SPACE_TAB and
- (ch == u'-' or (not self.flow_level and ch in u'?:')))
+ return self.reader.peek(1) not in _THE_END_SPACE_TAB and (
+ ch == '-' or (not self.flow_level and ch in '?:')
+ )
# Scanners.
@@ -825,13 +840,13 @@ class Scanner(object):
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
- if self.reader.index == 0 and self.reader.peek() == u'\uFEFF':
+ if self.reader.index == 0 and self.reader.peek() == '\uFEFF':
self.reader.forward()
found = False
while not found:
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
- if self.reader.peek() == u'#':
+ if self.reader.peek() == '#':
while self.reader.peek() not in _THE_END:
self.reader.forward()
if self.scan_line_break():
@@ -848,10 +863,10 @@ class Scanner(object):
self.reader.forward()
name = self.scan_directive_name(start_mark)
value = None
- if name == u'YAML':
+ if name == 'YAML':
value = self.scan_yaml_directive_value(start_mark)
end_mark = self.reader.get_mark()
- elif name == u'TAG':
+ elif name == 'TAG':
value = self.scan_tag_directive_value(start_mark)
end_mark = self.reader.get_mark()
else:
@@ -866,58 +881,65 @@ class Scanner(object):
# See the specification for details.
length = 0
ch = self.reader.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-_:.':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.':
length += 1
ch = self.reader.peek(length)
if not length:
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
value = self.reader.prefix(length)
self.reader.forward(length)
ch = self.reader.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
return value
def scan_yaml_directive_value(self, start_mark):
# type: (Any) -> Any
# See the specification for details.
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
major = self.scan_yaml_directive_number(start_mark)
if self.reader.peek() != '.':
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit or '.', but found %r"
- % utf8(self.reader.peek()),
- self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ "expected a digit or '.', but found %r" % utf8(self.reader.peek()),
+ self.reader.get_mark(),
+ )
self.reader.forward()
minor = self.scan_yaml_directive_number(start_mark)
- if self.reader.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ if self.reader.peek() not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit or ' ', but found %r"
- % utf8(self.reader.peek()),
- self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ "expected a digit or ' ', but found %r" % utf8(self.reader.peek()),
+ self.reader.get_mark(),
+ )
return (major, minor)
def scan_yaml_directive_number(self, start_mark):
# type: (Any) -> Any
# See the specification for details.
ch = self.reader.peek()
- if not (u'0' <= ch <= u'9'):
+ if not ('0' <= ch <= '9'):
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a digit, but found %r" % utf8(ch),
- self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ 'expected a digit, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
length = 0
- while u'0' <= self.reader.peek(length) <= u'9':
+ while '0' <= self.reader.peek(length) <= '9':
length += 1
value = int(self.reader.prefix(length))
self.reader.forward(length)
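
The %YAML directive scanned here is handed to the versioned resolver and changes how untagged plain scalars are typed. A sketch of the visible effect under the default round-trip settings (expected output, Python 3 repr):

    import ruamel.yaml

    yaml = ruamel.yaml.YAML()   # round-trip mode, versioned resolution
    print(repr(yaml.load(u"switch: yes\n")['switch']))                  # 'yes' (1.2: a string)
    print(repr(yaml.load(u"%YAML 1.1\n---\nswitch: yes\n")['switch']))  # True  (1.1: a bool)
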
@@ -926,10 +948,10 @@ class Scanner(object):
def scan_tag_directive_value(self, start_mark):
# type: (Any) -> Any
# See the specification for details.
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
handle = self.scan_tag_directive_handle(start_mark)
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
prefix = self.scan_tag_directive_prefix(start_mark)
return (handle, prefix)
@@ -939,10 +961,13 @@ class Scanner(object):
# See the specification for details.
value = self.scan_tag_handle('directive', start_mark)
ch = self.reader.peek()
- if ch != u' ':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.reader.get_mark())
+ if ch != ' ':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
return value
def scan_tag_directive_prefix(self, start_mark):
@@ -950,26 +975,31 @@ class Scanner(object):
# See the specification for details.
value = self.scan_tag_uri('directive', start_mark)
ch = self.reader.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a directive", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.reader.get_mark())
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a directive',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
return value
def scan_directive_ignored_line(self, start_mark):
# type: (Any) -> None
# See the specification for details.
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
- if self.reader.peek() == u'#':
+ if self.reader.peek() == '#':
while self.reader.peek() not in _THE_END:
self.reader.forward()
ch = self.reader.peek()
if ch not in _THE_END:
raise ScannerError(
- "while scanning a directive", start_mark,
- "expected a comment or a line break, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning a directive',
+ start_mark,
+ 'expected a comment or a line break, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
self.scan_line_break()
def scan_anchor(self, TokenClass):
@@ -984,7 +1014,7 @@ class Scanner(object):
# Therefore we restrict aliases to numbers and ASCII letters.
start_mark = self.reader.get_mark()
indicator = self.reader.peek()
- if indicator == u'*':
+ if indicator == '*':
name = 'alias'
else:
name = 'anchor'
@@ -998,19 +1028,23 @@ class Scanner(object):
ch = self.reader.peek(length)
if not length:
raise ScannerError(
- "while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning an %s' % name,
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
value = self.reader.prefix(length)
self.reader.forward(length)
# ch1 = ch
# ch = self.reader.peek() # no need to peek, ch is already set
# assert ch1 == ch
- if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`':
raise ScannerError(
- "while scanning an %s" % name, start_mark,
- "expected alphabetic or numeric character, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning an %s' % name,
+ start_mark,
+ 'expected alphabetic or numeric character, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
end_mark = self.reader.get_mark()
return TokenClass(value, start_mark, end_mark)
@@ -1019,41 +1053,46 @@ class Scanner(object):
# See the specification for details.
start_mark = self.reader.get_mark()
ch = self.reader.peek(1)
- if ch == u'<':
+ if ch == '<':
handle = None
self.reader.forward(2)
suffix = self.scan_tag_uri('tag', start_mark)
- if self.reader.peek() != u'>':
+ if self.reader.peek() != '>':
raise ScannerError(
- "while parsing a tag", start_mark,
+ 'while parsing a tag',
+ start_mark,
"expected '>', but found %r" % utf8(self.reader.peek()),
- self.reader.get_mark())
+ self.reader.get_mark(),
+ )
self.reader.forward()
elif ch in _THE_END_SPACE_TAB:
handle = None
- suffix = u'!'
+ suffix = '!'
self.reader.forward()
else:
length = 1
use_handle = False
- while ch not in u'\0 \r\n\x85\u2028\u2029':
- if ch == u'!':
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
use_handle = True
break
length += 1
ch = self.reader.peek(length)
- handle = u'!'
+ handle = '!'
if use_handle:
handle = self.scan_tag_handle('tag', start_mark)
else:
- handle = u'!'
+ handle = '!'
self.reader.forward()
suffix = self.scan_tag_uri('tag', start_mark)
ch = self.reader.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
- raise ScannerError("while scanning a tag", start_mark,
- "expected ' ', but found %r" % utf8(ch),
- self.reader.get_mark())
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError(
+ 'while scanning a tag',
+ start_mark,
+ "expected ' ', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
value = (handle, suffix)
end_mark = self.reader.get_mark()
return TagToken(value, start_mark, end_mark)
@@ -1078,11 +1117,13 @@ class Scanner(object):
min_indent = self.indent + 1
if increment is None:
# no increment and top level, min_indent could be 0
- if min_indent < 1 and \
- (style not in '|>' or (
- self.scanner_processing_version == (1, 1)) and
- getattr(self.loader,
- 'top_level_block_style_scalar_no_indent_error_1_1', False)):
+ if min_indent < 1 and (
+ style not in '|>'
+ or (self.scanner_processing_version == (1, 1))
+ and getattr(
+ self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False
+ )
+ ):
min_indent = 1
breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
indent = max(min_indent, max_indent)
@@ -1091,12 +1132,12 @@ class Scanner(object):
min_indent = 1
indent = min_indent + increment - 1
breaks, end_mark = self.scan_block_scalar_breaks(indent)
- line_break = u''
+ line_break = ""
# Scan the inner part of the block scalar.
- while self.reader.column == indent and self.reader.peek() != u'\0':
+ while self.reader.column == indent and self.reader.peek() != '\0':
chunks.extend(breaks)
- leading_non_space = self.reader.peek() not in u' \t'
+ leading_non_space = self.reader.peek() not in ' \t'
length = 0
while self.reader.peek(length) not in _THE_END:
length += 1
@@ -1109,16 +1150,20 @@ class Scanner(object):
# end of document/start_new_document
if self.check_document_start() or self.check_document_end():
break
- if self.reader.column == indent and self.reader.peek() != u'\0':
+ if self.reader.column == indent and self.reader.peek() != '\0':
# Unfortunately, folding rules are ambiguous.
#
# This is the folding according to the specification:
- if folded and line_break == u'\n' \
- and leading_non_space and self.reader.peek() not in u' \t':
+ if (
+ folded
+ and line_break == '\n'
+ and leading_non_space
+ and self.reader.peek() not in ' \t'
+ ):
if not breaks:
- chunks.append(u' ')
+ chunks.append(' ')
else:
chunks.append(line_break)
@@ -1147,7 +1192,7 @@ class Scanner(object):
trailing.extend(breaks)
# We are done.
- token = ScalarToken(u''.join(chunks), False, start_mark, end_mark, style)
+ token = ScalarToken("".join(chunks), False, start_mark, end_mark, style)
if len(trailing) > 0:
# print('trailing 1', trailing) # XXXXX
# Eat whitespaces and comments until we reach the next token.
@@ -1159,8 +1204,7 @@ class Scanner(object):
# Keep track of the trailing whitespace and following comments
# as a comment token, if isn't all included in the actual value.
comment_end_mark = self.reader.get_mark()
- comment = CommentToken(''.join(trailing), end_mark,
- comment_end_mark)
+ comment = CommentToken("".join(trailing), end_mark, comment_end_mark)
token.add_post_comment(comment)
return token
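
This block-scalar machinery (header, indentation, breaks) implements the | and > styles, with the chomping indicator deciding the fate of trailing newlines. Expected behaviour, sketched with an invented document:

    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')
    doc = u"keep: |+\n  a\n\nclip: |\n  a\n\nstrip: |-\n  a\n"
    data = yaml.load(doc)
    print(repr(data['keep']), repr(data['clip']), repr(data['strip']))
    # expected: 'a\n\n' 'a\n' 'a'
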
@@ -1170,59 +1214,66 @@ class Scanner(object):
chomping = None
increment = None
ch = self.reader.peek()
- if ch in u'+-':
+ if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.reader.forward()
ch = self.reader.peek()
- if ch in u'0123456789':
+ if ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, "
- "but found 0", self.reader.get_mark())
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
self.reader.forward()
- elif ch in u'0123456789':
+ elif ch in '0123456789':
increment = int(ch)
if increment == 0:
raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected indentation indicator in the range 1-9, "
- "but found 0",
- self.reader.get_mark())
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected indentation indicator in the range 1-9, ' 'but found 0',
+ self.reader.get_mark(),
+ )
self.reader.forward()
ch = self.reader.peek()
- if ch in u'+-':
+ if ch in '+-':
if ch == '+':
chomping = True
else:
chomping = False
self.reader.forward()
ch = self.reader.peek()
- if ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch not in '\0 \r\n\x85\u2028\u2029':
raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected chomping or indentation indicators, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected chomping or indentation indicators, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# type: (Any) -> Any
# See the specification for details.
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
- if self.reader.peek() == u'#':
+ if self.reader.peek() == '#':
while self.reader.peek() not in _THE_END:
self.reader.forward()
ch = self.reader.peek()
if ch not in _THE_END:
raise ScannerError(
- "while scanning a block scalar", start_mark,
- "expected a comment or a line break, but found %r"
- % utf8(ch), self.reader.get_mark())
+ 'while scanning a block scalar',
+ start_mark,
+ 'expected a comment or a line break, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
self.scan_line_break()
def scan_block_scalar_indentation(self):
@@ -1231,8 +1282,8 @@ class Scanner(object):
chunks = []
max_indent = 0
end_mark = self.reader.get_mark()
- while self.reader.peek() in u' \r\n\x85\u2028\u2029':
- if self.reader.peek() != u' ':
+ while self.reader.peek() in ' \r\n\x85\u2028\u2029':
+ if self.reader.peek() != ' ':
chunks.append(self.scan_line_break())
end_mark = self.reader.get_mark()
else:
@@ -1246,12 +1297,12 @@ class Scanner(object):
# See the specification for details.
chunks = []
end_mark = self.reader.get_mark()
- while self.reader.column < indent and self.reader.peek() == u' ':
+ while self.reader.column < indent and self.reader.peek() == ' ':
self.reader.forward()
- while self.reader.peek() in u'\r\n\x85\u2028\u2029':
+ while self.reader.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.reader.get_mark()
- while self.reader.column < indent and self.reader.peek() == u' ':
+ while self.reader.column < indent and self.reader.peek() == ' ':
self.reader.forward()
return chunks, end_mark
@@ -1277,35 +1328,30 @@ class Scanner(object):
chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
self.reader.forward()
end_mark = self.reader.get_mark()
- return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
- style)
+ return ScalarToken("".join(chunks), False, start_mark, end_mark, style)
ESCAPE_REPLACEMENTS = {
- u'0': u'\0',
- u'a': u'\x07',
- u'b': u'\x08',
- u't': u'\x09',
- u'\t': u'\x09',
- u'n': u'\x0A',
- u'v': u'\x0B',
- u'f': u'\x0C',
- u'r': u'\x0D',
- u'e': u'\x1B',
- u' ': u'\x20',
- u'\"': u'\"',
- u'/': u'/', # as per http://www.json.org/
- u'\\': u'\\',
- u'N': u'\x85',
- u'_': u'\xA0',
- u'L': u'\u2028',
- u'P': u'\u2029',
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '"': '"',
+ '/': '/', # as per http://www.json.org/
+ '\\': '\\',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
}
- ESCAPE_CODES = {
- u'x': 2,
- u'u': 4,
- u'U': 8,
- }
+ ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8}
def scan_flow_scalar_non_spaces(self, double, start_mark):
# type: (Any, Any) -> Any
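
ESCAPE_REPLACEMENTS and ESCAPE_CODES drive backslash handling, which only applies inside double-quoted scalars. A quick illustration with an invented document (expected Python 3 output):

    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')
    print(repr(yaml.load(u'msg: "col1\\tcol2 \\u263A"')))
    # expected: {'msg': 'col1\tcol2 ☺'}
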
@@ -1313,19 +1359,19 @@ class Scanner(object):
chunks = [] # type: List[Any]
while True:
length = 0
- while self.reader.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ while self.reader.peek(length) not in '\'"\\\0 \t\r\n\x85\u2028\u2029':
length += 1
if length != 0:
chunks.append(self.reader.prefix(length))
self.reader.forward(length)
ch = self.reader.peek()
- if not double and ch == u'\'' and self.reader.peek(1) == u'\'':
- chunks.append(u'\'')
+ if not double and ch == "'" and self.reader.peek(1) == "'":
+ chunks.append("'")
self.reader.forward(2)
- elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ elif (double and ch == "'") or (not double and ch in '"\\'):
chunks.append(ch)
self.reader.forward()
- elif double and ch == u'\\':
+ elif double and ch == '\\':
self.reader.forward()
ch = self.reader.peek()
if ch in self.ESCAPE_REPLACEMENTS:
@@ -1335,25 +1381,27 @@ class Scanner(object):
length = self.ESCAPE_CODES[ch]
self.reader.forward()
for k in range(length):
- if self.reader.peek(k) not in u'0123456789ABCDEFabcdef':
+ if self.reader.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError(
- "while scanning a double-quoted scalar",
+ 'while scanning a double-quoted scalar',
start_mark,
- "expected escape sequence of %d hexdecimal "
- "numbers, but found %r" %
- (length, utf8(self.reader.peek(k))), self.reader.get_mark())
+ 'expected escape sequence of %d hexdecimal '
+ 'numbers, but found %r' % (length, utf8(self.reader.peek(k))),
+ self.reader.get_mark(),
+ )
code = int(self.reader.prefix(length), 16)
chunks.append(unichr(code))
self.reader.forward(length)
- elif ch in u'\r\n\x85\u2028\u2029':
+ elif ch in '\r\n\x85\u2028\u2029':
self.scan_line_break()
- chunks.extend(self.scan_flow_scalar_breaks(
- double, start_mark))
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
else:
raise ScannerError(
- "while scanning a double-quoted scalar", start_mark,
- "found unknown escape character %r" % utf8(ch),
- self.reader.get_mark())
+ 'while scanning a double-quoted scalar',
+ start_mark,
+ 'found unknown escape character %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
else:
return chunks
@@ -1362,22 +1410,25 @@ class Scanner(object):
# See the specification for details.
chunks = []
length = 0
- while self.reader.peek(length) in u' \t':
+ while self.reader.peek(length) in ' \t':
length += 1
whitespaces = self.reader.prefix(length)
self.reader.forward(length)
ch = self.reader.peek()
- if ch == u'\0':
+ if ch == '\0':
raise ScannerError(
- "while scanning a quoted scalar", start_mark,
- "found unexpected end of stream", self.reader.get_mark())
- elif ch in u'\r\n\x85\u2028\u2029':
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected end of stream',
+ self.reader.get_mark(),
+ )
+ elif ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
breaks = self.scan_flow_scalar_breaks(double, start_mark)
- if line_break != u'\n':
+ if line_break != '\n':
chunks.append(line_break)
elif not breaks:
- chunks.append(u' ')
+ chunks.append(' ')
chunks.extend(breaks)
else:
chunks.append(whitespaces)
@@ -1391,15 +1442,18 @@ class Scanner(object):
# Instead of checking indentation, we check for document
# separators.
prefix = self.reader.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.reader.peek(3) in _THE_END_SPACE_TAB:
- raise ScannerError("while scanning a quoted scalar",
- start_mark,
- "found unexpected document separator",
- self.reader.get_mark())
- while self.reader.peek() in u' \t':
+ if (prefix == '---' or prefix == '...') and self.reader.peek(
+ 3
+ ) in _THE_END_SPACE_TAB:
+ raise ScannerError(
+ 'while scanning a quoted scalar',
+ start_mark,
+ 'found unexpected document separator',
+ self.reader.get_mark(),
+ )
+ while self.reader.peek() in ' \t':
self.reader.forward()
- if self.reader.peek() in u'\r\n\x85\u2028\u2029':
+ if self.reader.peek() in '\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
else:
return chunks
@@ -1422,31 +1476,41 @@ class Scanner(object):
spaces = [] # type: List[Any]
while True:
length = 0
- if self.reader.peek() == u'#':
+ if self.reader.peek() == '#':
break
while True:
ch = self.reader.peek(length)
- if (ch == u':' and
- self.reader.peek(length + 1) not in _THE_END_SPACE_TAB):
+ if ch == ':' and self.reader.peek(length + 1) not in _THE_END_SPACE_TAB:
pass
- elif (ch == u'?' and self.scanner_processing_version != (1, 1)):
+ elif ch == '?' and self.scanner_processing_version != (1, 1):
pass
- elif (ch in _THE_END_SPACE_TAB or
- (not self.flow_level and ch == u':' and
- self.reader.peek(length + 1) in _THE_END_SPACE_TAB) or
- (self.flow_level and ch in u',:?[]{}')):
+ elif (
+ ch in _THE_END_SPACE_TAB
+ or (
+ not self.flow_level
+ and ch == ':'
+ and self.reader.peek(length + 1) in _THE_END_SPACE_TAB
+ )
+ or (self.flow_level and ch in ',:?[]{}')
+ ):
break
length += 1
# It's not clear what we should do with ':' in the flow context.
- if (self.flow_level and ch == u':' and
- self.reader.peek(length + 1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
+ if (
+ self.flow_level
+ and ch == ':'
+ and self.reader.peek(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}'
+ ):
self.reader.forward(length)
raise ScannerError(
- "while scanning a plain scalar", start_mark,
- "found unexpected ':'", self.reader.get_mark(),
- "Please check "
- "http://pyyaml.org/wiki/YAMLColonInFlowContext "
- "for details.")
+ 'while scanning a plain scalar',
+ start_mark,
+ "found unexpected ':'",
+ self.reader.get_mark(),
+ 'Please check '
+ 'http://pyyaml.org/wiki/YAMLColonInFlowContext '
+ 'for details.',
+ )
if length == 0:
break
self.allow_simple_key = False
@@ -1455,14 +1519,17 @@ class Scanner(object):
self.reader.forward(length)
end_mark = self.reader.get_mark()
spaces = self.scan_plain_spaces(indent, start_mark)
- if not spaces or self.reader.peek() == u'#' \
- or (not self.flow_level and self.reader.column < indent):
+ if (
+ not spaces
+ or self.reader.peek() == '#'
+ or (not self.flow_level and self.reader.column < indent)
+ ):
break
- token = ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+ token = ScalarToken("".join(chunks), True, start_mark, end_mark)
if spaces and spaces[0] == '\n':
# Create a comment token to preserve the trailing line breaks.
- comment = CommentToken(''.join(spaces) + '\n', start_mark, end_mark)
+ comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark)
token.add_post_comment(comment)
return token
@@ -1473,32 +1540,34 @@ class Scanner(object):
# We just forbid them completely. Do not use tabs in YAML!
chunks = []
length = 0
- while self.reader.peek(length) in u' ':
+ while self.reader.peek(length) in ' ':
length += 1
whitespaces = self.reader.prefix(length)
self.reader.forward(length)
ch = self.reader.peek()
- if ch in u'\r\n\x85\u2028\u2029':
+ if ch in '\r\n\x85\u2028\u2029':
line_break = self.scan_line_break()
self.allow_simple_key = True
prefix = self.reader.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ if (prefix == '---' or prefix == '...') and self.reader.peek(
+ 3
+ ) in _THE_END_SPACE_TAB:
return
breaks = []
- while self.reader.peek() in u' \r\n\x85\u2028\u2029':
+ while self.reader.peek() in ' \r\n\x85\u2028\u2029':
if self.reader.peek() == ' ':
self.reader.forward()
else:
breaks.append(self.scan_line_break())
prefix = self.reader.prefix(3)
- if (prefix == u'---' or prefix == u'...') \
- and self.reader.peek(3) in _THE_END_SPACE_TAB:
+ if (prefix == '---' or prefix == '...') and self.reader.peek(
+ 3
+ ) in _THE_END_SPACE_TAB:
return
- if line_break != u'\n':
+ if line_break != '\n':
chunks.append(line_break)
elif not breaks:
- chunks.append(u' ')
+ chunks.append(' ')
chunks.extend(breaks)
elif whitespaces:
chunks.append(whitespaces)
@@ -1510,23 +1579,27 @@ class Scanner(object):
# For some strange reason, the specification does not allow '_' in
# tag handles. I have allowed it anyway.
ch = self.reader.peek()
- if ch != u'!':
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % utf8(ch),
- self.reader.get_mark())
+ if ch != '!':
+ raise ScannerError(
+ 'while scanning a %s' % name,
+ start_mark,
+ "expected '!', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
length = 1
ch = self.reader.peek(length)
- if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' \
- or u'a' <= ch <= u'z' \
- or ch in u'-_':
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_':
length += 1
ch = self.reader.peek(length)
- if ch != u'!':
+ if ch != '!':
self.reader.forward(length)
- raise ScannerError("while scanning a %s" % name, start_mark,
- "expected '!', but found %r" % utf8(ch),
- self.reader.get_mark())
+ raise ScannerError(
+ 'while scanning a %s' % name,
+ start_mark,
+ "expected '!', but found %r" % utf8(ch),
+ self.reader.get_mark(),
+ )
length += 1
value = self.reader.prefix(length)
self.reader.forward(length)
@@ -1539,9 +1612,13 @@ class Scanner(object):
chunks = []
length = 0
ch = self.reader.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
- or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
- if ch == u'%':
+ while (
+ '0' <= ch <= '9'
+ or 'A' <= ch <= 'Z'
+ or 'a' <= ch <= 'z'
+ or ch in "-;/?:@&=+$,_.!~*'()[]%"
+ ):
+ if ch == '%':
chunks.append(self.reader.prefix(length))
self.reader.forward(length)
length = 0
@@ -1554,25 +1631,30 @@ class Scanner(object):
self.reader.forward(length)
length = 0
if not chunks:
- raise ScannerError("while parsing a %s" % name, start_mark,
- "expected URI, but found %r" % utf8(ch),
- self.reader.get_mark())
- return u''.join(chunks)
+ raise ScannerError(
+ 'while parsing a %s' % name,
+ start_mark,
+ 'expected URI, but found %r' % utf8(ch),
+ self.reader.get_mark(),
+ )
+ return "".join(chunks)
def scan_uri_escapes(self, name, start_mark):
# type: (Any, Any) -> Any
# See the specification for details.
code_bytes = [] # type: List[Any]
mark = self.reader.get_mark()
- while self.reader.peek() == u'%':
+ while self.reader.peek() == '%':
self.reader.forward()
for k in range(2):
- if self.reader.peek(k) not in u'0123456789ABCDEFabcdef':
+ if self.reader.peek(k) not in '0123456789ABCDEFabcdef':
raise ScannerError(
- "while scanning a %s" % name, start_mark,
- "expected URI escape sequence of 2 hexdecimal numbers,"
- " but found %r"
- % utf8(self.reader.peek(k)), self.reader.get_mark())
+ 'while scanning a %s' % name,
+ start_mark,
+ 'expected URI escape sequence of 2 hexdecimal numbers,'
+ ' but found %r' % utf8(self.reader.peek(k)),
+ self.reader.get_mark(),
+ )
if PY3:
code_bytes.append(int(self.reader.prefix(2), 16))
else:
@@ -1582,10 +1664,11 @@ class Scanner(object):
if PY3:
value = bytes(code_bytes).decode('utf-8')
else:
- value = unicode(b''.join(code_bytes), 'utf-8') # type: ignore
+ # fmt: off
+ value = unicode(b"".join(code_bytes), 'utf-8') # type: ignore
+ # fmt: on
except UnicodeDecodeError as exc:
- raise ScannerError("while scanning a %s" % name, start_mark,
- str(exc), mark)
+ raise ScannerError('while scanning a %s' % name, start_mark, str(exc), mark)
return value
def scan_line_break(self):
@@ -1599,16 +1682,16 @@ class Scanner(object):
# '\u2029 : '\u2029'
# default : ''
ch = self.reader.peek()
- if ch in u'\r\n\x85':
- if self.reader.prefix(2) == u'\r\n':
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
self.reader.forward(2)
else:
self.reader.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
+ return '\n'
+ elif ch in '\u2028\u2029':
self.reader.forward()
return ch
- return u''
+ return ""
class RoundTripScanner(Scanner):
@@ -1673,11 +1756,15 @@ class RoundTripScanner(Scanner):
# scalar, value token. FlowXEndToken, otherwise
# hidden streamtokens could get them (leave them and they will be
# pre comments for the next map/seq
- if len(self.tokens) > 1 and \
- isinstance(self.tokens[0], (ScalarToken, ValueToken,
- FlowSequenceEndToken, FlowMappingEndToken, )) and \
- isinstance(self.tokens[1], CommentToken) and \
- self.tokens[0].end_mark.line == self.tokens[1].start_mark.line:
+ if (
+ len(self.tokens) > 1
+ and isinstance(
+ self.tokens[0],
+ (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken),
+ )
+ and isinstance(self.tokens[1], CommentToken)
+ and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line
+ ):
self.tokens_taken += 1
self.tokens[0].add_post_comment(self.tokens.pop(1))
self.tokens_taken += 1
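The token juggling above is what lets the round-trip scanner hand an end-of-line comment to the scalar or value token that precedes it instead of losing it between stream tokens. A small sketch of the effect at the API level, assuming the default round-trip mode:

    import sys
    from ruamel.yaml import YAML

    yaml = YAML()  # round-trip mode
    data = yaml.load('version: 0.15.48  # bumped by release script\n')
    yaml.dump(data, sys.stdout)  # the end-of-line comment is expected to survive the round trip
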
@@ -1687,7 +1774,7 @@ class RoundTripScanner(Scanner):
def fetch_comment(self, comment):
# type: (Any) -> None
value, start_mark, end_mark = comment
- while value and value[-1] == u' ':
+ while value and value[-1] == ' ':
# empty line within indented key context
# no need to update end-mark, that is not used
value = value[:-1]
@@ -1716,20 +1803,20 @@ class RoundTripScanner(Scanner):
# `unwind_indent` before issuing BLOCK-END.
# Scanners for block, flow, and plain scalars need to be modified.
- if self.reader.index == 0 and self.reader.peek() == u'\uFEFF':
+ if self.reader.index == 0 and self.reader.peek() == '\uFEFF':
self.reader.forward()
found = False
while not found:
- while self.reader.peek() == u' ':
+ while self.reader.peek() == ' ':
self.reader.forward()
ch = self.reader.peek()
- if ch == u'#':
+ if ch == '#':
start_mark = self.reader.get_mark()
comment = ch
self.reader.forward()
while ch not in _THE_END:
ch = self.reader.peek()
- if ch == u'\0': # don't gobble the end-of-stream character
+ if ch == '\0': # don't gobble the end-of-stream character
break
comment += ch
self.reader.forward()
@@ -1747,9 +1834,9 @@ class RoundTripScanner(Scanner):
if not self.flow_level:
self.allow_simple_key = True
ch = self.reader.peek()
- if ch == '\n': # empty toplevel lines
+ if ch == '\n': # empty toplevel lines
start_mark = self.reader.get_mark()
- comment = ''
+ comment = ""
while ch:
ch = self.scan_line_break(empty_line=True)
comment += ch
@@ -1773,19 +1860,20 @@ class RoundTripScanner(Scanner):
# '\u2029 : '\u2029'
# default : ''
ch = self.reader.peek() # type: Text
- if ch in u'\r\n\x85':
- if self.reader.prefix(2) == u'\r\n':
+ if ch in '\r\n\x85':
+ if self.reader.prefix(2) == '\r\n':
self.reader.forward(2)
else:
self.reader.forward()
- return u'\n'
- elif ch in u'\u2028\u2029':
+ return '\n'
+ elif ch in '\u2028\u2029':
self.reader.forward()
return ch
elif empty_line and ch in '\t ':
self.reader.forward()
return ch
- return u''
+ return ""
+
# try:
# import psyco
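The ESCAPE_REPLACEMENTS / ESCAPE_CODES tables above are what scan_flow_scalar_non_spaces uses to expand backslash escapes in double-quoted scalars. A minimal sketch of the observable behaviour through the public round-trip API (the document text is illustrative):

    from ruamel.yaml import YAML

    yaml = YAML()  # round-trip loader
    # '\x41' goes through ESCAPE_CODES ('x' -> 2 hex digits),
    # '\t' and '\n' through ESCAPE_REPLACEMENTS
    data = yaml.load('msg: "A=\\x41, tab=\\t, newline=\\n"')
    print(repr(data['msg']))  # expected: 'A=A, tab=\t, newline=\n'
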
diff --git a/serializer.py b/serializer.py
index c88539d..b2b26bf 100644
--- a/serializer.py
+++ b/serializer.py
@@ -7,13 +7,18 @@ from ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types
from ruamel.yaml.util import RegExp
from ruamel.yaml.events import (
- StreamStartEvent, StreamEndEvent, MappingStartEvent, MappingEndEvent,
- SequenceStartEvent, SequenceEndEvent, AliasEvent, ScalarEvent,
- DocumentStartEvent, DocumentEndEvent,
-)
-from ruamel.yaml.nodes import (
- MappingNode, ScalarNode, SequenceNode,
+ StreamStartEvent,
+ StreamEndEvent,
+ MappingStartEvent,
+ MappingEndEvent,
+ SequenceStartEvent,
+ SequenceEndEvent,
+ AliasEvent,
+ ScalarEvent,
+ DocumentStartEvent,
+ DocumentEndEvent,
)
+from ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode
if False: # MYPY
from typing import Any, Dict, Union, Text # NOQA
@@ -32,9 +37,16 @@ class Serializer(object):
ANCHOR_TEMPLATE = u'id%03d'
ANCHOR_RE = RegExp(u'id(?!000$)\\d{3,}')
- def __init__(self, encoding=None, explicit_start=None, explicit_end=None,
- version=None, tags=None, dumper=None):
- # type: (Any, bool, bool, VersionType, Any, Any) -> None
+ def __init__(
+ self,
+ encoding=None,
+ explicit_start=None,
+ explicit_end=None,
+ version=None,
+ tags=None,
+ dumper=None,
+ ):
+ # type: (Any, Union[None, bool], Union[None, bool], Union[None, VersionType], Any, Any) -> None # NOQA
self.dumper = dumper
if self.dumper is not None:
self.dumper._serializer = self
@@ -56,15 +68,15 @@ class Serializer(object):
def emitter(self):
# type: () -> Any
if hasattr(self.dumper, 'typ'):
- return self.dumper.emitter # type: ignore
- return self.dumper._emitter # type: ignore
+ return self.dumper.emitter
+ return self.dumper._emitter
@property
def resolver(self):
# type: () -> Any
if hasattr(self.dumper, 'typ'):
- self.dumper.resolver # type: ignore
- return self.dumper._resolver # type: ignore
+ self.dumper.resolver
+ return self.dumper._resolver
def open(self):
# type: () -> None
@@ -72,14 +84,14 @@ class Serializer(object):
self.emitter.emit(StreamStartEvent(encoding=self.use_encoding))
self.closed = False
elif self.closed:
- raise SerializerError("serializer is closed")
+ raise SerializerError('serializer is closed')
else:
- raise SerializerError("serializer is already opened")
+ raise SerializerError('serializer is already opened')
def close(self):
# type: () -> None
if self.closed is None:
- raise SerializerError("serializer is not opened")
+ raise SerializerError('serializer is not opened')
elif not self.closed:
self.emitter.emit(StreamEndEvent())
self.closed = True
@@ -93,12 +105,14 @@ class Serializer(object):
nprint('Serializing nodes')
node.dump()
if self.closed is None:
- raise SerializerError("serializer is not opened")
+ raise SerializerError('serializer is not opened')
elif self.closed:
- raise SerializerError("serializer is closed")
- self.emitter.emit(DocumentStartEvent(explicit=self.use_explicit_start,
- version=self.use_version,
- tags=self.use_tags))
+ raise SerializerError('serializer is closed')
+ self.emitter.emit(
+ DocumentStartEvent(
+ explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags
+ )
+ )
self.anchor_node(node)
self.serialize_node(node, None, None)
self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end))
@@ -151,12 +165,23 @@ class Serializer(object):
# if not equal quoting is necessary for strings
detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False))
default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True))
- implicit = ((node.tag == detected_tag), (node.tag == default_tag),
- node.tag.startswith('tag:yaml.org,2002:'))
- self.emitter.emit(ScalarEvent(alias, node.tag, implicit, node.value,
- style=node.style, comment=node.comment))
+ implicit = (
+ (node.tag == detected_tag),
+ (node.tag == default_tag),
+ node.tag.startswith('tag:yaml.org,2002:'),
+ )
+ self.emitter.emit(
+ ScalarEvent(
+ alias,
+ node.tag,
+ implicit,
+ node.value,
+ style=node.style,
+ comment=node.comment,
+ )
+ )
elif isinstance(node, SequenceNode):
- implicit = (node.tag == self.resolver.resolve(SequenceNode, node.value, True))
+ implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True)
comment = node.comment
end_comment = None
seq_comment = None
@@ -168,16 +193,22 @@ class Serializer(object):
end_comment = comment[2]
else:
end_comment = None
- self.emitter.emit(SequenceStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style,
- comment=node.comment))
+ self.emitter.emit(
+ SequenceStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ )
+ )
index = 0
for item in node.value:
self.serialize_node(item, node, index)
index += 1
self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment]))
elif isinstance(node, MappingNode):
- implicit = (node.tag == self.resolver.resolve(MappingNode, node.value, True))
+ implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True)
comment = node.comment
end_comment = None
map_comment = None
@@ -187,9 +218,15 @@ class Serializer(object):
# comment[0] = None
if comment and len(comment) > 2:
end_comment = comment[2]
- self.emitter.emit(MappingStartEvent(alias, node.tag, implicit,
- flow_style=node.flow_style,
- comment=node.comment))
+ self.emitter.emit(
+ MappingStartEvent(
+ alias,
+ node.tag,
+ implicit,
+ flow_style=node.flow_style,
+ comment=node.comment,
+ )
+ )
for key, value in node.value:
self.serialize_node(key, node, None)
self.serialize_node(value, node, key)
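The Serializer above is normally driven through the YAML front end; use_explicit_start, use_version and use_tags end up in the DocumentStartEvent it emits. A hedged sketch of how those settings are usually reached from user code (attribute names as exposed by ruamel.yaml's YAML class; the expected output is approximate):

    import sys
    from ruamel.yaml import YAML

    yaml = YAML()
    yaml.explicit_start = True   # -> Serializer use_explicit_start, emits '---'
    yaml.version = (1, 2)        # -> use_version, emits a %YAML directive
    yaml.dump({'package': 'ruamel.yaml', 'release': '0.15.48'}, sys.stdout)
    # expected, roughly:
    # %YAML 1.2
    # ---
    # package: ruamel.yaml
    # release: 0.15.48
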
diff --git a/setup.py b/setup.py
index c0d00cf..4eb777c 100644
--- a/setup.py
+++ b/setup.py
@@ -9,14 +9,15 @@ import sys
import os
import datetime
import traceback
-sys.path = [path for path in sys.path if path not in [os.getcwd(), '']]
-import platform # NOQA
-from _ast import * # NOQA
-from ast import parse # NOQA
+
+sys.path = [path for path in sys.path if path not in [os.getcwd(), ""]]
+import platform # NOQA
+from _ast import * # NOQA
+from ast import parse # NOQA
from setuptools import setup, Extension, Distribution # NOQA
-from setuptools.command import install_lib # NOQA
-from setuptools.command.sdist import sdist as _sdist # NOQA
+from setuptools.command import install_lib # NOQA
+from setuptools.command.sdist import sdist as _sdist # NOQA
if __name__ != '__main__':
@@ -29,33 +30,41 @@ full_package_name = None
if __name__ != '__main__':
raise NotImplementedError('should never include setup.py')
-if sys.version_info < (3, ):
+if sys.version_info < (3,):
string_type = basestring
else:
string_type = str
if sys.version_info < (3, 4):
- class Bytes():
+
+ class Bytes:
pass
class NameConstant:
pass
-if sys.version_info < (3, ):
+
+if sys.version_info < (3,):
open_kw = dict()
else:
open_kw = dict(encoding='utf-8')
if sys.version_info < (2, 7) or platform.python_implementation() == 'Jython':
- class Set():
+
+ class Set:
pass
-if os.environ.get('DVDEBUG', '') == '':
+
+if os.environ.get('DVDEBUG', "") == "":
+
def debug(*args, **kw):
pass
+
+
else:
+
def debug(*args, **kw):
with open(os.environ['DVDEBUG'], 'a') as fp:
kw1 = kw.copy()
@@ -70,6 +79,9 @@ def literal_eval(node_or_string):
expression. The string or node provided may only consist of the following
Python literal structures: strings, bytes, numbers, tuples, lists, dicts,
sets, booleans, and None.
+
+ Even when passing in Unicode, the resulting Str types parsed are 'str' in Python 2.
+ I don't know how to set 'unicode_literals' on parse -> Str is explicitly converted.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, string_type):
@@ -77,10 +89,14 @@ def literal_eval(node_or_string):
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
else:
- raise TypeError("only string or AST nodes supported")
+ raise TypeError('only string or AST nodes supported')
def _convert(node):
- if isinstance(node, (Str, Bytes)):
+ if isinstance(node, Str):
+ if sys.version_info < (3,) and not isinstance(node.s, unicode):
+ return node.s.decode('utf-8')
+ return node.s
+ elif isinstance(node, Bytes):
return node.s
elif isinstance(node, Num):
return node.n
@@ -91,25 +107,28 @@ def literal_eval(node_or_string):
elif isinstance(node, Set):
return set(map(_convert, node.elts))
elif isinstance(node, Dict):
- return dict((_convert(k), _convert(v)) for k, v
- in zip(node.keys, node.values))
+ return dict((_convert(k), _convert(v)) for k, v in zip(node.keys, node.values))
elif isinstance(node, NameConstant):
return node.value
elif sys.version_info < (3, 4) and isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
- elif isinstance(node, UnaryOp) and \
- isinstance(node.op, (UAdd, USub)) and \
- isinstance(node.operand, (Num, UnaryOp, BinOp)): # NOQA
+ elif (
+ isinstance(node, UnaryOp)
+ and isinstance(node.op, (UAdd, USub))
+ and isinstance(node.operand, (Num, UnaryOp, BinOp))
+ ): # NOQA
operand = _convert(node.operand)
if isinstance(node.op, UAdd):
- return + operand
+ return +operand
else:
- return - operand
- elif isinstance(node, BinOp) and \
- isinstance(node.op, (Add, Sub)) and \
- isinstance(node.right, (Num, UnaryOp, BinOp)) and \
- isinstance(node.left, (Num, UnaryOp, BinOp)): # NOQA
+ return -operand
+ elif (
+ isinstance(node, BinOp)
+ and isinstance(node.op, (Add, Sub))
+ and isinstance(node.right, (Num, UnaryOp, BinOp))
+ and isinstance(node.left, (Num, UnaryOp, BinOp))
+ ): # NOQA
left = _convert(node.left)
right = _convert(node.right)
if isinstance(node.op, Add):
@@ -133,6 +152,7 @@ def literal_eval(node_or_string):
err.text = repr(node)
err.node = node
raise err
+
return _convert(node_or_string)
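This vendored literal_eval mirrors the stdlib ast.literal_eval (plus the Python 2 Str-to-unicode decoding in _convert above); it is used to pull the _package_data dict out of __init__.py without importing the package. For plain literals the stdlib call behaves the same way, e.g. (dict contents made up):

    import ast

    text = "{'full_package_name': 'ruamel.yaml', 'version_info': (0, 15, 48)}"
    data = ast.literal_eval(text)  # only literal structures: str, bytes, numbers, tuple, list, dict, set, bool, None
    print(data['version_info'])    # (0, 15, 48)
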
@@ -145,23 +165,23 @@ def _package_data(fn):
for line in fp.readlines():
if sys.version_info < (3,):
line = line.decode('utf-8')
- if line.startswith(u'_package_data'):
+ if line.startswith('_package_data'):
if 'dict(' in line:
parsing = 'python'
- lines.append(u'dict(\n')
- elif line.endswith(u'= {\n'):
+ lines.append('dict(\n')
+ elif line.endswith('= {\n'):
parsing = 'python'
- lines.append(u'{\n')
+ lines.append('{\n')
else:
raise NotImplementedError
continue
if not parsing:
continue
if parsing == 'python':
- if line.startswith(u')') or line.startswith(u'}'):
+ if line.startswith(')') or line.startswith('}'):
lines.append(line)
try:
- data = literal_eval(u''.join(lines))
+ data = literal_eval("".join(lines))
except SyntaxError as e:
context = 2
from_line = e.lineno - (context + 1)
@@ -169,11 +189,16 @@ def _package_data(fn):
w = len(str(to_line))
for index, line in enumerate(lines):
if from_line <= index <= to_line:
- print(u"{0:{1}}: {2}".format(index, w, line).encode('utf-8'),
- end=u'')
+ print(
+ '{0:{1}}: {2}'.format(index, w, line).encode('utf-8'),
+ end="",
+ )
if index == e.lineno - 1:
- print(u"{0:{1}} {2}^--- {3}".format(
- u' ', w, u' ' * e.offset, e.node))
+ print(
+ '{0:{1}} {2}^--- {3}'.format(
+ ' ', w, ' ' * e.offset, e.node
+ )
+ )
raise
break
lines.append(line)
@@ -185,32 +210,29 @@ def _package_data(fn):
# make sure you can run "python ../some/dir/setup.py install"
pkg_data = _package_data(__file__.replace('setup.py', '__init__.py'))
-exclude_files = [
- 'setup.py',
-]
+exclude_files = ['setup.py']
# # helper
def _check_convert_version(tup):
"""Create a PEP 386 pseudo-format conformant string from tuple tup."""
ret_val = str(tup[0]) # first is always digit
- next_sep = "." # separator for next extension, can be "" or "."
+ next_sep = '.' # separator for next extension, can be "" or "."
nr_digits = 0 # nr of adjacent digits in rest, to verify
post_dev = False # are we processing post/dev
for x in tup[1:]:
if isinstance(x, int):
nr_digits += 1
if nr_digits > 2:
- raise ValueError("too many consecutive digits after " + ret_val)
+ raise ValueError('too many consecutive digits after ' + ret_val)
ret_val += next_sep + str(x)
next_sep = '.'
continue
first_letter = x[0].lower()
- next_sep = ''
+ next_sep = ""
if first_letter in 'abcr':
if post_dev:
- raise ValueError("release level specified after "
- "post/dev: " + x)
+ raise ValueError('release level specified after ' 'post/dev: ' + x)
nr_digits = 0
ret_val += 'rc' if first_letter == 'r' else first_letter
elif first_letter in 'pd':
@@ -232,8 +254,7 @@ version_str = _check_convert_version(version_info)
class MyInstallLib(install_lib.install_lib):
def install(self):
fpp = pkg_data['full_package_name'].split('.') # full package path
- full_exclude_files = [os.path.join(*(fpp + [x]))
- for x in exclude_files]
+ full_exclude_files = [os.path.join(*(fpp + [x])) for x in exclude_files]
alt_files = []
outfiles = install_lib.install_lib.install(self)
for x in outfiles:
@@ -263,7 +284,7 @@ class MySdist(_sdist):
# try except so this doesn't bomb when you don't have wheel installed, implies
# generation of wheels in ./dist
try:
- from wheel.bdist_wheel import bdist_wheel as _bdist_wheel # NOQA
+ from wheel.bdist_wheel import bdist_wheel as _bdist_wheel # NOQA
class MyBdistWheel(_bdist_wheel):
def initialize_options(self):
@@ -287,23 +308,25 @@ class InMemoryZipFile(object):
except ImportError:
from io import BytesIO as StringIO
import zipfile
+
self.zip_file = zipfile
# Create the in-memory file-like object
self._file_name = file_name
self.in_memory_data = StringIO()
# Create the in-memory zipfile
self.in_memory_zip = self.zip_file.ZipFile(
- self.in_memory_data, "w", self.zip_file.ZIP_DEFLATED, False)
+ self.in_memory_data, 'w', self.zip_file.ZIP_DEFLATED, False
+ )
self.in_memory_zip.debug = 3
def append(self, filename_in_zip, file_contents):
- '''Appends a file with name filename_in_zip and contents of
- file_contents to the in-memory zip.'''
+ """Appends a file with name filename_in_zip and contents of
+ file_contents to the in-memory zip."""
self.in_memory_zip.writestr(filename_in_zip, file_contents)
- return self # so you can daisy-chain
+ return self # so you can daisy-chain
def write_to_file(self, filename):
- '''Writes the in-memory zip to a file.'''
+ """Writes the in-memory zip to a file."""
# Mark the files as having been created on Windows so that
# Unix permissions are not inferred as 0000
for zfile in self.in_memory_zip.filelist:
@@ -328,6 +351,7 @@ class InMemoryZipFile(object):
"""
if pattern and isinstance(pattern, string_type):
import re
+
pattern = re.compile(pattern)
if file_names:
if not isinstance(file_names, list):
@@ -343,9 +367,12 @@ class InMemoryZipFile(object):
continue
self.append(l.filename, zf.read(l))
if file_names:
- raise IOError('[Errno 2] No such file{}: {}'.format(
- '' if len(file_names) == 1 else 's',
- ', '.join([repr(f) for f in file_names])))
+ raise IOError(
+ '[Errno 2] No such file{}: {}'.format(
+ "" if len(file_names) == 1 else 's',
+ ', '.join([repr(f) for f in file_names]),
+ )
+ )
class NameSpacePackager(object):
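InMemoryZipFile keeps the archive in an in-memory buffer; as the docstrings above note, append() returns self so calls can be daisy-chained before write_to_file() flushes the buffer to disk. A usage sketch, assuming the class above is in scope (it lives inside setup.py and is not importable as a module; the constructor argument is inferred from self._file_name above):

    zf = InMemoryZipFile('example.zip')                        # file name is illustrative
    zf.append('pkg/__init__.py', "__version__ = '0.15.48'\n") \
      .append('pkg/README.rst', 'illustrative contents\n')     # append() returns self
    zf.write_to_file('example.zip')                            # write the in-memory zip out
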
@@ -359,8 +386,11 @@ class NameSpacePackager(object):
self.command = None
self.python_version()
self._pkg = [None, None] # required and pre-installable packages
- if sys.argv[0] == 'setup.py' and sys.argv[1] == 'install' and \
- '--single-version-externally-managed' not in sys.argv:
+ if (
+ sys.argv[0] == 'setup.py'
+ and sys.argv[1] == 'install'
+ and '--single-version-externally-managed' not in sys.argv
+ ):
if os.environ.get('READTHEDOCS', None) == 'True':
os.system('pip install .')
sys.exit(0)
@@ -381,7 +411,7 @@ class NameSpacePackager(object):
break
def pn(self, s):
- if sys.version_info < (3, ) and isinstance(s, unicode):
+ if sys.version_info < (3,) and isinstance(s, unicode):
return s.encode('utf-8')
return s
@@ -408,14 +438,15 @@ class NameSpacePackager(object):
if pd.get('nested', False):
continue
self._split.append(self.full_package_name + '.' + d)
- if sys.version_info < (3, ):
- self._split = [(y.encode('utf-8') if isinstance(y, unicode) else y)
- for y in self._split]
+ if sys.version_info < (3,):
+ self._split = [
+ (y.encode('utf-8') if isinstance(y, unicode) else y) for y in self._split
+ ]
return self._split
@property
def namespace_packages(self):
- return self.split[:self.depth]
+ return self.split[: self.depth]
def namespace_directories(self, depth=None):
"""return list of directories where the namespace should be created /
@@ -433,8 +464,10 @@ class NameSpacePackager(object):
def package_dir(self):
d = {
# don't specify empty dir, clashes with package_data spec
- self.full_package_name: '.',
+ self.full_package_name: '.'
}
+ if 'extra_packages' in self._pkg_data:
+ return d
if len(self.split) > 1: # only if package namespace
d[self.split[0]] = self.namespace_directories(1)[0]
return d
@@ -448,8 +481,9 @@ class NameSpacePackager(object):
for d in directories:
os.mkdir(d)
with open(os.path.join(d, '__init__.py'), 'w') as fp:
- fp.write('import pkg_resources\n'
- 'pkg_resources.declare_namespace(__name__)\n')
+ fp.write(
+ 'import pkg_resources\n' 'pkg_resources.declare_namespace(__name__)\n'
+ )
def python_version(self):
supported = self._pkg_data.get('supported')
@@ -505,12 +539,10 @@ class NameSpacePackager(object):
if self.command == 'develop':
raise InstallationError(
'Cannot mix develop (pip install -e),\nwith '
- 'non-develop installs for package name {0}'.format(
- fn))
+ 'non-develop installs for package name {0}'.format(fn)
+ )
elif fn == prefix:
- raise InstallationError(
- 'non directory package {0} in {1}'.format(
- fn, p))
+ raise InstallationError('non directory package {0} in {1}'.format(fn, p))
for pre in [x + '.' for x in prefixes]:
if fn.startswith(pre):
break
@@ -519,7 +551,8 @@ class NameSpacePackager(object):
if fn.endswith('-link') and self.command == 'install':
raise InstallationError(
'Cannot mix non-develop with develop\n(pip install -e)'
- ' installs for package name {0}'.format(fn))
+ ' installs for package name {0}'.format(fn)
+ )
def entry_points(self, script_name=None, package_name=None):
"""normally called without explicit script_name and package name
@@ -533,13 +566,15 @@ class NameSpacePackager(object):
if the ep entry is a simple string without "=", that is assumed to be
the name of the script.
"""
+
def pckg_entry_point(name):
return '{0}{1}:main'.format(
- name,
- '.__main__' if os.path.exists('__main__.py') else '',
+ name, '.__main__' if os.path.exists('__main__.py') else ""
)
ep = self._pkg_data.get('entry_points', True)
+ if isinstance(ep, dict):
+ return ep
if ep is None:
return None
if ep not in [True, 1]:
@@ -553,9 +588,11 @@ class NameSpacePackager(object):
package_name = self.full_package_name
if not script_name:
script_name = package_name.split('.')[-1]
- return {'console_scripts': [
- '{0} = {1}'.format(script_name, pckg_entry_point(package_name)),
- ]}
+ return {
+ 'console_scripts': [
+ '{0} = {1}'.format(script_name, pckg_entry_point(package_name))
+ ]
+ }
@property
def url(self):
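Per the docstring above, entry_points() maps the 'entry_points' key of _pkg_data onto a setuptools console_scripts spec; with the new early return a dict is now passed through untouched, and the default (True) derives the script name from the package name. For a hypothetical package the default case would be expected to yield roughly:

    # _pkg_data = dict(full_package_name='ruamel.yaml.example', entry_points=True, ...)
    expected = {
        'console_scripts': [
            'example = ruamel.yaml.example.__main__:main',  # '.__main__' only when __main__.py exists
        ]
    }
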
@@ -567,11 +604,11 @@ class NameSpacePackager(object):
@property
def author(self):
- return self._pkg_data['author']
+ return self._pkg_data['author'] # no get needs to be there
@property
def author_email(self):
- return self._pkg_data['author_email']
+ return self._pkg_data['author_email'] # no get needs to be there
@property
def license(self):
@@ -580,7 +617,7 @@ class NameSpacePackager(object):
if lic is None:
# lic_fn = os.path.join(os.path.dirname(__file__), 'LICENSE')
# assert os.path.exists(lic_fn)
- return "MIT license"
+ return 'MIT license'
return lic
def has_mit_lic(self):
@@ -588,30 +625,40 @@ class NameSpacePackager(object):
@property
def description(self):
- return self._pkg_data['description']
+ return self._pkg_data['description'] # no get needs to be there
@property
def status(self):
# αβ
- status = self._pkg_data.get('status', u'β').lower()
- if status in [u'α', u'alpha']:
+ status = self._pkg_data.get('status', 'β').lower()
+ if status in ['α', 'alpha']:
return (3, 'Alpha')
- elif status in [u'β', u'beta']:
+ elif status in ['β', 'beta']:
return (4, 'Beta')
- elif u'stable' in status.lower():
+ elif 'stable' in status.lower():
return (5, 'Production/Stable')
raise NotImplementedError
@property
def classifiers(self):
- return [
- 'Development Status :: {0} - {1}'.format(*self.status),
- 'Intended Audience :: Developers',
- 'License :: ' + ('OSI Approved :: MIT' if self.has_mit_lic()
- else 'Other/Proprietary') + ' License',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python',
- ] + [self.pn(x) for x in self._pkg_data.get('classifiers', [])]
+ """this needs more intelligence, probably splitting the classifiers from _pkg_data
+ and only adding defaults when no explicit entries were provided.
+ Add explicit Python versions in sync with tox.env generation based on python_requires?
+ """
+ return sorted(
+ set(
+ [
+ 'Development Status :: {0} - {1}'.format(*self.status),
+ 'Intended Audience :: Developers',
+ 'License :: '
+ + ('OSI Approved :: MIT' if self.has_mit_lic() else 'Other/Proprietary')
+ + ' License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ ]
+ + [self.pn(x) for x in self._pkg_data.get('classifiers', [])]
+ )
+ )
@property
def keywords(self):
@@ -649,7 +696,7 @@ class NameSpacePackager(object):
# 'any' for all builds, 'py27' etc for specifics versions
packages = ir.get('any', [])
if isinstance(packages, string_type):
- packages = packages.split() # assume white space separated string
+ packages = packages.split() # assume white space separated string
if self.nested:
# parent dir is also a package, make sure it is installed (need its .pth file)
parent_pkg = self.full_package_name.rsplit('.', 1)[0]
@@ -659,7 +706,7 @@ class NameSpacePackager(object):
if implementation == 'CPython':
pyver = 'py{0}{1}'.format(*sys.version_info)
elif implementation == 'PyPy':
- pyver = 'pypy' if sys.version_info < (3, ) else 'pypy3'
+ pyver = 'pypy' if sys.version_info < (3,) else 'pypy3'
elif implementation == 'Jython':
pyver = 'jython'
packages.extend(ir.get(pyver, []))
@@ -692,7 +739,7 @@ class NameSpacePackager(object):
df.append('LICENSE')
if not df:
return None
- return [('.', df), ]
+ return [('.', df)]
@property
def package_data(self):
@@ -702,9 +749,30 @@ class NameSpacePackager(object):
df.append('LICENSE')
# but don't install it
exclude_files.append('LICENSE')
- if not df:
- return {}
- return {self.full_package_name: df}
+ pd = self._pkg_data.get('package_data', {})
+ if df:
+ pd[self.full_package_name] = df
+ if sys.version_info < (3,):
+ # python2 doesn't seem to like unicode package names as keys
+ # maybe only when the packages themselves are non-unicode
+ for k in pd:
+ if isinstance(k, unicode):
+ pd[str(k)] = pd.pop(k)
+ # for k in pd:
+ # pd[k] = [e.encode('utf-8') for e in pd[k]] # de-unicode
+ return pd
+
+ @property
+ def packages(self):
+ s = self.split
+ # fixed this in package_data, the keys there must be non-unicode for py27
+ # if sys.version_info < (3, 0):
+ # s = [x.encode('utf-8') for x in self.split]
+ return s + self._pkg_data.get('extra_packages', [])
+
+ @property
+ def python_requires(self):
+ return self._pkg_data.get('python_requires', None)
@property
def ext_modules(self):
@@ -787,16 +855,13 @@ class NameSpacePackager(object):
distutils.sysconfig.customize_compiler(compiler)
# make sure you can reach header files because compile does change dir
compiler.add_include_dir(os.getcwd())
- if sys.version_info < (3, ):
+ if sys.version_info < (3,):
tmp_dir = tmp_dir.encode('utf-8')
# used to be a different directory, not necessary
compile_out_dir = tmp_dir
try:
compiler.link_executable(
- compiler.compile(
- [file_name],
- output_dir=compile_out_dir,
- ),
+ compiler.compile([file_name], output_dir=compile_out_dir),
bin_file_name,
output_dir=tmp_dir,
libraries=ext.libraries,
@@ -819,6 +884,10 @@ class NameSpacePackager(object):
shutil.rmtree(tmp_dir)
return self._ext_modules
+ @property
+ def test_suite(self):
+ return self._pkg_data.get('test_suite')
+
def wheel(self, kw, setup):
"""temporary add setup.cfg if creating a wheel to include LICENSE file
https://bitbucket.org/pypa/wheel/issues/47
@@ -832,7 +901,7 @@ class NameSpacePackager(object):
if os.path.exists('LICENSE'):
fp.write('[metadata]\nlicense-file = LICENSE\n')
else:
- print("\n\n>>>>>> LICENSE file not found <<<<<\n\n")
+ print('\n\n>>>>>> LICENSE file not found <<<<<\n\n')
if self._pkg_data.get('universal'):
fp.write('[bdist_wheel]\nuniversal = 1\n')
try:
@@ -851,6 +920,7 @@ def main():
import wheel
import distutils
import setuptools
+
print('python: ', sys.version)
print('setuptools:', setuptools.__version__)
print('distutils: ', distutils.__version__)
@@ -862,10 +932,7 @@ def main():
if pkg_data.get('tarfmt'):
MySdist.tarfmt = pkg_data.get('tarfmt')
- cmdclass = dict(
- install_lib=MyInstallLib,
- sdist=MySdist,
- )
+ cmdclass = dict(install_lib=MyInstallLib, sdist=MySdist)
if _bdist_wheel_available:
MyBdistWheel.nsp = nsp
cmdclass['bdist_wheel'] = MyBdistWheel
@@ -874,7 +941,8 @@ def main():
name=nsp.full_package_name,
namespace_packages=nsp.namespace_packages,
version=version_str,
- packages=nsp.split,
+ packages=nsp.packages,
+ python_requires=nsp.python_requires,
url=nsp.url,
author=nsp.author,
author_email=nsp.author_email,
@@ -889,6 +957,7 @@ def main():
keywords=nsp.keywords,
package_data=nsp.package_data,
ext_modules=nsp.ext_modules,
+ test_suite=nsp.test_suite,
)
if '--version' not in sys.argv and ('--verbose' in sys.argv or dump_kw in sys.argv):
@@ -912,6 +981,7 @@ def main():
# until you match your/package/name for your.package.name
for p in nsp.install_pre:
import subprocess
+
# search other source
setup_path = os.path.join(*p.split('.') + ['setup.py'])
try_dir = os.path.dirname(sys.executable)
diff --git a/timestamp.py b/timestamp.py
index 7035c0f..b87a348 100644
--- a/timestamp.py
+++ b/timestamp.py
@@ -20,7 +20,6 @@ class TimeStamp(datetime.datetime):
def __deepcopy__(self, memo):
# type: (Any) -> Any
- ts = TimeStamp(self.year, self.month, self.day,
- self.hour, self.minute, self.second)
+ ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second)
ts._yaml = copy.deepcopy(self._yaml)
return ts
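TimeStamp.__deepcopy__ above rebuilds the instance and carries the private _yaml bookkeeping along, so copies of round-trip loaded timestamps keep their YAML-specific state. A quick sketch, assuming the round-trip loader resolves plain timestamps to TimeStamp:

    import copy
    from ruamel.yaml import YAML

    yaml = YAML()
    data = yaml.load('released: 2018-08-03 22:14:57')
    ts = data['released']           # expected: ruamel.yaml.timestamp.TimeStamp
    ts_copy = copy.deepcopy(ts)     # goes through __deepcopy__ above
    print(type(ts_copy).__name__, ts_copy == ts)  # expected: TimeStamp True
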
diff --git a/tokens.py b/tokens.py
index 94b1c10..54a8bd7 100644
--- a/tokens.py
+++ b/tokens.py
@@ -8,7 +8,7 @@ SHOWLINES = True
class Token(object):
- __slots__ = 'start_mark', 'end_mark', '_comment',
+ __slots__ = 'start_mark', 'end_mark', '_comment'
def __init__(self, start_mark, end_mark):
# type: (Any, Any) -> None
@@ -21,8 +21,7 @@ class Token(object):
# hasattr('self', key)]
attributes = [key for key in self.__slots__ if not key.endswith('_mark')]
attributes.sort()
- arguments = u', '.join([u'%s=%r' % (key, getattr(self, key))
- for key in attributes])
+ arguments = u', '.join([u'%s=%r' % (key, getattr(self, key)) for key in attributes])
if SHOWLINES:
try:
arguments += u', line: ' + str(self.start_mark.line)
@@ -102,8 +101,9 @@ class Token(object):
# class BOMToken(Token):
# id = '<byte order mark>'
+
class DirectiveToken(Token):
- __slots__ = 'name', 'value',
+ __slots__ = 'name', 'value'
id = '<directive>'
def __init__(self, name, value, start_mark, end_mark):
@@ -124,7 +124,7 @@ class DocumentEndToken(Token):
class StreamStartToken(Token):
- __slots__ = 'encoding',
+ __slots__ = ('encoding',)
id = '<stream start>'
def __init__(self, start_mark=None, end_mark=None, encoding=None):
@@ -198,7 +198,7 @@ class FlowEntryToken(Token):
class AliasToken(Token):
- __slots__ = 'value',
+ __slots__ = ('value',)
id = '<alias>'
def __init__(self, value, start_mark, end_mark):
@@ -208,7 +208,7 @@ class AliasToken(Token):
class AnchorToken(Token):
- __slots__ = 'value',
+ __slots__ = ('value',)
id = '<anchor>'
def __init__(self, value, start_mark, end_mark):
@@ -218,7 +218,7 @@ class AnchorToken(Token):
class TagToken(Token):
- __slots__ = 'value',
+ __slots__ = ('value',)
id = '<tag>'
def __init__(self, value, start_mark, end_mark):
@@ -228,7 +228,7 @@ class TagToken(Token):
class ScalarToken(Token):
- __slots__ = 'value', 'plain', 'style',
+ __slots__ = 'value', 'plain', 'style'
id = '<scalar>'
def __init__(self, value, plain, start_mark, end_mark, style=None):
@@ -240,7 +240,7 @@ class ScalarToken(Token):
class CommentToken(Token):
- __slots__ = 'value', 'pre_done',
+ __slots__ = 'value', 'pre_done'
id = '<comment>'
def __init__(self, value, start_mark, end_mark):
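The __slots__ edits in this file only normalize tuple syntax: a bare trailing comma and the parenthesized single-element tuple are identical, the formatter simply prefers the explicit parentheses. A tiny, non-ruamel check:

    a = 'value',
    b = ('value',)
    assert a == b == ('value',)   # same one-element tuple either way

    class AliasLike(object):
        __slots__ = ('value',)    # equivalent to:  __slots__ = 'value',
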
diff --git a/tox.ini b/tox.ini
index c09b65f..da5806b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,7 +7,6 @@ commands =
/bin/bash -c 'pytest _test/test_*.py'
deps =
pytest
- flake8==2.5.5
ruamel.std.pathlib
[testenv:py27m]
@@ -15,17 +14,24 @@ basepython = /opt/python/2.7.15m/bin/python
[testenv:cs]
basepython = python3.6
+deps =
+ flake8
+ flake8-bugbear;python_version>="3.5"
commands =
- flake8 --exclude ".tox,.#*,jabsy,jinja2,base,cmd,convert" {posargs}
+ flake8 []{posargs}
[testenv:pep8]
basepython = python3.6
+deps =
+ flake8
+ flake8-bugbear;python_version>="3.5"
commands =
- flake8 --exclude ".tox,.#*,jabsy,jinja2,base,cmd,convert" {posargs}
+ flake8 []{posargs}
[flake8]
show-source = True
max-line-length = 95
+ignore = W503,F405,E203
exclude = _test/lib,.hg,.git,.tox,dist,.cache,__pycache__,ruamel.zip2tar.egg-info
[pytest]
diff --git a/util.py b/util.py
index e5e816f..2412888 100644
--- a/util.py
+++ b/util.py
@@ -26,20 +26,26 @@ class LazyEval(object):
The sole additional attribute is the lazy_self function which holds the
return value (or, prior to evaluation, func and arguments), in its closure.
"""
+
def __init__(self, func, *args, **kwargs):
+ # type: (Any, Any, Any) -> None
def lazy_self():
+ # type: () -> Any
return_value = func(*args, **kwargs)
- object.__setattr__(self, "lazy_self", lambda: return_value)
+ object.__setattr__(self, 'lazy_self', lambda: return_value)
return return_value
- object.__setattr__(self, "lazy_self", lazy_self)
+
+ object.__setattr__(self, 'lazy_self', lazy_self)
def __getattribute__(self, name):
- lazy_self = object.__getattribute__(self, "lazy_self")
- if name == "lazy_self":
+ # type: (Any) -> Any
+ lazy_self = object.__getattribute__(self, 'lazy_self')
+ if name == 'lazy_self':
return lazy_self
return getattr(lazy_self(), name)
def __setattr__(self, name, value):
+ # type: (Any, Any) -> None
setattr(self.lazy_self(), name, value)
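LazyEval defers the wrapped call until the first attribute access and then caches the result by replacing its own lazy_self (see the docstring above); the RegExp helper imported by serializer.py is, as far as I can tell, such a wrapper around re.compile. A minimal sketch:

    import re
    from ruamel.yaml.util import LazyEval

    lazy_pat = LazyEval(re.compile, r'id(?!000$)\d{3,}')  # nothing compiled yet
    print(lazy_pat.match('id001'))  # first access triggers re.compile, then proxies .match -> match object
    print(lazy_pat.match('id000'))  # cached compiled pattern is reused -> None
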
@@ -90,7 +96,7 @@ def load_yaml_guess_indent(stream, **kw):
idx = l_s + 1
while line[idx] == ' ': # this will end as we rstripped
idx += 1
- if line[idx] == '#': # comment after -
+ if line[idx] == '#': # comment after -
continue
indent = idx - key_indent
break
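load_yaml_guess_indent (partially shown above) loads a document and additionally reports the sequence indent and the block-sequence dash offset it detected, so a file can be re-dumped with its original layout. A usage sketch (the values in the comment are what I would expect for this input, not verified here):

    from ruamel.yaml.util import load_yaml_guess_indent

    yaml_text = 'packages:\n  - name: ruamel.yaml\n    version: 0.15.48\n'
    data, indent, block_seq_indent = load_yaml_guess_indent(yaml_text)
    print(indent, block_seq_indent)  # expected: 4 2 (content column and dash offset)
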
@@ -120,6 +126,7 @@ def configobj_walker(cfg):
corresponding YAML output (including comments)
"""
from configobj import ConfigObj # type: ignore
+
assert isinstance(cfg, ConfigObj)
for c in cfg.initial_comment:
if c.strip():
@@ -135,6 +142,7 @@ def configobj_walker(cfg):
def _walk_section(s, level=0):
# type: (Any, int) -> Any
from configobj import Section
+
assert isinstance(s, Section)
indent = u' ' * level
for name in s.scalars:
@@ -162,6 +170,7 @@ def _walk_section(s, level=0):
for val in _walk_section(s[name], level=level + 1):
yield val
+
# def config_obj_2_rt_yaml(cfg):
# from .comments import CommentedMap, CommentedSeq
# from configobj import ConfigObj