-rw-r--r--  docutils/docutils/__init__.py | 4
-rw-r--r--  docutils/docutils/core.py | 24
-rw-r--r--  docutils/docutils/frontend.py | 9
-rw-r--r--  docutils/docutils/io.py | 20
-rw-r--r--  docutils/docutils/languages/zh_cn.py | 2
-rw-r--r--  docutils/docutils/languages/zh_tw.py | 2
-rw-r--r--  docutils/docutils/nodes.py | 43
-rw-r--r--  docutils/docutils/parsers/__init__.py | 3
-rw-r--r--  docutils/docutils/parsers/recommonmark_wrapper.py | 8
-rw-r--r--  docutils/docutils/parsers/rst/__init__.py | 3
-rw-r--r--  docutils/docutils/parsers/rst/directives/body.py | 7
-rw-r--r--  docutils/docutils/parsers/rst/directives/misc.py | 34
-rw-r--r--  docutils/docutils/parsers/rst/languages/ar.py | 105
-rw-r--r--  docutils/docutils/parsers/rst/languages/fa.py | 109
-rw-r--r--  docutils/docutils/parsers/rst/languages/fr.py | 2
-rw-r--r--  docutils/docutils/parsers/rst/languages/he.py | 2
-rw-r--r--  docutils/docutils/parsers/rst/languages/it.py | 2
-rw-r--r--  docutils/docutils/parsers/rst/languages/ru.py | 130
-rw-r--r--  docutils/docutils/parsers/rst/languages/sk.py | 2
-rw-r--r--  docutils/docutils/parsers/rst/roles.py | 3
-rw-r--r--  docutils/docutils/parsers/rst/states.py | 121
-rw-r--r--  docutils/docutils/parsers/rst/tableparser.py | 15
-rw-r--r--  docutils/docutils/statemachine.py | 22
-rw-r--r--  docutils/docutils/transforms/peps.py | 4
-rw-r--r--  docutils/docutils/transforms/references.py | 10
-rw-r--r--  docutils/docutils/transforms/universal.py | 25
-rw-r--r--  docutils/docutils/utils/__init__.py | 24
-rw-r--r--  docutils/docutils/utils/code_analyzer.py | 4
-rw-r--r--  docutils/docutils/utils/error_reporting.py | 15
-rw-r--r--  docutils/docutils/utils/math/latex2mathml.py | 24
-rwxr-xr-x  docutils/docutils/utils/math/math2html.py | 9
-rw-r--r--  docutils/docutils/utils/math/tex2mathml_extern.py | 27
-rw-r--r--  docutils/docutils/utils/smartquotes.py | 4
-rw-r--r--  docutils/docutils/utils/urischemes.py | 2
-rw-r--r--  docutils/docutils/writers/_html_base.py | 82
-rw-r--r--  docutils/docutils/writers/html4css1/__init__.py | 71
-rw-r--r--  docutils/docutils/writers/html5_polyglot/__init__.py | 88
-rw-r--r--  docutils/docutils/writers/latex2e/__init__.py | 209
-rw-r--r--  docutils/docutils/writers/manpage.py | 44
-rw-r--r--  docutils/docutils/writers/odf_odt/pygmentsformatter.py | 4
-rw-r--r--  docutils/docutils/writers/pep_html/__init__.py | 2
-rw-r--r--  docutils/docutils/writers/pseudoxml.py | 3
-rw-r--r--  docutils/docutils/writers/s5_html/__init__.py | 5
-rw-r--r--  docutils/docutils/writers/xetex/__init__.py | 16
-rw-r--r--  docutils/test/functional/tests/dangerous.py | 3
-rw-r--r--  docutils/test/functional/tests/pep_html.py | 3
-rwxr-xr-x  docutils/test/functional/tests/standalone_rst_s5_html_1.py | 3
-rw-r--r--  docutils/test/package_unittest.py | 4
-rw-r--r--  docutils/test/test__init__.py | 2
-rwxr-xr-x  docutils/test/test_dependencies.py | 10
-rwxr-xr-x  docutils/test/test_io.py | 8
-rwxr-xr-x  docutils/test/test_nodes.py | 59
-rwxr-xr-x  docutils/test/test_parsers/test_rst/test_directives/test_block_quotes.py | 6
-rwxr-xr-x  docutils/test/test_publisher.py | 19
-rwxr-xr-x  docutils/test/test_settings.py | 13
-rwxr-xr-x  docutils/test/test_statemachine.py | 32
-rwxr-xr-x  docutils/test/test_traversals.py | 6
-rwxr-xr-x  docutils/test/test_utils.py | 12
-rwxr-xr-x  docutils/test/test_viewlist.py | 4
-rwxr-xr-x  docutils/test/test_writers/test_docutils_xml.py | 3
-rwxr-xr-x  docutils/test/test_writers/test_html4css1_misc.py | 109
-rwxr-xr-x  docutils/test/test_writers/test_html4css1_template.py | 2
-rw-r--r--  docutils/test/test_writers/test_html5_polyglot_misc.py | 110
-rw-r--r--  docutils/test/test_writers/test_html5_polyglot_parts.py | 8
-rwxr-xr-x  docutils/test/test_writers/test_latex2e.py | 21
-rw-r--r--  docutils/test/test_writers/test_latex2e_misc.py | 4
-rwxr-xr-x  docutils/test/test_writers/test_odt.py | 31
-rwxr-xr-x  docutils/tools/buildhtml.py | 29
-rw-r--r--  docutils/tools/dev/generate_punctuation_chars.py | 20
-rwxr-xr-x  docutils/tools/dev/unicode2rstsubs.py | 6
-rwxr-xr-x  docutils/tools/docutils-cli.py | 2
-rwxr-xr-x  docutils/tools/quicktest.py | 11
-rwxr-xr-x  docutils/tools/rst2odt.py | 2
-rw-r--r--  docutils/tox.ini | 155
74 files changed, 1038 insertions(+), 1003 deletions(-)
diff --git a/docutils/docutils/__init__.py b/docutils/docutils/__init__.py
index d9be49866..2280a2855 100644
--- a/docutils/docutils/__init__.py
+++ b/docutils/docutils/__init__.py
@@ -84,8 +84,8 @@ class VersionInfo(namedtuple('VersionInfo',
if releaselevel == 'final':
if not release:
raise ValueError('releaselevel "final" must not be used '
- 'with development versions (leads to wrong '
- 'version ordering of the related __version__')
+ 'with development versions (leads to wrong '
+ 'version ordering of the related __version__')
if serial != 0:
raise ValueError('"serial" must be 0 for final releases')
diff --git a/docutils/docutils/core.py b/docutils/docutils/core.py
index 594b68edf..d1799bd4e 100644
--- a/docutils/docutils/core.py
+++ b/docutils/docutils/core.py
@@ -275,7 +275,7 @@ command line used.""" % (__version__,
def report_SystemMessage(self, error):
print('Exiting due to level-%s (%s) system message.' % (
- error.level, utils.Reporter.levels[error.level]),
+ error.level, utils.Reporter.levels[error.level]),
file=self._stderr)
def report_UnicodeError(self, error):
@@ -508,14 +508,18 @@ def publish_from_doctree(document, destination_path=None,
return pub.publish(enable_exit_status=enable_exit_status)
def publish_cmdline_to_binary(reader=None, reader_name='standalone',
- parser=None, parser_name='restructuredtext',
- writer=None, writer_name='pseudoxml',
- settings=None, settings_spec=None,
- settings_overrides=None, config_section=None,
- enable_exit_status=True, argv=None,
- usage=default_usage, description=default_description,
- destination=None, destination_class=io.BinaryFileOutput
- ):
+ parser=None, parser_name='restructuredtext',
+ writer=None, writer_name='pseudoxml',
+ settings=None,
+ settings_spec=None,
+ settings_overrides=None,
+ config_section=None,
+ enable_exit_status=True,
+ argv=None,
+ usage=default_usage,
+ description=default_description,
+ destination=None,
+ destination_class=io.BinaryFileOutput):
"""
Set up & run a `Publisher` for command-line-based file I/O (input and
output file paths taken automatically from the command line). Return the
@@ -533,7 +537,7 @@ def publish_cmdline_to_binary(reader=None, reader_name='standalone',
(along with command-line option descriptions).
"""
pub = Publisher(reader, parser, writer, settings=settings,
- destination_class=destination_class)
+ destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name)
output = pub.publish(
argv, usage, description, settings_spec, settings_overrides,
diff --git a/docutils/docutils/frontend.py b/docutils/docutils/frontend.py
index e0f5bcd95..e77a6f03a 100644
--- a/docutils/docutils/frontend.py
+++ b/docutils/docutils/frontend.py
@@ -162,7 +162,7 @@ def validate_colon_separated_string_list(
return value
def validate_comma_separated_list(setting, value, option_parser,
- config_parser=None, config_section=None):
+ config_parser=None, config_section=None):
"""Check/normalize list arguments (split at "," and strip whitespace).
"""
# `value` may be ``bytes``, ``str``, or a ``list`` (when given as
@@ -207,7 +207,7 @@ def validate_strip_class(setting, value, option_parser,
return value
def validate_smartquotes_locales(setting, value, option_parser,
- config_parser=None, config_section=None):
+ config_parser=None, config_section=None):
"""Check/normalize a comma separated list of smart quote definitions.
Return a list of (language-tag, quotes) string tuples."""
@@ -236,7 +236,7 @@ def validate_smartquotes_locales(setting, value, option_parser,
quotes = multichar_quotes
elif len(quotes) != 4:
raise ValueError('Invalid value "%s". Please specify 4 quotes\n'
- ' (primary open/close; secondary open/close).'
+ ' (primary open/close; secondary open/close).'
% item.encode('ascii', 'backslashreplace'))
lc_quotes.append((lang, quotes))
return lc_quotes
@@ -281,8 +281,7 @@ def filter_settings_spec(settings_spec, *exclude, **replace):
# opt_spec is ("<help>", [<option strings>], {<keyword args>})
opt_name = [opt_string[2:].replace('-', '_')
for opt_string in opt_spec[1]
- if opt_string.startswith('--')
- ][0]
+ if opt_string.startswith('--')][0]
if opt_name in exclude:
continue
if opt_name in replace.keys():
diff --git a/docutils/docutils/io.py b/docutils/docutils/io.py
index 13c23878c..7f65df60c 100644
--- a/docutils/docutils/io.py
+++ b/docutils/docutils/io.py
@@ -281,7 +281,7 @@ class ErrorOutput:
except TypeError:
if isinstance(data, str): # destination may expect bytes
self.destination.write(data.encode(self.encoding,
- self.encoding_errors))
+ self.encoding_errors))
elif self.destination in (sys.stderr, sys.stdout):
self.destination.buffer.write(data) # write bytes to raw stream
else:
@@ -422,9 +422,9 @@ class FileOutput(Output):
self.opened = True
self.autoclose = autoclose
if handle_io_errors is not None:
- warnings.warn('io.FileOutput: initialization argument '
- '"handle_io_errors" is ignored and will be removed in '
- 'Docutils 1.2.', DeprecationWarning, stacklevel=2)
+ warnings.warn('io.FileOutput: init argument "handle_io_errors" '
+ 'is ignored and will be removed in '
+ 'Docutils 1.2.', DeprecationWarning, stacklevel=2)
if mode is not None:
self.mode = mode
self._stderr = ErrorOutput()
@@ -436,9 +436,9 @@ class FileOutput(Output):
elif (# destination is file-type object -> check mode:
mode and hasattr(self.destination, 'mode')
and mode != self.destination.mode):
- print('Warning: Destination mode "%s" differs from specified '
- 'mode "%s"' % (self.destination.mode, mode),
- file=self._stderr)
+ print('Warning: Destination mode "%s" differs from specified '
+ 'mode "%s"' % (self.destination.mode, mode),
+ file=self._stderr)
if not destination_path:
try:
self.destination_path = self.destination.name
@@ -482,10 +482,12 @@ class FileOutput(Output):
except AttributeError:
if check_encoding(self.destination,
self.encoding) is False:
- raise ValueError('Encoding of %s (%s) differs \n'
+ raise ValueError(
+ 'Encoding of %s (%s) differs \n'
' from specified encoding (%s)' %
(self.destination_path or 'destination',
- self.destination.encoding, self.encoding))
+ self.destination.encoding,
+ self.encoding))
else:
raise err
except (UnicodeError, LookupError) as err:
diff --git a/docutils/docutils/languages/zh_cn.py b/docutils/docutils/languages/zh_cn.py
index 67e11ed0c..0ddb9fa08 100644
--- a/docutils/docutils/languages/zh_cn.py
+++ b/docutils/docutils/languages/zh_cn.py
@@ -61,6 +61,6 @@ author_separators = [';', ',',
'\uff1b', # ';'
'\uff0c', # ','
'\u3001', # '、'
- ]
+ ]
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
diff --git a/docutils/docutils/languages/zh_tw.py b/docutils/docutils/languages/zh_tw.py
index 490997d0b..27f3f770f 100644
--- a/docutils/docutils/languages/zh_tw.py
+++ b/docutils/docutils/languages/zh_tw.py
@@ -60,6 +60,6 @@ author_separators = [';', ',',
'\uff1b', # ';'
'\uff0c', # ','
'\u3001', # '、'
- ]
+ ]
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
diff --git a/docutils/docutils/nodes.py b/docutils/docutils/nodes.py
index d31b1fdae..d811c3074 100644
--- a/docutils/docutils/nodes.py
+++ b/docutils/docutils/nodes.py
@@ -670,7 +670,7 @@ class Element(Node):
def astext(self):
return self.child_text_separator.join(
- [child.astext() for child in self.children])
+ [child.astext() for child in self.children])
def non_default_attributes(self):
atts = {}
@@ -1125,7 +1125,7 @@ class TextElement(Element):
if text != '':
textnode = Text(text)
Element.__init__(self, rawsource, textnode, *children,
- **attributes)
+ **attributes)
else:
Element.__init__(self, rawsource, *children, **attributes)
@@ -1385,8 +1385,9 @@ class document(Root, Structural, Element):
else:
prefix = id_prefix + auto_id_prefix
if prefix.endswith('%'):
- prefix = '%s%s-' % (prefix[:-1], suggested_prefix
- or make_id(node.tagname))
+ prefix = '%s%s-' % (prefix[:-1],
+ suggested_prefix
+ or make_id(node.tagname))
while True:
self.id_counter[prefix] += 1
id = '%s%d' % (prefix, self.id_counter[prefix])
@@ -1448,9 +1449,9 @@ class document(Root, Structural, Element):
old_node = self.ids[old_id]
if 'refuri' in node:
refuri = node['refuri']
- if old_node['names'] \
- and 'refuri' in old_node \
- and old_node['refuri'] == refuri:
+ if (old_node['names']
+ and 'refuri' in old_node
+ and old_node['refuri'] == refuri):
level = 1 # just inform if refuri's identical
if level > 1:
dupname(old_node, name)
@@ -1542,8 +1543,8 @@ class document(Root, Structural, Element):
name = whitespace_normalize_name(def_name)
if name in self.substitution_defs:
msg = self.reporter.error(
- 'Duplicate substitution definition name: "%s".' % name,
- base_node=subdef)
+ 'Duplicate substitution definition name: "%s".' % name,
+ base_node=subdef)
if msgnode is not None:
msgnode += msg
oldnode = self.substitution_defs[name]
@@ -1574,7 +1575,7 @@ class document(Root, Structural, Element):
def copy(self):
obj = self.__class__(self.settings, self.reporter,
- **self.attributes)
+ **self.attributes)
obj.source = self.source
obj.line = self.line
return obj
@@ -1796,7 +1797,7 @@ class system_message(Special, BackLinkable, PreBibliographic, Element):
def astext(self):
line = self.get('line', '')
return '%s:%s: (%s/%s) %s' % (self['source'], line, self['type'],
- self['level'], Element.astext(self))
+ self['level'], Element.astext(self))
class pending(Special, Invisible, Element):
@@ -1839,19 +1840,19 @@ class pending(Special, Invisible, Element):
"""Detail data (dictionary) required by the pending operation."""
def pformat(self, indent=' ', level=0):
- internals = [
- '.. internal attributes:',
- ' .transform: %s.%s' % (self.transform.__module__,
- self.transform.__name__),
- ' .details:']
+ internals = ['.. internal attributes:',
+ ' .transform: %s.%s' % (self.transform.__module__,
+ self.transform.__name__),
+ ' .details:']
details = sorted(self.details.items())
for key, value in details:
if isinstance(value, Node):
internals.append('%7s%s:' % ('', key))
internals.extend(['%9s%s' % ('', line)
for line in value.pformat().splitlines()])
- elif value and isinstance(value, list) \
- and isinstance(value[0], Node):
+ elif (value
+ and isinstance(value, list)
+ and isinstance(value[0], Node)):
internals.append('%7s%s:' % ('', key))
for v in value:
internals.extend(['%9s%s' % ('', line)
@@ -1864,7 +1865,7 @@ class pending(Special, Invisible, Element):
def copy(self):
obj = self.__class__(self.transform, self.details, self.rawsource,
- **self.attributes)
+ **self.attributes)
obj._document = self._document
obj.source = self.source
obj.line = self.line
@@ -2229,8 +2230,8 @@ def make_id(string):
id = id.translate(_non_id_translate)
# get rid of non-ascii characters.
# 'ascii' lowercase to prevent problems with turkish locale.
- id = unicodedata.normalize('NFKD', id).\
- encode('ascii', 'ignore').decode('ascii')
+ id = unicodedata.normalize(
+ 'NFKD', id).encode('ascii', 'ignore').decode('ascii')
# shrink runs of whitespace and replace by hyphen
id = _non_id_chars.sub('-', ' '.join(id.split()))
id = _non_id_at_ends.sub('', id)
diff --git a/docutils/docutils/parsers/__init__.py b/docutils/docutils/parsers/__init__.py
index 657b540ff..5fc968672 100644
--- a/docutils/docutils/parsers/__init__.py
+++ b/docutils/docutils/parsers/__init__.py
@@ -39,7 +39,8 @@ class Parser(Component):
['--line-length-limit'],
{'metavar': '<length>', 'type': 'int', 'default': 10000,
'validator': frontend.validate_nonnegative_int}),
- ))
+ )
+ )
component_type = 'parser'
config_section = 'parsers'
diff --git a/docutils/docutils/parsers/recommonmark_wrapper.py b/docutils/docutils/parsers/recommonmark_wrapper.py
index 00779e5b2..2a1bdd286 100644
--- a/docutils/docutils/parsers/recommonmark_wrapper.py
+++ b/docutils/docutils/parsers/recommonmark_wrapper.py
@@ -44,6 +44,11 @@ except ImportError:
pending_xref = nodes.pending
+# auxiliary function for `document.findall()`
+def is_literal(node):
+ return isinstance(node, (nodes.literal, nodes.literal_block))
+
+
class Parser(CommonMarkParser):
"""MarkDown parser based on recommonmark.
@@ -94,8 +99,7 @@ class Parser(CommonMarkParser):
i += 1
# add "code" class argument to literal elements (inline and block)
- for node in document.findall(lambda n: isinstance(n,
- (nodes.literal, nodes.literal_block))):
+ for node in document.findall(is_literal):
if 'code' not in node['classes']:
node['classes'].append('code')
# move "language" argument to classes
diff --git a/docutils/docutils/parsers/rst/__init__.py b/docutils/docutils/parsers/rst/__init__.py
index ccfb954d0..783175223 100644
--- a/docutils/docutils/parsers/rst/__init__.py
+++ b/docutils/docutils/parsers/rst/__init__.py
@@ -143,7 +143,8 @@ class Parser(docutils.parsers.Parser):
['--character-level-inline-markup'],
{'action': 'store_true', 'default': False,
'dest': 'character_level_inline_markup'}),
- ))
+ )
+ )
config_section = 'restructuredtext parser'
config_section_dependencies = ('parsers',)
diff --git a/docutils/docutils/parsers/rst/directives/body.py b/docutils/docutils/parsers/rst/directives/body.py
index 4adfdff58..c2dc2464f 100644
--- a/docutils/docutils/parsers/rst/directives/body.py
+++ b/docutils/docutils/parsers/rst/directives/body.py
@@ -45,7 +45,7 @@ class BasePseudoSection(Directive):
textnodes, more_messages = self.state.inline_text(
self.options['subtitle'], self.lineno)
titles.append(nodes.subtitle(self.options['subtitle'], '',
- *textnodes))
+ *textnodes))
messages.extend(more_messages)
else:
titles = []
@@ -137,7 +137,7 @@ class CodeBlock(Directive):
option_spec = {'class': directives.class_option,
'name': directives.unchanged,
'number-lines': directives.unchanged # integer or None
- }
+ }
has_content = True
def run(self):
@@ -193,9 +193,10 @@ class CodeBlock(Directive):
class MathBlock(Directive):
option_spec = {'class': directives.class_option,
- 'name': directives.unchanged}
+ 'name': directives.unchanged,
## TODO: Add Sphinx' ``mathbase.py`` option 'nowrap'?
# 'nowrap': directives.flag,
+ }
has_content = True
def run(self):
diff --git a/docutils/docutils/parsers/rst/directives/misc.py b/docutils/docutils/parsers/rst/directives/misc.py
index 3722a38bb..4577b0fd7 100644
--- a/docutils/docutils/parsers/rst/directives/misc.py
+++ b/docutils/docutils/parsers/rst/directives/misc.py
@@ -80,7 +80,7 @@ class Include(Directive):
(self.name, path))
except OSError as error:
raise self.severe('Problems with "%s" directive path:\n%s.' %
- (self.name, io.error_string(error)))
+ (self.name, io.error_string(error)))
else:
self.state.document.settings.record_dependencies.add(path)
@@ -128,8 +128,9 @@ class Include(Directive):
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
- literal_block = nodes.literal_block(rawtext, source=path,
- classes=self.options.get('class', []))
+ literal_block = nodes.literal_block(
+ rawtext, source=path,
+ classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if 'number-lines' in self.options:
@@ -260,7 +261,7 @@ class Raw(Directive):
text = raw_file.read()
except UnicodeError as error:
raise self.severe('Problem with "%s" directive:\n%s'
- % (self.name, io.error_string(error)))
+ % (self.name, io.error_string(error)))
attributes['source'] = path
elif 'url' in self.options:
source = self.options['url']
@@ -273,7 +274,9 @@ class Raw(Directive):
raw_text = urlopen(source).read()
except (URLError, OSError) as error:
raise self.severe('Problems with "%s" directive URL "%s":\n%s.'
- % (self.name, self.options['url'], io.error_string(error)))
+ % (self.name,
+ self.options['url'],
+ io.error_string(error)))
raw_file = io.StringInput(source=raw_text, source_path=source,
encoding=encoding,
error_handler=e_handler)
@@ -289,7 +292,7 @@ class Raw(Directive):
raw_node = nodes.raw('', text, classes=self.options.get('class', []),
**attributes)
(raw_node.source,
- raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
+ raw_node.line) = self.state_machine.get_source_and_line(self.lineno)
return [raw_node]
@@ -364,7 +367,7 @@ class Unicode(Directive):
decoded = directives.unicode_code(code)
except ValueError as error:
raise self.error('Invalid character code: %s\n%s'
- % (code, io.error_string(error)))
+ % (code, io.error_string(error)))
element += nodes.Text(decoded)
return element.children
@@ -444,10 +447,10 @@ class Role(Directive):
'supported (specified by "%r" role).' % (self.name, base_role))
try:
converted_role = convert_directive_function(base_role)
- (arguments, options, content, content_offset) = (
- self.state.parse_directive_block(
- self.content[1:], self.content_offset, converted_role,
- option_presets={}))
+ (arguments, options, content, content_offset
+ ) = self.state.parse_directive_block(
+ self.content[1:], self.content_offset,
+ converted_role, option_presets={})
except states.MarkupError as detail:
error = self.reporter.error(
'Error in "%s" directive:\n%s.' % (self.name, detail),
@@ -458,8 +461,9 @@ class Role(Directive):
try:
options['class'] = directives.class_option(new_role_name)
except ValueError as detail:
- error = self.reporter.error('Invalid argument '
- 'for "%s" directive:\n%s.' % (self.name, detail),
+ error = self.reporter.error(
+ 'Invalid argument for "%s" directive:\n%s.'
+ % (self.name, detail),
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
return messages + [error]
@@ -516,8 +520,8 @@ class MetaBody(states.SpecializedBody):
def parsemeta(self, match):
name = self.parse_field_marker(match)
name = utils.unescape(utils.escape2null(name))
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
node = nodes.meta()
node['content'] = utils.unescape(utils.escape2null(
' '.join(indented)))
diff --git a/docutils/docutils/parsers/rst/languages/ar.py b/docutils/docutils/parsers/rst/languages/ar.py
index f111828c9..722b0befa 100644
--- a/docutils/docutils/parsers/rst/languages/ar.py
+++ b/docutils/docutils/parsers/rst/languages/ar.py
@@ -15,57 +15,57 @@ reStructuredText.
__docformat__ = 'reStructuredText'
directives = {
- # language-dependent: fixed
- 'تنبيه': 'attention',
- 'احتیاط': 'caution',
- 'كود': 'code',
- 'خطر': 'danger',
- 'خطأ': 'error',
- 'تلميح': 'hint',
- 'مهم': 'important',
- 'ملاحظة': 'note',
- 'نصيحة': 'tip',
- 'تحذير': 'warning',
- 'تذكير': 'admonition',
- 'شريط-جانبي': 'sidebar',
- 'موضوع': 'topic',
- 'قالب-سطري': 'line-block',
- 'لفظ-حرفي': 'parsed-literal',
- 'معيار': 'rubric',
- 'فكرة-الكتاب': 'epigraph',
- 'تمييز': 'highlights',
- 'نقل-قول': 'pull-quote',
- 'ترکیب': 'compound',
- 'وعاء': 'container',
- #'questions': 'questions',
- 'جدول': 'table',
- 'جدول-csv': 'csv-table',
- 'جدول-قوائم': 'list-table',
- #'qa': 'questions',
- #'faq': 'questions',
- 'ميتا': 'meta',
- 'رياضيات': 'math',
- #'imagemap': 'imagemap',
- 'صورة': 'image',
- 'رسم-توضيحي': 'figure',
- 'تضمين': 'include',
- 'خام': 'raw',
- 'تبديل': 'replace',
- 'یونیکد': 'unicode',
- 'تاریخ': 'date',
- 'كائن': 'class',
- 'قانون': 'role',
- 'قانون-افتراضي': 'default-role',
- 'عنوان': 'title',
- 'المحتوى': 'contents',
- 'رقم-الفصل': 'sectnum',
- 'رقم-القسم': 'sectnum',
- 'رأس-الصفحة': 'header',
- 'هامش': 'footer',
- #'footnotes': 'footnotes',
- #'citations': 'citations',
- '': 'target-notes',
- }
+ # language-dependent: fixed
+ 'تنبيه': 'attention',
+ 'احتیاط': 'caution',
+ 'كود': 'code',
+ 'خطر': 'danger',
+ 'خطأ': 'error',
+ 'تلميح': 'hint',
+ 'مهم': 'important',
+ 'ملاحظة': 'note',
+ 'نصيحة': 'tip',
+ 'تحذير': 'warning',
+ 'تذكير': 'admonition',
+ 'شريط-جانبي': 'sidebar',
+ 'موضوع': 'topic',
+ 'قالب-سطري': 'line-block',
+ 'لفظ-حرفي': 'parsed-literal',
+ 'معيار': 'rubric',
+ 'فكرة-الكتاب': 'epigraph',
+ 'تمييز': 'highlights',
+ 'نقل-قول': 'pull-quote',
+ 'ترکیب': 'compound',
+ 'وعاء': 'container',
+ #'questions': 'questions',
+ 'جدول': 'table',
+ 'جدول-csv': 'csv-table',
+ 'جدول-قوائم': 'list-table',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ 'ميتا': 'meta',
+ 'رياضيات': 'math',
+ #'imagemap': 'imagemap',
+ 'صورة': 'image',
+ 'رسم-توضيحي': 'figure',
+ 'تضمين': 'include',
+ 'خام': 'raw',
+ 'تبديل': 'replace',
+ 'یونیکد': 'unicode',
+ 'تاریخ': 'date',
+ 'كائن': 'class',
+ 'قانون': 'role',
+ 'قانون-افتراضي': 'default-role',
+ 'عنوان': 'title',
+ 'المحتوى': 'contents',
+ 'رقم-الفصل': 'sectnum',
+ 'رقم-القسم': 'sectnum',
+ 'رأس-الصفحة': 'header',
+ 'هامش': 'footer',
+ #'footnotes': 'footnotes',
+ #'citations': 'citations',
+ '': 'target-notes',
+}
"""Arabic name to registered (in directives/__init__.py) directive name
mapping."""
@@ -93,6 +93,7 @@ roles = {
'منبع-uri': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
- 'خام': 'raw',}
+ 'خام': 'raw',
+}
"""Mapping of Arabic role names to canonical role names for interpreted text.
"""
diff --git a/docutils/docutils/parsers/rst/languages/fa.py b/docutils/docutils/parsers/rst/languages/fa.py
index 65eb604ce..560a02fb9 100644
--- a/docutils/docutils/parsers/rst/languages/fa.py
+++ b/docutils/docutils/parsers/rst/languages/fa.py
@@ -16,59 +16,59 @@ __docformat__ = 'reStructuredText'
directives = {
- # language-dependent: fixed
- 'توجه': 'attention',
- 'احتیاط': 'caution',
- 'کد': 'code',
- 'بلوک-کد': 'code',
- 'کد-منبع': 'code',
- 'خطر': 'danger',
- 'خطا': 'error',
- 'راهنما': 'hint',
- 'مهم': 'important',
- 'یادداشت': 'note',
- 'نکته': 'tip',
- 'اخطار': 'warning',
- 'تذکر': 'admonition',
- 'نوار-کناری': 'sidebar',
- 'موضوع': 'topic',
- 'بلوک-خط': 'line-block',
- 'تلفظ-پردازش-شده': 'parsed-literal',
- 'سر-فصل': 'rubric',
- 'کتیبه': 'epigraph',
- 'نکات-برجسته': 'highlights',
- 'نقل-قول': 'pull-quote',
- 'ترکیب': 'compound',
- 'ظرف': 'container',
- #'questions': 'questions',
- 'جدول': 'table',
- 'جدول-csv': 'csv-table',
- 'جدول-لیست': 'list-table',
- #'qa': 'questions',
- #'faq': 'questions',
- 'متا': 'meta',
- 'ریاضی': 'math',
- #'imagemap': 'imagemap',
- 'تصویر': 'image',
- 'شکل': 'figure',
- 'شامل': 'include',
- 'خام': 'raw',
- 'جایگزین': 'replace',
- 'یونیکد': 'unicode',
- 'تاریخ': 'date',
- 'کلاس': 'class',
- 'قانون': 'role',
- 'قانون-پیش‌فرض': 'default-role',
- 'عنوان': 'title',
- 'محتوا': 'contents',
- 'شماره-فصل': 'sectnum',
- 'شماره‌گذاری-فصل': 'sectnum',
- 'سرآیند': 'header',
- 'پاصفحه': 'footer',
- #'footnotes': 'footnotes',
- #'citations': 'citations',
- 'یادداشت-هدف': 'target-notes',
- }
+ # language-dependent: fixed
+ 'توجه': 'attention',
+ 'احتیاط': 'caution',
+ 'کد': 'code',
+ 'بلوک-کد': 'code',
+ 'کد-منبع': 'code',
+ 'خطر': 'danger',
+ 'خطا': 'error',
+ 'راهنما': 'hint',
+ 'مهم': 'important',
+ 'یادداشت': 'note',
+ 'نکته': 'tip',
+ 'اخطار': 'warning',
+ 'تذکر': 'admonition',
+ 'نوار-کناری': 'sidebar',
+ 'موضوع': 'topic',
+ 'بلوک-خط': 'line-block',
+ 'تلفظ-پردازش-شده': 'parsed-literal',
+ 'سر-فصل': 'rubric',
+ 'کتیبه': 'epigraph',
+ 'نکات-برجسته': 'highlights',
+ 'نقل-قول': 'pull-quote',
+ 'ترکیب': 'compound',
+ 'ظرف': 'container',
+ #'questions': 'questions',
+ 'جدول': 'table',
+ 'جدول-csv': 'csv-table',
+ 'جدول-لیست': 'list-table',
+ #'qa': 'questions',
+ #'faq': 'questions',
+ 'متا': 'meta',
+ 'ریاضی': 'math',
+ #'imagemap': 'imagemap',
+ 'تصویر': 'image',
+ 'شکل': 'figure',
+ 'شامل': 'include',
+ 'خام': 'raw',
+ 'جایگزین': 'replace',
+ 'یونیکد': 'unicode',
+ 'تاریخ': 'date',
+ 'کلاس': 'class',
+ 'قانون': 'role',
+ 'قانون-پیش‌فرض': 'default-role',
+ 'عنوان': 'title',
+ 'محتوا': 'contents',
+ 'شماره-فصل': 'sectnum',
+ 'شماره‌گذاری-فصل': 'sectnum',
+ 'سرآیند': 'header',
+ 'پاصفحه': 'footer',
+ #'footnotes': 'footnotes',
+ #'citations': 'citations',
+ 'یادداشت-هدف': 'target-notes',
+}
"""Persian name to registered (in directives/__init__.py) directive name
mapping."""
@@ -96,6 +96,7 @@ roles = {
'منبع-uri': 'uri-reference',
'uri': 'uri-reference',
'url': 'uri-reference',
- 'خام': 'raw',}
+ 'خام': 'raw',
+}
"""Mapping of Persian role names to canonical role names for interpreted text.
"""
diff --git a/docutils/docutils/parsers/rst/languages/fr.py b/docutils/docutils/parsers/rst/languages/fr.py
index b476ed01f..1006850a6 100644
--- a/docutils/docutils/parsers/rst/languages/fr.py
+++ b/docutils/docutils/parsers/rst/languages/fr.py
@@ -90,7 +90,7 @@ roles = {
'emphase': 'emphasis',
'fort': 'strong',
'litt\u00E9ral': 'literal',
- 'math (translation required)': 'math',
+ 'math (translation required)': 'math',
'nomm\u00E9e-r\u00E9f\u00E9rence': 'named-reference',
'anonyme-r\u00E9f\u00E9rence': 'anonymous-reference',
'note-r\u00E9f\u00E9rence': 'footnote-reference',
diff --git a/docutils/docutils/parsers/rst/languages/he.py b/docutils/docutils/parsers/rst/languages/he.py
index aca759033..b9fc47247 100644
--- a/docutils/docutils/parsers/rst/languages/he.py
+++ b/docutils/docutils/parsers/rst/languages/he.py
@@ -54,7 +54,7 @@ directives = {
'replace': 'replace',
'unicode': 'unicode',
'date': 'date',
- '\u05e1\u05d2\u05e0\u05d5\u05df': 'class',
+ '\u05e1\u05d2\u05e0\u05d5\u05df': 'class',
'role': 'role',
'default-role': 'default-role',
'title': 'title',
diff --git a/docutils/docutils/parsers/rst/languages/it.py b/docutils/docutils/parsers/rst/languages/it.py
index 6de9d013a..359b05ba6 100644
--- a/docutils/docutils/parsers/rst/languages/it.py
+++ b/docutils/docutils/parsers/rst/languages/it.py
@@ -84,7 +84,7 @@ roles = {
'enfasi': 'emphasis',
'forte': 'strong',
'letterale': 'literal',
- 'math (translation required)': 'math',
+ 'math (translation required)': 'math',
'riferimento-con-nome': 'named-reference',
'riferimento-anonimo': 'anonymous-reference',
'riferimento-nota': 'footnote-reference',
diff --git a/docutils/docutils/parsers/rst/languages/ru.py b/docutils/docutils/parsers/rst/languages/ru.py
index a72401763..51c50634e 100644
--- a/docutils/docutils/parsers/rst/languages/ru.py
+++ b/docutils/docutils/parsers/rst/languages/ru.py
@@ -15,74 +15,74 @@ reStructuredText.
__docformat__ = 'reStructuredText'
directives = {
- 'блок-строк': 'line-block',
- 'meta': 'meta',
- 'математика': 'math',
- 'обработанный-литерал': 'parsed-literal',
- 'выделенная-цитата': 'pull-quote',
- 'код': 'code',
- 'compound (translation required)': 'compound',
- 'контейнер': 'container',
- 'таблица': 'table',
- 'csv-table (translation required)': 'csv-table',
- 'list-table (translation required)': 'list-table',
- 'сырой': 'raw',
- 'замена': 'replace',
- 'тестовая-директива-restructuredtext': 'restructuredtext-test-directive',
- 'целевые-сноски': 'target-notes',
- 'unicode': 'unicode',
- 'дата': 'date',
- 'боковая-полоса': 'sidebar',
- 'важно': 'important',
- 'включать': 'include',
- 'внимание': 'attention',
- 'выделение': 'highlights',
- 'замечание': 'admonition',
- 'изображение': 'image',
- 'класс': 'class',
- 'роль': 'role',
- 'default-role (translation required)': 'default-role',
- 'титул': 'title',
- 'номер-раздела': 'sectnum',
- 'нумерация-разделов': 'sectnum',
- 'опасно': 'danger',
- 'осторожно': 'caution',
- 'ошибка': 'error',
- 'подсказка': 'tip',
- 'предупреждение': 'warning',
- 'примечание': 'note',
- 'рисунок': 'figure',
- 'рубрика': 'rubric',
- 'совет': 'hint',
- 'содержание': 'contents',
- 'тема': 'topic',
- 'эпиграф': 'epigraph',
- 'header (translation required)': 'header',
- 'footer (translation required)': 'footer',}
+ 'блок-строк': 'line-block',
+ 'meta': 'meta',
+ 'математика': 'math',
+ 'обработанный-литерал': 'parsed-literal',
+ 'выделенная-цитата': 'pull-quote',
+ 'код': 'code',
+ 'compound (translation required)': 'compound',
+ 'контейнер': 'container',
+ 'таблица': 'table',
+ 'csv-table (translation required)': 'csv-table',
+ 'list-table (translation required)': 'list-table',
+ 'сырой': 'raw',
+ 'замена': 'replace',
+ 'тестовая-директива-restructuredtext': 'restructuredtext-test-directive',
+ 'целевые-сноски': 'target-notes',
+ 'unicode': 'unicode',
+ 'дата': 'date',
+ 'боковая-полоса': 'sidebar',
+ 'важно': 'important',
+ 'включать': 'include',
+ 'внимание': 'attention',
+ 'выделение': 'highlights',
+ 'замечание': 'admonition',
+ 'изображение': 'image',
+ 'класс': 'class',
+ 'роль': 'role',
+ 'default-role (translation required)': 'default-role',
+ 'титул': 'title',
+ 'номер-раздела': 'sectnum',
+ 'нумерация-разделов': 'sectnum',
+ 'опасно': 'danger',
+ 'осторожно': 'caution',
+ 'ошибка': 'error',
+ 'подсказка': 'tip',
+ 'предупреждение': 'warning',
+ 'примечание': 'note',
+ 'рисунок': 'figure',
+ 'рубрика': 'rubric',
+ 'совет': 'hint',
+ 'содержание': 'contents',
+ 'тема': 'topic',
+ 'эпиграф': 'epigraph',
+ 'header (translation required)': 'header',
+ 'footer (translation required)': 'footer',}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
- 'акроним': 'acronym',
- 'код': 'code',
- 'анонимная-ссылка': 'anonymous-reference',
- 'буквально': 'literal',
- 'математика': 'math',
- 'верхний-индекс': 'superscript',
- 'выделение': 'emphasis',
- 'именованная-ссылка': 'named-reference',
- 'индекс': 'index',
- 'нижний-индекс': 'subscript',
- 'сильное-выделение': 'strong',
- 'сокращение': 'abbreviation',
- 'ссылка-замена': 'substitution-reference',
- 'ссылка-на-pep': 'pep-reference',
- 'ссылка-на-rfc': 'rfc-reference',
- 'ссылка-на-uri': 'uri-reference',
- 'ссылка-на-заглавие': 'title-reference',
- 'ссылка-на-сноску': 'footnote-reference',
- 'цитатная-ссылка': 'citation-reference',
- 'цель': 'target',
- 'сырой': 'raw',}
+ 'акроним': 'acronym',
+ 'код': 'code',
+ 'анонимная-ссылка': 'anonymous-reference',
+ 'буквально': 'literal',
+ 'математика': 'math',
+ 'верхний-индекс': 'superscript',
+ 'выделение': 'emphasis',
+ 'именованная-ссылка': 'named-reference',
+ 'индекс': 'index',
+ 'нижний-индекс': 'subscript',
+ 'сильное-выделение': 'strong',
+ 'сокращение': 'abbreviation',
+ 'ссылка-замена': 'substitution-reference',
+ 'ссылка-на-pep': 'pep-reference',
+ 'ссылка-на-rfc': 'rfc-reference',
+ 'ссылка-на-uri': 'uri-reference',
+ 'ссылка-на-заглавие': 'title-reference',
+ 'ссылка-на-сноску': 'footnote-reference',
+ 'цитатная-ссылка': 'citation-reference',
+ 'цель': 'target',
+ 'сырой': 'raw',}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
diff --git a/docutils/docutils/parsers/rst/languages/sk.py b/docutils/docutils/parsers/rst/languages/sk.py
index e1ca5fae9..11c404e29 100644
--- a/docutils/docutils/parsers/rst/languages/sk.py
+++ b/docutils/docutils/parsers/rst/languages/sk.py
@@ -82,7 +82,7 @@ roles = {
'emphasis (translation required)': 'emphasis',
'strong (translation required)': 'strong',
'literal (translation required)': 'literal',
- 'math (translation required)': 'math',
+ 'math (translation required)': 'math',
'named-reference (translation required)': 'named-reference',
'anonymous-reference (translation required)': 'anonymous-reference',
'footnote-reference (translation required)': 'footnote-reference',
diff --git a/docutils/docutils/parsers/rst/roles.py b/docutils/docutils/parsers/rst/roles.py
index bdb0980b5..371dfd110 100644
--- a/docutils/docutils/parsers/rst/roles.py
+++ b/docutils/docutils/parsers/rst/roles.py
@@ -229,7 +229,8 @@ class CustomRole:
content = content or []
delimiter = ['\n'] if supplied_content and content else []
return self.base_role(role, rawtext, text, lineno, inliner,
- options=opts, content=supplied_content+delimiter+content)
+ options=opts,
+ content=supplied_content+delimiter+content)
def generic_custom_role(role, rawtext, text, lineno, inliner,
diff --git a/docutils/docutils/parsers/rst/states.py b/docutils/docutils/parsers/rst/states.py
index 215dc8559..c57dfd436 100644
--- a/docutils/docutils/parsers/rst/states.py
+++ b/docutils/docutils/parsers/rst/states.py
@@ -486,7 +486,8 @@ class Inliner:
args.update(vars(self.__class__))
parts = ('initial_inline', start_string_prefix, '',
- [('start', '', self.non_whitespace_after, # simple start-strings
+ [
+ ('start', '', self.non_whitespace_after, # simple start-strings
[r'\*\*', # strong
r'\*(?!\*)', # emphasis but not strong
r'``', # literal
@@ -500,7 +501,8 @@ class Inliner:
[r'[0-9]+', # manually numbered
r'\#(%s)?' % self.simplename, # auto-numbered (w/ label?)
r'\*', # auto-symbol
- r'(?P<citationlabel>%s)' % self.simplename] # citation reference
+ r'(?P<citationlabel>%s)' % self.simplename, # citation reference
+ ]
)
]
),
@@ -510,7 +512,7 @@ class Inliner:
['`(?!`)'] # but not literal
)
]
- )
+ )
self.start_string_prefix = start_string_prefix
self.end_string_suffix = end_string_suffix
self.parts = parts
@@ -809,8 +811,9 @@ class Inliner:
aliastext = match.group(2)
rawaliastext = unescape(aliastext, True)
underscore_escaped = rawaliastext.endswith(r'\_')
- if aliastext.endswith('_') and not (underscore_escaped
- or self.patterns.uri.match(aliastext)):
+ if (aliastext.endswith('_')
+ and not (underscore_escaped
+ or self.patterns.uri.match(aliastext))):
aliastype = 'name'
alias = normalize_name(unescape(aliastext[:-1]))
target = nodes.target(match.group(1), refname=alias)
@@ -1162,8 +1165,8 @@ class Body(RSTState):
def indent(self, match, context, next_state):
"""Block quote."""
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_indented()
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_indented()
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
@@ -1174,8 +1177,8 @@ class Body(RSTState):
elements = []
while indented:
blockquote = nodes.block_quote(rawsource='\n'.join(indented))
- (blockquote.source, blockquote.line) = \
- self.state_machine.get_source_and_line(line_offset+1)
+ (blockquote.source, blockquote.line
+ ) = self.state_machine.get_source_and_line(line_offset+1)
(blockquote_lines,
attribution_lines,
attribution_offset,
@@ -1470,8 +1473,8 @@ class Body(RSTState):
name = self.parse_field_marker(match)
src, srcline = self.state_machine.get_source_and_line()
lineno = self.state_machine.abs_line_number()
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
field_node = nodes.field()
field_node.source = src
field_node.line = srcline
@@ -1495,16 +1498,17 @@ class Body(RSTState):
def option_marker(self, match, context, next_state):
"""Option list item."""
optionlist = nodes.option_list()
- (optionlist.source, optionlist.line) = self.state_machine.get_source_and_line()
+ (optionlist.source, optionlist.line
+ ) = self.state_machine.get_source_and_line()
try:
listitem, blank_finish = self.option_list_item(match)
except MarkupError as error:
# This shouldn't happen; pattern won't match.
- msg = self.reporter.error('Invalid option list marker: %s' %
- error)
+ msg = self.reporter.error('Invalid option list marker: %s'
+ % error)
self.parent += msg
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
elements = self.block_quote(indented, line_offset)
self.parent += elements
if not blank_finish:
@@ -1526,8 +1530,8 @@ class Body(RSTState):
def option_list_item(self, match):
offset = self.state_machine.abs_line_offset()
options = self.parse_option_marker(match)
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
if not indented: # not an option list item
self.goto_line(offset)
raise statemachine.TransitionCorrection('text')
@@ -1617,9 +1621,9 @@ class Body(RSTState):
def line_block_line(self, match, lineno):
"""Return one line element of a line_block."""
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end(),
- until_blank=True)
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end(),
+ until_blank=True)
text = '\n'.join(indented)
text_nodes, messages = self.inline_text(text, lineno)
line = nodes.line(text, '', *text_nodes)
@@ -1881,8 +1885,8 @@ class Body(RSTState):
def footnote(self, match):
src, srcline = self.state_machine.get_source_and_line()
- indented, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
footnote = nodes.footnote('\n'.join(indented))
@@ -1912,8 +1916,8 @@ class Body(RSTState):
def citation(self, match):
src, srcline = self.state_machine.get_source_and_line()
- indented, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
label = match.group(1)
name = normalize_name(label)
citation = nodes.citation('\n'.join(indented))
@@ -1930,9 +1934,9 @@ class Body(RSTState):
def hyperlink_target(self, match):
pattern = self.explicit.patterns.target
lineno = self.state_machine.abs_line_number()
- block, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(
- match.end(), until_blank=True, strip_indent=False)
+ (block, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(
+ match.end(), until_blank=True, strip_indent=False)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
escaped = block[0]
@@ -2015,9 +2019,9 @@ class Body(RSTState):
def substitution_def(self, match):
pattern = self.explicit.patterns.substitution
src, srcline = self.state_machine.get_source_and_line()
- block, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end(),
- strip_indent=False)
+ (block, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end(),
+ strip_indent=False)
blocktext = (match.string[:match.end()] + '\n'.join(block))
block.disconnect()
escaped = escape2null(block[0].rstrip())
@@ -2127,9 +2131,9 @@ class Body(RSTState):
directive = convert_directive_function(directive)
lineno = self.state_machine.abs_line_number()
initial_line_offset = self.state_machine.line_offset
- indented, indent, line_offset, blank_finish \
- = self.state_machine.get_first_known_indented(match.end(),
- strip_top=0)
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end(),
+ strip_top=0)
block_text = '\n'.join(self.state_machine.input_lines[
initial_line_offset : self.state_machine.line_offset + 1])
try:
@@ -2277,12 +2281,12 @@ class Body(RSTState):
def unknown_directive(self, type_name):
lineno = self.state_machine.abs_line_number()
- indented, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(0, strip_indent=False)
+ (indented, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(0, strip_indent=False)
text = '\n'.join(indented)
- error = self.reporter.error(
- 'Unknown directive type "%s".' % type_name,
- nodes.literal_block(text, text), line=lineno)
+ error = self.reporter.error('Unknown directive type "%s".' % type_name,
+ nodes.literal_block(text, text),
+ line=lineno)
return [error], blank_finish
def comment(self, match):
@@ -2294,8 +2298,8 @@ class Body(RSTState):
# cf. parsers.rst.directives.misc.Include
self.document.include_log.pop()
return [], True
- indented, indent, offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
while indented and not indented[-1].strip():
indented.trim_end()
text = '\n'.join(indented)
@@ -2393,9 +2397,9 @@ class Body(RSTState):
def anonymous_target(self, match):
lineno = self.state_machine.abs_line_number()
- block, indent, offset, blank_finish \
- = self.state_machine.get_first_known_indented(match.end(),
- until_blank=True)
+ (block, indent, offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end(),
+ until_blank=True)
blocktext = match.string[:match.end()] + '\n'.join(block)
block = [escape2null(line) for line in block]
target = self.make_target(block, blocktext, lineno, '')
@@ -2461,9 +2465,9 @@ class RFC2822Body(Body):
def rfc2822_field(self, match):
name = match.string[:match.string.find(':')]
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end(),
- until_blank=True)
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end(),
+ until_blank=True)
fieldnode = nodes.field()
fieldnode += nodes.field_name(name, name)
fieldbody = nodes.field_body('\n'.join(indented))
@@ -2755,8 +2759,10 @@ class Text(RSTState):
raise statemachine.TransitionCorrection('text')
else:
blocktext = context[0] + '\n' + self.state_machine.line
- msg = self.reporter.warning('Title underline too short.',
- nodes.literal_block(blocktext, blocktext), line=lineno)
+ msg = self.reporter.warning(
+ 'Title underline too short.',
+ nodes.literal_block(blocktext, blocktext),
+ line=lineno)
messages.append(msg)
if not self.state_machine.match_titles:
blocktext = context[0] + '\n' + self.state_machine.line
@@ -2766,7 +2772,8 @@ class Text(RSTState):
# if the error is in a table (try with test_tables.py)?
# print("get_source_and_line", srcline)
# print("abs_line_number", self.state_machine.abs_line_number())
- msg = self.reporter.severe('Unexpected section title.',
+ msg = self.reporter.severe(
+ 'Unexpected section title.',
nodes.literal_block(blocktext, blocktext),
source=src, line=srcline)
self.parent += messages
@@ -2801,8 +2808,8 @@ class Text(RSTState):
def literal_block(self):
"""Return a list of nodes."""
- indented, indent, offset, blank_finish = \
- self.state_machine.get_indented()
+ (indented, indent, offset, blank_finish
+ ) = self.state_machine.get_indented()
while indented and not indented[-1].strip():
indented.trim_end()
if not indented:
@@ -2829,8 +2836,8 @@ class Text(RSTState):
return parent_node.children
def definition_list_item(self, termline):
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_indented()
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_indented()
itemnode = nodes.definition_list_item(
'\n'.join(termline + list(indented)))
lineno = self.state_machine.abs_line_number() - 1
@@ -3081,8 +3088,8 @@ class QuotedLiteralBlock(RSTState):
else:
self.parent += self.reporter.warning(
'Literal block expected; none found.',
- line=self.state_machine.abs_line_number())
- # src not available, because statemachine.input_lines is empty
+ line=self.state_machine.abs_line_number()
+ ) # src not available, statemachine.input_lines is empty
self.state_machine.previous_line()
self.parent += self.messages
return []
@@ -3116,7 +3123,7 @@ class QuotedLiteralBlock(RSTState):
if context:
self.messages.append(
self.reporter.error('Inconsistent literal block quoting.',
- line=self.state_machine.abs_line_number()))
+ line=self.state_machine.abs_line_number()))
self.state_machine.previous_line()
raise EOFError
diff --git a/docutils/docutils/parsers/rst/tableparser.py b/docutils/docutils/parsers/rst/tableparser.py
index f6a7e95c5..18a085cc0 100644
--- a/docutils/docutils/parsers/rst/tableparser.py
+++ b/docutils/docutils/parsers/rst/tableparser.py
@@ -36,8 +36,8 @@ class TableMarkupError(DataError):
"""
def __init__(self, *args, **kwargs):
- self.offset = kwargs.pop('offset', 0)
- DataError.__init__(self, *args)
+ self.offset = kwargs.pop('offset', 0)
+ DataError.__init__(self, *args)
class TableParser:
@@ -169,8 +169,9 @@ class GridTableParser(TableParser):
corners = [(0, 0)]
while corners:
top, left = corners.pop(0)
- if top == self.bottom or left == self.right \
- or top <= self.done[left]:
+ if (top == self.bottom
+ or left == self.right
+ or top <= self.done[left]):
continue
result = self.scan_cell(top, left)
if not result:
@@ -512,9 +513,9 @@ class SimpleTableParser(TableParser):
if new_end > main_end:
self.columns[-1] = (main_start, new_end)
elif line[end:nextstart].strip():
- raise TableMarkupError('Text in column margin '
- 'in table line %s.' % (first_line+offset+1),
- offset=first_line+offset)
+ raise TableMarkupError('Text in column margin in table '
+ 'line %s.' % (first_line+offset+1),
+ offset=first_line+offset)
offset += 1
columns.pop()
diff --git a/docutils/docutils/statemachine.py b/docutils/docutils/statemachine.py
index 94ab44d33..2a8856b95 100644
--- a/docutils/docutils/statemachine.py
+++ b/docutils/docutils/statemachine.py
@@ -482,8 +482,8 @@ class StateMachine:
type, value, module, line, function = _exception_data()
print('%s: %s' % (type, value), file=self._stderr)
print('input line %s' % (self.abs_line_number()), file=self._stderr)
- print(('module %s, line %s, function %s' %
- (module, line, function)), file=self._stderr)
+ print('module %s, line %s, function %s' % (module, line, function),
+ file=self._stderr)
def attach_observer(self, observer):
"""
@@ -632,7 +632,7 @@ class State:
"""Make and add transitions listed in `self.initial_transitions`."""
if self.initial_transitions:
names, transitions = self.make_transitions(
- self.initial_transitions)
+ self.initial_transitions)
self.add_transitions(names, transitions)
def add_transitions(self, names, transitions):
@@ -980,8 +980,8 @@ class StateWS(State):
Recursively run the registered state machine for indented blocks
(`self.indent_sm`).
"""
- indented, indent, line_offset, blank_finish = \
- self.state_machine.get_indented()
+ (indented, indent, line_offset, blank_finish
+ ) = self.state_machine.get_indented()
sm = self.indent_sm(debug=self.debug, **self.indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
@@ -994,10 +994,10 @@ class StateWS(State):
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
- indented, line_offset, blank_finish = \
- self.state_machine.get_known_indented(match.end())
+ (indented, line_offset, blank_finish
+ ) = self.state_machine.get_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
- **self.known_indent_sm_kwargs)
+ **self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
@@ -1011,10 +1011,10 @@ class StateWS(State):
blocks (`self.known_indent_sm`). The indent is the length of the
match, ``match.end()``.
"""
- indented, line_offset, blank_finish = \
- self.state_machine.get_first_known_indented(match.end())
+ (indented, line_offset, blank_finish
+ ) = self.state_machine.get_first_known_indented(match.end())
sm = self.known_indent_sm(debug=self.debug,
- **self.known_indent_sm_kwargs)
+ **self.known_indent_sm_kwargs)
results = sm.run(indented, input_offset=line_offset)
return context, next_state, results
diff --git a/docutils/docutils/transforms/peps.py b/docutils/docutils/transforms/peps.py
index 1204e5257..381da8d91 100644
--- a/docutils/docutils/transforms/peps.py
+++ b/docutils/docutils/transforms/peps.py
@@ -42,8 +42,8 @@ class Headers(Transform):
# @@@ replace these DataErrors with proper system messages
raise DataError('Document tree is empty.')
header = self.document[0]
- if not isinstance(header, nodes.field_list) or \
- 'rfc2822' not in header['classes']:
+ if (not isinstance(header, nodes.field_list)
+ or 'rfc2822' not in header['classes']):
raise DataError('Document does not begin with an RFC-2822 '
'header; it is not a PEP.')
pep = None
diff --git a/docutils/docutils/transforms/references.py b/docutils/docutils/transforms/references.py
index 1d3b81c78..b18966e6c 100644
--- a/docutils/docutils/transforms/references.py
+++ b/docutils/docutils/transforms/references.py
@@ -121,8 +121,7 @@ class AnonymousHyperlinks(Transform):
for node in self.document.findall(nodes.target):
if node.get('anonymous'):
anonymous_targets.append(node)
- if len(anonymous_refs) \
- != len(anonymous_targets):
+ if len(anonymous_refs) != len(anonymous_targets):
msg = self.document.reporter.error(
'Anonymous hyperlink mismatch: %s references but %s '
'targets.\nSee "backrefs" attribute for IDs.'
@@ -229,11 +228,10 @@ class IndirectHyperlinks(Transform):
return
reftarget = self.document.ids[reftarget_id]
reftarget.note_referenced_by(id=reftarget_id)
- if isinstance(reftarget, nodes.target) \
- and not reftarget.resolved and reftarget.hasattr('refname'):
+ if (isinstance(reftarget, nodes.target)
+ and not reftarget.resolved
+ and reftarget.hasattr('refname')):
if hasattr(target, 'multiply_indirect'):
- #and target.multiply_indirect):
- #del target.multiply_indirect
self.circular_indirect_reference(target)
return
target.multiply_indirect = 1
diff --git a/docutils/docutils/transforms/universal.py b/docutils/docutils/transforms/universal.py
index 945b5928e..3abc061a7 100644
--- a/docutils/docutils/transforms/universal.py
+++ b/docutils/docutils/transforms/universal.py
@@ -58,11 +58,11 @@ class Decorations(Transform):
# See https://sourceforge.net/p/docutils/patches/132/
# and https://reproducible-builds.org/specs/source-date-epoch/
settings = self.document.settings
- if settings.generator or settings.datestamp or settings.source_link \
- or settings.source_url:
+ if (settings.generator or settings.datestamp
+ or settings.source_link or settings.source_url):
text = []
- if settings.source_link and settings._source \
- or settings.source_url:
+ if (settings.source_link and settings._source
+ or settings.source_url):
if settings.source_url:
source = settings.source_url
else:
@@ -79,10 +79,11 @@ class Decorations(Transform):
text.extend([
nodes.Text('Generated by '),
nodes.reference('', 'Docutils',
- refuri='https://docutils.sourceforge.io/'),
+ refuri='https://docutils.sourceforge.io/'),
nodes.Text(' from '),
nodes.reference('', 'reStructuredText',
- refuri='https://docutils.sourceforge.io/rst.html'),
+ refuri='https://docutils.sourceforge.io/'
+ 'rst.html'),
nodes.Text(' source.\n')])
return [nodes.paragraph('', '', *text)]
else:
@@ -267,7 +268,7 @@ class SmartQuotes(Transform):
def apply(self):
smart_quotes = self.document.settings.setdefault('smart_quotes',
- False)
+ False)
if not smart_quotes:
return
try:
@@ -310,15 +311,17 @@ class SmartQuotes(Transform):
break
else: # language not supported: (keep ASCII quotes)
if lang not in self.unsupported_languages:
- self.document.reporter.warning('No smart quotes '
- 'defined for language "%s".'%lang, base_node=node)
+ self.document.reporter.warning(
+ 'No smart quotes defined for language "%s".' % lang,
+ base_node=node)
self.unsupported_languages.add(lang)
lang = ''
# Iterator educating quotes in plain text:
# (see "utils/smartquotes.py" for the attribute setting)
- teacher = smartquotes.educate_tokens(self.get_tokens(txtnodes),
- attr=self.smartquotes_action, language=lang)
+ teacher = smartquotes.educate_tokens(
+ self.get_tokens(txtnodes),
+ attr=self.smartquotes_action, language=lang)
for txtnode, newtext in zip(txtnodes, teacher):
txtnode.parent.replace(txtnode, nodes.Text(newtext))
diff --git a/docutils/docutils/utils/__init__.py b/docutils/docutils/utils/__init__.py
index 69b82e3fe..ad50d9582 100644
--- a/docutils/docutils/utils/__init__.py
+++ b/docutils/docutils/utils/__init__.py
@@ -286,8 +286,10 @@ def extract_options(field_list):
body = field[1]
if len(body) == 0:
data = None
- elif len(body) > 1 or not isinstance(body[0], nodes.paragraph) \
- or len(body[0]) != 1 or not isinstance(body[0][0], nodes.Text):
+ elif (len(body) > 1
+ or not isinstance(body[0], nodes.paragraph)
+ or len(body[0]) != 1
+ or not isinstance(body[0][0], nodes.Text)):
raise BadOptionDataError(
'extension option field body may contain\n'
'a single paragraph only (option "%s")' % name)
@@ -460,7 +462,7 @@ def relative_path(source, target):
If there is no common prefix, return the absolute path to `target`.
"""
source_parts = os.path.abspath(source or type(target)('dummy_file')
- ).split(os.sep)
+ ).split(os.sep)
target_parts = os.path.abspath(target).split(os.sep)
# Check first 2 parts because '/dir'.split('/') == ['', 'dir']:
if source_parts[:2] != target_parts[:2]:
@@ -628,8 +630,8 @@ east_asian_widths = {'W': 2, # Wide
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
- 'A': 1} # Ambiguous (s/b wide in East Asian context,
- # narrow otherwise, but that doesn't work)
+ 'A': 1, # Ambiguous (s/b wide in East Asian context,
+ } # narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_widt()` to character
column widths."""
@@ -639,17 +641,17 @@ def column_width(text):
Correct ``len(text)`` for wide East Asian and combining Unicode chars.
"""
width = sum(east_asian_widths[unicodedata.east_asian_width(c)]
- for c in text)
+ for c in text)
# correction for combining chars:
width -= len(find_combining_chars(text))
return width
def uniq(L):
- r = []
- for item in L:
- if item not in r:
- r.append(item)
- return r
+ r = []
+ for item in L:
+ if item not in r:
+ r.append(item)
+ return r
def normalize_language_tag(tag):
"""Return a list of normalized combinations for a `BCP 47` language tag.
diff --git a/docutils/docutils/utils/code_analyzer.py b/docutils/docutils/utils/code_analyzer.py
index 7050f5fdf..5edc8736f 100644
--- a/docutils/docutils/utils/code_analyzer.py
+++ b/docutils/docutils/utils/code_analyzer.py
@@ -57,12 +57,12 @@ class Lexer:
return
if not with_pygments:
raise LexerError('Cannot analyze code. '
- 'Pygments package not found.')
+ 'Pygments package not found.')
try:
self.lexer = get_lexer_by_name(self.language)
except pygments.util.ClassNotFound:
raise LexerError('Cannot analyze code. '
- 'No Pygments lexer found for "%s".' % language)
+ 'No Pygments lexer found for "%s".' % language)
# self.lexer.add_filter('tokenmerge')
# Since version 1.2. (released Jan 01, 2010) Pygments has a
# TokenMergeFilter. # ``self.merge(tokens)`` in __iter__ could
diff --git a/docutils/docutils/utils/error_reporting.py b/docutils/docutils/utils/error_reporting.py
index 64f54ed49..382251a94 100644
--- a/docutils/docutils/utils/error_reporting.py
+++ b/docutils/docutils/utils/error_reporting.py
@@ -105,7 +105,7 @@ class SafeString:
except UnicodeEncodeError:
if isinstance(self.data, Exception):
args = [str(SafeString(arg, self.encoding,
- self.encoding_errors))
+ self.encoding_errors))
for arg in self.data.args]
return ', '.join(args)
if isinstance(self.data, unicode):
@@ -135,14 +135,16 @@ class SafeString:
return u
except UnicodeError as error: # catch ..Encode.. and ..Decode.. errors
if isinstance(self.data, EnvironmentError):
- return "[Errno %s] %s: '%s'" % (self.data.errno,
+ return "[Errno %s] %s: '%s'" % (
+ self.data.errno,
SafeString(self.data.strerror, self.encoding,
self.decoding_errors),
SafeString(self.data.filename, self.encoding,
self.decoding_errors))
if isinstance(self.data, Exception):
- args = [unicode(SafeString(arg, self.encoding,
- decoding_errors=self.decoding_errors))
+ args = [unicode(SafeString(
+ arg, self.encoding,
+ decoding_errors=self.decoding_errors))
for arg in self.data.args]
return u', '.join(args)
if isinstance(error, UnicodeDecodeError):
@@ -155,7 +157,7 @@ class ErrorString(SafeString):
"""
def __str__(self):
return '%s: %s' % (self.data.__class__.__name__,
- super(ErrorString, self).__str__())
+ super(ErrorString, self).__str__())
def __unicode__(self):
return u'%s: %s' % (self.data.__class__.__name__,
@@ -214,7 +216,8 @@ class ErrorOutput:
return
if isinstance(data, Exception):
data = unicode(SafeString(data, self.encoding,
- self.encoding_errors, self.decoding_errors))
+ self.encoding_errors,
+ self.decoding_errors))
try:
self.stream.write(data)
except UnicodeEncodeError:
diff --git a/docutils/docutils/utils/math/latex2mathml.py b/docutils/docutils/utils/math/latex2mathml.py
index 592bc21ee..f2156dd08 100644
--- a/docutils/docutils/utils/math/latex2mathml.py
+++ b/docutils/docutils/utils/math/latex2mathml.py
@@ -177,9 +177,9 @@ small_operators = {# mathsize='75%'
# Operators and functions with limits above/below in display formulas
# and in index position inline (movablelimits=True)
movablelimits = ('bigcap', 'bigcup', 'bigodot', 'bigoplus', 'bigotimes',
- 'bigsqcup', 'biguplus', 'bigvee', 'bigwedge',
- 'coprod', 'intop', 'ointop', 'prod', 'sum',
- 'lim', 'max', 'min', 'sup', 'inf')
+ 'bigsqcup', 'biguplus', 'bigvee', 'bigwedge',
+ 'coprod', 'intop', 'ointop', 'prod', 'sum',
+ 'lim', 'max', 'min', 'sup', 'inf')
# Depending on settings, integrals may also be in this category.
# (e.g. if "amsmath" is loaded with option "intlimits", see
# http://mirror.ctan.org/macros/latex/required/amsmath/amsldoc.pdf)
@@ -202,7 +202,7 @@ spaces = {'qquad': '2em', # two \quad
':': '0.2222em', # 4mu medspace
',': '0.1667em', # 3mu thinspace
'!': '-0.1667em', # negthinspace
- }
+ }
# accents -> <mover stretchy="false">
accents = {# TeX: (spacing, combining)
@@ -219,7 +219,7 @@ accents = {# TeX: (spacing, combining)
'tilde': ('˜', '\u0303'), # tilde ~ or small tilde ˜?
'vec': ('→', '\u20d7'), # → too heavy, accents="false"
# TODO: ddddot
- }
+ }
# limits etc. -> <mover> or <munder>
over = {# TeX: (char, offset-correction/em)
@@ -230,13 +230,13 @@ over = {# TeX: (char, offset-correction/em)
'overrightarrow': ('\u2192', -0.2),
'widehat': ('^', -0.5),
'widetilde': ('~', -0.3),
- }
+ }
under = {'underbrace': ('\u23DF', 0.1), # DejaVu Math -0.7
'underleftarrow': ('\u2190', -0.2),
'underleftrightarrow': ('\u2194', -0.2),
'underline': ('_', -0.8),
'underrightarrow': ('\u2192', -0.2),
- }
+ }
# Character translations
# ----------------------
@@ -245,7 +245,7 @@ under = {'underbrace': ('\u23DF', 0.1), # DejaVu Math -0.7
anomalous_chars = {'-': '\u2212', # HYPHEN-MINUS -> MINUS SIGN
':': '\u2236', # COLON -> RATIO
'~': '\u00a0', # NO-BREAK SPACE
- }
+ }
# blackboard bold (Greek characters not working with "mathvariant" (Firefox 78))
mathbb = {'Γ': '\u213E', # ℾ
@@ -253,7 +253,7 @@ mathbb = {'Γ': '\u213E', # ℾ
'Σ': '\u2140', # ⅀
'γ': '\u213D', # ℽ
'π': '\u213C', # ℼ
- }
+ }
# Matrix environments
matrices = {# name: fences
@@ -265,7 +265,7 @@ matrices = {# name: fences
'vmatrix': ('|', '|'),
'Vmatrix': ('\u2016', '\u2016'), # ‖
'cases': ('{', ''),
- }
+ }
layout_styles = {
'displaystyle': {'displaystyle': True, 'scriptlevel': 0},
@@ -284,7 +284,7 @@ fractions = {# name: style_attrs, frac_attrs
'binom': ({}, {'linethickness': 0}),
'dbinom': (layout_styles['displaystyle'], {'linethickness': 0}),
'tbinom': (layout_styles['textstyle'], {'linethickness': 0}),
- }
+ }
delimiter_sizes = ['', '1.2em', '1.623em', '2.047em', '2.470em']
bigdelimiters = {'left': 0,
@@ -297,7 +297,7 @@ bigdelimiters = {'left': 0,
'biggr': 3,
'Biggl': 4,
'Biggr': 4,
- }
+ }
# MathML element classes
diff --git a/docutils/docutils/utils/math/math2html.py b/docutils/docutils/utils/math/math2html.py
index 974ad43b5..c4127397c 100755
--- a/docutils/docutils/utils/math/math2html.py
+++ b/docutils/docutils/utils/math/math2html.py
@@ -165,7 +165,7 @@ class FormulaConfig:
# '|': ['⎪',], # 23AA CURLY BRACKET EXTENSION
'‖': ['‖'], # 2016 DOUBLE VERTICAL LINE
# '∥': ['∥'], # 2225 PARALLEL TO
- }
+ }
bracketcommands = {
'\\left': 'span class="stretchy"',
@@ -541,7 +541,7 @@ class FormulaConfig:
unmodified = {
'characters': ['.', '*', '€', '(', ')', '[', ']',
- '·', '!', ';', '|', '§', '"', '?'],
+ '·', '!', ';', '|', '§', '"', '?'],
}
@@ -801,7 +801,8 @@ class TextParser(Parser):
"Parse lines as long as they are text"
TextParser.stack.append(self.ending)
self.endings = TextParser.stack + [ContainerConfig.endings['Layout'],
- ContainerConfig.endings['Inset'], self.ending]
+ ContainerConfig.endings['Inset'],
+ self.ending]
contents = []
while not self.isending(reader):
self.parsecontainer(reader, contents)
@@ -1565,7 +1566,7 @@ class FormulaParser(Parser):
if FormulaConfig.starts['complex'] in reader.currentline():
# formula of the form \[...\]
return self.parsemultiliner(reader, FormulaConfig.starts['complex'],
- FormulaConfig.endings['complex'])
+ FormulaConfig.endings['complex'])
beginbefore = FormulaConfig.starts['beginbefore']
beginafter = FormulaConfig.starts['beginafter']
if beginbefore in reader.currentline():
diff --git a/docutils/docutils/utils/math/tex2mathml_extern.py b/docutils/docutils/utils/math/tex2mathml_extern.py
index ee759fe87..2d8279ae4 100644
--- a/docutils/docutils/utils/math/tex2mathml_extern.py
+++ b/docutils/docutils/utils/math/tex2mathml_extern.py
@@ -31,14 +31,14 @@ def latexml(math_code, reporter=None):
.. _LaTeXML: http://dlmf.nist.gov/LaTeXML/
"""
p = subprocess.Popen(['latexml',
- '-', # read from stdin
- # '--preload=amsmath',
- '--inputencoding=utf8',
- ],
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- close_fds=True)
+ '-', # read from stdin
+ # '--preload=amsmath',
+ '--inputencoding=utf8',
+ ],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=True)
p.stdin.write((document_template % math_code).encode('utf8'))
p.stdin.close()
latexml_code = p.stdout.read()
@@ -52,7 +52,7 @@ def latexml(math_code, reporter=None):
'--format=xhtml',
# '--linelength=78', # experimental
'--'
- ],
+ ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@@ -80,7 +80,7 @@ def ttm(math_code, reporter=None):
# '-i', # italic font for equations. Default roman.
'-u', # unicode character encoding. (Default iso-8859-1).
'-r', # output raw MathML (no preamble or postlude)
- ],
+ ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
@@ -110,7 +110,7 @@ def blahtexml(math_code, inline=True, reporter=None):
'--other-encoding', 'raw',
'--doctype-xhtml+mathml',
'--annotate-TeX',
- ]
+ ]
if inline:
mathmode_arg = ''
else:
@@ -128,8 +128,9 @@ def blahtexml(math_code, inline=True, reporter=None):
err = p.stderr.read().decode('utf8')
if result.find('<error>') >= 0:
- raise SyntaxError('\nMessage from external converter blahtexml:\n'
- +result[result.find('<message>')+9:result.find('</message>')])
+ msg = result[result.find('<message>')+9:result.find('</message>')]
+ raise SyntaxError('\nMessage from external converter blahtexml:\n%s'
+ % msg)
if reporter and (err.find('**** Error') >= 0 or not result):
reporter.error(err)
start, end = result.find('<markup>')+9, result.find('</markup>')
diff --git a/docutils/docutils/utils/smartquotes.py b/docutils/docutils/utils/smartquotes.py
index 6c72ddbd7..667940086 100644
--- a/docutils/docutils/utils/smartquotes.py
+++ b/docutils/docutils/utils/smartquotes.py
@@ -490,7 +490,7 @@ class smartchars:
'uk-x-altquot': '„“‚‘',
'zh-cn': '“”‘’',
'zh-tw': '「」『』',
- }
+ }
def __init__(self, language='en'):
self.language = language
@@ -639,7 +639,7 @@ def educateQuotes(text, language='en'):
'dash': '[-–—]' # hyphen and em/en dashes
+ r'|&[mn]dash;|&\#8211;|&\#8212;|&\#x201[34];',
'sep': '[\\s\u200B\u200C]|&nbsp;', # Whitespace, ZWSP, ZWNJ
- }
+ }
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Use closing quotes.
diff --git a/docutils/docutils/utils/urischemes.py b/docutils/docutils/utils/urischemes.py
index 059b47d53..113566486 100644
--- a/docutils/docutils/utils/urischemes.py
+++ b/docutils/docutils/utils/urischemes.py
@@ -90,7 +90,7 @@ schemes = {
'printer': '',
'prospero': 'Prospero Directory Service; RFC 4157',
'rdar': ('URLs found in Darwin source '
- '(http://www.opensource.apple.com/darwinsource/).'),
+ '(http://www.opensource.apple.com/darwinsource/).'),
'res': '',
'rtsp': 'real time streaming protocol; RFC 2326',
'rvp': '',
diff --git a/docutils/docutils/writers/_html_base.py b/docutils/docutils/writers/_html_base.py
index 5aa533657..b11dc9cbc 100644
--- a/docutils/docutils/writers/_html_base.py
+++ b/docutils/docutils/writers/_html_base.py
@@ -37,8 +37,10 @@ class Writer(writers.Writer):
supported = ('html', 'xhtml') # update in subclass
"""Formats this writer supports."""
- settings_spec = ('HTML Writer Options', None, (
- ('Specify the template file (UTF-8 encoded). '
+ settings_spec = (
+ 'HTML Writer Options',
+ None,
+ (('Specify the template file (UTF-8 encoded). '
'(default: writer dependent)',
['--template'],
{'metavar': '<file>'}),
@@ -125,7 +127,8 @@ class Writer(writers.Writer):
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
- ))
+ )
+ )
settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'}
@@ -269,7 +272,7 @@ class HTMLTranslator(nodes.NodeVisitor):
ord('"'): '&quot;',
ord('>'): '&gt;',
ord('@'): '&#64;', # may thwart address harvesters
- }
+ }
"""Character references for characters with a special meaning in HTML."""
@@ -305,14 +308,16 @@ class HTMLTranslator(nodes.NodeVisitor):
# legacy setting embed_images:
if getattr(settings, 'embed_images', None) is True:
warnings.warn('The configuration setting "embed_images" '
- 'will be removed in Docutils 1.2. Use "image_loading: embed".',
- FutureWarning, stacklevel=8)
+ 'will be removed in Docutils 1.2. '
+ 'Use "image_loading: embed".',
+ FutureWarning, stacklevel=8)
if self.image_loading is None:
self.image_loading = 'embed'
if getattr(settings, 'embed_images', None) is False:
warnings.warn('The configuration setting "embed_images" '
- 'will be removed in Docutils 1.2. Use "image_loading: link".',
- FutureWarning, stacklevel=8)
+ 'will be removed in Docutils 1.2. '
+ 'Use "image_loading: link".',
+ FutureWarning, stacklevel=8)
if self.image_loading is None:
self.image_loading = 'link' # default
self.math_output = settings.math_output.split()
@@ -441,11 +446,9 @@ class HTMLTranslator(nodes.NodeVisitor):
# may be targets inside of references, but nested "a"
# elements aren't allowed in XHTML (even if they do
# not all have a "href" attribute).
- if empty or isinstance(node,
- (nodes.bullet_list, nodes.docinfo,
- nodes.definition_list, nodes.enumerated_list,
- nodes.field_list, nodes.option_list,
- nodes.table)):
+ if empty or isinstance(node, (nodes.Sequential,
+ nodes.docinfo,
+ nodes.table)):
# Insert target right in front of element.
prefix.append('<span id="%s"></span>' % id)
else:
@@ -652,7 +655,7 @@ class HTMLTranslator(nodes.NodeVisitor):
def depart_citation_reference(self, node):
self.body.append(']</a>')
- # classifier
+ # classifier
# ----------
# don't insert classifier-delimiter here (done by CSS)
@@ -811,8 +814,8 @@ class HTMLTranslator(nodes.NodeVisitor):
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if 'name="dcterms.' in ''.join(self.meta):
- self.head.append(
- '<link rel="schema.dcterms" href="http://purl.org/dc/terms/"/>')
+ self.head.append('<link rel="schema.dcterms"'
+                             ' href="http://purl.org/dc/terms/"/>')
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
@@ -966,7 +969,7 @@ class HTMLTranslator(nodes.NodeVisitor):
# get section number (strip trailing no-break-spaces)
sectnum = node.astext().rstrip(' ')
self.body.append('<span class="sectnum">%s </span>'
- % self.encode(sectnum))
+ % self.encode(sectnum))
# Content already processed:
raise nodes.SkipNode
@@ -1055,8 +1058,8 @@ class HTMLTranslator(nodes.NodeVisitor):
uri.replace('\\', '/'))
# TODO: insert SVG as-is?
# if mimetype == 'image/svg+xml':
- # read/parse, apply arguments,
- # insert as <svg ....> ... </svg> # (about 1/3 less data)
+ # read/parse, apply arguments,
+ # insert as <svg ....> ... </svg> # (about 1/3 less data)
data64 = base64.b64encode(imagedata).decode()
uri = 'data:%s;base64,%s' % (mimetype, data64)
elif self.image_loading == 'lazy':
@@ -1149,7 +1152,7 @@ class HTMLTranslator(nodes.NodeVisitor):
for token in self.words_and_spaces.findall(text):
if token.strip() and self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
- % self.encode(token))
+ % self.encode(token))
else:
self.body.append(self.encode(token))
self.body.append('</span>')
@@ -1199,8 +1202,9 @@ class HTMLTranslator(nodes.NodeVisitor):
'latex': (None, None),
}
wrapper = wrappers[self.math_output][math_env != '']
- if self.math_output == 'mathml' and (not self.math_output_options or
- self.math_output_options[0] == 'blahtexml'):
+ if (self.math_output == 'mathml'
+ and (not self.math_output_options
+ or self.math_output_options[0] == 'blahtexml')):
wrapper = None
# get and wrap content
math_code = node.astext().translate(unichar2tex.uni2tex_table)
@@ -1217,7 +1221,8 @@ class HTMLTranslator(nodes.NodeVisitor):
self.mathjax_url = self.math_output_options[0]
except IndexError:
self.document.reporter.warning('No MathJax URL specified, '
- 'using local fallback (see config.html)')
+ 'using local fallback '
+ '(see config.html)')
# append configuration, if not already present in the URL:
# input LaTeX with AMS, output common HTML
if '?' not in self.mathjax_url:
@@ -1239,23 +1244,24 @@ class HTMLTranslator(nodes.NodeVisitor):
converter = ' '.join(self.math_output_options).lower()
try:
if converter == 'latexml':
- math_code = tex2mathml_extern.latexml(math_code,
- self.document.reporter)
+ math_code = tex2mathml_extern.latexml(
+ math_code, self.document.reporter)
elif converter == 'ttm':
- math_code = tex2mathml_extern.ttm(math_code,
- self.document.reporter)
+ math_code = tex2mathml_extern.ttm(
+ math_code, self.document.reporter)
elif converter == 'blahtexml':
- math_code = tex2mathml_extern.blahtexml(math_code,
- inline=not(math_env),
- reporter=self.document.reporter)
+ math_code = tex2mathml_extern.blahtexml(
+ math_code,
+ inline=not(math_env),
+ reporter=self.document.reporter)
elif not converter:
- math_code = latex2mathml.tex2mathml(math_code,
- inline=not(math_env))
+ math_code = latex2mathml.tex2mathml(
+ math_code, inline=not(math_env))
else:
self.document.reporter.error('option "%s" not supported '
- 'with math-output "MathML"')
+                                         'with math-output "MathML"' % converter)
except OSError:
- raise OSError('is "latexmlmath" in your PATH?')
+ raise OSError('is "latexmlmath" in your PATH?')
except SyntaxError as err:
err_node = self.document.reporter.error(err, base_node=node)
self.visit_system_message(err_node)
@@ -1632,13 +1638,13 @@ class HTMLTranslator(nodes.NodeVisitor):
def visit_title(self, node):
close_tag = '</p>\n'
if isinstance(node.parent, nodes.topic):
+ # TODO: use role="heading" or <h1>? (HTML5 only)
self.body.append(
- self.starttag(node, 'p', '', CLASS='topic-title'))
- # TODO: use role="heading" or <h1>? (HTML5 only)
+ self.starttag(node, 'p', '', CLASS='topic-title'))
elif isinstance(node.parent, nodes.sidebar):
+ # TODO: use role="heading" or <h1>? (HTML5 only)
self.body.append(
- self.starttag(node, 'p', '', CLASS='sidebar-title'))
- # TODO: use role="heading" or <h1>? (HTML5 only)
+ self.starttag(node, 'p', '', CLASS='sidebar-title'))
elif isinstance(node.parent, nodes.Admonition):
self.body.append(
self.starttag(node, 'p', '', CLASS='admonition-title'))
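As a usage note for the MathML branch reworked above: the converter is chosen from the second word of the `math_output` setting ('latexml', 'ttm', 'blahtexml', or empty for the built-in latex2mathml). A hedged sketch using the public publishing API; file names are placeholders:

    from docutils.core import publish_file

    # built-in pure-Python converter:
    publish_file(source_path='document.rst', destination_path='document.html',
                 writer_name='html5',
                 settings_overrides={'math_output': 'MathML'})
    # external converter instead; needs the LaTeXML tools on the PATH:
    #   settings_overrides={'math_output': 'MathML latexml'}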
diff --git a/docutils/docutils/writers/html4css1/__init__.py b/docutils/docutils/writers/html4css1/__init__.py
index 48246991d..f56a7ec78 100644
--- a/docutils/docutils/writers/html4css1/__init__.py
+++ b/docutils/docutils/writers/html4css1/__init__.py
@@ -33,7 +33,7 @@ class Writer(writers._html_base.Writer):
os.path.abspath(os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'html5_polyglot')) # for math.css
- ]
+ ]
default_template = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'template.txt')
@@ -41,39 +41,39 @@ class Writer(writers._html_base.Writer):
settings_spec = frontend.filter_settings_spec(
writers._html_base.Writer.settings_spec,
template =
- ('Template file. (UTF-8 encoded, default: "%s")' % default_template,
- ['--template'],
- {'default': default_template, 'metavar': '<file>'}),
+ ('Template file. (UTF-8 encoded, default: "%s")' % default_template,
+ ['--template'],
+ {'default': default_template, 'metavar': '<file>'}),
stylesheet_path =
- ('Comma separated list of stylesheet paths. '
- 'Relative paths are expanded if a matching file is found in '
- 'the --stylesheet-dirs. With --link-stylesheet, '
- 'the path is rewritten relative to the output HTML file. '
- '(default: "%s")' % ','.join(default_stylesheets),
- ['--stylesheet-path'],
- {'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
- 'validator': frontend.validate_comma_separated_list,
- 'default': default_stylesheets}),
- stylesheet_dirs =
- ('Comma-separated list of directories where stylesheets are found. '
- 'Used by --stylesheet-path when expanding relative path arguments. '
- '(default: "%s")' % ','.join(default_stylesheet_dirs),
- ['--stylesheet-dirs'],
- {'metavar': '<dir[,dir,...]>',
- 'validator': frontend.validate_comma_separated_list,
- 'default': default_stylesheet_dirs}),
- initial_header_level =
- ('Specify the initial header level. Does not affect document '
- 'title & subtitle (see --no-doc-title). (default: 1 for "<h1>")',
- ['--initial-header-level'],
- {'choices': '1 2 3 4 5 6'.split(), 'default': '1',
- 'metavar': '<level>'}),
- xml_declaration =
- ('Prepend an XML declaration (default). ',
- ['--xml-declaration'],
- {'default': True, 'action': 'store_true',
- 'validator': frontend.validate_boolean}),
- )
+ ('Comma separated list of stylesheet paths. '
+ 'Relative paths are expanded if a matching file is found in '
+ 'the --stylesheet-dirs. With --link-stylesheet, '
+ 'the path is rewritten relative to the output HTML file. '
+ '(default: "%s")' % ','.join(default_stylesheets),
+ ['--stylesheet-path'],
+ {'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
+ 'validator': frontend.validate_comma_separated_list,
+ 'default': default_stylesheets}),
+ stylesheet_dirs =
+ ('Comma-separated list of directories where stylesheets are found. '
+ 'Used by --stylesheet-path when expanding relative path arguments. '
+ '(default: "%s")' % ','.join(default_stylesheet_dirs),
+ ['--stylesheet-dirs'],
+ {'metavar': '<dir[,dir,...]>',
+ 'validator': frontend.validate_comma_separated_list,
+ 'default': default_stylesheet_dirs}),
+ initial_header_level =
+ ('Specify the initial header level. Does not affect document '
+ 'title & subtitle (see --no-doc-title). (default: 1 for "<h1>")',
+ ['--initial-header-level'],
+ {'choices': '1 2 3 4 5 6'.split(), 'default': '1',
+ 'metavar': '<level>'}),
+ xml_declaration =
+ ('Prepend an XML declaration (default). ',
+ ['--xml-declaration'],
+ {'default': True, 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ )
settings_spec = settings_spec + (
'HTML4 Writer Options',
'',
@@ -91,7 +91,8 @@ class Writer(writers._html_base.Writer):
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
- ))
+ )
+ )
config_section = 'html4css1 writer'
@@ -540,7 +541,7 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
# (SVG not supported by IE up to version 8,
# html4css1 strives for IE6 compatibility.)
object_image_types = {'.svg': 'image/svg+xml',
- '.swf': 'application/x-shockwave-flash'}
+ '.swf': 'application/x-shockwave-flash'}
#
def visit_image(self, node):
atts = {}
diff --git a/docutils/docutils/writers/html5_polyglot/__init__.py b/docutils/docutils/writers/html5_polyglot/__init__.py
index 36cb6945e..2771a842b 100644
--- a/docutils/docutils/writers/html5_polyglot/__init__.py
+++ b/docutils/docutils/writers/html5_polyglot/__init__.py
@@ -48,38 +48,38 @@ class Writer(writers._html_base.Writer):
settings_spec = frontend.filter_settings_spec(
writers._html_base.Writer.settings_spec,
template =
- ('Template file. (UTF-8 encoded, default: "%s")' % default_template,
- ['--template'],
- {'default': default_template, 'metavar': '<file>'}),
+ ('Template file. (UTF-8 encoded, default: "%s")' % default_template,
+ ['--template'],
+ {'default': default_template, 'metavar': '<file>'}),
stylesheet_path =
- ('Comma separated list of stylesheet paths. '
- 'Relative paths are expanded if a matching file is found in '
- 'the --stylesheet-dirs. With --link-stylesheet, '
- 'the path is rewritten relative to the output HTML file. '
- '(default: "%s")' % ','.join(default_stylesheets),
- ['--stylesheet-path'],
- {'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
- 'validator': frontend.validate_comma_separated_list,
- 'default': default_stylesheets}),
- stylesheet_dirs =
- ('Comma-separated list of directories where stylesheets are found. '
- 'Used by --stylesheet-path when expanding relative path arguments. '
- '(default: "%s")' % ','.join(default_stylesheet_dirs),
- ['--stylesheet-dirs'],
- {'metavar': '<dir[,dir,...]>',
- 'validator': frontend.validate_comma_separated_list,
- 'default': default_stylesheet_dirs}),
- initial_header_level =
- ('Specify the initial header level. Does not affect document '
- 'title & subtitle (see --no-doc-title). (default: 2 for "<h2>")',
- ['--initial-header-level'],
- {'choices': '1 2 3 4 5 6'.split(), 'default': '2',
- 'metavar': '<level>'}),
- no_xml_declaration =
- ('Omit the XML declaration.',
- ['--no-xml-declaration'],
- {'dest': 'xml_declaration', 'action': 'store_false'}),
- )
+ ('Comma separated list of stylesheet paths. '
+ 'Relative paths are expanded if a matching file is found in '
+ 'the --stylesheet-dirs. With --link-stylesheet, '
+ 'the path is rewritten relative to the output HTML file. '
+ '(default: "%s")' % ','.join(default_stylesheets),
+ ['--stylesheet-path'],
+ {'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
+ 'validator': frontend.validate_comma_separated_list,
+ 'default': default_stylesheets}),
+ stylesheet_dirs =
+ ('Comma-separated list of directories where stylesheets are found. '
+ 'Used by --stylesheet-path when expanding relative path arguments. '
+ '(default: "%s")' % ','.join(default_stylesheet_dirs),
+ ['--stylesheet-dirs'],
+ {'metavar': '<dir[,dir,...]>',
+ 'validator': frontend.validate_comma_separated_list,
+ 'default': default_stylesheet_dirs}),
+ initial_header_level =
+ ('Specify the initial header level. Does not affect document '
+ 'title & subtitle (see --no-doc-title). (default: 2 for "<h2>")',
+ ['--initial-header-level'],
+ {'choices': '1 2 3 4 5 6'.split(), 'default': '2',
+ 'metavar': '<level>'}),
+ no_xml_declaration =
+ ('Omit the XML declaration.',
+ ['--no-xml-declaration'],
+ {'dest': 'xml_declaration', 'action': 'store_false'}),
+ )
settings_spec = settings_spec + (
'HTML5 Writer Options',
'',
@@ -95,14 +95,15 @@ class Writer(writers._html_base.Writer):
['--image-loading'],
{'choices': ('embed', 'link', 'lazy'),
# 'default': 'link' # default set in _html_base.py
- }),
+ }),
('Append a self-link to section headings.',
['--section-self-link'],
{'default': 0, 'action': 'store_true'}),
('Do not append a self-link to section headings. (default)',
['--no-section-self-link'],
{'dest': 'section_self_link', 'action': 'store_false'}),
- ))
+ )
+ )
config_section = 'html5 writer'
@@ -208,8 +209,8 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
self.meta.insert(0, self.content_type % self.settings.output_encoding)
self.head.insert(0, self.content_type % self.settings.output_encoding)
if 'name="dcterms.' in ''.join(self.meta):
- self.head.append(
- '<link rel="schema.dcterms" href="http://purl.org/dc/terms/"/>')
+ self.head.append('<link rel="schema.dcterms"'
+ ' href="http://purl.org/dc/terms/"/>')
if self.math_header:
if self.math_output == 'mathjax':
self.head.extend(self.math_header)
@@ -293,9 +294,13 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
suffix = ''
else:
suffix = '\n'
- self.body.append('%s<a href="%s">%s</a>%s</video>%s'
+ self.body.append(
+ '%s<a href="%s">%s</a>%s</video>%s'
% (self.starttag(node, 'video', suffix, src=uri, **atts),
- uri, node.get('alt', uri), suffix, suffix))
+ uri,
+ node.get('alt', uri),
+ suffix,
+ suffix))
def depart_image(self, node):
pass
@@ -312,8 +317,9 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
if len(tags):
node.html5tagname = tags[0]
classes.remove(tags[0])
- elif (classes == ['ln'] and isinstance(node.parent, nodes.literal_block)
- and 'code' in node.parent.get('classes')):
+ elif (classes == ['ln']
+ and isinstance(node.parent, nodes.literal_block)
+ and 'code' in node.parent.get('classes')):
if self.body[-1] == '<code>':
del(self.body[-1])
else:
@@ -364,7 +370,7 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
for token in self.words_and_spaces.findall(text):
if token.strip() and self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
- % self.encode(token))
+ % self.encode(token))
else:
self.body.append(self.encode(token))
self.body.append('</%s>' % tagname)
@@ -443,6 +449,6 @@ class HTMLTranslator(writers._html_base.HTMLTranslator):
if (ids and getattr(self.settings, 'section_self_link', None)
and not isinstance(node.parent, nodes.document)):
self_link = ('<a class="self-link" title="link to this section"'
- ' href="#%s"></a>' % ids[0])
+ ' href="#%s"></a>' % ids[0])
close_tag = close_tag.replace('</h', self_link + '</h')
return start_tag, close_tag
diff --git a/docutils/docutils/writers/latex2e/__init__.py b/docutils/docutils/writers/latex2e/__init__.py
index 0c5728590..649a190da 100644
--- a/docutils/docutils/writers/latex2e/__init__.py
+++ b/docutils/docutils/writers/latex2e/__init__.py
@@ -230,14 +230,14 @@ class Writer(writers.Writer):
# TODO: implement "latex footnotes" alternative
('Footnotes with numbers/symbols by Docutils. (default) '
'(The alternative, --latex-footnotes, is not implemented yet.)',
- ['--docutils-footnotes'],
- {'default': True,
- 'action': 'store_true',
- 'validator': frontend.validate_boolean}),
- ),)
-
- settings_defaults = {'sectnum_depth': 0 # updated by SectNum transform
- }
+ ['--docutils-footnotes'],
+ {'default': True,
+ 'action': 'store_true',
+ 'validator': frontend.validate_boolean}),
+ ),
+ )
+
+ settings_defaults = {'sectnum_depth': 0} # updated by SectNum transform
config_section = 'latex2e writer'
config_section_dependencies = ('writers', 'latex writers')
@@ -256,11 +256,11 @@ class Writer(writers.Writer):
# Override parent method to add latex-specific transforms
def get_transforms(self):
- return writers.Writer.get_transforms(self) + [
- # Convert specific admonitions to generic one
- writer_aux.Admonitions,
- # TODO: footnote collection transform
- ]
+ return writers.Writer.get_transforms(self) + [
+ # Convert specific admonitions to generic one
+ writer_aux.Admonitions,
+ # TODO: footnote collection transform
+ ]
def translate(self):
visitor = self.translator_class(self.document)
@@ -875,8 +875,10 @@ class Table:
# TODO: elif 'align' in classes/settings.table-style:
# self.set('align', ...)
borders = [cls.replace('nolines', 'borderless')
- for cls in ['standard'] + settings.table_style + node['classes']
- if cls in ('standard', 'booktabs', 'borderless', 'nolines')]
+ for cls in (['standard']
+ + settings.table_style
+ + node['classes'])
+ if cls in ('standard', 'booktabs', 'borderless', 'nolines')]
self.borders = borders[-1]
self.colwidths_auto = (('colwidths-auto' in node['classes']
or 'colwidths-auto' in settings.table_style)
@@ -912,8 +914,7 @@ class Table:
latex_type = self.get_latex_type()
if align and latex_type not in ("longtable", "longtable*"):
opening = [r'\noindent\makebox[\linewidth]%s{%%' % (align,),
- r'\begin{%s}' % (latex_type,),
- ]
+ r'\begin{%s}' % (latex_type,)]
else:
opening = [r'\begin{%s}%s' % (latex_type, align)]
if not self.colwidths_auto:
@@ -958,7 +959,7 @@ class Table:
if total_width > width:
factor *= width / total_width
self._colwidths = [(factor * (node['colwidth']+1)/width)
- + 0.005 for node in self._col_specs]
+ + 0.005 for node in self._col_specs]
latex_colspecs = ['p{%.3f\\DUtablewidth}' % colwidth
for colwidth in self._colwidths]
else:
@@ -1210,7 +1211,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# language module for Docutils-generated text
# (labels, bibliographic_fields, and author_separators)
self.language_module = languages.get_language(settings.language_code,
- document.reporter)
+ document.reporter)
self.babel = babel_class(settings.language_code, document.reporter)
self.author_separator = self.language_module.author_separators[0]
d_options = [self.settings.documentoptions]
@@ -1233,7 +1234,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
# Document parts
self.head_prefix = [r'\documentclass[%s]{%s}' %
- (self.documentoptions, self.settings.documentclass)]
+ (self.documentoptions,
+ self.settings.documentclass)]
self.requirements = SortableDict() # made a list in depart_document()
self.requirements['__static'] = r'\usepackage{ifthen}'
self.latex_preamble = [settings.latex_preamble]
@@ -1333,8 +1335,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.fallback_stylesheet = False
else:
# require a minimal version:
- self.fallbacks['docutils.sty'
- ] = r'\usepackage{docutils}[2020/08/28]'
+ self.fallbacks['docutils.sty'] = (
+ r'\usepackage{docutils}[2020/08/28]')
self.stylesheet = [self.stylesheet_call(path)
for path in stylesheet_list]
@@ -1369,8 +1371,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
# 4 paragraph
# 5 subparagraph
if secnumdepth is not None:
- PreambleCmds.secnumdepth = (r'\setcounter{secnumdepth}{%d}'
- % self.d_class.latex_section_depth(secnumdepth))
+ PreambleCmds.secnumdepth = (
+ r'\setcounter{secnumdepth}{%d}'
+ % self.d_class.latex_section_depth(secnumdepth))
# start with specified number:
if (hasattr(settings, 'sectnum_start') and
settings.sectnum_start != 1):
@@ -1395,7 +1398,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
path = base + '.sty' # ensure extension
try:
content = docutils.io.FileInput(source_path=path,
- encoding='utf-8').read()
+ encoding='utf-8').read()
except OSError as err:
msg = f'Cannot embed stylesheet:\n {err}'
self.document.reporter.error(msg)
@@ -1423,34 +1426,34 @@ class LaTeXTranslator(nodes.NodeVisitor):
Default method is remove "-" and "_" chars from docutils_encoding.
"""
- tr = { 'iso-8859-1': 'latin1', # west european
- 'iso-8859-2': 'latin2', # east european
- 'iso-8859-3': 'latin3', # esperanto, maltese
- 'iso-8859-4': 'latin4', # north european, scandinavian, baltic
- 'iso-8859-5': 'iso88595', # cyrillic (ISO)
- 'iso-8859-9': 'latin5', # turkish
- 'iso-8859-15': 'latin9', # latin9, update to latin1.
- 'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
- 'windows-1251': 'cp1251', # cyrillic (on Windows)
- 'koi8-r': 'koi8-r', # cyrillic (Russian)
- 'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
- 'windows-1250': 'cp1250', #
- 'windows-1252': 'cp1252', #
- 'us-ascii': 'ascii', # ASCII (US)
- # unmatched encodings
- #'': 'applemac',
- #'': 'ansinew', # windows 3.1 ansi
- #'': 'ascii', # ASCII encoding for the range 32--127.
- #'': 'cp437', # dos latin us
- #'': 'cp850', # dos latin 1
- #'': 'cp852', # dos latin 2
- #'': 'decmulti',
- #'': 'latin10',
- #'iso-8859-6': '' # arabic
- #'iso-8859-7': '' # greek
- #'iso-8859-8': '' # hebrew
- #'iso-8859-10': '' # latin6, more complete iso-8859-4
- }
+ tr = {'iso-8859-1': 'latin1', # west european
+ 'iso-8859-2': 'latin2', # east european
+ 'iso-8859-3': 'latin3', # esperanto, maltese
+ 'iso-8859-4': 'latin4', # north european, scandinavian, baltic
+ 'iso-8859-5': 'iso88595', # cyrillic (ISO)
+ 'iso-8859-9': 'latin5', # turkish
+ 'iso-8859-15': 'latin9', # latin9, update to latin1.
+ 'mac_cyrillic': 'maccyr', # cyrillic (on Mac)
+ 'windows-1251': 'cp1251', # cyrillic (on Windows)
+ 'koi8-r': 'koi8-r', # cyrillic (Russian)
+ 'koi8-u': 'koi8-u', # cyrillic (Ukrainian)
+ 'windows-1250': 'cp1250', #
+ 'windows-1252': 'cp1252', #
+ 'us-ascii': 'ascii', # ASCII (US)
+ # unmatched encodings
+ #'': 'applemac',
+ #'': 'ansinew', # windows 3.1 ansi
+ #'': 'ascii', # ASCII encoding for the range 32--127.
+ #'': 'cp437', # dos latin us
+ #'': 'cp850', # dos latin 1
+ #'': 'cp852', # dos latin 2
+ #'': 'decmulti',
+ #'': 'latin10',
+ #'iso-8859-6': '' # arabic
+ #'iso-8859-7': '' # greek
+ #'iso-8859-8': '' # hebrew
+ #'iso-8859-10': '' # latin6, more complete iso-8859-4
+ }
encoding = docutils_encoding.lower()
if encoding in tr:
return tr[encoding]
@@ -1520,7 +1523,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
and cp in CharMaps.unsupported_unicode):
self.requirements['_inputenc'+str(cp)] = (
'\\DeclareUnicodeCharacter{%04X}{%s}'
- % (cp, CharMaps.unsupported_unicode[cp]))
+ % (cp, CharMaps.unsupported_unicode[cp]))
text = text.translate(table)
# Break up input ligatures e.g. '--' to '-{}-'.
@@ -1609,7 +1612,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def duclass_open(self, node):
"""Open a group and insert declarations for class values."""
if not isinstance(node.parent, nodes.compound):
- self.out.append('\n')
+ self.out.append('\n')
for cls in node['classes']:
if cls.startswith('language-'):
language = self.babel.language_name(cls[9:])
@@ -1829,7 +1832,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
and sibling.astext() in (' ', '\n')):
sibling2 = sibling.next_node(descend=False, siblings=True)
if isinstance(sibling2, nodes.citation_reference):
- followup_citation = True
+ followup_citation = True
if followup_citation:
self.out.append(',')
else:
@@ -1852,7 +1855,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_comment(self, node):
if not isinstance(node.parent, nodes.compound):
- self.out.append('\n')
+ self.out.append('\n')
# Precede every line with a comment sign, wrap in newlines
self.out.append('%% %s\n' % node.astext().replace('\n', '\n% '))
raise nodes.SkipNode
@@ -2019,14 +2022,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
title += self.title_labels
if self.subtitle:
title += [r'\\',
- r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle)
- ] + self.subtitle_labels
+ r'\DUdocumentsubtitle{%s}' % ''.join(self.subtitle),
+ ] + self.subtitle_labels
self.titledata.append(r'\title{%s}' % '%\n '.join(title))
# \author (empty \author prevents warning with \maketitle)
authors = ['\\\\\n'.join(author_entry)
- for author_entry in self.author_stack]
+ for author_entry in self.author_stack]
self.titledata.append(r'\author{%s}' %
- ' \\and\n'.join(authors))
+ ' \\and\n'.join(authors))
# \date (empty \date prevents defaulting to \today)
self.titledata.append(r'\date{%s}' % ', '.join(self.date))
# \maketitle in the body formats title with LaTeX
@@ -2041,12 +2044,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.out.append('\n\\begin{thebibliography}{%s}\n' %
- widest_label)
+ widest_label)
for bi in self._bibitems:
# cite_key: underscores must not be escaped
cite_key = bi[0].replace(r'\_', '_')
self.out.append('\\bibitem[%s]{%s}{%s}\n' %
- (bi[0], cite_key, bi[1]))
+ (bi[0], cite_key, bi[1]))
self.out.append('\\end{thebibliography}\n')
else:
self.out.append('\n\\bibliographystyle{%s}\n' %
@@ -2081,8 +2084,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
# multirow, multicolumn
if 'morerows' in node and 'morecols' in node:
- raise NotImplementedError('Cells that '
- 'span multiple rows *and* columns currently not supported, sorry.')
+ raise NotImplementedError('Cells that span multiple rows *and* '
+ 'columns currently not supported '
+ 'by the LaTeX writer')
# TODO: should be possible with LaTeX, see e.g.
# http://texblog.org/2012/12/21/multi-column-and-multi-row-cells-in-latex-tables/
# multirow in LaTeX simply will enlarge the cell over several rows
@@ -2104,11 +2108,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
bar1 = ''
mcols = node['morecols'] + 1
self.out.append('\\multicolumn{%d}{%s%s%s}{' %
- (mcols, bar1,
- self.active_table.get_multicolumn_width(
- self.active_table.get_entry_number(),
- mcols),
- self.active_table.get_vertical_bar()))
+ (mcols,
+ bar1,
+ self.active_table.get_multicolumn_width(
+ self.active_table.get_entry_number(), mcols),
+ self.active_table.get_vertical_bar()))
self.context.append('}')
else:
self.context.append('')
@@ -2125,7 +2129,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
if (not self.active_table.colwidths_auto
and self.out[-1].endswith("{")
and node.astext()):
- self.out.append("%")
+ self.out.append("%")
self.active_table.visit_entry() # increment cell count
@@ -2144,11 +2148,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_enumerated_list(self, node):
# enumeration styles:
types = {'': '',
- 'arabic':'arabic',
- 'loweralpha':'alph',
- 'upperalpha':'Alph',
- 'lowerroman':'roman',
- 'upperroman':'Roman'}
+ 'arabic':'arabic',
+ 'loweralpha':'alph',
+ 'upperalpha':'Alph',
+ 'lowerroman':'roman',
+ 'upperroman':'Roman'}
# default LaTeX enumeration labels:
default_labels = [# (prefix, enumtype, suffix)
('', 'arabic', '.'), # 1.
@@ -2169,16 +2173,15 @@ class LaTeXTranslator(nodes.NodeVisitor):
enumtype = types[node.get('enumtype', 'arabic')]
suffix = node.get('suffix', '.')
- enumeration_level = len(self._enumeration_counters)+1
- counter_name = 'enum' + roman.toRoman(enumeration_level).lower()
+ enum_level = len(self._enumeration_counters)+1
+ counter_name = 'enum' + roman.toRoman(enum_level).lower()
label = r'%s\%s{%s}%s' % (prefix, enumtype, counter_name, suffix)
self._enumeration_counters.append(label)
self.duclass_open(node)
- if enumeration_level <= 4:
+ if enum_level <= 4:
self.out.append('\\begin{enumerate}')
- if (prefix, enumtype, suffix
- ) != default_labels[enumeration_level-1]:
+ if (prefix, enumtype, suffix) != default_labels[enum_level-1]:
self.out.append('\n\\renewcommand{\\label%s}{%s}' %
(counter_name, label))
else:
@@ -2357,10 +2360,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
"""Convert `length_str` with rst length to LaTeX length
"""
if pxunit is not None:
- warnings.warn('The optional argument `pxunit` '
+ warnings.warn(
+ 'The optional argument `pxunit` '
'of LaTeXTranslator.to_latex_length() is ignored '
'and will be removed in Docutils 0.21 or later',
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=2)
match = re.match(r'(\d*\.?\d*)\s*(\S*)', length_str)
if not match:
return length_str
@@ -2414,14 +2418,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
except KeyError:
pass # TODO: warn?
if 'height' in attrs:
- include_graphics_options.append('height=%s' %
- self.to_latex_length(attrs['height']))
+ include_graphics_options.append(
+ 'height=%s' % self.to_latex_length(attrs['height']))
if 'scale' in attrs:
- include_graphics_options.append('scale=%f' %
- (attrs['scale'] / 100.0))
+ include_graphics_options.append(
+ 'scale=%f' % (attrs['scale'] / 100.0))
if 'width' in attrs:
- include_graphics_options.append('width=%s' %
- self.to_latex_length(attrs['width']))
+ include_graphics_options.append(
+ 'width=%s' % self.to_latex_length(attrs['width']))
if not (self.is_inline(node) or
isinstance(node.parent, (nodes.figure, nodes.compound))):
pre.append('\n')
@@ -2475,7 +2479,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.set_align_from_classes(node)
if isinstance(node.parent, nodes.line_block):
self.out.append('\\item[]\n'
- '\\begin{DUlineblock}{\\DUlineblockindent}\n')
+ '\\begin{DUlineblock}{\\DUlineblockindent}\n')
# nested line-blocks cannot be given class arguments
else:
self.duclass_open(node)
@@ -2566,7 +2570,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
# Wrap in minipage to prevent extra vertical space
# with alltt and verbatim-like environments:
self.fallbacks['ttem'] = PreambleCmds.ttem
- self.out.append('\\begin{minipage}{%d\\ttemwidth}\n' %
+ self.out.append(
+ '\\begin{minipage}{%d\\ttemwidth}\n' %
(max(len(line) for line in node.astext().split('\n'))))
self.context.append('\n\\end{minipage}\n')
elif not _in_table and not _use_listings:
@@ -2728,8 +2733,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
# * is in a table with auto-width columns
index = node.parent.index(node)
if index == 0 and isinstance(node.parent,
- (nodes.list_item, nodes.description,
- nodes.compound, nodes.container)):
+ (nodes.list_item, nodes.description,
+ nodes.compound, nodes.container)):
pass
elif (index > 0
and isinstance(node.parent, nodes.compound)
@@ -2794,7 +2799,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
special_chars = {ord('#'): '\\#',
ord('%'): '\\%',
ord('\\'): '\\\\',
- }
+ }
# external reference (URL)
if 'refuri' in node:
href = str(node['refuri']).translate(special_chars)
@@ -2912,7 +2917,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# section subtitle: "starred" (no number, not in ToC)
elif isinstance(node.parent, nodes.section):
self.out.append(r'\%s*{' %
- self.d_class.section(self.section_level + 1))
+ self.d_class.section(self.section_level + 1))
else:
if not self.fallback_stylesheet:
self.fallbacks['subtitle'] = PreambleCmds.subtitle
@@ -2942,8 +2947,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
except KeyError:
line = ''
self.out.append('}\n\n{\\color{red}%s/%s} in \\texttt{%s}%s\n' %
- (node['type'], node['level'],
- self.encode(node['source']), line))
+ (node['type'], node['level'],
+ self.encode(node['source']), line))
if len(node['backrefs']) == 1:
self.out.append('\n\\hyperlink{%s}{' % node['backrefs'][0])
self.context.append('}')
@@ -3150,16 +3155,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
section_name = self.d_class.section(self.section_level)
# minitoc only supports "part" and toplevel sections
minitoc_names = {'part': 'part',
- 'chapter': 'mini',
- 'section': 'sect'}
+ 'chapter': 'mini',
+ 'section': 'sect'}
if 'chapter' in self.d_class.sections:
del(minitoc_names['section'])
try:
mtc_name = minitoc_names[section_name]
except KeyError:
self.warn('Skipping local ToC at "%s" level.\n'
- ' Feature not supported with option "use-latex-toc"'
- % section_name, base_node=node)
+ ' Feature not supported with option "use-latex-toc"'
+ % section_name, base_node=node)
raise nodes.SkipNode
# labels and PDF bookmark (sidebar entry)
@@ -3245,7 +3250,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_topic(self, node):
if ('abstract' in node['classes']
- and self.settings.use_latex_abstract):
+ and self.settings.use_latex_abstract):
self.out.append('\\end{abstract}\n')
elif 'contents' in node['classes']:
self.duclass_close(node)
diff --git a/docutils/docutils/writers/manpage.py b/docutils/docutils/writers/manpage.py
index 3b45ab6ea..7f0ddc4f4 100644
--- a/docutils/docutils/writers/manpage.py
+++ b/docutils/docutils/writers/manpage.py
@@ -236,9 +236,8 @@ class Translator(nodes.NodeVisitor):
"""Return commented version of the passed text WITHOUT end of
line/comment."""
prefix = '.\\" '
- out_text = ''.join(
- [(prefix + in_line + '\n')
- for in_line in text.split('\n')])
+ out_text = ''.join([(prefix + in_line + '\n')
+ for in_line in text.split('\n')])
return out_text
def comment(self, text):
@@ -261,14 +260,14 @@ class Translator(nodes.NodeVisitor):
if self.body[i] == '.sp\n':
if self.body[i - 1][:4] in ('.BI ', '.IP '):
self.body[i] = '.\n'
- elif (self.body[i - 1][:3] == '.B ' and
- self.body[i - 2][:4] == '.TP\n'):
+ elif (self.body[i - 1][:3] == '.B '
+ and self.body[i - 2][:4] == '.TP\n'):
self.body[i] = '.\n'
- elif (self.body[i - 1] == '\n' and
- not self.possibly_a_roff_command.match(self.body[i - 2]) and
- (self.body[i - 3][:7] == '.TP\n.B '
- or self.body[i - 3][:4] == '\n.B ')
- ):
+ elif (self.body[i - 1] == '\n'
+ and not self.possibly_a_roff_command.match(self.body[i - 2])
+ and (self.body[i - 3][:7] == '.TP\n.B '
+ or self.body[i - 3][:4] == '\n.B ')
+ ):
self.body[i] = '.\n'
return ''.join(self.head + self.body + self.foot)
@@ -584,7 +583,7 @@ class Translator(nodes.NodeVisitor):
def depart_document(self, node):
if self._docinfo['author']:
self.body.append('.SH AUTHOR\n%s\n'
- % ', '.join(self._docinfo['author']))
+ % ', '.join(self._docinfo['author']))
skip = ('author', 'copyright', 'date',
'manual_group', 'manual_section',
'subtitle',
@@ -606,9 +605,8 @@ class Translator(nodes.NodeVisitor):
self.body.append("\n%s: %s\n" % (label, self._docinfo[name]))
if self._docinfo['copyright']:
self.body.append('.SH COPYRIGHT\n%s\n'
- % self._docinfo['copyright'])
- self.body.append(self.comment(
- 'Generated by docutils manpage writer.'))
+ % self._docinfo['copyright'])
+ self.body.append(self.comment('Generated by docutils manpage writer.'))
def visit_emphasis(self, node):
self.body.append(self.defs['emphasis'][0])
@@ -620,10 +618,10 @@ class Translator(nodes.NodeVisitor):
# a cell in a table row
if 'morerows' in node:
self.document.reporter.warning('"table row spanning" not supported',
- base_node=node)
+ base_node=node)
if 'morecols' in node:
self.document.reporter.warning(
- '"table cell spanning" not supported', base_node=node)
+ '"table cell spanning" not supported', base_node=node)
self.context.append(len(self.body))
def depart_entry(self, node):
@@ -684,7 +682,7 @@ class Translator(nodes.NodeVisitor):
def visit_footer(self, node):
self.document.reporter.warning('"footer" not supported',
- base_node=node)
+ base_node=node)
def depart_footer(self, node):
pass
@@ -699,7 +697,7 @@ class Translator(nodes.NodeVisitor):
def footnote_backrefs(self, node):
self.document.reporter.warning('"footnote_backrefs" not supported',
- base_node=node)
+ base_node=node)
def visit_footnote_reference(self, node):
self.body.append('['+self.deunicode(node.astext())+']')
@@ -745,7 +743,7 @@ class Translator(nodes.NodeVisitor):
def visit_image(self, node):
self.document.reporter.warning('"image" not supported',
- base_node=node)
+ base_node=node)
text = []
if 'alt' in node.attributes:
text.append(node.attributes['alt'])
@@ -771,7 +769,7 @@ class Translator(nodes.NodeVisitor):
or isinstance(node.parent, nodes.citation)):
raise nodes.SkipNode
self.document.reporter.warning('"unsupported "label"',
- base_node=node)
+ base_node=node)
self.body.append('[')
def depart_label(self, node):
@@ -840,7 +838,7 @@ class Translator(nodes.NodeVisitor):
def visit_math(self, node):
self.document.reporter.warning('"math" role not supported',
- base_node=node)
+ base_node=node)
self.visit_literal(node)
def depart_math(self, node):
@@ -848,7 +846,7 @@ class Translator(nodes.NodeVisitor):
def visit_math_block(self, node):
self.document.reporter.warning('"math" directive not supported',
- base_node=node)
+ base_node=node)
self.visit_literal_block(node)
def depart_math_block(self, node):
@@ -1028,7 +1026,7 @@ class Translator(nodes.NodeVisitor):
def visit_substitution_reference(self, node):
self.document.reporter.warning('"substitution_reference" not supported',
- base_node=node)
+ base_node=node)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
diff --git a/docutils/docutils/writers/odf_odt/pygmentsformatter.py b/docutils/docutils/writers/odf_odt/pygmentsformatter.py
index 8d3316b43..3dd13761b 100644
--- a/docutils/docutils/writers/odf_odt/pygmentsformatter.py
+++ b/docutils/docutils/writers/odf_odt/pygmentsformatter.py
@@ -82,8 +82,8 @@ class OdtPygmentsLaTeXFormatter(OdtPygmentsFormatter):
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
elif ttype in (tokenclass.Literal.String,
- tokenclass.Literal.String.Backtick,
- ):
+ tokenclass.Literal.String.Backtick,
+ ):
s2 = self.rststyle('codeblock-string')
s1 = '<text:span text:style-name="%s">%s</text:span>' % \
(s2, value, )
diff --git a/docutils/docutils/writers/pep_html/__init__.py b/docutils/docutils/writers/pep_html/__init__.py
index 935ed1adc..fcb8f711e 100644
--- a/docutils/docutils/writers/pep_html/__init__.py
+++ b/docutils/docutils/writers/pep_html/__init__.py
@@ -100,4 +100,4 @@ class HTMLTranslator(html4css1.HTMLTranslator):
def depart_field_list(self, node):
html4css1.HTMLTranslator.depart_field_list(self, node)
if 'rfc2822' in node['classes']:
- self.body.append('<hr />\n')
+ self.body.append('<hr />\n')
diff --git a/docutils/docutils/writers/pseudoxml.py b/docutils/docutils/writers/pseudoxml.py
index 3742e0787..b10a2d0e5 100644
--- a/docutils/docutils/writers/pseudoxml.py
+++ b/docutils/docutils/writers/pseudoxml.py
@@ -23,7 +23,8 @@ class Writer(writers.Writer):
(('Pretty-print <#text> nodes.',
['--detailed'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
- ))
+ )
+ )
config_section = 'pseudoxml writer'
config_section_dependencies = ('writers',)
diff --git a/docutils/docutils/writers/s5_html/__init__.py b/docutils/docutils/writers/s5_html/__init__.py
index 16dcf7c1a..2672ffc61 100644
--- a/docutils/docutils/writers/s5_html/__init__.py
+++ b/docutils/docutils/writers/s5_html/__init__.py
@@ -268,9 +268,10 @@ class S5HTMLTranslator(html4css1.HTMLTranslator):
src_data = src_file.read()
with open(dest, 'wb') as dest_file:
dest_dir = dest_dir.replace(os.sep, '/')
- dest_file.write(src_data.replace(b'ui/default',
+ dest_file.write(src_data.replace(
+ b'ui/default',
dest_dir[dest_dir.rfind('ui/'):].encode(
- sys.getfilesystemencoding())))
+ sys.getfilesystemencoding())))
settings.record_dependencies.add(source)
return 1
if os.path.isfile(dest):
diff --git a/docutils/docutils/writers/xetex/__init__.py b/docutils/docutils/writers/xetex/__init__.py
index e410bd872..4dadbc762 100644
--- a/docutils/docutils/writers/xetex/__init__.py
+++ b/docutils/docutils/writers/xetex/__init__.py
@@ -35,16 +35,16 @@ class Writer(latex2e.Writer):
"""Formats this writer supports."""
default_template = 'xelatex.tex'
- default_preamble = (
- '% Linux Libertine (free, wide coverage, not only for Linux)\n'
- '\\setmainfont{Linux Libertine O}\n'
- '\\setsansfont{Linux Biolinum O}\n'
- '\\setmonofont[HyphenChar=None,Scale=MatchLowercase]{DejaVu Sans Mono}'
- )
+ default_preamble = """\
+% Linux Libertine (free, wide coverage, not only for Linux)
+\\setmainfont{Linux Libertine O}
+\\setsansfont{Linux Biolinum O}
+\\setmonofont[HyphenChar=None,Scale=MatchLowercase]{DejaVu Sans Mono}"""
config_section = 'xetex writer'
+ # TODO: remove dependency on `latex2e writer`.
config_section_dependencies = ('writers', 'latex writers',
- 'latex2e writer') # TODO: remove dependency on `latex2e writer`.
+ 'latex2e writer')
# use a copy of the parent spec with some modifications:
settings_spec = frontend.filter_settings_spec(
@@ -108,7 +108,7 @@ class Babel(latex2e.Babel):
'fr-CA', # 'canadien',
'grc-ibycus', # 'ibycus', (Greek Ibycus encoding)
'sr-Latn', # 'serbian script=latin'
- ):
+ ):
del(language_codes[key.lower()])
def __init__(self, language_code, reporter):
diff --git a/docutils/test/functional/tests/dangerous.py b/docutils/test/functional/tests/dangerous.py
index 352785673..84f467163 100644
--- a/docutils/test/functional/tests/dangerous.py
+++ b/docutils/test/functional/tests/dangerous.py
@@ -11,5 +11,4 @@ writer_name = "html"
settings_overrides['file_insertion_enabled'] = False
settings_overrides['raw_enabled'] = False
# local copy of default stylesheet:
-settings_overrides['stylesheet_path'] = (
- 'functional/input/data/html4css1.css')
+settings_overrides['stylesheet_path'] = 'functional/input/data/html4css1.css'
diff --git a/docutils/test/functional/tests/pep_html.py b/docutils/test/functional/tests/pep_html.py
index 6cd28e3c4..09c796292 100644
--- a/docutils/test/functional/tests/pep_html.py
+++ b/docutils/test/functional/tests/pep_html.py
@@ -13,5 +13,4 @@ settings_overrides['pep_home'] = "http://www.python.org/peps"
settings_overrides['no_random'] = 1
settings_overrides['cloak_email_addresses'] = 1
# local copy of default stylesheet:
-settings_overrides['stylesheet_path'] = (
- 'functional/input/data/html4css1.css')
+settings_overrides['stylesheet_path'] = 'functional/input/data/html4css1.css'
diff --git a/docutils/test/functional/tests/standalone_rst_s5_html_1.py b/docutils/test/functional/tests/standalone_rst_s5_html_1.py
index 3a31bf19e..b4552e6e7 100755
--- a/docutils/test/functional/tests/standalone_rst_s5_html_1.py
+++ b/docutils/test/functional/tests/standalone_rst_s5_html_1.py
@@ -11,8 +11,7 @@ writer_name = 's5_html'
# Settings:
settings_overrides['theme'] = 'small-black'
# local copy of default stylesheet:
-settings_overrides['stylesheet_path'] = (
- 'functional/input/data/html4css1.css')
+settings_overrides['stylesheet_path'] = 'functional/input/data/html4css1.css'
# Extra functional tests.
diff --git a/docutils/test/package_unittest.py b/docutils/test/package_unittest.py
index 1d02f304c..2a8db1070 100644
--- a/docutils/test/package_unittest.py
+++ b/docutils/test/package_unittest.py
@@ -88,8 +88,8 @@ def loadTestModules(path, name='', packages=None):
if filename.endswith('.py'):
fullpath = fullpath[len(path)+1:]
testModules.append(path2mod(fullpath))
- elif packages and os.path.isdir(fullpath) and \
- os.path.isfile(os.path.join(fullpath, '__init__.py')):
+ elif (packages and os.path.isdir(fullpath) and
+ os.path.isfile(os.path.join(fullpath, '__init__.py'))):
paths.append(fullpath)
# Import modules and add their tests to the suite.
sys.path.insert(0, path)
diff --git a/docutils/test/test__init__.py b/docutils/test/test__init__.py
index f40dac19b..fadfd5de2 100644
--- a/docutils/test/test__init__.py
+++ b/docutils/test/test__init__.py
@@ -96,7 +96,7 @@ class VersionInfoTests(unittest.TestCase):
VersionInfo(0, 2, 0, 'beta', 0, False),
VersionInfo(0, 2, 0, 'beta', 0, True),
VersionInfo(0, 2, 0, 'final', 0, True),
- ]
+ ]
# transform to version strings
versions = [docutils.utils.version_identifier(vinfo)
for vinfo in versioninfos]
diff --git a/docutils/test/test_dependencies.py b/docutils/test/test_dependencies.py
index 08b02764b..f3a866da9 100755
--- a/docutils/test/test_dependencies.py
+++ b/docutils/test/test_dependencies.py
@@ -23,7 +23,7 @@ paths = {'include': 'data/include.txt', # included rst file
'scaled-image': '../docs/user/rst/images/biohazard.png',
'figure-image': '../docs/user/rst/images/title.png',
'stylesheet': 'data/stylesheet.txt',
- }
+ }
# avoid latex writer future warnings:
latex_settings_overwrites = {'legacy_column_widths': False,
@@ -101,19 +101,19 @@ class RecordDependenciesTests(unittest.TestCase):
so['embed_stylesheet'] = False
record = self.get_record(writer_name='html', settings_overrides=so)
self.assertTrue(stylesheet not in record,
- '%r should not be in %r' % (stylesheet, record))
+ '%r should not be in %r' % (stylesheet, record))
record = self.get_record(writer_name='latex', settings_overrides=so)
self.assertTrue(stylesheet not in record,
- '%r should not be in %r' % (stylesheet, record))
+ '%r should not be in %r' % (stylesheet, record))
so['embed_stylesheet'] = True
record = self.get_record(writer_name='html', settings_overrides=so)
self.assertTrue(stylesheet in record,
- '%r should be in %r' % (stylesheet, record))
+ '%r should be in %r' % (stylesheet, record))
so['embed_stylesheet'] = True
record = self.get_record(writer_name='latex', settings_overrides=so)
self.assertTrue(stylesheet in record,
- '%r should be in %r' % (stylesheet, record))
+ '%r should be in %r' % (stylesheet, record))
if __name__ == '__main__':
diff --git a/docutils/test/test_io.py b/docutils/test/test_io.py
index d20995dea..7c21b4b24 100755
--- a/docutils/test/test_io.py
+++ b/docutils/test/test_io.py
@@ -205,19 +205,19 @@ class OutputTests(unittest.TestCase):
fo = io.FileOutput(destination=self.mock_stdout)
fo.write(self.bdata)
self.assertEqual(self.mock_stdout.buffer.getvalue(),
- self.bdata)
+ self.bdata)
def test_encoding_clash_resolved(self):
fo = io.FileOutput(destination=self.mock_stdout,
- encoding='latin1', autoclose=False)
+ encoding='latin1', autoclose=False)
fo.write(self.udata)
self.assertEqual(self.mock_stdout.buffer.getvalue(),
- self.udata.encode('latin1'))
+ self.udata.encode('latin1'))
def test_encoding_clash_nonresolvable(self):
del self.mock_stdout.buffer
fo = io.FileOutput(destination=self.mock_stdout,
- encoding='latin1', autoclose=False)
+ encoding='latin1', autoclose=False)
self.assertRaises(ValueError, fo.write, self.udata)
diff --git a/docutils/test/test_nodes.py b/docutils/test/test_nodes.py
index e477760fd..605ffa2d9 100755
--- a/docutils/test/test_nodes.py
+++ b/docutils/test/test_nodes.py
@@ -29,7 +29,7 @@ class TextTests(unittest.TestCase):
def test_repr(self):
self.assertEqual(repr(self.text), r"<#text: 'Line 1.\nLine 2.'>")
self.assertEqual(self.text.shortrepr(),
- r"<#text: 'Line 1.\nLine 2.'>")
+ r"<#text: 'Line 1.\nLine 2.'>")
self.assertEqual(repr(self.unicode_text), "<#text: 'Möhren'>")
def test_str(self):
@@ -61,10 +61,10 @@ class TextTests(unittest.TestCase):
def test_longrepr(self):
self.assertEqual(repr(self.longtext), r"<#text: 'Mary had a "
- r"little lamb whose fleece was white as snow "
- r"and everwh ...'>")
+ r"little lamb whose fleece was white as snow "
+ r"and everwh ...'>")
self.assertEqual(self.longtext.shortrepr(),
- r"<#text: 'Mary had a lit ...'>")
+ r"<#text: 'Mary had a lit ...'>")
def test_Text_rawsource_deprection_warning(self):
with warnings.catch_warnings(record=True) as wng:
@@ -115,13 +115,13 @@ class ElementTests(unittest.TestCase):
element['attr'] = '1'
self.assertEqual(repr(element), r"<Element: <#text: 'text\nmore'>>")
self.assertEqual(str(element),
- '<Element attr="1">text\nmore</Element>')
+ '<Element attr="1">text\nmore</Element>')
dom = element.asdom()
self.assertEqual(dom.toxml(),
- '<Element attr="1">text\nmore</Element>')
+ '<Element attr="1">text\nmore</Element>')
dom.unlink()
self.assertEqual(element.pformat(),
- '<Element attr="1">\n text\n more\n')
+ '<Element attr="1">\n text\n more\n')
def test_clear(self):
element = nodes.Element()
@@ -148,7 +148,7 @@ class ElementTests(unittest.TestCase):
element['ids'].append('someid')
self.assertEqual(element['ids'], ['someid'])
self.assertEqual(element.non_default_attributes(),
- {'ids': ['someid']})
+ {'ids': ['someid']})
self.assertTrue(element.is_not_default('ids'))
def test_update_basic_atts(self):
@@ -325,8 +325,9 @@ class MiscTests(unittest.TestCase):
node_class_names = []
for x in dir(nodes):
c = getattr(nodes, x)
- if isinstance(c, type) and \
- issubclass(c, nodes.Node) and len(c.__bases__) > 1:
+ if (isinstance(c, type)
+ and issubclass(c, nodes.Node)
+ and len(c.__bases__) > 1):
node_class_names.append(x)
node_class_names.sort()
nodes.node_class_names.sort()
@@ -336,7 +337,7 @@ class MiscTests(unittest.TestCase):
('a.b.c', 'a-b-c'), (' - a - b - c - ', 'a-b-c'), (' - ', ''),
('\u2020\u2066', ''), ('a \xa7 b \u2020 c', 'a-b-c'),
('1', ''), ('1abc', 'abc'),
- ]
+ ]
ids_unicode_all = [
('\u00f8 o with stroke', 'o-o-with-stroke'),
('\u0111 d with stroke', 'd-d-with-stroke'),
@@ -344,7 +345,7 @@ class MiscTests(unittest.TestCase):
('\u0131 dotless i', 'i-dotless-i'),
('\u0142 l with stroke', 'l-l-with-stroke'),
('\u0167 t with stroke', 't-t-with-stroke'),
- # From Latin Extended-B
+ # From Latin Extended-B
('\u0180 b with stroke', 'b-b-with-stroke'),
('\u0183 b with topbar', 'b-b-with-topbar'),
('\u0188 c with hook', 'c-c-with-hook'),
@@ -372,7 +373,7 @@ class MiscTests(unittest.TestCase):
('\u024b q with hook tail', 'q-q-with-hook-tail'),
('\u024d r with stroke', 'r-r-with-stroke'),
('\u024f y with stroke', 'y-y-with-stroke'),
- # From Latin-1 Supplements
+ # From Latin-1 Supplements
('\u00e0: a with grave', 'a-a-with-grave'),
('\u00e1 a with acute', 'a-a-with-acute'),
('\u00e2 a with circumflex', 'a-a-with-circumflex'),
@@ -400,7 +401,7 @@ class MiscTests(unittest.TestCase):
('\u00fc u with diaeresis', 'u-u-with-diaeresis'),
('\u00fd y with acute', 'y-y-with-acute'),
('\u00ff y with diaeresis', 'y-y-with-diaeresis'),
- # From Latin Extended-A
+ # From Latin Extended-A
('\u0101 a with macron', 'a-a-with-macron'),
('\u0103 a with breve', 'a-a-with-breve'),
('\u0105 a with ogonek', 'a-a-with-ogonek'),
@@ -456,7 +457,7 @@ class MiscTests(unittest.TestCase):
('\u017a z with acute', 'z-z-with-acute'),
('\u017c z with dot above', 'z-z-with-dot-above'),
('\u017e z with caron', 'z-z-with-caron'),
- # From Latin Extended-B
+ # From Latin Extended-B
('\u01a1 o with horn', 'o-o-with-horn'),
('\u01b0 u with horn', 'u-u-with-horn'),
('\u01c6 dz with caron', 'dz-dz-with-caron'),
@@ -493,7 +494,7 @@ class MiscTests(unittest.TestCase):
('\u0229 e with cedilla', 'e-e-with-cedilla'),
('\u022f o with dot above', 'o-o-with-dot-above'),
('\u0233 y with macron', 'y-y-with-macron'),
- # digraphs From Latin-1 Supplements
+ # digraphs From Latin-1 Supplements
('\u00df: ligature sz', 'sz-ligature-sz'),
('\u00e6 ae', 'ae-ae'),
('\u0153 ligature oe', 'oe-ligature-oe'),
@@ -520,31 +521,31 @@ class MiscTests(unittest.TestCase):
e += nodes.Element()
e += nodes.Element()
self.assertEqual(list(e.findall()),
- [e, e[0], e[0][0], e[0][1], e[0][1][0], e[1], e[2]])
+ [e, e[0], e[0][0], e[0][1], e[0][1][0], e[1], e[2]])
self.assertEqual(list(e.findall(include_self=False)),
- [e[0], e[0][0], e[0][1], e[0][1][0], e[1], e[2]])
+ [e[0], e[0][0], e[0][1], e[0][1][0], e[1], e[2]])
self.assertEqual(list(e.findall(descend=False)),
- [e])
+ [e])
self.assertEqual(list(e[0].findall(descend=False, ascend=True)),
- [e[0], e[1], e[2]])
+ [e[0], e[1], e[2]])
self.assertEqual(list(e[0][0].findall(descend=False, ascend=True)),
- [e[0][0], e[0][1], e[1], e[2]])
+ [e[0][0], e[0][1], e[1], e[2]])
self.assertEqual(list(e[0][0].findall(descend=False, siblings=True)),
- [e[0][0], e[0][1]])
+ [e[0][0], e[0][1]])
self.testlist = e[0:2]
self.assertEqual(list(e.findall(condition=self.not_in_testlist)),
- [e, e[0][0], e[0][1], e[0][1][0], e[2]])
+ [e, e[0][0], e[0][1], e[0][1][0], e[2]])
# Return siblings despite siblings=False because ascend is true.
self.assertEqual(list(e[1].findall(ascend=True, siblings=False)),
- [e[1], e[2]])
+ [e[1], e[2]])
self.assertEqual(list(e[0].findall()),
- [e[0], e[0][0], e[0][1], e[0][1][0]])
+ [e[0], e[0][0], e[0][1], e[0][1][0]])
self.testlist = [e[0][0], e[0][1]]
self.assertEqual(list(e[0].findall(condition=self.not_in_testlist)),
- [e[0], e[0][1][0]])
+ [e[0], e[0][1][0]])
self.testlist.append(e[0][1][0])
self.assertEqual(list(e[0].findall(condition=self.not_in_testlist)),
- [e[0]])
+ [e[0]])
self.assertEqual(list(e.findall(nodes.TextElement)), [e[0][1]])
def test_next_node(self):
@@ -565,7 +566,7 @@ class MiscTests(unittest.TestCase):
(e[2], None)]
for node, next_node in compare:
self.assertEqual(node.next_node(self.not_in_testlist, ascend=True),
- next_node)
+ next_node)
self.assertEqual(e[0][0].next_node(ascend=True), e[0][1])
self.assertEqual(e[2].next_node(), None)
@@ -645,7 +646,7 @@ class SetIdTests(unittest.TestCase):
nodes.section(), # Name empty
nodes.section(names=['Test']), # duplicate id
nodes.footnote(names=['2019-10-30']), # id empty
- ]
+ ]
def test_set_id_default(self):
# Default prefixes.
diff --git a/docutils/test/test_parsers/test_rst/test_directives/test_block_quotes.py b/docutils/test/test_parsers/test_rst/test_directives/test_block_quotes.py
index 6298c7d16..efc7e0508 100755
--- a/docutils/test/test_parsers/test_rst/test_directives/test_block_quotes.py
+++ b/docutils/test/test_parsers/test_rst/test_directives/test_block_quotes.py
@@ -61,9 +61,9 @@ generic_tests = [
totest = {}
for block_quote_type in ('epigraph', 'highlights', 'pull-quote'):
- totest[block_quote_type] = [
- [text % {'type': block_quote_type} for text in pair]
- for pair in generic_tests]
+ totest[block_quote_type] = [
+ [text % {'type': block_quote_type} for text in pair]
+ for pair in generic_tests]
if __name__ == '__main__':
diff --git a/docutils/test/test_publisher.py b/docutils/test/test_publisher.py
index 5d4a01969..f00fc7344 100755
--- a/docutils/test/test_publisher.py
+++ b/docutils/test/test_publisher.py
@@ -38,10 +38,11 @@ pseudoxml_output = b"""\
<paragraph>
Unknown target name: "nonexistent".
"""
-exposed_pseudoxml_output = (b"""\
-<document ids="test-document" internal:refnames="{"""
-+ u_prefix
-+ b"""\'nonexistent\': [<reference: <#text: \'nonexistent\'>>]}" names="test\\ document" source="<string>" title="Test Document">
+exposed_pseudoxml_output = (
+ b'<document ids="test-document" internal:refnames="{'
+ + u_prefix
+ + b"""\
+\'nonexistent\': [<reference: <#text: \'nonexistent\'>>]}" names="test\\ document" source="<string>" title="Test Document">
<title>
Test Document
<paragraph>
@@ -66,7 +67,7 @@ class PublisherTests(DocutilsTestSupport.StandardTestCase):
# pass IOErrors to calling application if `traceback` is True
try:
core.publish_cmdline(argv=['nonexisting/path'],
- settings_overrides={'traceback': True})
+ settings_overrides={'traceback': True})
except IOError as e:
self.assertTrue(isinstance(e, io.InputError))
@@ -75,7 +76,7 @@ class PublisherTests(DocutilsTestSupport.StandardTestCase):
# pass IOErrors to calling application if `traceback` is True
try:
core.publish_cmdline(argv=['data/include.txt', 'nonexisting/path'],
- settings_overrides={'traceback': True})
+ settings_overrides={'traceback': True})
except IOError as e:
self.assertTrue(isinstance(e, io.OutputError))
@@ -120,9 +121,9 @@ class PublishDoctreeTestCase(DocutilsTestSupport.StandardTestCase, docutils.Sett
# Test publishing parts using document as the source.
parts = core.publish_parts(
- reader_name='doctree', source_class=io.DocTreeInput,
- source=doctree, source_path='test', writer_name='html',
- settings_spec=self)
+ reader_name='doctree', source_class=io.DocTreeInput,
+ source=doctree, source_path='test', writer_name='html',
+ settings_spec=self)
self.assertTrue(isinstance(parts, dict))
def test_publish_pickle(self):
diff --git a/docutils/test/test_settings.py b/docutils/test/test_settings.py
index fff9025e7..17ec55930 100755
--- a/docutils/test/test_settings.py
+++ b/docutils/test/test_settings.py
@@ -40,7 +40,8 @@ class ConfigFileTests(unittest.TestCase):
'source_link': True,
'stylesheet': None,
'stylesheet_path': ['stylesheets/pep.css'],
- 'template': fixpath('data/pep-html-template')},
+ 'template': fixpath('data/pep-html-template'),
+ },
'one': {'datestamp': '%Y-%m-%d %H:%M UTC',
'generator': True,
'no_random': True,
@@ -55,14 +56,15 @@ class ConfigFileTests(unittest.TestCase):
'trim_footnote_reference_space': True,
'output_encoding': 'ascii',
'output_encoding_error_handler': 'xmlcharrefreplace',
- },
+ },
'two': {'footnote_references': 'superscript',
'generator': False,
'record_dependencies': utils.DependencyList(),
'stylesheet': None,
'stylesheet_path': ['test.css'],
'trim_footnote_reference_space': None,
- 'output_encoding_error_handler': 'namereplace'},
+ 'output_encoding_error_handler': 'namereplace',
+ },
'two_html5': {
# use defaults from html5_polyglot writer component
# ignore settings in [html4css1 writer] section,
@@ -77,7 +79,8 @@ class ConfigFileTests(unittest.TestCase):
'list': {'expose_internals': ['a', 'b', 'c', 'd', 'e'],
'strip_classes': ['spam', 'pan', 'fun', 'parrot'],
'strip_elements_with_classes': ['sugar', 'flour', 'milk',
- 'safran']},
+ 'safran']
+ },
'list2': {'expose_internals': ['a', 'b', 'c', 'd', 'e', 'f'],
'strip_classes': ['spam', 'pan', 'fun', 'parrot',
'ham', 'eggs'],
@@ -85,7 +88,7 @@ class ConfigFileTests(unittest.TestCase):
'safran', 'eggs', 'salt'],
'stylesheet': ['style2.css'],
'stylesheet_path': None,
- },
+ },
'error': {'error_encoding': 'ascii',
'error_encoding_error_handler': 'strict'},
'error2': {'error_encoding': 'latin1'},
diff --git a/docutils/test/test_statemachine.py b/docutils/test/test_statemachine.py
index 8c498d4dc..3532590a6 100755
--- a/docutils/test/test_statemachine.py
+++ b/docutils/test/test_statemachine.py
@@ -81,7 +81,7 @@ class MockState(statemachine.StateWS):
def bullet(self, match, context, next_state):
if self.debug: print('bullet%s' % self.level, file=sys.stderr)
context, next_state, result \
- = self.known_indent(match, context, next_state)
+ = self.known_indent(match, context, next_state)
return [], next_state, ['bullet%s' % self.level] + result
def text(self, match, context, next_state):
@@ -90,8 +90,8 @@ class MockState(statemachine.StateWS):
def literalblock(self):
indented, indent, offset, good = self.state_machine.get_indented()
- if self.debug: print('literalblock%s(%s)' % (self.level,
- indent), file=sys.stderr)
+ if self.debug: print('literalblock%s(%s)' % (self.level, indent),
+ file=sys.stderr)
return ['literalblock%s(%s)' % (self.level, indent)]
def eof(self, context):
@@ -125,9 +125,9 @@ class EmptySMTests(unittest.TestCase):
self.assertRaises(statemachine.UnknownStateError, self.sm.get_state,
'unknownState')
self.assertTrue(isinstance(self.sm.get_state('State'),
- statemachine.State))
+ statemachine.State))
self.assertTrue(isinstance(self.sm.get_state('StateWS'),
- statemachine.State))
+ statemachine.State))
self.assertEqual(self.sm.current_state, 'StateWS')
@@ -176,7 +176,7 @@ class SMWSTests(unittest.TestCase):
self.assertEqual(indent, lbindent)
self.assertEqual(indented, literalblock)
self.assertEqual(offset, (len(para1) + len(item1) + len(item2)
- - len(literalblock)))
+ - len(literalblock)))
self.assertTrue(good)
def test_get_text_block(self):
@@ -246,10 +246,10 @@ class EmptyStateTests(unittest.TestCase):
def test_make_transition(self):
dummy = re.compile('dummy')
self.assertEqual(self.state.make_transition('nop', 'bogus'),
- (dummy, self.state.nop, 'bogus'))
+ (dummy, self.state.nop, 'bogus'))
self.assertEqual(self.state.make_transition('nop'),
- (dummy, self.state.nop,
- self.state.__class__.__name__))
+ (dummy, self.state.nop,
+ self.state.__class__.__name__))
self.assertRaises(statemachine.TransitionPatternNotFound,
self.state.make_transition, 'None')
self.assertRaises(statemachine.TransitionMethodNotFound,
@@ -258,13 +258,13 @@ class EmptyStateTests(unittest.TestCase):
def test_make_transitions(self):
dummy = re.compile('dummy')
self.assertEqual(self.state.make_transitions(('nop', ['nop2'],
- ('nop3', 'bogus'))),
- (['nop', 'nop2', 'nop3'],
- {'nop': (dummy, self.state.nop,
+ ('nop3', 'bogus'))),
+ (['nop', 'nop2', 'nop3'],
+ {'nop': (dummy, self.state.nop,
+ self.state.__class__.__name__),
+ 'nop2': (dummy, self.state.nop2,
self.state.__class__.__name__),
- 'nop2': (dummy, self.state.nop2,
- self.state.__class__.__name__),
- 'nop3': (dummy, self.state.nop3, 'bogus')}))
+ 'nop3': (dummy, self.state.nop3, 'bogus')}))
class MiscTests(unittest.TestCase):
@@ -274,7 +274,7 @@ class MiscTests(unittest.TestCase):
" I'm fine thanks."]
def test_string2lines(self):
self.assertEqual(statemachine.string2lines(self.s2l_string),
- self.s2l_expected)
+ self.s2l_expected)
if __name__ == '__main__':
diff --git a/docutils/test/test_traversals.py b/docutils/test/test_traversals.py
index f21ef3591..45934364f 100755
--- a/docutils/test/test_traversals.py
+++ b/docutils/test/test_traversals.py
@@ -66,9 +66,9 @@ class StopTraversalTests(unittest.TestCase, docutils.SettingsSpec):
self.assertTrue(isinstance(doctree, nodes.document))
parts = docutils.core.publish_parts(
- reader_name='doctree', source_class=docutils.io.DocTreeInput,
- source=doctree, source_path='test',
- writer=AttentiveWriter())
+ reader_name='doctree', source_class=docutils.io.DocTreeInput,
+ source=doctree, source_path='test',
+ writer=AttentiveWriter())
if __name__ == '__main__':
diff --git a/docutils/test/test_utils.py b/docutils/test/test_utils.py
index 17304bbd6..31c4ec7a7 100755
--- a/docutils/test/test_utils.py
+++ b/docutils/test/test_utils.py
@@ -34,7 +34,7 @@ class ReporterTests(unittest.TestCase):
debug output
""")
self.assertEqual(self.stream.getvalue(),
- 'test data:: (DEBUG/0) debug output\n')
+ 'test data:: (DEBUG/0) debug output\n')
def test_level1(self):
sw = self.reporter.system_message(1, 'a little reminder')
@@ -53,7 +53,7 @@ class ReporterTests(unittest.TestCase):
a warning
""")
self.assertEqual(self.stream.getvalue(),
- 'test data:: (WARNING/2) a warning\n')
+ 'test data:: (WARNING/2) a warning\n')
def test_level3(self):
sw = self.reporter.system_message(3, 'an error')
@@ -63,13 +63,13 @@ class ReporterTests(unittest.TestCase):
an error
""")
self.assertEqual(self.stream.getvalue(),
- 'test data:: (ERROR/3) an error\n')
+ 'test data:: (ERROR/3) an error\n')
def test_level4(self):
self.assertRaises(utils.SystemMessage, self.reporter.system_message, 4,
'a severe error, raises an exception')
self.assertEqual(self.stream.getvalue(), 'test data:: (SEVERE/4) '
- 'a severe error, raises an exception\n')
+ 'a severe error, raises an exception\n')
def test_unicode_message(self):
@@ -166,8 +166,8 @@ class NameValueTests(unittest.TestCase):
output = utils.extract_name_value(
"""att1=val1 att2=val2 att3="value number '3'" att4=val4""")
self.assertEqual(output, [('att1', 'val1'), ('att2', 'val2'),
- ('att3', "value number '3'"),
- ('att4', 'val4')])
+ ('att3', "value number '3'"),
+ ('att4', 'val4')])
class ExtensionOptionTests(unittest.TestCase):
diff --git a/docutils/test/test_viewlist.py b/docutils/test/test_viewlist.py
index 90f5e8f06..3c70f3a7e 100755
--- a/docutils/test/test_viewlist.py
+++ b/docutils/test/test_viewlist.py
@@ -202,11 +202,11 @@ Unindented text.
def test_get_indented(self):
self.assertEqual(self.a.get_indented(),
- ([], 0, 0))
+ ([], 0, 0))
block = statemachine.StringList(
statemachine.string2lines(self.indented_string))
self.assertEqual(block.get_indented(),
- ([s[6:] for s in block], 6, 1))
+ ([s[6:] for s in block], 6, 1))
if __name__ == '__main__':
diff --git a/docutils/test/test_writers/test_docutils_xml.py b/docutils/test/test_writers/test_docutils_xml.py
index 524df6105..c6f8bb434 100755
--- a/docutils/test/test_writers/test_docutils_xml.py
+++ b/docutils/test/test_writers/test_docutils_xml.py
@@ -184,7 +184,8 @@ class DocutilsXMLTestCase(DocutilsTestSupport.StandardTestCase):
+ invalid_raw_xml).encode('latin1', 'xmlcharrefreplace')
self.assertEqual(result, expected)
warnings.seek(0)
- self.assertEqual(warnings.readlines(),
+ self.assertEqual(
+ warnings.readlines(),
['<string>:5: '
'(WARNING/2) Invalid raw XML in column 2, line offset 3:\n',
'<root>\n',
diff --git a/docutils/test/test_writers/test_html4css1_misc.py b/docutils/test/test_writers/test_html4css1_misc.py
index 69b1f2509..76e6a351d 100755
--- a/docutils/test/test_writers/test_html4css1_misc.py
+++ b/docutils/test/test_writers/test_html4css1_misc.py
@@ -10,7 +10,9 @@ Miscellaneous HTML writer tests.
import os
-from .__init__ import DocutilsTestSupport
+if __name__ == '__main__':
+ import __init__
+from test_writers import DocutilsTestSupport
from docutils import core
@@ -22,7 +24,8 @@ class EncodingTestCase(DocutilsTestSupport.StandardTestCase):
settings_overrides={
'output_encoding': 'latin1',
'stylesheet': '',
- '_disable_config': True,}
+ '_disable_config': True,
+ }
result = core.publish_string(
'EUR = \u20ac', writer_name='html4css1',
settings_overrides=settings_overrides)
@@ -32,10 +35,10 @@ class EncodingTestCase(DocutilsTestSupport.StandardTestCase):
class MovingArgsTestCase(DocutilsTestSupport.StandardTestCase):
- settings_overrides={'stylesheet_path': '',
- # 'embed_stylesheet': False,
- '_disable_config': True,
- }
+ mys = {'stylesheet_path': '',
+ # 'embed_stylesheet': False,
+ '_disable_config': True,
+ }
def test_definition_list_item_classes(self):
# Do not drop class arguments for the definition list item.
@@ -50,7 +53,7 @@ second term:
second def
"""
result = core.publish_string(data, writer_name='html4css1',
- settings_overrides=self.settings_overrides)
+ settings_overrides=self.mys)
self.assertIn(b'<dt class="for the second item">second term:</dt>',
result)
@@ -67,7 +70,7 @@ second term:
second def
"""
result = core.publish_string(data, writer_name='html4css1',
- settings_overrides=self.settings_overrides)
+ settings_overrides=self.mys)
self.assertIn(b'<dt id="second-item">second term:</dt>',
result)
@@ -77,61 +80,61 @@ class SettingsTestCase(DocutilsTestSupport.StandardTestCase):
def test_default_stylesheet(self):
# default style sheet, embedded
- mysettings = {'_disable_config': True,}
+ mys = {'_disable_config': True,}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('Default cascading style sheet '
'for the HTML output of Docutils.', styles)
def test_default_stylesheet_linked(self):
# default style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
def test_math_stylesheet_linked(self):
# default + math style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_path': 'html4css1.css, math.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_path': 'html4css1.css, math.css'}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
self.assertIn('docutils/writers/html5_polyglot/math.css', styles)
def test_custom_stylesheet_linked(self):
# default + custom style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_path': 'html4css1.css, '
- 'data/ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_path': 'html4css1.css, '
+ 'data/ham.css'}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
self.assertIn('href="data/ham.css"', styles)
def test_custom_stylesheet_dir(self):
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_dirs': ('../docutils/writers/html4css1/',
- 'data'),
- 'stylesheet_path': 'html4css1.css, ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_dirs': ('../docutils/writers/html4css1/',
+ 'data'),
+ 'stylesheet_path': 'html4css1.css, ham.css'}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
if os.path.isdir('../docutils/writers/html4css1/'):
self.assertIn('docutils/writers/html4css1/html4css1.css', styles)
self.assertIn('href="data/ham.css"', styles)
def test_custom_stylesheet_dir_embedded(self):
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': True,
- 'stylesheet_dirs': ('../docutils/writers/html4css1/',
- 'data'),
- 'stylesheet_path': 'ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': True,
+ 'stylesheet_dirs': ('../docutils/writers/html4css1/',
+ 'data'),
+ 'stylesheet_path': 'ham.css'}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('dl.docutils dd {\n margin-bottom: 0.5em }', styles)
@@ -148,45 +151,45 @@ class MathTestCase(DocutilsTestSupport.StandardTestCase):
def test_math_output_default(self):
# HTML with math.css stylesheet (since 0.11)
- mysettings = {'_disable_config': True,}
+ mys = {'_disable_config': True,}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('convert LaTeX equations to HTML output.', styles)
def test_math_output_mathjax(self):
# Explicitly specifying math_output=MathJax, case insensitively
# use default MathJax URL
- mysettings = {'_disable_config': True,
- 'report_level': 3,
- 'math_output': 'MathJax'}
+ mys = {'_disable_config': True,
+ 'report_level': 3,
+ 'math_output': 'MathJax'}
head = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
self.assertIn(self.mathjax_script % self.default_mathjax_url, head)
def test_math_output_mathjax_custom(self):
# Customizing MathJax URL
- mysettings = {'_disable_config': True,
- 'math_output':
- 'mathjax %s' % self.custom_mathjax_url}
+ mys = {'_disable_config': True,
+ 'math_output':
+ 'mathjax %s' % self.custom_mathjax_url}
head = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
self.assertIn(self.mathjax_script % self.custom_mathjax_url, head)
def test_math_output_html(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'HTML'}
+ mys = {'_disable_config': True,
+ 'math_output': 'HTML'}
head = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
# There should be no MathJax script when math_output is not MathJax
self.assertNotIn('MathJax.js', head)
def test_math_output_html_stylesheet(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'HTML math.css,custom/style.css',
- 'stylesheet_dirs': ('.', 'functional/input/data'),
- 'embed_stylesheet': False}
+ mys = {'_disable_config': True,
+ 'math_output': 'HTML math.css,custom/style.css',
+ 'stylesheet_dirs': ('.', 'functional/input/data'),
+ 'embed_stylesheet': False}
styles = core.publish_parts(self.data, writer_name='html4css1',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertEqual("""\
<link rel="stylesheet" href="functional/input/data/html4css1.css" type="text/css" />
<link rel="stylesheet" href="functional/input/data/math.css" type="text/css" />
@@ -194,8 +197,6 @@ class MathTestCase(DocutilsTestSupport.StandardTestCase):
""", styles)
def test_math_output_mathjax_no_math(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'MathJax'}
# There should be no math script when text does not contain math
head = core.publish_parts('No math.', writer_name='html4css1')['head']
self.assertNotIn('MathJax', head)
diff --git a/docutils/test/test_writers/test_html4css1_template.py b/docutils/test/test_writers/test_html4css1_template.py
index 30fb1a169..6e91ad77b 100755
--- a/docutils/test/test_writers/test_html4css1_template.py
+++ b/docutils/test/test_writers/test_html4css1_template.py
@@ -240,7 +240,7 @@ html_body = """\
footer text
</div>"""
''' % {'version': DocutilsTestSupport.docutils.__version__,
- 'drive': drive_prefix,
+ 'drive': drive_prefix,
}]
]
diff --git a/docutils/test/test_writers/test_html5_polyglot_misc.py b/docutils/test/test_writers/test_html5_polyglot_misc.py
index 1c029743c..8bca18524 100644
--- a/docutils/test/test_writers/test_html5_polyglot_misc.py
+++ b/docutils/test/test_writers/test_html5_polyglot_misc.py
@@ -35,10 +35,10 @@ class EncodingTestCase(DocutilsTestSupport.StandardTestCase):
class MovingArgsTestCase(DocutilsTestSupport.StandardTestCase):
- settings_overrides={'stylesheet_path': '',
- # 'embed_stylesheet': False,
- '_disable_config': True,
- }
+ mys = {'stylesheet_path': '',
+ # 'embed_stylesheet': False,
+ '_disable_config': True,
+ }
def test_definition_list_item_classes(self):
# Do not drop class arguments for the definition list item.
@@ -53,7 +53,7 @@ second term:
second def
"""
result = core.publish_string(data, writer_name='html5_polyglot',
- settings_overrides=self.settings_overrides)
+ settings_overrides=self.mys)
self.assertIn(b'<dt class="for the second item">second term:</dt>',
result)
@@ -70,7 +70,7 @@ second term:
second def
"""
result = core.publish_string(data, writer_name='html5_polyglot',
- settings_overrides=self.settings_overrides)
+ settings_overrides=self.mys)
self.assertIn(b'<dt id="second-item">second term:</dt>',
result)
@@ -80,72 +80,72 @@ class SettingsTestCase(DocutilsTestSupport.StandardTestCase):
def test_default_stylesheet(self):
# default style sheet, embedded
- mysettings = {'_disable_config': True,}
+ mys = {'_disable_config': True,}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('Minimal style sheet '
'for the HTML output of Docutils.', styles)
def test_default_stylesheet_linked(self):
# default style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html5_polyglot/minimal.css', styles)
def test_math_stylesheet_linked(self):
# default + math style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_path': 'minimal.css, math.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_path': 'minimal.css, math.css'}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html5_polyglot/minimal.css', styles)
self.assertIn('docutils/writers/html5_polyglot/math.css', styles)
def test_custom_stylesheet_linked(self):
# default + custom style sheet, linked
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_path': 'minimal.css, '
- 'data/ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_path': 'minimal.css, '
+ 'data/ham.css'}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('docutils/writers/html5_polyglot/minimal.css', styles)
self.assertIn('href="data/ham.css"', styles)
def test_custom_stylesheet_dir(self):
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': False,
- 'stylesheet_dirs': ('../docutils/writers/html5_polyglot/',
- 'data'),
- 'stylesheet_path': 'minimal.css, ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': False,
+ 'stylesheet_dirs': ('../docutils/writers/html5_polyglot/',
+ 'data'),
+ 'stylesheet_path': 'minimal.css, ham.css'}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
if os.path.isdir('../docutils/writers/html5_polyglot/'):
self.assertIn('docutils/writers/html5_polyglot/minimal.css', styles)
self.assertIn('href="data/ham.css"', styles)
def test_custom_stylesheet_dir_embedded(self):
- mysettings = {'_disable_config': True,
- 'embed_stylesheet': True,
- 'stylesheet_dirs': ('../docutils/writers/html5_polyglot/',
- 'data'),
- 'stylesheet_path': 'ham.css'}
+ mys = {'_disable_config': True,
+ 'embed_stylesheet': True,
+ 'stylesheet_dirs': ('../docutils/writers/html5_polyglot/',
+ 'data'),
+ 'stylesheet_path': 'ham.css'}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('dl.docutils dd {\n margin-bottom: 0.5em }', styles)
def test_future_warnings(self):
"""Warn about changing defaults."""
- mysettings={'_disable_config': True,
- 'embed_images': False,
- }
+ mys={'_disable_config': True,
+ 'embed_images': False,
+ }
with warnings.catch_warnings(record=True) as wng:
warnings.simplefilter("always")
core.publish_string('warnings test', writer_name='html5',
- settings_overrides=mysettings)
+ settings_overrides=mys)
self.assertEqual(len(wng), 1, "Expected FutureWarning.")
assert issubclass(wng[0].category, FutureWarning)
@@ -163,45 +163,45 @@ class MathTestCase(DocutilsTestSupport.StandardTestCase):
def test_math_output_default(self):
# HTML with math.css stylesheet (since 0.11)
- mysettings = {'_disable_config': True,}
+ mys = {'_disable_config': True,}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertIn('convert LaTeX equations to HTML output.', styles)
def test_math_output_mathjax(self):
# Explicitly specifying math_output=MathJax, case insensitively
# use default MathJax URL
- mysettings = {'_disable_config': True,
- 'report_level': 3,
- 'math_output': 'MathJax'}
+ mys = {'_disable_config': True,
+ 'report_level': 3,
+ 'math_output': 'MathJax'}
head = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
self.assertIn(self.mathjax_script % self.default_mathjax_url, head)
def test_math_output_mathjax_custom(self):
# Customizing MathJax URL
- mysettings = {'_disable_config': True,
- 'math_output':
- 'mathjax %s' % self.custom_mathjax_url}
+ mys = {'_disable_config': True,
+ 'math_output':
+ 'mathjax %s' % self.custom_mathjax_url}
head = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
self.assertIn(self.mathjax_script % self.custom_mathjax_url, head)
def test_math_output_html(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'HTML'}
+ mys = {'_disable_config': True,
+ 'math_output': 'HTML'}
head = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['head']
+ settings_overrides=mys)['head']
# There should be no MathJax script when math_output is not MathJax
self.assertNotIn('MathJax.js', head)
def test_math_output_html_stylesheet(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'HTML math.css,custom/style.css',
- 'stylesheet_dirs': ('.', 'functional/input/data'),
- 'embed_stylesheet': False}
+ mys = {'_disable_config': True,
+ 'math_output': 'HTML math.css,custom/style.css',
+ 'stylesheet_dirs': ('.', 'functional/input/data'),
+ 'embed_stylesheet': False}
styles = core.publish_parts(self.data, writer_name='html5_polyglot',
- settings_overrides=mysettings)['stylesheet']
+ settings_overrides=mys)['stylesheet']
self.assertEqual("""\
<link rel="stylesheet" href="functional/input/data/minimal.css" type="text/css" />
<link rel="stylesheet" href="functional/input/data/plain.css" type="text/css" />
@@ -210,8 +210,6 @@ class MathTestCase(DocutilsTestSupport.StandardTestCase):
""", styles)
def test_math_output_mathjax_no_math(self):
- mysettings = {'_disable_config': True,
- 'math_output': 'MathJax'}
# There should be no math script when text does not contain math
head = core.publish_parts('No math.', writer_name='html5_polyglot')['head']
self.assertNotIn('MathJax', head)
diff --git a/docutils/test/test_writers/test_html5_polyglot_parts.py b/docutils/test/test_writers/test_html5_polyglot_parts.py
index bbcbac59d..b36254a39 100644
--- a/docutils/test/test_writers/test_html5_polyglot_parts.py
+++ b/docutils/test/test_writers/test_html5_polyglot_parts.py
@@ -28,10 +28,10 @@ class Html5WriterPublishPartsTestCase(HtmlWriterPublishPartsTestCase):
settings_default_overrides['section_self_link'] = True
standard_content_type_template = '<meta charset="%s"/>\n'
- standard_generator_template = ('<meta name="generator"'
- ' content="Docutils %s: https://docutils.sourceforge.io/" />\n')
- standard_viewport_template = ('<meta name="viewport"'
- ' content="width=device-width, initial-scale=1" />\n')
+ standard_generator_template = '<meta name="generator"' \
+ ' content="Docutils %s: https://docutils.sourceforge.io/" />\n'
+ standard_viewport_template = '<meta name="viewport"' \
+ ' content="width=device-width, initial-scale=1" />\n'
standard_html_meta_value = (standard_content_type_template
+ standard_viewport_template
diff --git a/docutils/test/test_writers/test_latex2e.py b/docutils/test/test_writers/test_latex2e.py
index 75c852803..1277faf7e 100755
--- a/docutils/test/test_writers/test_latex2e.py
+++ b/docutils/test/test_writers/test_latex2e.py
@@ -16,10 +16,10 @@ from test_transforms import DocutilsTestSupport # before importing docutils!
def suite():
settings = {'use_latex_toc': False,
- # avoid latex writer future warnings:
- 'use_latex_citations': False,
- 'legacy_column_widths': True,
- }
+ # avoid latex writer future warnings:
+ 'use_latex_citations': False,
+ 'legacy_column_widths': True,
+ }
s = DocutilsTestSupport.PublishTestSuite('latex', suite_settings=settings)
s.generateTests(totest)
settings['use_latex_toc'] = True
@@ -112,14 +112,12 @@ head_booktabs = head_template.substitute(
+ '\\usepackage{booktabs}\n' + parts['longtable']))
head_textcomp = head_template.substitute(
- dict(parts, requirements = parts['requirements'] +
-r"""\usepackage{textcomp} % text symbol macros
-"""))
+ dict(parts, requirements=parts['requirements']
+ + '\\usepackage{textcomp} % text symbol macros\n'))
head_alltt = head_template.substitute(
- dict(parts, requirements = parts['requirements'] +
-r"""\usepackage{alltt}
-"""))
+ dict(parts, requirements = parts['requirements']
+ + '\\usepackage{alltt}\n'))
totest = {}
@@ -1054,8 +1052,7 @@ head_template.substitute(
\csname \DocutilsClassFunctionName \endcsname}%
{\csname end\DocutilsClassFunctionName \endcsname}%
\fi
-"""
- )
+""")
) + r"""
\begin{DUclass}{compound}
Compound paragraph
diff --git a/docutils/test/test_writers/test_latex2e_misc.py b/docutils/test/test_writers/test_latex2e_misc.py
index c4ce200b2..aff34cd4f 100644
--- a/docutils/test/test_writers/test_latex2e_misc.py
+++ b/docutils/test/test_writers/test_latex2e_misc.py
@@ -48,8 +48,8 @@ class TocTestCase(DocutilsTestSupport.StandardTestCase):
doctree = core.publish_doctree(contents_test_input,
settings_overrides=mysettings)
result = core.publish_from_doctree(doctree,
- writer_name='latex',
- settings_overrides=mysettings)
+ writer_name='latex',
+ settings_overrides=mysettings)
self.assertNotIn(r'\item \hyperref[foo]{foo}', result)
# self.assertIn(r'\tableofcontents', result)
diff --git a/docutils/test/test_writers/test_odt.py b/docutils/test/test_writers/test_odt.py
index 5484d98cd..0e263d2d2 100755
--- a/docutils/test/test_writers/test_odt.py
+++ b/docutils/test/test_writers/test_odt.py
@@ -51,7 +51,7 @@ EXPECTED_PATH = 'functional/expected/'
class DocutilsOdtTestCase(DocutilsTestSupport.StandardTestCase):
def process_test(self, input_filename, expected_filename,
- save_output_name=None, settings_overrides=None):
+ save_output_name=None, settings_overrides=None):
# Test that xmlcharrefreplace is the default output encoding
# error handler.
input_file = open(INPUT_PATH + input_filename, 'rb')
@@ -129,27 +129,22 @@ class DocutilsOdtTestCase(DocutilsTestSupport.StandardTestCase):
def test_odt_basic(self):
self.process_test('odt_basic.txt', 'odt_basic.odt',
- save_output_name='odt_basic.odt'
- )
+ save_output_name='odt_basic.odt')
def test_odt_nested_class(self):
self.process_test('odt_nested_class.txt',
'odt_nested_class.odt',
- save_output_name='odt_nested_class.odt'
- )
+ save_output_name='odt_nested_class.odt')
self.process_test('odt_unnested_class.txt',
'odt_unnested_class.odt',
- save_output_name='odt_unnested_class.odt'
- )
+ save_output_name='odt_unnested_class.odt')
self.process_test('odt_no_class.txt',
'odt_no_class.odt',
- save_output_name='odt_no_class.odt'
- )
+ save_output_name='odt_no_class.odt')
def test_odt_tables1(self):
self.process_test('odt_tables1.txt', 'odt_tables1.odt',
- save_output_name='odt_tables1.odt'
- )
+ save_output_name='odt_tables1.odt')
def test_odt_custom_headfoot(self):
settings_overrides = {
@@ -158,14 +153,12 @@ class DocutilsOdtTestCase(DocutilsTestSupport.StandardTestCase):
'language_code': 'en-US',
}
self.process_test('odt_custom_headfoot.txt', 'odt_custom_headfoot.odt',
- settings_overrides=settings_overrides,
- save_output_name='odt_custom_headfoot.odt'
- )
+ settings_overrides=settings_overrides,
+ save_output_name='odt_custom_headfoot.odt')
def test_odt_header_footer(self):
self.process_test('odt_header_footer.txt', 'odt_header_footer.odt',
- save_output_name='odt_header_footer.odt'
- )
+ save_output_name='odt_header_footer.odt')
def test_odt_literal_block(self):
self.process_test('odt_literal_block.txt', 'odt_literal_block.odt')
@@ -178,12 +171,10 @@ class DocutilsOdtTestCase(DocutilsTestSupport.StandardTestCase):
def test_odt_footnotes(self):
self.process_test('odt_footnotes.txt', 'odt_footnotes.odt',
- save_output_name='odt_footnotes.odt'
- )
+ save_output_name='odt_footnotes.odt')
def test_odt_raw(self):
self.process_test('odt_raw.txt', 'odt_raw.odt',
- save_output_name='odt_raw.odt'
- )
+ save_output_name='odt_raw.odt')
#
# Template for new tests.
diff --git a/docutils/tools/buildhtml.py b/docutils/tools/buildhtml.py
index faa76dceb..4751028ad 100755
--- a/docutils/tools/buildhtml.py
+++ b/docutils/tools/buildhtml.py
@@ -81,7 +81,7 @@ class SettingsSpec(docutils.SettingsSpec):
{'metavar': '<writer>',
'choices': ['html', 'html4', 'html5'],
# 'default': 'html' (set below)
- }),
+ }),
(frontend.SUPPRESS_HELP, # Obsoleted by "--writer"
['--html-writer'],
{'metavar': '<writer>',
@@ -133,13 +133,13 @@ class Builder:
'': Struct(components=(pep.Reader, rst.Parser, pep_html.Writer,
SettingsSpec)),
'html4': Struct(components=(rst.Parser, standalone.Reader,
- html4css1.Writer, SettingsSpec),
- reader_name='standalone',
- writer_name='html4'),
+ html4css1.Writer, SettingsSpec),
+ reader_name='standalone',
+ writer_name='html4'),
'html5': Struct(components=(rst.Parser, standalone.Reader,
- html5_polyglot.Writer, SettingsSpec),
- reader_name='standalone',
- writer_name='html5'),
+ html5_polyglot.Writer, SettingsSpec),
+ reader_name='standalone',
+ writer_name='html5'),
'PEPs': Struct(components=(rst.Parser, pep.Reader,
pep_html.Writer, SettingsSpec),
reader_name='pep',
@@ -178,8 +178,9 @@ class Builder:
if self.initial_settings.html_writer is not None:
warnings.warn('The configuration setting "html_writer" '
- 'will be removed in Docutils 1.2. '
- 'Use setting "writer" instead.', FutureWarning, stacklevel=5)
+ 'will be removed in Docutils 1.2. '
+ 'Use setting "writer" instead.',
+ FutureWarning, stacklevel=5)
if self.initial_settings.writer is None:
self.initial_settings.writer = (self.initial_settings.html_writer
or 'html')
@@ -265,11 +266,11 @@ class Builder:
if not settings.dry_run:
try:
core.publish_file(source_path=settings._source,
- destination_path=settings._destination,
- reader_name=pub_struct.reader_name,
- parser_name='restructuredtext',
- writer_name=pub_struct.writer_name,
- settings=settings)
+ destination_path=settings._destination,
+ reader_name=pub_struct.reader_name,
+ parser_name='restructuredtext',
+ writer_name=pub_struct.writer_name,
+ settings=settings)
except ApplicationError as err:
errout.write(f' {type(err).__name__}: {err}\n')
diff --git a/docutils/tools/dev/generate_punctuation_chars.py b/docutils/tools/dev/generate_punctuation_chars.py
index 352ee0c86..b97dd8362 100644
--- a/docutils/tools/dev/generate_punctuation_chars.py
+++ b/docutils/tools/dev/generate_punctuation_chars.py
@@ -166,12 +166,12 @@ def unicode_charlists(categories, cp_min=0, cp_max=None):
# categories with not too high characters):
if cp_max is None:
cp_max = max(x for x in range(sys.maxunicode+1)
- if unicodedata.category(chr(x)) in categories)
+ if unicodedata.category(chr(x)) in categories)
# print(cp_max) # => 74867 for unicode_punctuation_categories
charlists = {}
for cat in categories:
charlists[cat] = [chr(x) for x in range(cp_min, cp_max+1)
- if unicodedata.category(chr(x)) == cat]
+ if unicodedata.category(chr(x)) == cat]
return charlists
@@ -242,7 +242,7 @@ def character_category_patterns():
closing_delimiters = [r'\\.,;!?']
return [''.join(chars) for chars in (openers, closers, delimiters,
- closing_delimiters)]
+ closing_delimiters)]
def separate_wide_chars(s):
"""Return (s1,s2) with characters above 0xFFFF in s2"""
@@ -276,8 +276,7 @@ def mark_intervals(s):
return ''.join(l2)
-def wrap_string(s, startstring= "('",
- endstring = "')", wrap=67):
+def wrap_string(s, startstring= "('", endstring = "')", wrap=67):
"""Line-wrap a unicode string literal definition."""
c = len(startstring)
contstring = "'\n" + ' ' * (len(startstring)-2) + "'"
@@ -342,17 +341,16 @@ if __name__ == '__main__':
# Test: compare module content with re-generated definitions
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-# ::
-
- if args.test:
-
+#
# Import the punctuation_chars module from the source
# or Py3k build path for local Python modules::
+ if args.test:
+
sys.path.insert(0, '../../docutils')
- from docutils.utils.punctuation_chars import (openers, closers,
- delimiters, closing_delimiters)
+ from docutils.utils.punctuation_chars import (
+ openers, closers, delimiters, closing_delimiters)
print('Check for differences between the current `punctuation_chars`'
' module\n and a regeneration based on Unicode version %s:'
diff --git a/docutils/tools/dev/unicode2rstsubs.py b/docutils/tools/dev/unicode2rstsubs.py
index 839ed870d..74ce0717d 100755
--- a/docutils/tools/dev/unicode2rstsubs.py
+++ b/docutils/tools/dev/unicode2rstsubs.py
@@ -132,9 +132,9 @@ class CharacterEntitySetExtractor:
return
entity = attributes['id']
assert (entity not in self.sets[set]
- or self.sets[set][entity] == self.charid), \
- ('sets[%r][%r] == %r (!= %r)'
- % (set, entity, self.sets[set][entity], self.charid))
+ or self.sets[set][entity] == self.charid
+ ), ('sets[%r][%r] == %r (!= %r)'
+ % (set, entity, self.sets[set][entity], self.charid))
self.sets[set][entity] = self.charid
def description_data(self, data):
diff --git a/docutils/tools/docutils-cli.py b/docutils/tools/docutils-cli.py
index 931c77d3e..0edec1220 100755
--- a/docutils/tools/docutils-cli.py
+++ b/docutils/tools/docutils-cli.py
@@ -57,7 +57,7 @@ class CliSettingsSpec(docutils.SettingsSpec):
# Get default components from configuration files
# default to "html5" writer for backwards compatibility
default_settings = Publisher().get_settings(settings_spec=CliSettingsSpec,
- writer='html5')
+ writer='html5')
argparser = argparse.ArgumentParser(
diff --git a/docutils/tools/quicktest.py b/docutils/tools/quicktest.py
index 5294cc9ce..3dd85d3f9 100755
--- a/docutils/tools/quicktest.py
+++ b/docutils/tools/quicktest.py
@@ -76,10 +76,10 @@ def _rawxml(input, document, optargs):
def _styledxml(input, document, optargs):
docnode = document.asdom().childNodes[0]
- return '%s\n%s\n%s' % (
- '<?xml version="1.0" encoding="ISO-8859-1"?>',
- '<?xml-stylesheet type="text/xsl" href="%s"?>'
- % optargs['styledxml'], docnode.toxml())
+ return '\n'.join(('<?xml version="1.0" encoding="ISO-8859-1"?>',
+ '<?xml-stylesheet type="text/xsl" href="%s"?>'
+ % optargs['styledxml'],
+ docnode.toxml()))
def _prettyxml(input, document, optargs):
return document.asdom().toprettyxml(' ', '\n')
@@ -112,8 +112,7 @@ _outputFormatters = {
'styledxml': _styledxml,
'xml': _prettyxml,
'pretty' : _pretty,
- 'test': _test
- }
+ 'test': _test}
def format(outputFormat, input, document, optargs):
formatter = _outputFormatters[outputFormat]
diff --git a/docutils/tools/rst2odt.py b/docutils/tools/rst2odt.py
index 2e1534b22..21f537016 100755
--- a/docutils/tools/rst2odt.py
+++ b/docutils/tools/rst2odt.py
@@ -25,4 +25,4 @@ description = ('Generates OpenDocument/OpenOffice/ODF documents from '
writer = Writer()
reader = Reader()
output = publish_cmdline_to_binary(reader=reader, writer=writer,
- description=description)
+ description=description)
diff --git a/docutils/tox.ini b/docutils/tox.ini
index dce99605b..4ce5dde3b 100644
--- a/docutils/tox.ini
+++ b/docutils/tox.ini
@@ -16,91 +16,84 @@ commands =
flake8 {toxinidir}
[flake8]
-# The following rules are ignored as they're stylistic and can be addressed at
-# a later point:
-#
-# E101 indentation contains mixed spaces and tabs
-# E111 indentation is not a multiple of four
-# E114 indentation is not a multiple of four (comment)
-# E115 expected an indented block (comment)
-# E116 unexpected indentation (comment)
-# E117 over-indented
-# E121 continuation line under-indented for hanging indent
-# E122 continuation line missing indentation or outdented
-# E123 closing bracket does not match indentation of opening bracket's line
-# E124 closing bracket does not match visual indentation
-# E125 continuation line with same indent as next logical line
-# E126 continuation line over-indented for hanging indent
-# E127 continuation line over-indented for visual indent
-# E128 continuation line under-indented for visual indent
-# E129 visually indented line with same indent as next logical line
-# E131 continuation line unaligned for hanging indent
-# E201 whitespace after '('
-# E202 whitespace before '}'
-# E203 whitespace before ':'
-# E211 whitespace before '('
-# E221 multiple spaces before operator
-# E222 multiple spaces after operator
-# E225 missing whitespace around operator
-# E226 missing whitespace around arithmetic operator
-# E228 missing whitespace around modulo operator
-# E231 missing whitespace after ','
-# E241 multiple spaces after ':'
-# E251 unexpected spaces around keyword / parameter equals
-# E261 at least two spaces before inline comment
-# E262 inline comment should start with '# '
-# E265 block comment should start with '# '
-# E266 too many leading '#' for block comment
-# E271 multiple spaces after keyword
-# E301 expected 1 blank line, found 0
-# E302 expected 2 blank lines, found 1
-# E303 too many blank lines (N)
-# E305 expected 2 blank lines after class or function definition, found 1
-# E306 expected 1 blank line before a nested definition, found 0
-# E401 multiple imports on one line
-# E402 module level import not at top of file
-# E501 line too long (N > 79 characters)
-# E502 the backslash is redundant between brackets
-# E701 multiple statements on one line (colon)
-# E704 multiple statements on one line (def)
-# E711 comparison to None should be 'if cond is not None:'
-# E713 test for membership should be 'not in'
-# E721 do not compare types, use 'isinstance()'
-# E722 do not use bare 'except'
-# E731 do not assign a lambda expression, use a def
-# E741 ambiguous variable name 'a'
-# W191 indentation contains tabs
-# W291 trailing whitespace
-# W293 blank line contains whitespace
-# W391 blank line at end of file
-# W503 line break before binary operator
-# W504 line break after binary operator
-# F401 'foo' imported but unused
-# F841 local variable 'foo' is assigned to but never used
-#
-# The following rules are required for Python 3 support and so are not
-# disabled
-#
-# W605 invalid escape sequence '\ '
-# W601 .has_key() is deprecated, use 'in'
-# W602 deprecated form of raising exception
-# F811 redefinition of unused 'foo' from line 79
-#
-# Similarly, the following are straight up bugs that should be addressed
-# immediately:
-#
-# E999 SyntaxError: invalid syntax
-# F404 from __future__ imports must occur at the beginning of the file
-# F821 undefined name 'foo'
-ignore = E101,E111,E114,E115,E116,E117,E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E201,E202,E203,E211,E221,E222,E225,E226,E228,E231,E241,E251,E261,E262,E265,E266,E271,E301,E302,E303,E305,E306,E401,E402,E501,E502,E701,E704,E711,E713,E721,E722,E731,E741,W503,W504,W605,F401,F841
+# Problems that are either not fixed yet or allowed by PEP8:
+ignore =
+ E123, # closing bracket does not match indentation of opening bracket's line
+ # PEP8 "may either line up under the first […] character of the last line …"
+
+ E125, # continuation line with same indent as next logical line
+ # allowed by PEP8 cf. https://github.com/PyCQA/pycodestyle/issues/126
+
+ E126, # continuation line over-indented for hanging indent
+ # some cases are mandated by PEP8 to distinguish hanging indent from nesting.
+ # Other cases improve readability.
+
+ E129, # visually indented line with same indent as next logical line
+ # allowed by PEP8
+
+ E201, # whitespace after '('
+ E202, # whitespace before '}'
+ E203, # whitespace before ':'
+ E211, # whitespace before '('
+ E221, # multiple spaces before operator
+ E222, # multiple spaces after operator
+ E225, # missing whitespace around operator
+ E226, # missing whitespace around arithmetic operator
+ E228, # missing whitespace around modulo operator
+ E231, # missing whitespace after ','
+ E241, # multiple spaces after ':'
+ E251, # unexpected spaces around keyword / parameter equals
+ E261, # at least two spaces before inline comment
+ E262, # inline comment should start with '# '
+ E265, # block comment should start with '# '
+ E266, # too many leading '#' for block comment
+ E271, # multiple spaces after keyword
+ E301, # expected 1 blank line, found 0
+ E302, # expected 2 blank lines, found 1
+ E303, # too many blank lines (N)
+ E305, # expected 2 blank lines after class or function definition, found 1
+ E306, # expected 1 blank line before a nested definition, found 0
+ E401, # multiple imports on one line
+ E402, # module level import not at top of file
+ E501, # line too long (N > 79 characters)
+ E502, # the backslash is redundant between brackets
+ E701, # multiple statements on one line (colon)
+ E704, # multiple statements on one line (def)
+ E711, # comparison to None should be 'if cond is not None:'
+ E713, # test for membership should be 'not in'
+ E721, # do not compare types, use 'isinstance()'
+ E722, # do not use bare 'except'
+ E731, # do not assign a lambda expression, use a def
+ E741, # ambiguous variable name 'a'
+ W503, # line break before binary operator
+ W504, # line break after binary operator
+ W605, # invalid escape sequence
+ F401, # 'foo' imported but unused
+ F841, # local variable 'foo' is assigned to but never used
+ #
+ # The following rules are required for Python 3 support
exclude = .venv,.tox,dist,*egg,build
max-complexity = 35
-# Some rules are disabled for specific files (requires flake8 3.7.0)
+
+# Some rules are disabled for specific files (requires flake8 >= 3.7.0)
+
per-file-ignores =
- test/functional/tests/*:F821 # included config files
+ # start of error message should be visible in traceback
+ docutils/parsers/rst/directives/tables.py:E128
+ # complex regexp definitions
+ docutils/parsers/rst/states.py:E121,E128
+ # module with 3rd-party origin
+ docutils/utils/math/math2html.py:E111,E114,E123 # leave indentation for now
+ # generated auxiliary files
+ docutils/utils/math/unichar2tex.py:E122
+ docutils/utils/math/tex2unichar.py:E123
+ # included configuration files referencing externally defined variables
+ test/functional/tests/*:F821
+ # don't indent list delimiters in lists of test samples (multi-line strings)
test/test_readers/test_pep/*:E122,E128
test/test_parsers/*:E122,E124,E128
test/test_writers/*:E122,E124,E128
- test/test_writers/test_manpage.py:E121,E128,W291
- test/test_writers/test_latex2e.py:E122,E128,W291,W293 # r'' test samples
test/test_transforms/*:E122,E124,E128
+ # raw string test samples with trailing whitespace
+ test/test_writers/test_manpage.py:E121,E128,W291
+ test/test_writers/test_latex2e.py:E122,E128,W291,W293