author     milde <milde@929543f6-e4f2-0310-98a6-ba3bd3dd1d04>  2023-02-07 14:24:46 +0000
committer  milde <milde@929543f6-e4f2-0310-98a6-ba3bd3dd1d04>  2023-02-07 14:24:46 +0000
commit     cdaf19ed84db0b73d433c9f2b6dd48acefea3cc3 (patch)
tree       933e8f634628c062fa2505693fe88e9803ad9fd6
parent     96a69cf4d519ccd74d672999ec06516be0bf3a3b (diff)
Avoid ambiguous module name `io`.
Both the standard library `io` and `docutils.io` exist. Avoid ambiguity by not using ``from docutils import io``.

git-svn-id: https://svn.code.sf.net/p/docutils/code/trunk@9329 929543f6-e4f2-0310-98a6-ba3bd3dd1d04
-rwxr-xr-x  docutils/test/test_io.py      99
-rwxr-xr-x  docutils/tools/buildhtml.py    8
2 files changed, 54 insertions, 53 deletions
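For context, a minimal sketch (illustrative only, not part of the commit; assumes docutils is installed) of the two import styles this change adopts: the test suite binds `docutils.io` to the alias `du_io`, while `buildhtml.py` uses the fully qualified `docutils.io`, so the standard library `io` is never shadowed.

    # Illustrative only -- not part of the commit.
    import io                          # standard library module
    import docutils.io                 # fully qualified, as in buildhtml.py
    from docutils import io as du_io   # aliased, as in test_io.py

    # Call sites are now unambiguous:
    text = du_io.StringInput(source=b'data\n').read()    # docutils input wrapper
    errout = docutils.io.ErrorOutput()                    # docutils error stream
    buf = io.StringIO('the standard library io stays usable unshadowed')
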
diff --git a/docutils/test/test_io.py b/docutils/test/test_io.py
index 17b77eaa1..b1e55a148 100755
--- a/docutils/test/test_io.py
+++ b/docutils/test/test_io.py
@@ -19,7 +19,7 @@ if __name__ == '__main__':
# so we import the local `docutils` package.
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
-from docutils import io
+from docutils import io as du_io
# DATA_ROOT is ./test/data/ from the docutils root
DATA_ROOT = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'data')
@@ -54,69 +54,70 @@ class HelperTests(unittest.TestCase):
def test_check_encoding_true(self):
"""Return `True` if lookup returns the same codec"""
- self.assertEqual(io.check_encoding(mock_stdout, 'utf-8'), True)
- self.assertEqual(io.check_encoding(mock_stdout, 'utf_8'), True)
- self.assertEqual(io.check_encoding(mock_stdout, 'utf8'), True)
- self.assertEqual(io.check_encoding(mock_stdout, 'UTF-8'), True)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'utf-8'), True)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'utf_8'), True)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'utf8'), True)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'UTF-8'), True)
def test_check_encoding_false(self):
"""Return `False` if lookup returns different codecs"""
- self.assertEqual(io.check_encoding(mock_stdout, 'ascii'), False)
- self.assertEqual(io.check_encoding(mock_stdout, 'latin-1'), False)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'ascii'), False)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'latin-1'), False)
def test_check_encoding_none(self):
"""Cases where the comparison fails."""
# stream.encoding is None:
- self.assertEqual(io.check_encoding(io.FileInput(), 'ascii'), None)
+ self.assertEqual(du_io.check_encoding(du_io.FileInput(), 'ascii'),
+ None)
# stream.encoding does not exist:
- self.assertEqual(io.check_encoding(BBuf, 'ascii'), None)
+ self.assertEqual(du_io.check_encoding(BBuf, 'ascii'), None)
# encoding is None or empty string:
- self.assertEqual(io.check_encoding(mock_stdout, None), None)
- self.assertEqual(io.check_encoding(mock_stdout, ''), None)
+ self.assertEqual(du_io.check_encoding(mock_stdout, None), None)
+ self.assertEqual(du_io.check_encoding(mock_stdout, ''), None)
# encoding is invalid
- self.assertEqual(io.check_encoding(mock_stdout, 'UTF-9'), None)
+ self.assertEqual(du_io.check_encoding(mock_stdout, 'UTF-9'), None)
def test_error_string(self):
us = '\xfc' # bytes(us) fails
bs = b'\xc3\xbc' # str(bs) returns repr(bs)
self.assertEqual('Exception: spam',
- io.error_string(Exception('spam')))
+ du_io.error_string(Exception('spam')))
self.assertEqual('IndexError: ' + str(bs),
- io.error_string(IndexError(bs)))
+ du_io.error_string(IndexError(bs)))
self.assertEqual('ImportError: %s' % us,
- io.error_string(ImportError(us)))
+ du_io.error_string(ImportError(us)))
class InputTests(unittest.TestCase):
def test_bom(self):
# Provisional:
- input = io.StringInput(source=b'\xef\xbb\xbf foo \xef\xbb\xbf bar')
+ input = du_io.StringInput(source=b'\xef\xbb\xbf foo \xef\xbb\xbf bar')
# Assert BOM is gone.
# TODO: only remove BOM (ZWNBSP at start of data)
self.assertEqual(input.read(), ' foo bar')
# Unicode input is left unchanged:
- input = io.StringInput(source='\ufeff foo \ufeff bar')
+ input = du_io.StringInput(source='\ufeff foo \ufeff bar')
# Assert ZWNBSPs are still there.
self.assertEqual(input.read(), '\ufeff foo \ufeff bar')
def test_coding_slug(self):
- input = io.StringInput(source=b"""\
+ input = du_io.StringInput(source=b"""\
.. -*- coding: ascii -*-
data
blah
""")
data = input.read() # noqa: F841
self.assertEqual(input.successful_encoding, 'ascii')
- input = io.StringInput(source=b"""\
+ input = du_io.StringInput(source=b"""\
#! python
# -*- coding: ascii -*-
print("hello world")
""")
data = input.read() # noqa: F841
self.assertEqual(input.successful_encoding, 'ascii')
- input = io.StringInput(source=b"""\
+ input = du_io.StringInput(source=b"""\
#! python
# extraneous comment; prevents coding slug from being read
# -*- coding: ascii -*-
@@ -127,15 +128,15 @@ print("hello world")
def test_bom_detection(self):
source = '\ufeffdata\nblah\n'
expected = 'data\nblah\n'
- input = io.StringInput(source=source.encode('utf-16-be'))
+ input = du_io.StringInput(source=source.encode('utf-16-be'))
self.assertEqual(input.read(), expected)
- input = io.StringInput(source=source.encode('utf-16-le'))
+ input = du_io.StringInput(source=source.encode('utf-16-le'))
self.assertEqual(input.read(), expected)
- input = io.StringInput(source=source.encode('utf-8'))
+ input = du_io.StringInput(source=source.encode('utf-8'))
self.assertEqual(input.read(), expected)
def test_readlines(self):
- input = io.FileInput(
+ input = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'include.txt'))
data = input.readlines()
self.assertEqual(data, ['Some include text.\n'])
@@ -144,8 +145,8 @@ print("hello world")
# if no encoding is given and decoding with 'utf-8' fails,
# use either the locale encoding (if specified) or 'latin-1':
# Provisional: the second fallback 'latin-1' will be dropped
- probed_encodings = (io._locale_encoding, 'latin-1') # noqa
- input = io.FileInput(
+ probed_encodings = (du_io._locale_encoding, 'latin-1') # noqa
+ input = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'latin1.txt'))
data = input.read()
if input.successful_encoding not in probed_encodings:
@@ -157,7 +158,7 @@ print("hello world")
def test_decode_unicode(self):
# With the special value "unicode" or "Unicode":
- uniinput = io.Input(encoding='unicode')
+ uniinput = du_io.Input(encoding='unicode')
# keep unicode instances as-is
self.assertEqual(uniinput.decode('ja'), 'ja')
# raise AssertionError if data is not an unicode string
@@ -178,60 +179,60 @@ class OutputTests(unittest.TestCase):
"""Stub of sys.stdout under Python 3"""
def test_write_unicode(self):
- fo = io.FileOutput(destination=self.udrain, encoding='unicode',
- autoclose=False)
+ fo = du_io.FileOutput(destination=self.udrain, encoding='unicode',
+ autoclose=False)
fo.write(self.udata)
self.assertEqual(self.udrain.getvalue(), self.udata)
def test_write_utf8(self):
- fo = io.FileOutput(destination=self.udrain, encoding='utf-8',
- autoclose=False)
+ fo = du_io.FileOutput(destination=self.udrain, encoding='utf-8',
+ autoclose=False)
fo.write(self.udata)
self.assertEqual(self.udrain.getvalue(), self.udata)
def test_FileOutput_hande_io_errors_deprection_warning(self):
with self.assertWarnsRegex(DeprecationWarning,
'"handle_io_errors" is ignored'):
- io.FileOutput(handle_io_errors=True)
+ du_io.FileOutput(handle_io_errors=True)
# With destination in binary mode, data must be binary string
# and is written as-is:
def test_write_bytes(self):
- fo = io.FileOutput(destination=self.bdrain, encoding='utf-8',
- mode='wb', autoclose=False)
+ fo = du_io.FileOutput(destination=self.bdrain, encoding='utf-8',
+ mode='wb', autoclose=False)
fo.write(self.bdata)
self.assertEqual(self.bdrain.getvalue(), self.bdata)
def test_write_bytes_to_stdout(self):
# try writing data to `destination.buffer`, if data is
# instance of `bytes` and writing to `destination` fails:
- fo = io.FileOutput(destination=self.mock_stdout)
+ fo = du_io.FileOutput(destination=self.mock_stdout)
fo.write(self.bdata)
self.assertEqual(self.mock_stdout.buffer.getvalue(),
self.bdata)
def test_encoding_clash_resolved(self):
- fo = io.FileOutput(destination=self.mock_stdout,
- encoding='latin1', autoclose=False)
+ fo = du_io.FileOutput(destination=self.mock_stdout,
+ encoding='latin1', autoclose=False)
fo.write(self.udata)
self.assertEqual(self.mock_stdout.buffer.getvalue(),
self.udata.encode('latin1'))
def test_encoding_clash_nonresolvable(self):
del self.mock_stdout.buffer
- fo = io.FileOutput(destination=self.mock_stdout,
- encoding='latin1', autoclose=False)
+ fo = du_io.FileOutput(destination=self.mock_stdout,
+ encoding='latin1', autoclose=False)
self.assertRaises(ValueError, fo.write, self.udata)
class ErrorOutputTests(unittest.TestCase):
def test_defaults(self):
- e = io.ErrorOutput()
+ e = du_io.ErrorOutput()
self.assertEqual(e.destination, sys.stderr)
def test_bbuf(self):
buf = BBuf() # buffer storing byte string
- e = io.ErrorOutput(buf, encoding='ascii')
+ e = du_io.ErrorOutput(buf, encoding='ascii')
# write byte-string as-is
e.write(b'b\xfc')
self.assertEqual(buf.getvalue(), b'b\xfc')
@@ -250,7 +251,7 @@ class ErrorOutputTests(unittest.TestCase):
def test_ubuf(self):
buf = UBuf() # buffer only accepting unicode string
# decode of binary strings
- e = io.ErrorOutput(buf, encoding='ascii')
+ e = du_io.ErrorOutput(buf, encoding='ascii')
e.write(b'b\xfc')
# use REPLACEMENT CHARACTER
self.assertEqual(buf.getvalue(), 'b\ufffd')
@@ -275,7 +276,7 @@ class FileInputTests(unittest.TestCase):
def test_bom_utf_8(self):
"""Drop optional BOM from utf-8 encoded files.
"""
- source = io.FileInput(
+ source = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'utf-8-sig.txt'))
self.assertTrue(source.read().startswith('Grüße'))
@@ -283,24 +284,24 @@ class FileInputTests(unittest.TestCase):
"""Drop BOM from utf-16 encoded files, use correct encoding.
"""
# Assert correct decoding, BOM is gone.
- source = io.FileInput(
+ source = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'utf-16-le-sig.txt'))
self.assertTrue(source.read().startswith('Grüße'))
def test_coding_slug(self):
"""Use self-declared encoding.
"""
- source = io.FileInput(
+ source = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'latin2.txt'))
self.assertTrue(source.read().endswith('škoda\n'))
def test_fallback_utf8(self):
"""Try 'utf-8', if encoding is not specified in the source."""
- source = io.FileInput(
+ source = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'utf8.txt'))
self.assertEqual(source.read(), 'Grüße\n')
- @unittest.skipIf(io._locale_encoding in (None, 'utf-8', 'utf8'),
+ @unittest.skipIf(du_io._locale_encoding in (None, 'utf-8', 'utf8'),
'locale encoding not set or UTF-8')
def test_fallback_no_utf8(self):
# if decoding with 'utf-8' fails, use the locale encoding
@@ -308,8 +309,8 @@ class FileInputTests(unittest.TestCase):
# provisional: behaviour details will change in future
# TODO: don't fall back to latin1
# TODO: use `locale.getpreferredlocale()` (honour UTF-8 mode)?
- probed_encodings = (io._locale_encoding, 'latin-1') # noqa
- source = io.FileInput(
+ probed_encodings = (du_io._locale_encoding, 'latin-1') # noqa
+ source = du_io.FileInput(
source_path=os.path.join(DATA_ROOT, 'latin1.txt'))
data = source.read()
self.assertTrue(source.successful_encoding in probed_encodings)
diff --git a/docutils/tools/buildhtml.py b/docutils/tools/buildhtml.py
index f31d887b4..8d2451c37 100755
--- a/docutils/tools/buildhtml.py
+++ b/docutils/tools/buildhtml.py
@@ -28,8 +28,8 @@ import sys
import warnings
import docutils
-from docutils import ApplicationError
-from docutils import core, frontend, io, utils
+import docutils.io
+from docutils import core, frontend, utils, ApplicationError
from docutils.parsers import rst
from docutils.readers import standalone, pep
from docutils.writers import html4css1, html5_polyglot, pep_html
@@ -226,7 +226,7 @@ class Builder:
def visit(self, directory, names, subdirectories):
settings = self.get_settings('', directory)
- errout = io.ErrorOutput(encoding=settings.error_encoding)
+ errout = docutils.io.ErrorOutput(encoding=settings.error_encoding)
if settings.prune and (os.path.abspath(directory) in settings.prune):
errout.write('/// ...Skipping directory (pruned): %s\n' %
directory)
@@ -253,7 +253,7 @@ class Builder:
else:
publisher = self.initial_settings.writer
settings = self.get_settings(publisher, directory)
- errout = io.ErrorOutput(encoding=settings.error_encoding)
+ errout = docutils.io.ErrorOutput(encoding=settings.error_encoding)
pub_struct = self.publishers[publisher]
settings._source = os.path.normpath(os.path.join(directory, name))
settings._destination = settings._source[:-4] + '.html'