author    Rob Dennis <robd@services-in.xr>    2023-01-17 16:38:34 -0500
committer Rob Dennis <robd@services-in.xr>    2023-01-17 16:38:34 -0500
commit    3323078a05d57bf99adb16a71b69fcbcd48146ea (patch)
tree      3c7e56e41aa4d6bbd682e011b0b94043ac27c011 /src
parent    178ba90da21998968acb18cf02e7449741ddac4e (diff)
download  configobj-git-3323078a05d57bf99adb16a71b69fcbcd48146ea.tar.gz
#128, #203 - re-implement the move to a configobj package done in master
Diffstat (limited to 'src')
-rw-r--r--  src/configobj/__init__.py        2483
-rw-r--r--  src/configobj/_version.py           1
-rw-r--r--  src/configobj/validate.py        1472
-rw-r--r--  src/tests/__init__.py               0
-rw-r--r--  src/tests/conf.ini                 10
-rw-r--r--  src/tests/conf.spec                13
-rw-r--r--  src/tests/configobj_doctests.py   986
-rw-r--r--  src/tests/conftest.py              14
-rw-r--r--  src/tests/test_configobj.py      1298
-rw-r--r--  src/tests/test_validate.py        163
-rw-r--r--  src/tests/test_validate_errors.py  79
11 files changed, 6519 insertions, 0 deletions
diff --git a/src/configobj/__init__.py b/src/configobj/__init__.py
new file mode 100644
index 0000000..0d752bc
--- /dev/null
+++ b/src/configobj/__init__.py
@@ -0,0 +1,2483 @@
+# configobj.py
+# A config file reader/writer that supports nested sections in config files.
+# Copyright (C) 2005-2014:
+# (name) : (email)
+# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
+# Nicola Larosa: nico AT tekNico DOT net
+# Rob Dennis: rdennis AT gmail DOT com
+# Eli Courtwright: eli AT courtwright DOT org
+
+# This software is licensed under the terms of the BSD license.
+# http://opensource.org/licenses/BSD-3-Clause
+
+# ConfigObj 5 - main repository for documentation and issue tracking:
+# https://github.com/DiffSK/configobj
+
+import os
+import re
+import sys
+
+from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
+
+import six
+from ._version import __version__
+
+# imported lazily to avoid startup performance hit if it isn't used
+compiler = None
+
+# A dictionary mapping BOM to
+# the encoding to decode with, and what to set the
+# encoding attribute to.
+BOMS = {
+ BOM_UTF8: ('utf_8', None),
+ BOM_UTF16_BE: ('utf16_be', 'utf_16'),
+ BOM_UTF16_LE: ('utf16_le', 'utf_16'),
+ BOM_UTF16: ('utf_16', 'utf_16'),
+ }
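+# e.g. (illustrative): a file starting with BOM_UTF8 is decoded as 'utf_8' and
+# the ``encoding`` attribute is left as None, while a UTF16 BOM also forces the
+# ``encoding`` attribute to 'utf_16'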
+# All legal variants of the BOM codecs.
+# TODO: the list of aliases is not meant to be exhaustive; is there a
+# better way?
+BOM_LIST = {
+ 'utf_16': 'utf_16',
+ 'u16': 'utf_16',
+ 'utf16': 'utf_16',
+ 'utf-16': 'utf_16',
+ 'utf16_be': 'utf16_be',
+ 'utf_16_be': 'utf16_be',
+ 'utf-16be': 'utf16_be',
+ 'utf16_le': 'utf16_le',
+ 'utf_16_le': 'utf16_le',
+ 'utf-16le': 'utf16_le',
+ 'utf_8': 'utf_8',
+ 'u8': 'utf_8',
+ 'utf': 'utf_8',
+ 'utf8': 'utf_8',
+ 'utf-8': 'utf_8',
+ }
+
+# Map of encodings to the BOM to write.
+BOM_SET = {
+ 'utf_8': BOM_UTF8,
+ 'utf_16': BOM_UTF16,
+ 'utf16_be': BOM_UTF16_BE,
+ 'utf16_le': BOM_UTF16_LE,
+ None: BOM_UTF8
+ }
+
+
+def match_utf8(encoding):
+ return BOM_LIST.get(encoding.lower()) == 'utf_8'
+
+
+# Quote strings used for writing values
+squot = "'%s'"
+dquot = '"%s"'
+noquot = "%s"
+wspace_plus = ' \r\n\v\t\'"'
+tsquot = '"""%s"""'
+tdquot = "'''%s'''"
+
+# Sentinel for use in getattr calls to replace hasattr
+MISSING = object()
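+# e.g. (illustrative): ``getattr(infile, 'read', MISSING) is not MISSING`` is
+# used in ``_load()`` below to duck-type file-like objects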
+
+__all__ = (
+ 'DEFAULT_INDENT_TYPE',
+ 'DEFAULT_INTERPOLATION',
+ 'ConfigObjError',
+ 'NestingError',
+ 'ParseError',
+ 'DuplicateError',
+ 'ConfigspecError',
+ 'ConfigObj',
+ 'SimpleVal',
+ 'InterpolationError',
+ 'InterpolationLoopError',
+ 'MissingInterpolationOption',
+ 'RepeatSectionError',
+ 'ReloadError',
+ 'UnreprError',
+ 'UnknownType',
+ 'flatten_errors',
+ 'get_extra_values'
+)
+
+DEFAULT_INTERPOLATION = 'configparser'
+DEFAULT_INDENT_TYPE = ' '
+MAX_INTERPOL_DEPTH = 10
+
+OPTION_DEFAULTS = {
+ 'interpolation': True,
+ 'raise_errors': False,
+ 'list_values': True,
+ 'create_empty': False,
+ 'file_error': False,
+ 'configspec': None,
+ 'stringify': True,
+ # option may be set to one of ('', ' ', '\t')
+ 'indent_type': None,
+ 'encoding': None,
+ 'default_encoding': None,
+ 'unrepr': False,
+ 'write_empty_values': False,
+}
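+# These keys mirror the keyword arguments accepted by ConfigObj(); e.g.
+# (illustrative) ConfigObj('app.ini', encoding='utf-8', file_error=True)
+# overrides just those two defaults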
+
+# this could be replaced if six is used for compatibility, or there are no
+# more assertions about items being a string
+
+
+def getObj(s):
+ global compiler
+ if compiler is None:
+ import compiler
+ s = "a=" + s
+ p = compiler.parse(s)
+ return p.getChildren()[1].getChildren()[0].getChildren()[1]
+
+
+class UnknownType(Exception):
+ pass
+
+
+class Builder(object):
+
+    def build(self, o):
+        # look up the handler for this node type, e.g. build_List, build_Const
+        m = getattr(self, 'build_' + o.__class__.__name__, None)
+        if m is None:
+            raise UnknownType(o.__class__.__name__)
+        return m(o)
+
+ def build_List(self, o):
+ return list(map(self.build, o.getChildren()))
+
+ def build_Const(self, o):
+ return o.value
+
+ def build_Dict(self, o):
+ d = {}
+ i = iter(map(self.build, o.getChildren()))
+ for el in i:
+ d[el] = next(i)
+ return d
+
+ def build_Tuple(self, o):
+ return tuple(self.build_List(o))
+
+ def build_Name(self, o):
+ if o.name == 'None':
+ return None
+ if o.name == 'True':
+ return True
+ if o.name == 'False':
+ return False
+
+ # An undefined Name
+ raise UnknownType('Undefined Name')
+
+ def build_Add(self, o):
+ real, imag = list(map(self.build_Const, o.getChildren()))
+ try:
+ real = float(real)
+ except TypeError:
+ raise UnknownType('Add')
+ if not isinstance(imag, complex) or imag.real != 0.0:
+ raise UnknownType('Add')
+ return real+imag
+
+ def build_Getattr(self, o):
+ parent = self.build(o.expr)
+ return getattr(parent, o.attrname)
+
+ def build_UnarySub(self, o):
+ return -self.build_Const(o.getChildren()[0])
+
+ def build_UnaryAdd(self, o):
+ return self.build_Const(o.getChildren()[0])
+
+
+_builder = Builder()
+
+
+def unrepr(s):
+ if not s:
+ return s
+
+ # this is supposed to be safe
+ import ast
+ return ast.literal_eval(s)
+
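+# e.g. (illustrative): unrepr("{'a': 1, 'b': ['x', 2]}") returns the dict
+# {'a': 1, 'b': ['x', 2]}, while a non-literal such as "os.sep" raises an error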
+
+class ConfigObjError(SyntaxError):
+ """
+ This is the base class for all errors that ConfigObj raises.
+ It is a subclass of SyntaxError.
+ """
+ def __init__(self, message='', line_number=None, line=''):
+ self.line = line
+ self.line_number = line_number
+ SyntaxError.__init__(self, message)
+
+
+class NestingError(ConfigObjError):
+ """
+ This error indicates a level of nesting that doesn't match.
+ """
+
+
+class ParseError(ConfigObjError):
+ """
+ This error indicates that a line is badly written.
+ It is neither a valid ``key = value`` line,
+ nor a valid section marker line.
+ """
+
+
+class ReloadError(IOError):
+ """
+ A 'reload' operation failed.
+ This exception is a subclass of ``IOError``.
+ """
+ def __init__(self):
+ IOError.__init__(self, 'reload failed, filename is not set.')
+
+
+class DuplicateError(ConfigObjError):
+ """
+ The keyword or section specified already exists.
+ """
+
+
+class ConfigspecError(ConfigObjError):
+ """
+    An error occurred whilst parsing a configspec.
+ """
+
+
+class InterpolationError(ConfigObjError):
+ """Base class for the two interpolation errors."""
+
+
+class InterpolationLoopError(InterpolationError):
+ """Maximum interpolation depth exceeded in string interpolation."""
+
+ def __init__(self, option):
+ InterpolationError.__init__(
+ self,
+ 'interpolation loop detected in value "%s".' % option)
+
+
+class RepeatSectionError(ConfigObjError):
+ """
+    This error indicates additional sections in a section that already has a
+    ``__many__`` (repeated) section.
+ """
+
+
+class MissingInterpolationOption(InterpolationError):
+ """A value specified for interpolation was missing."""
+ def __init__(self, option):
+ msg = 'missing option "%s" in interpolation.' % option
+ InterpolationError.__init__(self, msg)
+
+
+class UnreprError(ConfigObjError):
+ """An error parsing in unrepr mode."""
+
+
+
+class InterpolationEngine(object):
+ """
+ A helper class to help perform string interpolation.
+
+ This class is an abstract base class; its descendants perform
+ the actual work.
+ """
+
+ # compiled regexp to use in self.interpolate()
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+ _cookie = '%'
+
+ def __init__(self, section):
+ # the Section instance that "owns" this engine
+ self.section = section
+
+
+ def interpolate(self, key, value):
+ # short-cut
+        if self._cookie not in value:
+ return value
+
+ def recursive_interpolate(key, value, section, backtrail):
+ """The function that does the actual work.
+
+ ``value``: the string we're trying to interpolate.
+ ``section``: the section in which that string was found
+ ``backtrail``: a dict to keep track of where we've been,
+ to detect and prevent infinite recursion loops
+
+ This is similar to a depth-first-search algorithm.
+ """
+ # Have we been here already?
+ if (key, section.name) in backtrail:
+ # Yes - infinite loop detected
+ raise InterpolationLoopError(key)
+ # Place a marker on our backtrail so we won't come back here again
+ backtrail[(key, section.name)] = 1
+
+ # Now start the actual work
+ match = self._KEYCRE.search(value)
+ while match:
+ # The actual parsing of the match is implementation-dependent,
+ # so delegate to our helper function
+ k, v, s = self._parse_match(match)
+ if k is None:
+ # That's the signal that no further interpolation is needed
+ replacement = v
+ else:
+ # Further interpolation may be needed to obtain final value
+ replacement = recursive_interpolate(k, v, s, backtrail)
+ # Replace the matched string with its final value
+ start, end = match.span()
+ value = ''.join((value[:start], replacement, value[end:]))
+ new_search_start = start + len(replacement)
+ # Pick up the next interpolation key, if any, for next time
+ # through the while loop
+ match = self._KEYCRE.search(value, new_search_start)
+
+ # Now safe to come back here again; remove marker from backtrail
+ del backtrail[(key, section.name)]
+
+ return value
+
+ # Back in interpolate(), all we have to do is kick off the recursive
+ # function with appropriate starting values
+ value = recursive_interpolate(key, value, self.section, {})
+ return value
+
+
+ def _fetch(self, key):
+ """Helper function to fetch values from owning section.
+
+ Returns a 2-tuple: the value, and the section where it was found.
+ """
+        # switch off interpolation before we try to fetch anything!
+ save_interp = self.section.main.interpolation
+ self.section.main.interpolation = False
+
+ # Start at section that "owns" this InterpolationEngine
+ current_section = self.section
+ while True:
+ # try the current section first
+ val = current_section.get(key)
+ if val is not None and not isinstance(val, Section):
+ break
+ # try "DEFAULT" next
+ val = current_section.get('DEFAULT', {}).get(key)
+ if val is not None and not isinstance(val, Section):
+ break
+ # move up to parent and try again
+ # top-level's parent is itself
+ if current_section.parent is current_section:
+ # reached top level, time to give up
+ break
+ current_section = current_section.parent
+
+ # restore interpolation to previous value before returning
+ self.section.main.interpolation = save_interp
+ if val is None:
+ raise MissingInterpolationOption(key)
+ return val, current_section
+
+
+ def _parse_match(self, match):
+ """Implementation-dependent helper function.
+
+ Will be passed a match object corresponding to the interpolation
+ key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
+ key in the appropriate config file section (using the ``_fetch()``
+ helper function) and return a 3-tuple: (key, value, section)
+
+ ``key`` is the name of the key we're looking for
+ ``value`` is the value found for that key
+ ``section`` is a reference to the section where it was found
+
+ ``key`` and ``section`` should be None if no further
+ interpolation should be performed on the resulting value
+ (e.g., if we interpolated "$$" and returned "$").
+ """
+ raise NotImplementedError()
+
+
+
+class ConfigParserInterpolation(InterpolationEngine):
+ """Behaves like ConfigParser."""
+ _cookie = '%'
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+
+ def _parse_match(self, match):
+ key = match.group(1)
+ value, section = self._fetch(key)
+ return key, value, section
+
+
+
+class TemplateInterpolation(InterpolationEngine):
+ """Behaves like string.Template."""
+ _cookie = '$'
+ _delimiter = '$'
+ _KEYCRE = re.compile(r"""
+ \$(?:
+ (?P<escaped>\$) | # Two $ signs
+ (?P<named>[_a-z][_a-z0-9]*) | # $name format
+ {(?P<braced>[^}]*)} # ${name} format
+ )
+ """, re.IGNORECASE | re.VERBOSE)
+
+ def _parse_match(self, match):
+ # Valid name (in or out of braces): fetch value from section
+ key = match.group('named') or match.group('braced')
+ if key is not None:
+ value, section = self._fetch(key)
+ return key, value, section
+ # Escaped delimiter (e.g., $$): return single delimiter
+ if match.group('escaped') is not None:
+ # Return None for key and section to indicate it's time to stop
+ return None, self._delimiter, None
+ # Anything else: ignore completely, just return it unchanged
+ return None, match.group(), None
+
+
+interpolation_engines = {
+ 'configparser': ConfigParserInterpolation,
+ 'template': TemplateInterpolation,
+}
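+# Example (illustrative): the engine is selected by name through the
+# ``interpolation`` option, e.g.
+#     cfg = ConfigObj(['home = /tmp', 'cache = $home/cache'],
+#                     interpolation='template')
+#     cfg['cache']  ->  '/tmp/cache'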
+
+
+def __newobj__(cls, *args):
+ # Hack for pickle
+ return cls.__new__(cls, *args)
+
+class Section(dict):
+ """
+ A dictionary-like object that represents a section in a config file.
+
+ It does string interpolation if the 'interpolation' attribute
+ of the 'main' object is set to True.
+
+ Interpolation is tried first from this object, then from the 'DEFAULT'
+ section of this object, next from the parent and its 'DEFAULT' section,
+ and so on until the main object is reached.
+
+ A Section will behave like an ordered dictionary - following the
+ order of the ``scalars`` and ``sections`` attributes.
+ You can use this to change the order of members.
+
+ Iteration follows the order: scalars, then sections.
+ """
+
+
+ def __setstate__(self, state):
+ dict.update(self, state[0])
+ self.__dict__.update(state[1])
+
+ def __reduce__(self):
+ state = (dict(self), self.__dict__)
+ return (__newobj__, (self.__class__,), state)
+
+
+ def __init__(self, parent, depth, main, indict=None, name=None):
+ """
+ * parent is the section above
+ * depth is the depth level of this section
+ * main is the main ConfigObj
+ * indict is a dictionary to initialise the section with
+ """
+ if indict is None:
+ indict = {}
+ dict.__init__(self)
+ # used for nesting level *and* interpolation
+ self.parent = parent
+ # used for the interpolation attribute
+ self.main = main
+ # level of nesting depth of this Section
+ self.depth = depth
+ # purely for information
+ self.name = name
+ #
+ self._initialise()
+ # we do this explicitly so that __setitem__ is used properly
+ # (rather than just passing to ``dict.__init__``)
+ for entry, value in indict.items():
+ self[entry] = value
+
+
+ def _initialise(self):
+ # the sequence of scalar values in this Section
+ self.scalars = []
+ # the sequence of sections in this Section
+ self.sections = []
+ # for comments :-)
+ self.comments = {}
+ self.inline_comments = {}
+ # the configspec
+ self.configspec = None
+ # for defaults
+ self.defaults = []
+ self.default_values = {}
+ self.extra_values = []
+ self._created = False
+
+
+ def _interpolate(self, key, value):
+ try:
+ # do we already have an interpolation engine?
+ engine = self._interpolation_engine
+ except AttributeError:
+ # not yet: first time running _interpolate(), so pick the engine
+ name = self.main.interpolation
+ if name == True: # note that "if name:" would be incorrect here
+ # backwards-compatibility: interpolation=True means use default
+ name = DEFAULT_INTERPOLATION
+ name = name.lower() # so that "Template", "template", etc. all work
+ class_ = interpolation_engines.get(name, None)
+ if class_ is None:
+ # invalid value for self.main.interpolation
+ self.main.interpolation = False
+ return value
+ else:
+ # save reference to engine so we don't have to do this again
+ engine = self._interpolation_engine = class_(self)
+ # let the engine do the actual work
+ return engine.interpolate(key, value)
+
+
+ def __getitem__(self, key):
+ """Fetch the item and do string interpolation."""
+ val = dict.__getitem__(self, key)
+ if self.main.interpolation:
+ if isinstance(val, six.string_types):
+ return self._interpolate(key, val)
+ if isinstance(val, list):
+ def _check(entry):
+ if isinstance(entry, six.string_types):
+ return self._interpolate(key, entry)
+ return entry
+ new = [_check(entry) for entry in val]
+ if new != val:
+ return new
+ return val
+
+
+ def __setitem__(self, key, value, unrepr=False):
+ """
+ Correctly set a value.
+
+ Making dictionary values Section instances.
+ (We have to special case 'Section' instances - which are also dicts)
+
+ Keys must be strings.
+ Values need only be strings (or lists of strings) if
+ ``main.stringify`` is set.
+
+ ``unrepr`` must be set when setting a value to a dictionary, without
+ creating a new sub-section.
+ """
+ if not isinstance(key, six.string_types):
+ raise ValueError('The key "%s" is not a string.' % key)
+
+ # add the comment
+ if key not in self.comments:
+ self.comments[key] = []
+ self.inline_comments[key] = ''
+ # remove the entry from defaults
+ if key in self.defaults:
+ self.defaults.remove(key)
+ #
+ if isinstance(value, Section):
+ if key not in self:
+ self.sections.append(key)
+ dict.__setitem__(self, key, value)
+ elif isinstance(value, dict) and not unrepr:
+ # First create the new depth level,
+ # then create the section
+ if key not in self:
+ self.sections.append(key)
+ new_depth = self.depth + 1
+ dict.__setitem__(
+ self,
+ key,
+ Section(
+ self,
+ new_depth,
+ self.main,
+ indict=value,
+ name=key))
+ else:
+ if key not in self:
+ self.scalars.append(key)
+ if not self.main.stringify:
+ if isinstance(value, six.string_types):
+ pass
+ elif isinstance(value, (list, tuple)):
+ for entry in value:
+ if not isinstance(entry, six.string_types):
+ raise TypeError('Value is not a string "%s".' % entry)
+ else:
+ raise TypeError('Value is not a string "%s".' % value)
+ dict.__setitem__(self, key, value)
+
+
+ def __delitem__(self, key):
+ """Remove items from the sequence when deleting."""
+ dict. __delitem__(self, key)
+ if key in self.scalars:
+ self.scalars.remove(key)
+ else:
+ self.sections.remove(key)
+ del self.comments[key]
+ del self.inline_comments[key]
+
+
+ def get(self, key, default=None):
+ """A version of ``get`` that doesn't bypass string interpolation."""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+
+ def update(self, indict):
+ """
+ A version of update that uses our ``__setitem__``.
+ """
+ for entry in indict:
+ self[entry] = indict[entry]
+
+
+ def pop(self, key, default=MISSING):
+ """
+        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+        If key is not found, d is returned if given, otherwise KeyError is raised.
+ """
+ try:
+ val = self[key]
+ except KeyError:
+ if default is MISSING:
+ raise
+ val = default
+ else:
+ del self[key]
+ return val
+
+
+ def popitem(self):
+ """Pops the first (key,val)"""
+ sequence = (self.scalars + self.sections)
+ if not sequence:
+ raise KeyError(": 'popitem(): dictionary is empty'")
+ key = sequence[0]
+ val = self[key]
+ del self[key]
+ return key, val
+
+
+ def clear(self):
+ """
+        A version of clear that also affects scalars/sections.
+        Also clears comments and configspec.
+
+        Leaves other attributes alone:
+        depth/main/parent are not affected.
+ """
+ dict.clear(self)
+ self.scalars = []
+ self.sections = []
+ self.comments = {}
+ self.inline_comments = {}
+ self.configspec = None
+ self.defaults = []
+ self.extra_values = []
+
+
+ def setdefault(self, key, default=None):
+ """A version of setdefault that sets sequence if appropriate."""
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return self[key]
+
+
+ def items(self):
+ """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
+ return list(zip((self.scalars + self.sections), list(self.values())))
+
+
+ def keys(self):
+ """D.keys() -> list of D's keys"""
+ return (self.scalars + self.sections)
+
+
+ def values(self):
+ """D.values() -> list of D's values"""
+ return [self[key] for key in (self.scalars + self.sections)]
+
+
+ def iteritems(self):
+ """D.iteritems() -> an iterator over the (key, value) items of D"""
+ return iter(list(self.items()))
+
+
+ def iterkeys(self):
+ """D.iterkeys() -> an iterator over the keys of D"""
+ return iter((self.scalars + self.sections))
+
+ __iter__ = iterkeys
+
+
+ def itervalues(self):
+ """D.itervalues() -> an iterator over the values of D"""
+ return iter(list(self.values()))
+
+
+ def __repr__(self):
+ """x.__repr__() <==> repr(x)"""
+ def _getval(key):
+ try:
+ return self[key]
+ except MissingInterpolationOption:
+ return dict.__getitem__(self, key)
+ return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
+ for key in (self.scalars + self.sections)])
+
+ __str__ = __repr__
+ __str__.__doc__ = "x.__str__() <==> str(x)"
+
+
+ # Extra methods - not in a normal dictionary
+
+ def dict(self):
+ """
+ Return a deepcopy of self as a dictionary.
+
+ All members that are ``Section`` instances are recursively turned to
+ ordinary dictionaries - by calling their ``dict`` method.
+
+ >>> n = a.dict()
+ >>> n == a
+ 1
+ >>> n is a
+ 0
+ """
+ newdict = {}
+ for entry in self:
+ this_entry = self[entry]
+ if isinstance(this_entry, Section):
+ this_entry = this_entry.dict()
+ elif isinstance(this_entry, list):
+ # create a copy rather than a reference
+ this_entry = list(this_entry)
+ elif isinstance(this_entry, tuple):
+ # create a copy rather than a reference
+ this_entry = tuple(this_entry)
+ newdict[entry] = this_entry
+ return newdict
+
+
+ def merge(self, indict):
+ """
+ A recursive update - useful for merging config files.
+
+ >>> a = '''[section1]
+ ... option1 = True
+ ... [[subsection]]
+ ... more_options = False
+ ... # end of file'''.splitlines()
+ >>> b = '''# File is user.ini
+ ... [section1]
+ ... option1 = False
+ ... # end of file'''.splitlines()
+ >>> c1 = ConfigObj(b)
+ >>> c2 = ConfigObj(a)
+ >>> c2.merge(c1)
+ >>> c2
+ ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
+ """
+ for key, val in list(indict.items()):
+ if (key in self and isinstance(self[key], dict) and
+ isinstance(val, dict)):
+ self[key].merge(val)
+ else:
+ self[key] = val
+
+
+ def rename(self, oldkey, newkey):
+ """
+ Change a keyname to another, without changing position in sequence.
+
+ Implemented so that transformations can be made on keys,
+ as well as on values. (used by encode and decode)
+
+ Also renames comments.
+ """
+ if oldkey in self.scalars:
+ the_list = self.scalars
+ elif oldkey in self.sections:
+ the_list = self.sections
+ else:
+ raise KeyError('Key "%s" not found.' % oldkey)
+ pos = the_list.index(oldkey)
+ #
+ val = self[oldkey]
+ dict.__delitem__(self, oldkey)
+ dict.__setitem__(self, newkey, val)
+ the_list.remove(oldkey)
+ the_list.insert(pos, newkey)
+ comm = self.comments[oldkey]
+ inline_comment = self.inline_comments[oldkey]
+ del self.comments[oldkey]
+ del self.inline_comments[oldkey]
+ self.comments[newkey] = comm
+ self.inline_comments[newkey] = inline_comment
+
+
+ def walk(self, function, raise_errors=True,
+ call_on_sections=False, **keywargs):
+ """
+ Walk every member and call a function on the keyword and value.
+
+ Return a dictionary of the return values
+
+        If the function raises an exception, raise the error
+ unless ``raise_errors=False``, in which case set the return value to
+ ``False``.
+
+        Any unrecognised keyword arguments you pass to walk will be passed on
+        to the function you pass in.
+
+        Note: if ``call_on_sections`` is ``True`` then, on encountering a
+        subsection, the function is *first* called for the *whole* subsection
+        and then walk recurses into its members. This means your function must
+        be able to handle strings, dictionaries and lists. It also allows you
+        to change the keys of subsections as well as those of ordinary members.
+        The return value from calling the function on a whole subsection is
+        discarded.
+
+ See the encode and decode methods for examples, including functions.
+
+ .. admonition:: caution
+
+ You can use ``walk`` to transform the names of members of a section
+ but you mustn't add or delete members.
+
+ >>> config = '''[XXXXsection]
+ ... XXXXkey = XXXXvalue'''.splitlines()
+ >>> cfg = ConfigObj(config)
+ >>> cfg
+ ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
+ >>> def transform(section, key):
+ ... val = section[key]
+ ... newkey = key.replace('XXXX', 'CLIENT1')
+ ... section.rename(key, newkey)
+ ... if isinstance(val, (tuple, list, dict)):
+ ... pass
+ ... else:
+ ... val = val.replace('XXXX', 'CLIENT1')
+ ... section[newkey] = val
+ >>> cfg.walk(transform, call_on_sections=True)
+ {'CLIENT1section': {'CLIENT1key': None}}
+ >>> cfg
+ ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
+ """
+ out = {}
+ # scalars first
+ for i in range(len(self.scalars)):
+ entry = self.scalars[i]
+ try:
+ val = function(self, entry, **keywargs)
+ # bound again in case name has changed
+ entry = self.scalars[i]
+ out[entry] = val
+ except Exception:
+ if raise_errors:
+ raise
+ else:
+ entry = self.scalars[i]
+ out[entry] = False
+ # then sections
+ for i in range(len(self.sections)):
+ entry = self.sections[i]
+ if call_on_sections:
+ try:
+ function(self, entry, **keywargs)
+ except Exception:
+ if raise_errors:
+ raise
+ else:
+ entry = self.sections[i]
+ out[entry] = False
+ # bound again in case name has changed
+ entry = self.sections[i]
+ # previous result is discarded
+ out[entry] = self[entry].walk(
+ function,
+ raise_errors=raise_errors,
+ call_on_sections=call_on_sections,
+ **keywargs)
+ return out
+
+
+ def as_bool(self, key):
+ """
+ Accepts a key as input. The corresponding value must be a string or
+ the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
+ retain compatibility with Python 2.2.
+
+ If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
+ ``True``.
+
+ If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
+ ``False``.
+
+ ``as_bool`` is not case sensitive.
+
+ Any other input will raise a ``ValueError``.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_bool('a')
+ Traceback (most recent call last):
+ ValueError: Value "fish" is neither True nor False
+ >>> a['b'] = 'True'
+ >>> a.as_bool('b')
+ 1
+ >>> a['b'] = 'off'
+ >>> a.as_bool('b')
+ 0
+ """
+ val = self[key]
+ if val == True:
+ return True
+ elif val == False:
+ return False
+ else:
+ try:
+ if not isinstance(val, six.string_types):
+ # TODO: Why do we raise a KeyError here?
+ raise KeyError()
+ else:
+ return self.main._bools[val.lower()]
+ except KeyError:
+ raise ValueError('Value "%s" is neither True nor False' % val)
+
+
+ def as_int(self, key):
+ """
+ A convenience method which coerces the specified value to an integer.
+
+ If the value is an invalid literal for ``int``, a ``ValueError`` will
+ be raised.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_int('a')
+ Traceback (most recent call last):
+ ValueError: invalid literal for int() with base 10: 'fish'
+ >>> a['b'] = '1'
+ >>> a.as_int('b')
+ 1
+ >>> a['b'] = '3.2'
+ >>> a.as_int('b')
+ Traceback (most recent call last):
+ ValueError: invalid literal for int() with base 10: '3.2'
+ """
+ return int(self[key])
+
+
+ def as_float(self, key):
+ """
+ A convenience method which coerces the specified value to a float.
+
+ If the value is an invalid literal for ``float``, a ``ValueError`` will
+ be raised.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 'fish'
+ >>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ValueError: invalid literal for float(): fish
+ >>> a['b'] = '1'
+ >>> a.as_float('b')
+ 1.0
+ >>> a['b'] = '3.2'
+ >>> a.as_float('b') #doctest: +ELLIPSIS
+ 3.2...
+ """
+ return float(self[key])
+
+
+ def as_list(self, key):
+ """
+ A convenience method which fetches the specified value, guaranteeing
+ that it is a list.
+
+ >>> a = ConfigObj()
+ >>> a['a'] = 1
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = (1,)
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = [1]
+ >>> a.as_list('a')
+ [1]
+ """
+ result = self[key]
+ if isinstance(result, (tuple, list)):
+ return list(result)
+ return [result]
+
+
+ def restore_default(self, key):
+ """
+ Restore (and return) default value for the specified key.
+
+ This method will only work for a ConfigObj that was created
+ with a configspec and has been validated.
+
+ If there is no default value for this key, ``KeyError`` is raised.
+ """
+ default = self.default_values[key]
+ dict.__setitem__(self, key, default)
+ if key not in self.defaults:
+ self.defaults.append(key)
+ return default
+
+
+ def restore_defaults(self):
+ """
+ Recursively restore default values to all members
+ that have them.
+
+ This method will only work for a ConfigObj that was created
+ with a configspec and has been validated.
+
+ It doesn't delete or modify entries without default values.
+ """
+ for key in self.default_values:
+ self.restore_default(key)
+
+ for section in self.sections:
+ self[section].restore_defaults()
+
+
+class ConfigObj(Section):
+ """An object to read, create, and write config files."""
+
+ _keyword = re.compile(r'''^ # line start
+ (\s*) # indentation
+ ( # keyword
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'"=].*?) # no quotes
+ )
+ \s*=\s* # divider
+ (.*) # value (including list values and comments)
+ $ # line end
+ ''',
+ re.VERBOSE)
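+    # e.g. (illustrative) this matches lines such as
+    #     name = value            # inline comment
+    #     "my key" = 'a', 'b'     # quoted keyword with a list value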
+
+ _sectionmarker = re.compile(r'''^
+ (\s*) # 1: indentation
+ ((?:\[\s*)+) # 2: section marker open
+ ( # 3: section name open
+ (?:"\s*\S.*?\s*")| # at least one non-space with double quotes
+ (?:'\s*\S.*?\s*')| # at least one non-space with single quotes
+ (?:[^'"\s].*?) # at least one non-space unquoted
+ ) # section name close
+ ((?:\s*\])+) # 4: section marker close
+ \s*(\#.*)? # 5: optional comment
+ $''',
+ re.VERBOSE)
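+    # e.g. (illustrative) this matches section lines such as
+    #     [section]    # inline comment
+    #         [["sub section"]]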
+
+ # this regexp pulls list values out as a single string
+ # or single values and comments
+ # FIXME: this regex adds a '' to the end of comma terminated lists
+ # workaround in ``_handle_value``
+ _valueexp = re.compile(r'''^
+ (?:
+ (?:
+ (
+ (?:
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#][^,\#]*?) # unquoted
+ )
+ \s*,\s* # comma
+ )* # match all list items ending in a comma (if any)
+ )
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#\s][^,]*?)| # unquoted
+ (?:(?<!,)) # Empty value
+ )? # last item in a list - or string value
+ )|
+ (,) # alternatively a single comma - empty list
+ )
+ \s*(\#.*)? # optional comment
+ $''',
+ re.VERBOSE)
+
+ # use findall to get the members of a list value
+ _listvalueexp = re.compile(r'''
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\#]?.*?) # unquoted
+ )
+ \s*,\s* # comma
+ ''',
+ re.VERBOSE)
+
+ # this regexp is used for the value
+ # when lists are switched off
+ _nolistvalue = re.compile(r'''^
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'"\#].*?)| # unquoted
+ (?:) # Empty value
+ )
+ \s*(\#.*)? # optional comment
+ $''',
+ re.VERBOSE)
+
+ # regexes for finding triple quoted values on one line
+ _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
+ _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
+ _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
+ _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
+
+ _triple_quote = {
+ "'''": (_single_line_single, _multi_line_single),
+ '"""': (_single_line_double, _multi_line_double),
+ }
+
+ # Used by the ``istrue`` Section method
+ _bools = {
+ 'yes': True, 'no': False,
+ 'on': True, 'off': False,
+ '1': True, '0': False,
+ 'true': True, 'false': False,
+ }
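+    # e.g. (illustrative): these keys drive ``as_bool``, so 'on', 'Yes' and '1'
+    # all map to True while 'off', 'No' and '0' map to False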
+
+
+ def __init__(self, infile=None, options=None, configspec=None, encoding=None,
+ interpolation=True, raise_errors=False, list_values=True,
+ create_empty=False, file_error=False, stringify=True,
+ indent_type=None, default_encoding=None, unrepr=False,
+ write_empty_values=False, _inspec=False):
+ """
+ Parse a config file or create a config file object.
+
+ ``ConfigObj(infile=None, configspec=None, encoding=None,
+ interpolation=True, raise_errors=False, list_values=True,
+ create_empty=False, file_error=False, stringify=True,
+ indent_type=None, default_encoding=None, unrepr=False,
+ write_empty_values=False, _inspec=False)``
+ """
+ self._inspec = _inspec
+ # init the superclass
+ Section.__init__(self, self, 0, self)
+
+ infile = infile or []
+
+ _options = {'configspec': configspec,
+ 'encoding': encoding, 'interpolation': interpolation,
+ 'raise_errors': raise_errors, 'list_values': list_values,
+ 'create_empty': create_empty, 'file_error': file_error,
+ 'stringify': stringify, 'indent_type': indent_type,
+ 'default_encoding': default_encoding, 'unrepr': unrepr,
+ 'write_empty_values': write_empty_values}
+
+ if options is None:
+ options = _options
+ else:
+ import warnings
+ warnings.warn('Passing in an options dictionary to ConfigObj() is '
+ 'deprecated. Use **options instead.',
+ DeprecationWarning, stacklevel=2)
+
+ # TODO: check the values too.
+ for entry in options:
+ if entry not in OPTION_DEFAULTS:
+ raise TypeError('Unrecognised option "%s".' % entry)
+ for entry, value in list(OPTION_DEFAULTS.items()):
+ if entry not in options:
+ options[entry] = value
+ keyword_value = _options[entry]
+ if value != keyword_value:
+ options[entry] = keyword_value
+
+ # XXXX this ignores an explicit list_values = True in combination
+ # with _inspec. The user should *never* do that anyway, but still...
+ if _inspec:
+ options['list_values'] = False
+
+ self._initialise(options)
+ configspec = options['configspec']
+ self._original_configspec = configspec
+ self._load(infile, configspec)
+
+
+ def _load(self, infile, configspec):
+ if isinstance(infile, six.string_types):
+ self.filename = infile
+ if os.path.isfile(infile):
+ with open(infile, 'rb') as h:
+ content = h.readlines() or []
+ elif self.file_error:
+ # raise an error if the file doesn't exist
+ raise IOError('Config file not found: "%s".' % self.filename)
+ else:
+ # file doesn't already exist
+ if self.create_empty:
+ # this is a good test that the filename specified
+ # isn't impossible - like on a non-existent device
+ with open(infile, 'w') as h:
+ h.write('')
+ content = []
+
+ elif isinstance(infile, (list, tuple)):
+ content = list(infile)
+
+ elif isinstance(infile, dict):
+ # initialise self
+ # the Section class handles creating subsections
+ if isinstance(infile, ConfigObj):
+ # get a copy of our ConfigObj
+ def set_section(in_section, this_section):
+ for entry in in_section.scalars:
+ this_section[entry] = in_section[entry]
+ for section in in_section.sections:
+ this_section[section] = {}
+ set_section(in_section[section], this_section[section])
+ set_section(infile, self)
+
+ else:
+ for entry in infile:
+ self[entry] = infile[entry]
+ del self._errors
+
+ if configspec is not None:
+ self._handle_configspec(configspec)
+ else:
+ self.configspec = None
+ return
+
+ elif getattr(infile, 'read', MISSING) is not MISSING:
+ # This supports file like objects
+ content = infile.read() or []
+ # needs splitting into lines - but needs doing *after* decoding
+ # in case it's not an 8 bit encoding
+ else:
+ raise TypeError('infile must be a filename, file like object, or list of lines.')
+
+ if content:
+ # don't do it for the empty ConfigObj
+ content = self._handle_bom(content)
+ # infile is now *always* a list
+ #
+ # Set the newlines attribute (first line ending it finds)
+ # and strip trailing '\n' or '\r' from lines
+ for line in content:
+ if (not line) or (line[-1] not in ('\r', '\n')):
+ continue
+ for end in ('\r\n', '\n', '\r'):
+ if line.endswith(end):
+ self.newlines = end
+ break
+ break
+
+ assert all(isinstance(line, six.string_types) for line in content), repr(content)
+ content = [line.rstrip('\r\n') for line in content]
+
+ self._parse(content)
+ # if we had any errors, now is the time to raise them
+ if self._errors:
+ info = "at line %s." % self._errors[0].line_number
+ if len(self._errors) > 1:
+ msg = "Parsing failed with several errors.\nFirst error %s" % info
+ error = ConfigObjError(msg)
+ else:
+ error = self._errors[0]
+ # set the errors attribute; it's a list of tuples:
+ # (error_type, message, line_number)
+ error.errors = self._errors
+ # set the config attribute
+ error.config = self
+ raise error
+ # delete private attributes
+ del self._errors
+
+ if configspec is None:
+ self.configspec = None
+ else:
+ self._handle_configspec(configspec)
+
+
+ def _initialise(self, options=None):
+ if options is None:
+ options = OPTION_DEFAULTS
+
+ # initialise a few variables
+ self.filename = None
+ self._errors = []
+ self.raise_errors = options['raise_errors']
+ self.interpolation = options['interpolation']
+ self.list_values = options['list_values']
+ self.create_empty = options['create_empty']
+ self.file_error = options['file_error']
+ self.stringify = options['stringify']
+ self.indent_type = options['indent_type']
+ self.encoding = options['encoding']
+ self.default_encoding = options['default_encoding']
+ self.BOM = False
+ self.newlines = None
+ self.write_empty_values = options['write_empty_values']
+ self.unrepr = options['unrepr']
+
+ self.initial_comment = []
+ self.final_comment = []
+ self.configspec = None
+
+ if self._inspec:
+ self.list_values = False
+
+ # Clear section attributes as well
+ Section._initialise(self)
+
+
+ def __repr__(self):
+ def _getval(key):
+ try:
+ return self[key]
+ except MissingInterpolationOption:
+ return dict.__getitem__(self, key)
+ return ('ConfigObj({%s})' %
+ ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
+ for key in (self.scalars + self.sections)]))
+
+
+ def _handle_bom(self, infile):
+ """
+ Handle any BOM, and decode if necessary.
+
+ If an encoding is specified, that *must* be used - but the BOM should
+ still be removed (and the BOM attribute set).
+
+ (If the encoding is wrongly specified, then a BOM for an alternative
+ encoding won't be discovered or removed.)
+
+ If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
+ removed. The BOM attribute will be set. UTF16 will be decoded to
+ unicode.
+
+ NOTE: This method must not be called with an empty ``infile``.
+
+ Specifying the *wrong* encoding is likely to cause a
+ ``UnicodeDecodeError``.
+
+ ``infile`` must always be returned as a list of lines, but may be
+ passed in as a single string.
+ """
+
+ if ((self.encoding is not None) and
+ (self.encoding.lower() not in BOM_LIST)):
+ # No need to check for a BOM
+ # the encoding specified doesn't have one
+ # just decode
+ return self._decode(infile, self.encoding)
+
+ if isinstance(infile, (list, tuple)):
+ line = infile[0]
+ else:
+ line = infile
+
+ if isinstance(line, six.text_type):
+ # it's already decoded and there's no need to do anything
+ # else, just use the _decode utility method to handle
+ # listifying appropriately
+ return self._decode(infile, self.encoding)
+
+ if self.encoding is not None:
+ # encoding explicitly supplied
+ # And it could have an associated BOM
+ # TODO: if encoding is just UTF16 - we ought to check for both
+ # TODO: big endian and little endian versions.
+ enc = BOM_LIST[self.encoding.lower()]
+ if enc == 'utf_16':
+ # For UTF16 we try big endian and little endian
+ for BOM, (encoding, final_encoding) in list(BOMS.items()):
+ if not final_encoding:
+ # skip UTF8
+ continue
+ if infile.startswith(BOM):
+ ### BOM discovered
+ ##self.BOM = True
+ # Don't need to remove BOM
+ return self._decode(infile, encoding)
+
+ # If we get this far, will *probably* raise a DecodeError
+ # As it doesn't appear to start with a BOM
+ return self._decode(infile, self.encoding)
+
+ # Must be UTF8
+ BOM = BOM_SET[enc]
+ if not line.startswith(BOM):
+ return self._decode(infile, self.encoding)
+
+ newline = line[len(BOM):]
+
+ # BOM removed
+ if isinstance(infile, (list, tuple)):
+ infile[0] = newline
+ else:
+ infile = newline
+ self.BOM = True
+ return self._decode(infile, self.encoding)
+
+ # No encoding specified - so we need to check for UTF8/UTF16
+ for BOM, (encoding, final_encoding) in list(BOMS.items()):
+ if not isinstance(line, six.binary_type) or not line.startswith(BOM):
+ # didn't specify a BOM, or it's not a bytestring
+ continue
+ else:
+ # BOM discovered
+ self.encoding = final_encoding
+ if not final_encoding:
+ self.BOM = True
+ # UTF8
+ # remove BOM
+ newline = line[len(BOM):]
+ if isinstance(infile, (list, tuple)):
+ infile[0] = newline
+ else:
+ infile = newline
+ # UTF-8
+ if isinstance(infile, six.text_type):
+ return infile.splitlines(True)
+ elif isinstance(infile, six.binary_type):
+ return infile.decode('utf-8').splitlines(True)
+ else:
+ return self._decode(infile, 'utf-8')
+ # UTF16 - have to decode
+ return self._decode(infile, encoding)
+
+
+ if six.PY2 and isinstance(line, str):
+ # don't actually do any decoding, since we're on python 2 and
+ # returning a bytestring is fine
+ return self._decode(infile, None)
+ # No BOM discovered and no encoding specified, default to UTF-8
+ if isinstance(infile, six.binary_type):
+ return infile.decode('utf-8').splitlines(True)
+ else:
+ return self._decode(infile, 'utf-8')
+
+
+ def _a_to_u(self, aString):
+ """Decode ASCII strings to unicode if a self.encoding is specified."""
+ if isinstance(aString, six.binary_type) and self.encoding:
+ return aString.decode(self.encoding)
+ else:
+ return aString
+
+
+ def _decode(self, infile, encoding):
+ """
+        Decode infile to unicode, using the specified encoding.
+
+        If infile is a string, it also needs converting to a list.
+ """
+ if isinstance(infile, six.string_types):
+ return infile.splitlines(True)
+ if isinstance(infile, six.binary_type):
+ # NOTE: Could raise a ``UnicodeDecodeError``
+ if encoding:
+ return infile.decode(encoding).splitlines(True)
+ else:
+ return infile.splitlines(True)
+
+ if encoding:
+ for i, line in enumerate(infile):
+ if isinstance(line, six.binary_type):
+ # NOTE: The isinstance test here handles mixed lists of unicode/string
+ # NOTE: But the decode will break on any non-string values
+ # NOTE: Or could raise a ``UnicodeDecodeError``
+ infile[i] = line.decode(encoding)
+ return infile
+
+
+ def _decode_element(self, line):
+ """Decode element to unicode if necessary."""
+ if isinstance(line, six.binary_type) and self.default_encoding:
+ return line.decode(self.default_encoding)
+ else:
+ return line
+
+
+ # TODO: this may need to be modified
+ def _str(self, value):
+ """
+ Used by ``stringify`` within validate, to turn non-string values
+ into strings.
+ """
+ if not isinstance(value, six.string_types):
+            # intentionally 'str' because it's just whatever the "normal"
+ # string type is for the python version we're dealing with
+ return str(value)
+ else:
+ return value
+
+
+ def _parse(self, infile):
+ """Actually parse the config file."""
+ temp_list_values = self.list_values
+ if self.unrepr:
+ self.list_values = False
+
+ comment_list = []
+ done_start = False
+ this_section = self
+ maxline = len(infile) - 1
+ cur_index = -1
+ reset_comment = False
+
+ while cur_index < maxline:
+ if reset_comment:
+ comment_list = []
+ cur_index += 1
+ line = infile[cur_index]
+ sline = line.strip()
+ # do we have anything on the line ?
+ if not sline or sline.startswith('#'):
+ reset_comment = False
+ comment_list.append(line)
+ continue
+
+ if not done_start:
+ # preserve initial comment
+ self.initial_comment = comment_list
+ comment_list = []
+ done_start = True
+
+ reset_comment = True
+ # first we check if it's a section marker
+ mat = self._sectionmarker.match(line)
+ if mat is not None:
+ # is a section line
+ (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ cur_depth = sect_open.count('[')
+ if cur_depth != sect_close.count(']'):
+ self._handle_error("Cannot compute the section depth",
+ NestingError, infile, cur_index)
+ continue
+
+ if cur_depth < this_section.depth:
+ # the new section is dropping back to a previous level
+ try:
+ parent = self._match_depth(this_section,
+ cur_depth).parent
+ except SyntaxError:
+ self._handle_error("Cannot compute nesting level",
+ NestingError, infile, cur_index)
+ continue
+ elif cur_depth == this_section.depth:
+ # the new section is a sibling of the current section
+ parent = this_section.parent
+ elif cur_depth == this_section.depth + 1:
+                    # the new section is a child of the current section
+ parent = this_section
+ else:
+ self._handle_error("Section too nested",
+ NestingError, infile, cur_index)
+ continue
+
+ sect_name = self._unquote(sect_name)
+ if sect_name in parent:
+ self._handle_error('Duplicate section name',
+ DuplicateError, infile, cur_index)
+ continue
+
+ # create the new section
+ this_section = Section(
+ parent,
+ cur_depth,
+ self,
+ name=sect_name)
+ parent[sect_name] = this_section
+ parent.inline_comments[sect_name] = comment
+ parent.comments[sect_name] = comment_list
+ continue
+ #
+ # it's not a section marker,
+ # so it should be a valid ``key = value`` line
+ mat = self._keyword.match(line)
+ if mat is None:
+ self._handle_error(
+ 'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
+ ParseError, infile, cur_index)
+ else:
+ # is a keyword value
+ # value will include any inline comment
+ (indent, key, value) = mat.groups()
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ # check for a multiline value
+ if value[:3] in ['"""', "'''"]:
+ try:
+ value, comment, cur_index = self._multiline(
+ value, infile, cur_index, maxline)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in multiline value',
+ ParseError, infile, cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception as e:
+ if type(e) == UnknownType:
+ msg = 'Unknown name or type in value'
+ else:
+ msg = 'Parse error from unrepr-ing multiline value'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception as e:
+ if isinstance(e, UnknownType):
+ msg = 'Unknown name or type in value'
+ else:
+ msg = 'Parse error from unrepr-ing value'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ # extract comment and lists
+ try:
+ (value, comment) = self._handle_value(value)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in value',
+ ParseError, infile, cur_index)
+ continue
+ #
+ key = self._unquote(key)
+ if key in this_section:
+ self._handle_error(
+ 'Duplicate keyword name',
+ DuplicateError, infile, cur_index)
+ continue
+ # add the key.
+ # we set unrepr because if we have got this far we will never
+ # be creating a new section
+ this_section.__setitem__(key, value, unrepr=True)
+ this_section.inline_comments[key] = comment
+ this_section.comments[key] = comment_list
+ continue
+ #
+ if self.indent_type is None:
+ # no indentation used, set the type accordingly
+ self.indent_type = ''
+
+ # preserve the final comment
+ if not self and not self.initial_comment:
+ self.initial_comment = comment_list
+ elif not reset_comment:
+ self.final_comment = comment_list
+ self.list_values = temp_list_values
+
+
+ def _match_depth(self, sect, depth):
+ """
+        Given a section and a depth level, walk back through the section's
+        parents to see if the depth level matches a previous section.
+
+ Return a reference to the right section,
+ or raise a SyntaxError.
+ """
+ while depth < sect.depth:
+ if sect is sect.parent:
+ # we've reached the top level already
+ raise SyntaxError()
+ sect = sect.parent
+ if sect.depth == depth:
+ return sect
+ # shouldn't get here
+ raise SyntaxError()
+
+
+ def _handle_error(self, text, ErrorClass, infile, cur_index):
+ """
+ Handle an error according to the error settings.
+
+ Either raise the error or store it.
+        The error will have occurred at ``cur_index``.
+ """
+ line = infile[cur_index]
+ cur_index += 1
+ message = '{0} at line {1}.'.format(text, cur_index)
+ error = ErrorClass(message, cur_index, line)
+ if self.raise_errors:
+ # raise the error - parsing stops here
+ raise error
+ # store the error
+ # reraise when parsing has finished
+ self._errors.append(error)
+
+
+ def _unquote(self, value):
+ """Return an unquoted version of a value"""
+ if not value:
+ # should only happen during parsing of lists
+ raise SyntaxError
+ if (value[0] == value[-1]) and (value[0] in ('"', "'")):
+ value = value[1:-1]
+ return value
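+        # e.g. (illustrative): '"value"' -> 'value' and "'value'" -> 'value',
+        # while an unquoted 'value' is returned unchanged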
+
+
+ def _quote(self, value, multiline=True):
+ """
+ Return a safely quoted version of a value.
+
+ Raise a ConfigObjError if the value cannot be safely quoted.
+ If multiline is ``True`` (default) then use triple quotes
+ if necessary.
+
+ * Don't quote values that don't need it.
+ * Recursively quote members of a list and return a comma joined list.
+ * Multiline is ``False`` for lists.
+ * Obey list syntax for empty and single member lists.
+
+ If ``list_values=False`` then the value is only quoted if it contains
+ a ``\\n`` (is multiline) or '#'.
+
+ If ``write_empty_values`` is set, and the value is an empty string, it
+ won't be quoted.
+ """
+ if multiline and self.write_empty_values and value == '':
+ # Only if multiline is set, so that it is used for values not
+ # keys, and not values that are part of a list
+ return ''
+
+ if multiline and isinstance(value, (list, tuple)):
+ if not value:
+ return ','
+ elif len(value) == 1:
+ return self._quote(value[0], multiline=False) + ','
+ return ', '.join([self._quote(val, multiline=False)
+ for val in value])
+ if not isinstance(value, six.string_types):
+ if self.stringify:
+                # intentionally 'str' because it's just whatever the "normal"
+ # string type is for the python version we're dealing with
+ value = str(value)
+ else:
+ raise TypeError('Value "%s" is not a string.' % value)
+
+ if not value:
+ return '""'
+
+ no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
+ need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
+ hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
+ check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
+
+ if check_for_single:
+ if not self.list_values:
+ # we don't quote if ``list_values=False``
+ quot = noquot
+ # for normal values either single or double quotes will do
+ elif '\n' in value:
+ # will only happen if multiline is off - e.g. '\n' in key
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ elif ((value[0] not in wspace_plus) and
+ (value[-1] not in wspace_plus) and
+ (',' not in value)):
+ quot = noquot
+ else:
+ quot = self._get_single_quote(value)
+ else:
+ # if value has '\n' or "'" *and* '"', it will need triple quotes
+ quot = self._get_triple_quote(value)
+
+ if quot == noquot and '#' in value and self.list_values:
+ quot = self._get_single_quote(value)
+
+ return quot % value
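+        # e.g. (illustrative): 'plain' is returned unquoted, 'has a comma,'
+        # gains surrounding quotes, and a value containing a newline (or both
+        # quote characters) is wrapped in triple quotes when multiline is on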
+
+
+ def _get_single_quote(self, value):
+ if ("'" in value) and ('"' in value):
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ elif '"' in value:
+ quot = squot
+ else:
+ quot = dquot
+ return quot
+
+
+ def _get_triple_quote(self, value):
+ if (value.find('"""') != -1) and (value.find("'''") != -1):
+ raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
+ if value.find('"""') == -1:
+ quot = tdquot
+ else:
+ quot = tsquot
+ return quot
+
+
+ def _handle_value(self, value):
+ """
+        Given a value string, unquote, remove comment,
+        and handle lists (including empty and single member lists).
+ """
+ if self._inspec:
+ # Parsing a configspec so don't handle comments
+ return (value, '')
+ # do we look for lists in values ?
+ if not self.list_values:
+ mat = self._nolistvalue.match(value)
+ if mat is None:
+ raise SyntaxError()
+ # NOTE: we don't unquote here
+ return mat.groups()
+ #
+ mat = self._valueexp.match(value)
+ if mat is None:
+ # the value is badly constructed, probably badly quoted,
+ # or an invalid list
+ raise SyntaxError()
+ (list_values, single, empty_list, comment) = mat.groups()
+ if (list_values == '') and (single is None):
+ # change this if you want to accept empty values
+ raise SyntaxError()
+        # NOTE: there is no error handling from here on; if the regex
+        # is wrong, incorrect values will slip through
+ if empty_list is not None:
+ # the single comma - meaning an empty list
+ return ([], comment)
+ if single is not None:
+ # handle empty values
+ if list_values and not single:
+ # FIXME: the '' is a workaround because our regex now matches
+ # '' at the end of a list if it has a trailing comma
+ single = None
+ else:
+ single = single or '""'
+ single = self._unquote(single)
+ if list_values == '':
+ # not a list value
+ return (single, comment)
+ the_list = self._listvalueexp.findall(list_values)
+ the_list = [self._unquote(val) for val in the_list]
+ if single is not None:
+ the_list += [single]
+ return (the_list, comment)
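+        # e.g. (illustrative): 'a, b, c' parses to the list ['a', 'b', 'c'],
+        # a lone ',' parses to the empty list, and 'value  # note' parses to
+        # 'value' with '# note' returned as the inline comment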
+
+
+ def _multiline(self, value, infile, cur_index, maxline):
+ """Extract the value, where we are in a multiline situation."""
+ quot = value[:3]
+ newvalue = value[3:]
+ single_line = self._triple_quote[quot][0]
+ multi_line = self._triple_quote[quot][1]
+ mat = single_line.match(value)
+ if mat is not None:
+ retval = list(mat.groups())
+ retval.append(cur_index)
+ return retval
+ elif newvalue.find(quot) != -1:
+ # somehow the triple quote is missing
+ raise SyntaxError()
+ #
+ while cur_index < maxline:
+ cur_index += 1
+ newvalue += '\n'
+ line = infile[cur_index]
+ if line.find(quot) == -1:
+ newvalue += line
+ else:
+ # end of multiline, process it
+ break
+ else:
+ # we've got to the end of the config, oops...
+ raise SyntaxError()
+ mat = multi_line.match(line)
+ if mat is None:
+ # a badly formed line
+ raise SyntaxError()
+ (value, comment) = mat.groups()
+ return (newvalue + value, comment, cur_index)
+
+
+ def _handle_configspec(self, configspec):
+ """Parse the configspec."""
+ # FIXME: Should we check that the configspec was created with the
+ # correct settings ? (i.e. ``list_values=False``)
+ if not isinstance(configspec, ConfigObj):
+ try:
+ configspec = ConfigObj(configspec,
+ raise_errors=True,
+ file_error=True,
+ _inspec=True)
+ except ConfigObjError as e:
+ # FIXME: Should these errors have a reference
+ # to the already parsed ConfigObj ?
+ raise ConfigspecError('Parsing configspec failed: %s' % e)
+ except IOError as e:
+ raise IOError('Reading configspec failed: %s' % e)
+
+ self.configspec = configspec
+
+
+
+ def _set_configspec(self, section, copy):
+ """
+ Called by validate. Handles setting the configspec on subsections
+        including sections to be validated by ``__many__``.
+ """
+ configspec = section.configspec
+ many = configspec.get('__many__')
+ if isinstance(many, dict):
+ for entry in section.sections:
+ if entry not in configspec:
+ section[entry].configspec = many
+
+ for entry in configspec.sections:
+ if entry == '__many__':
+ continue
+ if entry not in section:
+ section[entry] = {}
+ section[entry]._created = True
+ if copy:
+ # copy comments
+ section.comments[entry] = configspec.comments.get(entry, [])
+ section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
+
+ # Could be a scalar when we expect a section
+ if isinstance(section[entry], Section):
+ section[entry].configspec = configspec[entry]
+
+
+ def _write_line(self, indent_string, entry, this_entry, comment):
+ """Write an individual line, for the write method"""
+        # NOTE: the calls to self._quote here handle non-string values.
+ if not self.unrepr:
+ val = self._decode_element(self._quote(this_entry))
+ else:
+ val = repr(this_entry)
+ return '%s%s%s%s%s' % (indent_string,
+ self._decode_element(self._quote(entry, multiline=False)),
+ self._a_to_u(' = '),
+ val,
+ self._decode_element(comment))
+
+
+ def _write_marker(self, indent_string, depth, entry, comment):
+ """Write a section marker line"""
+ return '%s%s%s%s%s' % (indent_string,
+ self._a_to_u('[' * depth),
+ self._quote(self._decode_element(entry), multiline=False),
+ self._a_to_u(']' * depth),
+ self._decode_element(comment))
+
+
+ def _handle_comment(self, comment):
+ """Deal with a comment."""
+ if not comment:
+ return ''
+ start = self.indent_type
+ if not comment.startswith('#'):
+ start += self._a_to_u(' # ')
+ return (start + comment)
+
+
+ # Public methods
+
+ def write(self, outfile=None, section=None):
+ """
+ Write the current ConfigObj as a file
+
+ tekNico: FIXME: use StringIO instead of real files
+
+ >>> filename = a.filename
+ >>> a.filename = 'test.ini'
+ >>> a.write()
+ >>> a.filename = filename
+ >>> a == ConfigObj('test.ini', raise_errors=True)
+ 1
+ >>> import os
+ >>> os.remove('test.ini')
+ """
+ if self.indent_type is None:
+ # this can be true if initialised from a dictionary
+ self.indent_type = DEFAULT_INDENT_TYPE
+
+ out = []
+ cs = self._a_to_u('#')
+ csp = self._a_to_u('# ')
+ if section is None:
+ int_val = self.interpolation
+ self.interpolation = False
+ section = self
+ for line in self.initial_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+
+ indent_string = self.indent_type * section.depth
+ for entry in (section.scalars + section.sections):
+ if entry in section.defaults:
+ # don't write out default values
+ continue
+ for comment_line in section.comments[entry]:
+ comment_line = self._decode_element(comment_line.lstrip())
+ if comment_line and not comment_line.startswith(cs):
+ comment_line = csp + comment_line
+ out.append(indent_string + comment_line)
+ this_entry = section[entry]
+ comment = self._handle_comment(section.inline_comments[entry])
+
+ if isinstance(this_entry, Section):
+ # a section
+ out.append(self._write_marker(
+ indent_string,
+ this_entry.depth,
+ entry,
+ comment))
+ out.extend(self.write(section=this_entry))
+ else:
+ out.append(self._write_line(
+ indent_string,
+ entry,
+ this_entry,
+ comment))
+
+ if section is self:
+ for line in self.final_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+ self.interpolation = int_val
+
+ if section is not self:
+ return out
+
+ if (self.filename is None) and (outfile is None):
+ # output a list of lines
+ # might need to encode
+ # NOTE: This will *screw* UTF16, each line will start with the BOM
+ if self.encoding:
+ out = [l.encode(self.encoding) for l in out]
+ if (self.BOM and ((self.encoding is None) or
+ (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
+ # Add the UTF8 BOM
+ if not out:
+ out.append('')
+ out[0] = BOM_UTF8 + out[0]
+ return out
+
+ # Turn the list to a string, joined with correct newlines
+ newline = self.newlines or os.linesep
+ if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
+ and sys.platform == 'win32' and newline == '\r\n'):
+ # Windows specific hack to avoid writing '\r\r\n'
+ newline = '\n'
+ output = self._a_to_u(newline).join(out)
+ if not output.endswith(newline):
+ output += newline
+
+ if isinstance(output, six.binary_type):
+ output_bytes = output
+ else:
+ output_bytes = output.encode(self.encoding or
+ self.default_encoding or
+ 'ascii')
+
+ if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
+ # Add the UTF8 BOM
+ output_bytes = BOM_UTF8 + output_bytes
+
+ if outfile is not None:
+ outfile.write(output_bytes)
+ else:
+ with open(self.filename, 'wb') as h:
+ h.write(output_bytes)
+
+ def validate(self, validator, preserve_errors=False, copy=False,
+ section=None):
+ """
+ Test the ConfigObj against a configspec.
+
+ It uses the ``validator`` object from *validate.py*.
+
+ To run ``validate`` on the current ConfigObj, call: ::
+
+ test = config.validate(validator)
+
+ (Normally the configspec will have been passed in when the ConfigObj was
+ created - though you can also dynamically assign a dictionary of checks
+ to the ``configspec`` attribute of a section.)
+
+ It returns ``True`` if everything passes, or a dictionary of
+ pass/fails (True/False). If every member of a subsection passes, it
+ will just have the value ``True``. (It also returns ``False`` if all
+ members fail).
+
+ In addition, it converts the values from strings to their native
+ types if their checks pass (and ``stringify`` is set).
+
+ If ``preserve_errors`` is ``True`` (``False`` is the default) then instead
+ of marking a fail with ``False``, it will preserve the actual
+ exception object. This can contain info about the reason for failure.
+ For example the ``VdtValueTooSmallError`` indicates that the value
+ supplied was too small. If a value (or section) is missing it will
+ still be marked as ``False``.
+
+ You must have the validate module to use ``preserve_errors=True``.
+
+ You can then use the ``flatten_errors`` function to turn your nested
+ results dictionary into a flattened list of failures - useful for
+ displaying meaningful error messages.
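+
+ A sketch of a fuller call, using ``preserve_errors`` together with
+ ``flatten_errors`` (the file names are only placeholders)::
+
+ from configobj import ConfigObj, flatten_errors
+ from configobj.validate import Validator
+ config = ConfigObj('conf.ini', configspec='conf.spec')
+ result = config.validate(Validator(), preserve_errors=True)
+ failures = flatten_errors(config, result)
+ # each entry is ([section, subsection, ...], key, error-or-False)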
+ """
+ if section is None:
+ if self.configspec is None:
+ raise ValueError('No configspec supplied.')
+ if preserve_errors:
+ # We do this once to remove a top level dependency on the validate module,
+ # which makes importing configobj faster
+ from configobj.validate import VdtMissingValue
+ self._vdtMissingValue = VdtMissingValue
+
+ section = self
+
+ if copy:
+ section.initial_comment = section.configspec.initial_comment
+ section.final_comment = section.configspec.final_comment
+ section.encoding = section.configspec.encoding
+ section.BOM = section.configspec.BOM
+ section.newlines = section.configspec.newlines
+ section.indent_type = section.configspec.indent_type
+
+ #
+ # section.default_values.clear() #??
+ configspec = section.configspec
+ self._set_configspec(section, copy)
+
+
+ def validate_entry(entry, spec, val, missing, ret_true, ret_false):
+ section.default_values.pop(entry, None)
+
+ try:
+ section.default_values[entry] = validator.get_default_value(configspec[entry])
+ except (KeyError, AttributeError, validator.baseErrorClass):
+ # No default, bad default or validator has no 'get_default_value'
+ # (e.g. SimpleVal)
+ pass
+
+ try:
+ check = validator.check(spec,
+ val,
+ missing=missing
+ )
+ except validator.baseErrorClass as e:
+ if not preserve_errors or isinstance(e, self._vdtMissingValue):
+ out[entry] = False
+ else:
+ # preserve the error
+ out[entry] = e
+ ret_false = False
+ ret_true = False
+ else:
+ ret_false = False
+ out[entry] = True
+ if self.stringify or missing:
+ # if we are doing type conversion
+ # or the value is a supplied default
+ if not self.stringify:
+ if isinstance(check, (list, tuple)):
+ # preserve lists
+ check = [self._str(item) for item in check]
+ elif missing and check is None:
+ # convert the None from a default to a ''
+ check = ''
+ else:
+ check = self._str(check)
+ if (check != val) or missing:
+ section[entry] = check
+ if not copy and missing and entry not in section.defaults:
+ section.defaults.append(entry)
+ return ret_true, ret_false
+
+ #
+ out = {}
+ ret_true = True
+ ret_false = True
+
+ unvalidated = [k for k in section.scalars if k not in configspec]
+ incorrect_sections = [k for k in configspec.sections if k in section.scalars]
+ incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
+
+ for entry in configspec.scalars:
+ if entry in ('__many__', '___many___'):
+ # reserved names
+ continue
+ if (not entry in section.scalars) or (entry in section.defaults):
+ # missing entries
+ # or entries from defaults
+ missing = True
+ val = None
+ if copy and entry not in section.scalars:
+ # copy comments
+ section.comments[entry] = (
+ configspec.comments.get(entry, []))
+ section.inline_comments[entry] = (
+ configspec.inline_comments.get(entry, ''))
+ #
+ else:
+ missing = False
+ val = section[entry]
+
+ ret_true, ret_false = validate_entry(entry, configspec[entry], val,
+ missing, ret_true, ret_false)
+
+ many = None
+ if '__many__' in configspec.scalars:
+ many = configspec['__many__']
+ elif '___many___' in configspec.scalars:
+ many = configspec['___many___']
+
+ if many is not None:
+ for entry in unvalidated:
+ val = section[entry]
+ ret_true, ret_false = validate_entry(entry, many, val, False,
+ ret_true, ret_false)
+ unvalidated = []
+
+ for entry in incorrect_scalars:
+ ret_true = False
+ if not preserve_errors:
+ out[entry] = False
+ else:
+ ret_false = False
+ msg = 'Value %r was provided as a section' % entry
+ out[entry] = validator.baseErrorClass(msg)
+ for entry in incorrect_sections:
+ ret_true = False
+ if not preserve_errors:
+ out[entry] = False
+ else:
+ ret_false = False
+ msg = 'Section %r was provided as a single value' % entry
+ out[entry] = validator.baseErrorClass(msg)
+
+ # Missing sections will have been created as empty ones when the
+ # configspec was read.
+ for entry in section.sections:
+ # FIXME: this means DEFAULT is not copied in copy mode
+ if section is self and entry == 'DEFAULT':
+ continue
+ if section[entry].configspec is None:
+ unvalidated.append(entry)
+ continue
+ if copy:
+ section.comments[entry] = configspec.comments.get(entry, [])
+ section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
+ check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
+ out[entry] = check
+ if check == False:
+ ret_true = False
+ elif check == True:
+ ret_false = False
+ else:
+ ret_true = False
+
+ section.extra_values = unvalidated
+ if preserve_errors and not section._created:
+ # If the section wasn't created (i.e. it wasn't missing)
+ # then we can't return False, we need to preserve errors
+ ret_false = False
+ #
+ if ret_false and preserve_errors and out:
+ # If we are preserving errors, but all
+ # the failures are from missing sections / values
+ # then we can return False. Otherwise there is a
+ # real failure that we need to preserve.
+ ret_false = not any(out.values())
+ if ret_true:
+ return True
+ elif ret_false:
+ return False
+ return out
+
+
+ def reset(self):
+ """Clear ConfigObj instance and restore to 'freshly created' state."""
+ self.clear()
+ self._initialise()
+ # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
+ # requires an empty dictionary
+ self.configspec = None
+ # Just to be sure ;-)
+ self._original_configspec = None
+
+
+ def reload(self):
+ """
+ Reload a ConfigObj from file.
+
+ This method raises a ``ReloadError`` if the ConfigObj doesn't have
+ a filename attribute pointing to a file.
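+
+ A minimal sketch (``'config.ini'`` is only a placeholder path)::
+
+ cfg = ConfigObj('config.ini')
+ # ... the file is edited on disk ...
+ cfg.reload() # re-reads 'config.ini', keeping the original options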
+ """
+ if not isinstance(self.filename, six.string_types):
+ raise ReloadError()
+
+ filename = self.filename
+ current_options = {}
+ for entry in OPTION_DEFAULTS:
+ if entry == 'configspec':
+ continue
+ current_options[entry] = getattr(self, entry)
+
+ configspec = self._original_configspec
+ current_options['configspec'] = configspec
+
+ self.clear()
+ self._initialise(current_options)
+ self._load(filename, configspec)
+
+
+
+class SimpleVal(object):
+ """
+ A simple validator.
+ Can be used to check that all expected members are present.
+
+ To use it, provide a configspec containing all of your expected members
+ (the value given for each is ignored). Pass an instance of ``SimpleVal`` to the ``validate``
+ method of your ``ConfigObj``. ``validate`` will return ``True`` if all
+ members are present, or a dictionary with True/False meaning
+ present/missing. (Whole missing sections will be replaced with ``False``)
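+
+ For illustration (the keys here are made up)::
+
+ cfg = ConfigObj(['a = 1', 'b = 2'], configspec=['a = ""', 'b = ""', 'c = ""'])
+ result = cfg.validate(SimpleVal())
+ # result == {'a': True, 'b': True, 'c': False} - only 'c' is missing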
+ """
+
+ def __init__(self):
+ self.baseErrorClass = ConfigObjError
+
+ def check(self, check, member, missing=False):
+ """A dummy check method, always returns the value unchanged."""
+ if missing:
+ raise self.baseErrorClass()
+ return member
+
+
+def flatten_errors(cfg, res, levels=None, results=None):
+ """
+ An example function that will turn a nested dictionary of results
+ (as returned by ``ConfigObj.validate``) into a flat list.
+
+ ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
+ dictionary returned by ``validate``.
+
+ (This is a recursive function, so you shouldn't pass the ``levels`` or
+ ``results`` arguments yourself - they are used internally by the recursion.)
+
+ Returns a list of keys that failed. Each member of the list is a tuple::
+
+ ([list of sections...], key, result)
+
+ If ``validate`` was called with ``preserve_errors=False`` (the default)
+ then ``result`` will always be ``False``.
+
+ *list of sections* is a flattened list of sections that the key was found
+ in.
+
+ If the section was missing (or a section was expected and a scalar provided
+ - or vice-versa) then key will be ``None``.
+
+ If the value (or section) was missing then ``result`` will be ``False``.
+
+ If ``validate`` was called with ``preserve_errors=True`` and a value
+ was present, but failed the check, then ``result`` will be the exception
+ object returned. You can use this as a string that describes the failure.
+
+ For example *The value "3" is of the wrong type*.
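+
+ A small usage sketch, assuming ``res = cfg.validate(vtor, preserve_errors=True)``
+ has already been run::
+
+ failures = flatten_errors(cfg, res)
+ # an entry might look like (['section', 'sub-section'], 'value', False),
+ # where False means the key was missing rather than failing a check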
+ """
+ if levels is None:
+ # first time called
+ levels = []
+ results = []
+ if res == True:
+ return sorted(results)
+ if res == False or isinstance(res, Exception):
+ results.append((levels[:], None, res))
+ if levels:
+ levels.pop()
+ return sorted(results)
+ for (key, val) in list(res.items()):
+ if val == True:
+ continue
+ if isinstance(cfg.get(key), dict):
+ # Go down one level
+ levels.append(key)
+ flatten_errors(cfg[key], val, levels, results)
+ continue
+ results.append((levels[:], key, val))
+ #
+ # Go up one level
+ if levels:
+ levels.pop()
+ #
+ return sorted(results)
+
+
+def get_extra_values(conf, _prepend=()):
+ """
+ Find all the values and sections not in the configspec from a validated
+ ConfigObj.
+
+ ``get_extra_values`` returns a list of tuples where each tuple represents
+ either an extra section, or an extra value.
+
+ Each tuple contains two values: a tuple representing the section the value
+ is in and the name of the extra value. For extra values in the top level
+ section the first member will be an empty tuple. For values in the 'foo'
+ section the first member will be ``('foo',)``. For members in the 'bar'
+ subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
+
+ NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
+ been validated it will return an empty list.
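+
+ A sketch of typical use (section and key names are illustrative, with
+ ``Validator`` from ``configobj.validate``)::
+
+ cfg = ConfigObj('conf.ini', configspec='conf.spec')
+ cfg.validate(Validator())
+ extras = get_extra_values(cfg)
+ # e.g. [((), 'extra-section'), (('section', 'sub-section'), 'extra')]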
+ """
+ out = []
+
+ out.extend([(_prepend, name) for name in conf.extra_values])
+ for name in conf.sections:
+ if name not in conf.extra_values:
+ out.extend(get_extra_values(conf[name], _prepend + (name,)))
+ return out
+
+
+"""*A programming language is a medium of expression.* - Paul Graham"""
diff --git a/src/configobj/_version.py b/src/configobj/_version.py
new file mode 100644
index 0000000..6d01371
--- /dev/null
+++ b/src/configobj/_version.py
@@ -0,0 +1 @@
+__version__ = '5.0.6'
\ No newline at end of file
diff --git a/src/configobj/validate.py b/src/configobj/validate.py
new file mode 100644
index 0000000..b7a964c
--- /dev/null
+++ b/src/configobj/validate.py
@@ -0,0 +1,1472 @@
+# validate.py
+# A Validator object
+# Copyright (C) 2005-2014:
+# (name) : (email)
+# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
+# Mark Andrews: mark AT la-la DOT com
+# Nicola Larosa: nico AT tekNico DOT net
+# Rob Dennis: rdennis AT gmail DOT com
+# Eli Courtwright: eli AT courtwright DOT org
+
+# This software is licensed under the terms of the BSD license.
+# http://opensource.org/licenses/BSD-3-Clause
+
+# ConfigObj 5 - main repository for documentation and issue tracking:
+# https://github.com/DiffSK/configobj
+
+"""
+ The Validator object is used to check that supplied values
+ conform to a specification.
+
+ The value can be supplied as a string - e.g. from a config file.
+ In this case the check will also *convert* the value to
+ the required type. This allows you to add validation
+ as a transparent layer to access data stored as strings.
+ The validation checks that the data is correct *and*
+ converts it to the expected type.
+
+ Some standard checks are provided for basic data types.
+ Additional checks are easy to write. They can be
+ provided when the ``Validator`` is instantiated or
+ added afterwards.
+
+ The standard functions work with the following basic data types :
+
+ * integers
+ * floats
+ * booleans
+ * strings
+ * ip_addr
+
+ plus lists of these datatypes
+
+ Adding additional checks is done through coding simple functions.
+
+ The full set of standard checks are :
+
+ * 'integer': matches integer values (including negative)
+ Takes optional 'min' and 'max' arguments : ::
+
+ integer()
+ integer(3, 9) # any value from 3 to 9
+ integer(min=0) # any positive value
+ integer(max=9)
+
+ * 'float': matches float values
+ Has the same parameters as the integer check.
+
+ * 'boolean': matches boolean values - ``True`` or ``False``
+ Acceptable string values for True are :
+ true, on, yes, 1
+ Acceptable string values for False are :
+ false, off, no, 0
+
+ Any other value raises an error.
+
+ * 'ip_addr': matches an Internet Protocol address, v.4, represented
+ by a dotted-quad string, i.e. '1.2.3.4'.
+
+ * 'string': matches any string.
+ Takes optional keyword args 'min' and 'max'
+ to specify min and max lengths of the string.
+
+ * 'list': matches any list.
+ Takes optional keyword args 'min', and 'max' to specify min and
+ max sizes of the list. (Always returns a list.)
+
+ * 'tuple': matches any tuple.
+ Takes optional keyword args 'min', and 'max' to specify min and
+ max sizes of the tuple. (Always returns a tuple.)
+
+ * 'int_list': Matches a list of integers.
+ Takes the same arguments as list.
+
+ * 'float_list': Matches a list of floats.
+ Takes the same arguments as list.
+
+ * 'bool_list': Matches a list of boolean values.
+ Takes the same arguments as list.
+
+ * 'ip_addr_list': Matches a list of IP addresses.
+ Takes the same arguments as list.
+
+ * 'string_list': Matches a list of strings.
+ Takes the same arguments as list.
+
+ * 'mixed_list': Matches a list with different types in
+ specific positions. List size must match
+ the number of arguments.
+
+ Each position can be one of :
+ 'integer', 'float', 'ip_addr', 'string', 'boolean'
+
+ So to specify a list with two strings followed
+ by two integers, you write the check as : ::
+
+ mixed_list('string', 'string', 'integer', 'integer')
+
+ * 'pass': This check matches everything ! It never fails
+ and the value is unchanged.
+
+ It is also the default if no check is specified.
+
+ * 'option': This check matches any from a list of options.
+ You specify this check with : ::
+
+ option('option 1', 'option 2', 'option 3')
+
+ You can supply a default value (returned if no value is supplied)
+ using the default keyword argument.
+
+ You specify a list argument for default using a list constructor syntax in
+ the check : ::
+
+ checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
+
+ A badly formatted set of arguments will raise a ``VdtParamError``.
+"""
+
+__version__ = '1.0.1'
+
+
+__all__ = (
+ '__version__',
+ 'dottedQuadToNum',
+ 'numToDottedQuad',
+ 'ValidateError',
+ 'VdtUnknownCheckError',
+ 'VdtParamError',
+ 'VdtTypeError',
+ 'VdtValueError',
+ 'VdtValueTooSmallError',
+ 'VdtValueTooBigError',
+ 'VdtValueTooShortError',
+ 'VdtValueTooLongError',
+ 'VdtMissingValue',
+ 'Validator',
+ 'is_integer',
+ 'is_float',
+ 'is_boolean',
+ 'is_list',
+ 'is_tuple',
+ 'is_ip_addr',
+ 'is_string',
+ 'is_int_list',
+ 'is_bool_list',
+ 'is_float_list',
+ 'is_string_list',
+ 'is_ip_addr_list',
+ 'is_mixed_list',
+ 'is_option',
+ '__docformat__',
+)
+
+
+import re
+import sys
+from pprint import pprint
+
+#TODO - #21 - six is part of the repo now, but we didn't switch over to it here
+# this could be replaced if six is used for compatibility, or there are no
+# more assertions about items being a string
+if sys.version_info < (3,):
+ string_type = basestring
+else:
+ string_type = str
+ # so tests that care about unicode on 2.x can specify unicode, and the same
+ # tests when run on 3.x won't complain about an undefined name "unicode"
+ # since all strings are unicode on 3.x we just want to pass it through
+ # unchanged
+ unicode = lambda x: x
+ # in python 3, all ints are equivalent to python 2 longs, and they'll
+ # never show "L" in the repr
+ long = int
+
+_list_arg = re.compile(r'''
+ (?:
+ ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
+ (
+ (?:
+ \s*
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s\)][^,\)]*?) # unquoted
+ )
+ \s*,\s*
+ )*
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s\)][^,\)]*?) # unquoted
+ )? # last one
+ )
+ \)
+ )
+''', re.VERBOSE | re.DOTALL) # two groups
+
+_list_members = re.compile(r'''
+ (
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s=][^,=]*?) # unquoted
+ )
+ (?:
+ (?:\s*,\s*)|(?:\s*$) # comma
+ )
+''', re.VERBOSE | re.DOTALL) # one group
+
+_paramstring = r'''
+ (?:
+ (
+ (?:
+ [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
+ (?:
+ \s*
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s\)][^,\)]*?) # unquoted
+ )
+ \s*,\s*
+ )*
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s\)][^,\)]*?) # unquoted
+ )? # last one
+ \)
+ )|
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s=][^,=]*?)| # unquoted
+ (?: # keyword argument
+ [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
+ (?:
+ (?:".*?")| # double quotes
+ (?:'.*?')| # single quotes
+ (?:[^'",\s=][^,=]*?) # unquoted
+ )
+ )
+ )
+ )
+ (?:
+ (?:\s*,\s*)|(?:\s*$) # comma
+ )
+ )
+ '''
+
+_matchstring = '^%s*' % _paramstring
+
+# Python pre 2.2.1 doesn't have bool
+try:
+ bool
+except NameError:
+ def bool(val):
+ """Simple boolean equivalent function. """
+ if val:
+ return 1
+ else:
+ return 0
+
+
+def dottedQuadToNum(ip):
+ """
+ Convert decimal dotted quad string to long integer
+
+ >>> int(dottedQuadToNum('1 '))
+ 1
+ >>> int(dottedQuadToNum(' 1.2'))
+ 16777218
+ >>> int(dottedQuadToNum(' 1.2.3 '))
+ 16908291
+ >>> int(dottedQuadToNum('1.2.3.4'))
+ 16909060
+ >>> dottedQuadToNum('255.255.255.255')
+ 4294967295
+ >>> dottedQuadToNum('255.255.255.256')
+ Traceback (most recent call last):
+ ValueError: Not a good dotted-quad IP: 255.255.255.256
+ """
+
+ # import here to avoid it when ip_addr values are not used
+ import socket, struct
+
+ try:
+ return struct.unpack('!L',
+ socket.inet_aton(ip.strip()))[0]
+ except socket.error:
+ raise ValueError('Not a good dotted-quad IP: %s' % ip)
+ return
+
+
+def numToDottedQuad(num):
+ """
+ Convert int or long int to dotted quad string
+
+ >>> numToDottedQuad(long(-1))
+ Traceback (most recent call last):
+ ValueError: Not a good numeric IP: -1
+ >>> numToDottedQuad(long(1))
+ '0.0.0.1'
+ >>> numToDottedQuad(long(16777218))
+ '1.0.0.2'
+ >>> numToDottedQuad(long(16908291))
+ '1.2.0.3'
+ >>> numToDottedQuad(long(16909060))
+ '1.2.3.4'
+ >>> numToDottedQuad(long(4294967295))
+ '255.255.255.255'
+ >>> numToDottedQuad(long(4294967296))
+ Traceback (most recent call last):
+ ValueError: Not a good numeric IP: 4294967296
+ >>> numToDottedQuad(-1)
+ Traceback (most recent call last):
+ ValueError: Not a good numeric IP: -1
+ >>> numToDottedQuad(1)
+ '0.0.0.1'
+ >>> numToDottedQuad(16777218)
+ '1.0.0.2'
+ >>> numToDottedQuad(16908291)
+ '1.2.0.3'
+ >>> numToDottedQuad(16909060)
+ '1.2.3.4'
+ >>> numToDottedQuad(4294967295)
+ '255.255.255.255'
+ >>> numToDottedQuad(4294967296)
+ Traceback (most recent call last):
+ ValueError: Not a good numeric IP: 4294967296
+
+ """
+
+ # import here to avoid it when ip_addr values are not used
+ import socket, struct
+
+ # no need to intercept here, 4294967295L is fine
+ if num > long(4294967295) or num < 0:
+ raise ValueError('Not a good numeric IP: %s' % num)
+ try:
+ return socket.inet_ntoa(
+ struct.pack('!L', long(num)))
+ except (socket.error, struct.error, OverflowError):
+ raise ValueError('Not a good numeric IP: %s' % num)
+
+
+class ValidateError(Exception):
+ """
+ This error indicates that the check failed.
+ It can be the base class for more specific errors.
+
+ Any check function that fails ought to raise this error.
+ (or a subclass)
+
+ >>> raise ValidateError
+ Traceback (most recent call last):
+ ValidateError
+ """
+
+
+class VdtMissingValue(ValidateError):
+ """No value was supplied to a check that needed one."""
+
+
+class VdtUnknownCheckError(ValidateError):
+ """An unknown check function was requested"""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtUnknownCheckError('yoda')
+ Traceback (most recent call last):
+ VdtUnknownCheckError: the check "yoda" is unknown.
+ """
+ ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
+
+
+class VdtParamError(SyntaxError):
+ """An incorrect parameter was passed"""
+
+ def __init__(self, name, value):
+ """
+ >>> raise VdtParamError('yoda', 'jedi')
+ Traceback (most recent call last):
+ VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
+ """
+ SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
+
+
+class VdtTypeError(ValidateError):
+ """The value supplied was of the wrong type"""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtTypeError('jedi')
+ Traceback (most recent call last):
+ VdtTypeError: the value "jedi" is of the wrong type.
+ """
+ ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
+
+
+class VdtValueError(ValidateError):
+ """The value supplied was of the correct type, but was not an allowed value."""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtValueError('jedi')
+ Traceback (most recent call last):
+ VdtValueError: the value "jedi" is unacceptable.
+ """
+ ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
+
+
+class VdtValueTooSmallError(VdtValueError):
+ """The value supplied was of the correct type, but was too small."""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtValueTooSmallError('0')
+ Traceback (most recent call last):
+ VdtValueTooSmallError: the value "0" is too small.
+ """
+ ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
+
+
+class VdtValueTooBigError(VdtValueError):
+ """The value supplied was of the correct type, but was too big."""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtValueTooBigError('1')
+ Traceback (most recent call last):
+ VdtValueTooBigError: the value "1" is too big.
+ """
+ ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
+
+
+class VdtValueTooShortError(VdtValueError):
+ """The value supplied was of the correct type, but was too short."""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtValueTooShortError('jed')
+ Traceback (most recent call last):
+ VdtValueTooShortError: the value "jed" is too short.
+ """
+ ValidateError.__init__(
+ self,
+ 'the value "%s" is too short.' % (value,))
+
+
+class VdtValueTooLongError(VdtValueError):
+ """The value supplied was of the correct type, but was too long."""
+
+ def __init__(self, value):
+ """
+ >>> raise VdtValueTooLongError('jedie')
+ Traceback (most recent call last):
+ VdtValueTooLongError: the value "jedie" is too long.
+ """
+ ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
+
+
+class Validator(object):
+ """
+ Validator is an object that allows you to register a set of 'checks'.
+ These checks take input and test that it conforms to the check.
+
+ This can also involve converting the value from a string into
+ the correct datatype.
+
+ The ``check`` method takes an input string which configures which
+ check is to be used and applies that check to a supplied value.
+
+ An example input string would be:
+ 'int_range(param1, param2)'
+
+ You would then provide something like:
+
+ >>> def int_range_check(value, min, max):
+ ... # turn min and max from strings to integers
+ ... min = int(min)
+ ... max = int(max)
+ ... # check that value is of the correct type.
+ ... # possible valid inputs are integers or strings
+ ... # that represent integers
+ ... if not isinstance(value, (int, long, string_type)):
+ ... raise VdtTypeError(value)
+ ... elif isinstance(value, string_type):
+ ... # if we are given a string
+ ... # attempt to convert to an integer
+ ... try:
+ ... value = int(value)
+ ... except ValueError:
+ ... raise VdtValueError(value)
+ ... # check the value is between our constraints
+ ... if not min <= value:
+ ... raise VdtValueTooSmallError(value)
+ ... if not value <= max:
+ ... raise VdtValueTooBigError(value)
+ ... return value
+
+ >>> fdict = {'int_range': int_range_check}
+ >>> vtr1 = Validator(fdict)
+ >>> vtr1.check('int_range(20, 40)', '30')
+ 30
+ >>> vtr1.check('int_range(20, 40)', '60')
+ Traceback (most recent call last):
+ VdtValueTooBigError: the value "60" is too big.
+
+ New functions can be added with : ::
+
+ >>> vtr2 = Validator()
+ >>> vtr2.functions['int_range'] = int_range_check
+
+ Or by passing in a dictionary of functions when Validator
+ is instantiated.
+
+ Your functions *can* use keyword arguments,
+ but the first argument should always be 'value'.
+
+ If the function doesn't take additional arguments,
+ the parentheses are optional in the check.
+ It can be written with either of : ::
+
+ keyword = function_name
+ keyword = function_name()
+
+ The first program to utilise Validator() was Michael Foord's
+ ConfigObj, an alternative to ConfigParser which supports lists and
+ can validate a config file using a config schema.
+ For more details on using Validator with ConfigObj see:
+ https://configobj.readthedocs.org/en/latest/configobj.html
+ """
+
+ # this regex does the initial parsing of the checks
+ _func_re = re.compile(r'(.+?)\((.*)\)', re.DOTALL)
+
+ # this regex takes apart keyword arguments
+ _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$', re.DOTALL)
+
+
+ # this regex finds keyword=list(....) type values
+ _list_arg = _list_arg
+
+ # this regex takes individual values out of lists - in one pass
+ _list_members = _list_members
+
+ # These regexes check a set of arguments for validity
+ # and then pull the members out
+ _paramfinder = re.compile(_paramstring, re.VERBOSE | re.DOTALL)
+ _matchfinder = re.compile(_matchstring, re.VERBOSE | re.DOTALL)
+
+
+ def __init__(self, functions=None):
+ """
+ >>> vtri = Validator()
+ """
+ self.functions = {
+ '': self._pass,
+ 'integer': is_integer,
+ 'float': is_float,
+ 'boolean': is_boolean,
+ 'ip_addr': is_ip_addr,
+ 'string': is_string,
+ 'list': is_list,
+ 'tuple': is_tuple,
+ 'int_list': is_int_list,
+ 'float_list': is_float_list,
+ 'bool_list': is_bool_list,
+ 'ip_addr_list': is_ip_addr_list,
+ 'string_list': is_string_list,
+ 'mixed_list': is_mixed_list,
+ 'pass': self._pass,
+ 'option': is_option,
+ 'force_list': force_list,
+ }
+ if functions is not None:
+ self.functions.update(functions)
+ # tekNico: for use by ConfigObj
+ self.baseErrorClass = ValidateError
+ self._cache = {}
+
+
+ def check(self, check, value, missing=False):
+ """
+ Usage: check(check, value)
+
+ Arguments:
+ check: string representing check to apply (including arguments)
+ value: object to be checked
+ Returns value, converted to correct type if necessary
+
+ If the check fails, raises a ``ValidateError`` subclass.
+
+ >>> vtor.check('yoda', '')
+ Traceback (most recent call last):
+ VdtUnknownCheckError: the check "yoda" is unknown.
+ >>> vtor.check('yoda()', '')
+ Traceback (most recent call last):
+ VdtUnknownCheckError: the check "yoda" is unknown.
+
+ >>> vtor.check('string(default="")', '', missing=True)
+ ''
+ """
+ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
+
+ if missing:
+ if default is None:
+ # no information needed here - to be handled by caller
+ raise VdtMissingValue()
+ value = self._handle_none(default)
+
+ if value is None:
+ return None
+
+ return self._check_value(value, fun_name, fun_args, fun_kwargs)
+
+
+ def _handle_none(self, value):
+ if value == 'None':
+ return None
+ elif value in ("'None'", '"None"'):
+ # Special case a quoted None
+ value = self._unquote(value)
+ return value
+
+
+ def _parse_with_caching(self, check):
+ if check in self._cache:
+ fun_name, fun_args, fun_kwargs, default = self._cache[check]
+ # We call list and dict below to work with *copies* of the data
+ # rather than the original (which are mutable of course)
+ fun_args = list(fun_args)
+ fun_kwargs = dict(fun_kwargs)
+ else:
+ fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
+ fun_kwargs = dict([(str(key), value) for (key, value) in list(fun_kwargs.items())])
+ self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
+ return fun_name, fun_args, fun_kwargs, default
+
+
+ def _check_value(self, value, fun_name, fun_args, fun_kwargs):
+ try:
+ fun = self.functions[fun_name]
+ except KeyError:
+ raise VdtUnknownCheckError(fun_name)
+ else:
+ return fun(value, *fun_args, **fun_kwargs)
+
+
+ def _parse_check(self, check):
+ fun_match = self._func_re.match(check)
+ if fun_match:
+ fun_name = fun_match.group(1)
+ arg_string = fun_match.group(2)
+ arg_match = self._matchfinder.match(arg_string)
+ if arg_match is None:
+ # Bad syntax
+ raise VdtParamError('Bad syntax in check "%s".' % check)
+ fun_args = []
+ fun_kwargs = {}
+ # pull out args of group 2
+ for arg in self._paramfinder.findall(arg_string):
+ # args may need whitespace removing (before removing quotes)
+ arg = arg.strip()
+ listmatch = self._list_arg.match(arg)
+ if listmatch:
+ key, val = self._list_handle(listmatch)
+ fun_kwargs[key] = val
+ continue
+ keymatch = self._key_arg.match(arg)
+ if keymatch:
+ val = keymatch.group(2)
+ if not val in ("'None'", '"None"'):
+ # Special case a quoted None
+ val = self._unquote(val)
+ fun_kwargs[keymatch.group(1)] = val
+ continue
+
+ fun_args.append(self._unquote(arg))
+ else:
+ # allows for function names without (args)
+ return check, (), {}, None
+
+ # Default must be deleted if the value is specified too,
+ # otherwise the check function will get a spurious "default" keyword arg
+ default = fun_kwargs.pop('default', None)
+ return fun_name, fun_args, fun_kwargs, default
+
+
+ def _unquote(self, val):
+ """Unquote a value if necessary."""
+ if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
+ val = val[1:-1]
+ return val
+
+
+ def _list_handle(self, listmatch):
+ """Take apart a ``keyword=list('val, 'val')`` type string."""
+ out = []
+ name = listmatch.group(1)
+ args = listmatch.group(2)
+ for arg in self._list_members.findall(args):
+ out.append(self._unquote(arg))
+ return name, out
+
+
+ def _pass(self, value):
+ """
+ Dummy check that always passes
+
+ >>> vtor.check('', 0)
+ 0
+ >>> vtor.check('', '0')
+ '0'
+ """
+ return value
+
+
+ def get_default_value(self, check):
+ """
+ Given a check, return the default value for the check
+ (converted to the right type).
+
+ If the check doesn't specify a default value then a
+ ``KeyError`` will be raised.
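+
+ For example (mirroring the doctests further down)::
+
+ v = Validator()
+ v.get_default_value('integer(default=6)') # -> 6
+ v.get_default_value('string(min=4)') # -> raises KeyError (no default)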
+ """
+ fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
+ if default is None:
+ raise KeyError('Check "%s" has no default value.' % check)
+ value = self._handle_none(default)
+ if value is None:
+ return value
+ return self._check_value(value, fun_name, fun_args, fun_kwargs)
+
+
+def _is_num_param(names, values, to_float=False):
+ """
+ Return numbers from inputs or raise VdtParamError.
+
+ Lets ``None`` pass through.
+ Pass in keyword argument ``to_float=True`` to
+ use float for the conversion rather than int.
+
+ >>> _is_num_param(('', ''), (0, 1.0))
+ [0, 1]
+ >>> _is_num_param(('', ''), (0, 1.0), to_float=True)
+ [0.0, 1.0]
+ >>> _is_num_param(('a'), ('a'))
+ Traceback (most recent call last):
+ VdtParamError: passed an incorrect value "a" for parameter "a".
+ """
+ fun = to_float and float or int
+ out_params = []
+ for (name, val) in zip(names, values):
+ if val is None:
+ out_params.append(val)
+ elif isinstance(val, (int, long, float, string_type)):
+ try:
+ out_params.append(fun(val))
+ except ValueError as e:
+ raise VdtParamError(name, val)
+ else:
+ raise VdtParamError(name, val)
+ return out_params
+
+
+# built in checks
+# you can override these by setting the appropriate name
+# in Validator.functions
+# note: if the params are specified wrongly in your input string,
+# errors will also be raised.
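+# For example (a sketch - ``my_integer_check`` is a hypothetical replacement
+# with the same signature as ``is_integer``):
+# Validator({'integer': my_integer_check})
+# or, after construction: vtor.functions['integer'] = my_integer_check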
+
+def is_integer(value, min=None, max=None):
+ """
+ A check that tests that a given value is an integer (int or long)
+ and, optionally, that it is between bounds. A negative value is
+ accepted, while a float will fail.
+
+ If the value is a string, then the conversion is done - if possible.
+ Otherwise a VdtError is raised.
+
+ >>> vtor.check('integer', '-1')
+ -1
+ >>> vtor.check('integer', '0')
+ 0
+ >>> vtor.check('integer', 9)
+ 9
+ >>> vtor.check('integer', 'a')
+ Traceback (most recent call last):
+ VdtTypeError: the value "a" is of the wrong type.
+ >>> vtor.check('integer', '2.2')
+ Traceback (most recent call last):
+ VdtTypeError: the value "2.2" is of the wrong type.
+ >>> vtor.check('integer(10)', '20')
+ 20
+ >>> vtor.check('integer(max=20)', '15')
+ 15
+ >>> vtor.check('integer(10)', '9')
+ Traceback (most recent call last):
+ VdtValueTooSmallError: the value "9" is too small.
+ >>> vtor.check('integer(10)', 9)
+ Traceback (most recent call last):
+ VdtValueTooSmallError: the value "9" is too small.
+ >>> vtor.check('integer(max=20)', '35')
+ Traceback (most recent call last):
+ VdtValueTooBigError: the value "35" is too big.
+ >>> vtor.check('integer(max=20)', 35)
+ Traceback (most recent call last):
+ VdtValueTooBigError: the value "35" is too big.
+ >>> vtor.check('integer(0, 9)', False)
+ 0
+ """
+ (min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
+ if not isinstance(value, (int, long, string_type)):
+ raise VdtTypeError(value)
+ if isinstance(value, string_type):
+ # if it's a string - does it represent an integer ?
+ try:
+ value = int(value)
+ except ValueError:
+ raise VdtTypeError(value)
+ if (min_val is not None) and (value < min_val):
+ raise VdtValueTooSmallError(value)
+ if (max_val is not None) and (value > max_val):
+ raise VdtValueTooBigError(value)
+ return value
+
+
+def is_float(value, min=None, max=None):
+ """
+ A check that tests that a given value is a float
+ (an integer will be accepted) and, optionally, that it is between bounds.
+
+ If the value is a string, then the conversion is done - if possible.
+ Otherwise a VdtError is raised.
+
+ This can accept negative values.
+
+ >>> vtor.check('float', '2')
+ 2.0
+
+ From now on we multiply the value to avoid comparing decimals
+
+ >>> vtor.check('float', '-6.8') * 10
+ -68.0
+ >>> vtor.check('float', '12.2') * 10
+ 122.0
+ >>> vtor.check('float', 8.4) * 10
+ 84.0
+ >>> vtor.check('float', 'a')
+ Traceback (most recent call last):
+ VdtTypeError: the value "a" is of the wrong type.
+ >>> vtor.check('float(10.1)', '10.2') * 10
+ 102.0
+ >>> vtor.check('float(max=20.2)', '15.1') * 10
+ 151.0
+ >>> vtor.check('float(10.0)', '9.0')
+ Traceback (most recent call last):
+ VdtValueTooSmallError: the value "9.0" is too small.
+ >>> vtor.check('float(max=20.0)', '35.0')
+ Traceback (most recent call last):
+ VdtValueTooBigError: the value "35.0" is too big.
+ """
+ (min_val, max_val) = _is_num_param(
+ ('min', 'max'), (min, max), to_float=True)
+ if not isinstance(value, (int, long, float, string_type)):
+ raise VdtTypeError(value)
+ if not isinstance(value, float):
+ # if it's a string - does it represent a float ?
+ try:
+ value = float(value)
+ except ValueError:
+ raise VdtTypeError(value)
+ if (min_val is not None) and (value < min_val):
+ raise VdtValueTooSmallError(value)
+ if (max_val is not None) and (value > max_val):
+ raise VdtValueTooBigError(value)
+ return value
+
+
+bool_dict = {
+ True: True, 'on': True, '1': True, 'true': True, 'yes': True,
+ False: False, 'off': False, '0': False, 'false': False, 'no': False,
+}
+
+
+def is_boolean(value):
+ """
+ Check if the value represents a boolean.
+
+ >>> vtor.check('boolean', 0)
+ 0
+ >>> vtor.check('boolean', False)
+ 0
+ >>> vtor.check('boolean', '0')
+ 0
+ >>> vtor.check('boolean', 'off')
+ 0
+ >>> vtor.check('boolean', 'false')
+ 0
+ >>> vtor.check('boolean', 'no')
+ 0
+ >>> vtor.check('boolean', 'nO')
+ 0
+ >>> vtor.check('boolean', 'NO')
+ 0
+ >>> vtor.check('boolean', 1)
+ 1
+ >>> vtor.check('boolean', True)
+ 1
+ >>> vtor.check('boolean', '1')
+ 1
+ >>> vtor.check('boolean', 'on')
+ 1
+ >>> vtor.check('boolean', 'true')
+ 1
+ >>> vtor.check('boolean', 'yes')
+ 1
+ >>> vtor.check('boolean', 'Yes')
+ 1
+ >>> vtor.check('boolean', 'YES')
+ 1
+ >>> vtor.check('boolean', '')
+ Traceback (most recent call last):
+ VdtTypeError: the value "" is of the wrong type.
+ >>> vtor.check('boolean', 'up')
+ Traceback (most recent call last):
+ VdtTypeError: the value "up" is of the wrong type.
+
+ """
+ if isinstance(value, string_type):
+ try:
+ return bool_dict[value.lower()]
+ except KeyError:
+ raise VdtTypeError(value)
+ # we do an equality test rather than an identity test
+ # this ensures Python 2.2 compatibility
+ # and allows 0 and 1 to represent True and False
+ if value == False:
+ return False
+ elif value == True:
+ return True
+ else:
+ raise VdtTypeError(value)
+
+
+def is_ip_addr(value):
+ """
+ Check that the supplied value is an Internet Protocol address, v.4,
+ represented by a dotted-quad string, i.e. '1.2.3.4'.
+
+ >>> vtor.check('ip_addr', '1 ')
+ '1'
+ >>> vtor.check('ip_addr', ' 1.2')
+ '1.2'
+ >>> vtor.check('ip_addr', ' 1.2.3 ')
+ '1.2.3'
+ >>> vtor.check('ip_addr', '1.2.3.4')
+ '1.2.3.4'
+ >>> vtor.check('ip_addr', '0.0.0.0')
+ '0.0.0.0'
+ >>> vtor.check('ip_addr', '255.255.255.255')
+ '255.255.255.255'
+ >>> vtor.check('ip_addr', '255.255.255.256')
+ Traceback (most recent call last):
+ VdtValueError: the value "255.255.255.256" is unacceptable.
+ >>> vtor.check('ip_addr', '1.2.3.4.5')
+ Traceback (most recent call last):
+ VdtValueError: the value "1.2.3.4.5" is unacceptable.
+ >>> vtor.check('ip_addr', 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+ """
+ if not isinstance(value, string_type):
+ raise VdtTypeError(value)
+ value = value.strip()
+ try:
+ dottedQuadToNum(value)
+ except ValueError:
+ raise VdtValueError(value)
+ return value
+
+
+def is_list(value, min=None, max=None):
+ """
+ Check that the value is a list of values.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ It does no check on list members.
+
+ >>> vtor.check('list', ())
+ []
+ >>> vtor.check('list', [])
+ []
+ >>> vtor.check('list', (1, 2))
+ [1, 2]
+ >>> vtor.check('list', [1, 2])
+ [1, 2]
+ >>> vtor.check('list(3)', (1, 2))
+ Traceback (most recent call last):
+ VdtValueTooShortError: the value "(1, 2)" is too short.
+ >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
+ Traceback (most recent call last):
+ VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
+ >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
+ [1, 2, 3, 4]
+ >>> vtor.check('list', 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+ >>> vtor.check('list', '12')
+ Traceback (most recent call last):
+ VdtTypeError: the value "12" is of the wrong type.
+ """
+ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
+ if isinstance(value, string_type):
+ raise VdtTypeError(value)
+ try:
+ num_members = len(value)
+ except TypeError:
+ raise VdtTypeError(value)
+ if min_len is not None and num_members < min_len:
+ raise VdtValueTooShortError(value)
+ if max_len is not None and num_members > max_len:
+ raise VdtValueTooLongError(value)
+ return list(value)
+
+
+def is_tuple(value, min=None, max=None):
+ """
+ Check that the value is a tuple of values.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ It does no check on members.
+
+ >>> vtor.check('tuple', ())
+ ()
+ >>> vtor.check('tuple', [])
+ ()
+ >>> vtor.check('tuple', (1, 2))
+ (1, 2)
+ >>> vtor.check('tuple', [1, 2])
+ (1, 2)
+ >>> vtor.check('tuple(3)', (1, 2))
+ Traceback (most recent call last):
+ VdtValueTooShortError: the value "(1, 2)" is too short.
+ >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
+ Traceback (most recent call last):
+ VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
+ >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
+ (1, 2, 3, 4)
+ >>> vtor.check('tuple', 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+ >>> vtor.check('tuple', '12')
+ Traceback (most recent call last):
+ VdtTypeError: the value "12" is of the wrong type.
+ """
+ return tuple(is_list(value, min, max))
+
+
+def is_string(value, min=None, max=None):
+ """
+ Check that the supplied value is a string.
+
+ You can optionally specify the minimum and maximum length of the string.
+
+ >>> vtor.check('string', '0')
+ '0'
+ >>> vtor.check('string', 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+ >>> vtor.check('string(2)', '12')
+ '12'
+ >>> vtor.check('string(2)', '1')
+ Traceback (most recent call last):
+ VdtValueTooShortError: the value "1" is too short.
+ >>> vtor.check('string(min=2, max=3)', '123')
+ '123'
+ >>> vtor.check('string(min=2, max=3)', '1234')
+ Traceback (most recent call last):
+ VdtValueTooLongError: the value "1234" is too long.
+ """
+ if not isinstance(value, string_type):
+ raise VdtTypeError(value)
+ (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
+ try:
+ num_members = len(value)
+ except TypeError:
+ raise VdtTypeError(value)
+ if min_len is not None and num_members < min_len:
+ raise VdtValueTooShortError(value)
+ if max_len is not None and num_members > max_len:
+ raise VdtValueTooLongError(value)
+ return value
+
+
+def is_int_list(value, min=None, max=None):
+ """
+ Check that the value is a list of integers.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ Each list member is checked that it is an integer.
+
+ >>> vtor.check('int_list', ())
+ []
+ >>> vtor.check('int_list', [])
+ []
+ >>> vtor.check('int_list', (1, 2))
+ [1, 2]
+ >>> vtor.check('int_list', [1, 2])
+ [1, 2]
+ >>> vtor.check('int_list', [1, 'a'])
+ Traceback (most recent call last):
+ VdtTypeError: the value "a" is of the wrong type.
+ """
+ return [is_integer(mem) for mem in is_list(value, min, max)]
+
+
+def is_bool_list(value, min=None, max=None):
+ """
+ Check that the value is a list of booleans.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ Each list member is checked that it is a boolean.
+
+ >>> vtor.check('bool_list', ())
+ []
+ >>> vtor.check('bool_list', [])
+ []
+ >>> check_res = vtor.check('bool_list', (True, False))
+ >>> check_res == [True, False]
+ 1
+ >>> check_res = vtor.check('bool_list', [True, False])
+ >>> check_res == [True, False]
+ 1
+ >>> vtor.check('bool_list', [True, 'a'])
+ Traceback (most recent call last):
+ VdtTypeError: the value "a" is of the wrong type.
+ """
+ return [is_boolean(mem) for mem in is_list(value, min, max)]
+
+
+def is_float_list(value, min=None, max=None):
+ """
+ Check that the value is a list of floats.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ Each list member is checked that it is a float.
+
+ >>> vtor.check('float_list', ())
+ []
+ >>> vtor.check('float_list', [])
+ []
+ >>> vtor.check('float_list', (1, 2.0))
+ [1.0, 2.0]
+ >>> vtor.check('float_list', [1, 2.0])
+ [1.0, 2.0]
+ >>> vtor.check('float_list', [1, 'a'])
+ Traceback (most recent call last):
+ VdtTypeError: the value "a" is of the wrong type.
+ """
+ return [is_float(mem) for mem in is_list(value, min, max)]
+
+
+def is_string_list(value, min=None, max=None):
+ """
+ Check that the value is a list of strings.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ Each list member is checked that it is a string.
+
+ >>> vtor.check('string_list', ())
+ []
+ >>> vtor.check('string_list', [])
+ []
+ >>> vtor.check('string_list', ('a', 'b'))
+ ['a', 'b']
+ >>> vtor.check('string_list', ['a', 1])
+ Traceback (most recent call last):
+ VdtTypeError: the value "1" is of the wrong type.
+ >>> vtor.check('string_list', 'hello')
+ Traceback (most recent call last):
+ VdtTypeError: the value "hello" is of the wrong type.
+ """
+ if isinstance(value, string_type):
+ raise VdtTypeError(value)
+ return [is_string(mem) for mem in is_list(value, min, max)]
+
+
+def is_ip_addr_list(value, min=None, max=None):
+ """
+ Check that the value is a list of IP addresses.
+
+ You can optionally specify the minimum and maximum number of members.
+
+ Each list member is checked that it is an IP address.
+
+ >>> vtor.check('ip_addr_list', ())
+ []
+ >>> vtor.check('ip_addr_list', [])
+ []
+ >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
+ ['1.2.3.4', '5.6.7.8']
+ >>> vtor.check('ip_addr_list', ['a'])
+ Traceback (most recent call last):
+ VdtValueError: the value "a" is unacceptable.
+ """
+ return [is_ip_addr(mem) for mem in is_list(value, min, max)]
+
+
+def force_list(value, min=None, max=None):
+ """
+ Check that a value is a list, coercing strings into
+ a list with one member. Useful where users forget the
+ trailing comma that turns a single value into a list.
+
+ You can optionally specify the minimum and maximum number of members.
+ A minimum greater than one will fail if the user only supplies a
+ string.
+
+ >>> vtor.check('force_list', ())
+ []
+ >>> vtor.check('force_list', [])
+ []
+ >>> vtor.check('force_list', 'hello')
+ ['hello']
+ """
+ if not isinstance(value, (list, tuple)):
+ value = [value]
+ return is_list(value, min, max)
+
+
+
+fun_dict = {
+ 'integer': is_integer,
+ 'float': is_float,
+ 'ip_addr': is_ip_addr,
+ 'string': is_string,
+ 'boolean': is_boolean,
+}
+
+
+def is_mixed_list(value, *args):
+ """
+ Check that the value is a list.
+ Allow specifying the type of each member.
+ Work on lists of specific lengths.
+
+ You specify each member as a positional argument specifying its type.
+
+ Each type should be one of the following strings :
+ 'integer', 'float', 'ip_addr', 'string', 'boolean'
+
+ So you can specify a list of two strings, followed by
+ two integers as :
+
+ mixed_list('string', 'string', 'integer', 'integer')
+
+ The length of the list must match the number of positional
+ arguments you supply.
+
+ >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
+ >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
+ >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
+ 1
+ >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
+ >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
+ 1
+ >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
+ Traceback (most recent call last):
+ VdtTypeError: the value "b" is of the wrong type.
+ >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
+ Traceback (most recent call last):
+ VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
+ >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
+ Traceback (most recent call last):
+ VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
+ >>> vtor.check(mix_str, 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+
+ >>> vtor.check('mixed_list("yoda")', ('a'))
+ Traceback (most recent call last):
+ VdtParamError: passed an incorrect value "KeyError('yoda',)" for parameter "'mixed_list'"
+ """
+ try:
+ length = len(value)
+ except TypeError:
+ raise VdtTypeError(value)
+ if length < len(args):
+ raise VdtValueTooShortError(value)
+ elif length > len(args):
+ raise VdtValueTooLongError(value)
+ try:
+ return [fun_dict[arg](val) for arg, val in zip(args, value)]
+ except KeyError as e:
+ raise VdtParamError('mixed_list', e)
+
+
+def is_option(value, *options):
+ """
+ This check matches the value to any of a set of options.
+
+ >>> vtor.check('option("yoda", "jedi")', 'yoda')
+ 'yoda'
+ >>> vtor.check('option("yoda", "jedi")', 'jed')
+ Traceback (most recent call last):
+ VdtValueError: the value "jed" is unacceptable.
+ >>> vtor.check('option("yoda", "jedi")', 0)
+ Traceback (most recent call last):
+ VdtTypeError: the value "0" is of the wrong type.
+ """
+ if not isinstance(value, string_type):
+ raise VdtTypeError(value)
+ if not value in options:
+ raise VdtValueError(value)
+ return value
+
+
+def _test(value, *args, **keywargs):
+ """
+ A function that exists for test purposes.
+
+ >>> checks = [
+ ... '3, 6, min=1, max=3, test=list(a, b, c)',
+ ... '3',
+ ... '3, 6',
+ ... '3,',
+ ... 'min=1, test="a b c"',
+ ... 'min=5, test="a, b, c"',
+ ... 'min=1, max=3, test="a, b, c"',
+ ... 'min=-100, test=-99',
+ ... 'min=1, max=3',
+ ... '3, 6, test="36"',
+ ... '3, 6, test="a, b, c"',
+ ... '3, max=3, test=list("a", "b", "c")',
+ ... '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
+ ... "test='x=fish(3)'",
+ ... ]
+ >>> v = Validator({'test': _test})
+ >>> for entry in checks:
+ ... pprint(v.check(('test(%s)' % entry), 3))
+ (3, ('3', '6'), {'max': '3', 'min': '1', 'test': ['a', 'b', 'c']})
+ (3, ('3',), {})
+ (3, ('3', '6'), {})
+ (3, ('3',), {})
+ (3, (), {'min': '1', 'test': 'a b c'})
+ (3, (), {'min': '5', 'test': 'a, b, c'})
+ (3, (), {'max': '3', 'min': '1', 'test': 'a, b, c'})
+ (3, (), {'min': '-100', 'test': '-99'})
+ (3, (), {'max': '3', 'min': '1'})
+ (3, ('3', '6'), {'test': '36'})
+ (3, ('3', '6'), {'test': 'a, b, c'})
+ (3, ('3',), {'max': '3', 'test': ['a', 'b', 'c']})
+ (3, ('3',), {'max': '3', 'test': ["'a'", 'b', 'x=(c)']})
+ (3, (), {'test': 'x=fish(3)'})
+
+ >>> v = Validator()
+ >>> v.check('integer(default=6)', '3')
+ 3
+ >>> v.check('integer(default=6)', None, True)
+ 6
+ >>> v.get_default_value('integer(default=6)')
+ 6
+ >>> v.get_default_value('float(default=6)')
+ 6.0
+ >>> v.get_default_value('pass(default=None)')
+ >>> v.get_default_value("string(default='None')")
+ 'None'
+ >>> v.get_default_value('pass')
+ Traceback (most recent call last):
+ KeyError: 'Check "pass" has no default value.'
+ >>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
+ ['1', '2', '3', '4']
+
+ >>> v = Validator()
+ >>> v.check("pass(default=None)", None, True)
+ >>> v.check("pass(default='None')", None, True)
+ 'None'
+ >>> v.check('pass(default="None")', None, True)
+ 'None'
+ >>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
+ ['1', '2', '3', '4']
+
+ Bug test for unicode arguments
+ >>> v = Validator()
+ >>> v.check(unicode('string(min=4)'), unicode('test')) == unicode('test')
+ True
+
+ >>> v = Validator()
+ >>> v.get_default_value(unicode('string(min=4, default="1234")')) == unicode('1234')
+ True
+ >>> v.check(unicode('string(min=4, default="1234")'), unicode('test')) == unicode('test')
+ True
+
+ >>> v = Validator()
+ >>> default = v.get_default_value('string(default=None)')
+ >>> default == None
+ 1
+ """
+ return (value, args, keywargs)
+
+
+def _test2():
+ """
+ >>>
+ >>> v = Validator()
+ >>> v.get_default_value('string(default="#ff00dd")')
+ '#ff00dd'
+ >>> v.get_default_value('integer(default=3) # comment')
+ 3
+ """
+
+def _test3():
+ r"""
+ >>> vtor.check('string(default="")', '', missing=True)
+ ''
+ >>> vtor.check('string(default="\n")', '', missing=True)
+ '\n'
+ >>> print(vtor.check('string(default="\n")', '', missing=True))
+ <BLANKLINE>
+ <BLANKLINE>
+ >>> vtor.check('string()', '\n')
+ '\n'
+ >>> vtor.check('string(default="\n\n\n")', '', missing=True)
+ '\n\n\n'
+ >>> vtor.check('string()', 'random \n text goes here\n\n')
+ 'random \n text goes here\n\n'
+ >>> vtor.check('string(default=" \nrandom text\ngoes \n here\n\n ")',
+ ... '', missing=True)
+ ' \nrandom text\ngoes \n here\n\n '
+ >>> vtor.check("string(default='\n\n\n')", '', missing=True)
+ '\n\n\n'
+ >>> vtor.check("option('\n','a','b',default='\n')", '', missing=True)
+ '\n'
+ >>> vtor.check("string_list()", ['foo', '\n', 'bar'])
+ ['foo', '\n', 'bar']
+ >>> vtor.check("string_list(default=list('\n'))", '', missing=True)
+ ['\n']
+ """
+
+
+if __name__ == '__main__':
+ # run the code tests in doctest format
+ import sys
+ import doctest
+ m = sys.modules.get('__main__')
+ globs = m.__dict__.copy()
+ globs.update({
+ 'vtor': Validator(),
+ })
+
+ failures, tests = doctest.testmod(
+ m, globs=globs,
+ optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
+ assert not failures, '{} failures out of {} tests'.format(failures, tests)
diff --git a/src/tests/__init__.py b/src/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/tests/__init__.py
diff --git a/src/tests/conf.ini b/src/tests/conf.ini
new file mode 100644
index 0000000..aa429ff
--- /dev/null
+++ b/src/tests/conf.ini
@@ -0,0 +1,10 @@
+
+extra = 3
+
+[extra-section]
+
+[section]
+ [[sub-section]]
+ extra = 3
+ [[extra-sub-section]]
+ extra = 3
diff --git a/src/tests/conf.spec b/src/tests/conf.spec
new file mode 100644
index 0000000..3af70ac
--- /dev/null
+++ b/src/tests/conf.spec
@@ -0,0 +1,13 @@
+
+value = integer
+
+[section]
+ value = integer
+
+ [[sub-section]]
+ value = integer
+ [[missing-subsection]]
+ value = integer
+
+[missing-section]
+ value = integer
diff --git a/src/tests/configobj_doctests.py b/src/tests/configobj_doctests.py
new file mode 100644
index 0000000..ab9e04d
--- /dev/null
+++ b/src/tests/configobj_doctests.py
@@ -0,0 +1,986 @@
+# configobj_test.py
+# doctests for ConfigObj
+# A config file reader/writer that supports nested sections in config files.
+# Copyright (C) 2005-2014:
+# (name) : (email)
+# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
+# Nicola Larosa: nico AT tekNico DOT net
+# Rob Dennis: rdennis AT gmail DOT com
+# Eli Courtwright: eli AT courtwright DOT org
+
+# This software is licensed under the terms of the BSD license.
+# http://opensource.org/licenses/BSD-3-Clause
+
+# ConfigObj 5 - main repository for documentation and issue tracking:
+# https://github.com/DiffSK/configobj
+
+import sys
+# StringIO is used to simulate config files during doctests
+if sys.version_info >= (3,):
+ # Python 3.x case (io does exist in 2.7, but better to use the 2.x case):
+ #http://bugs.python.org/issue8025
+ from io import StringIO
+else:
+ # Python 2.x case, explicitly NOT using cStringIO due to unicode edge cases
+ from StringIO import StringIO
+
+import os
+import sys
+INTP_VER = sys.version_info[:2]
+if INTP_VER < (2, 2):
+ raise RuntimeError("Python v.2.2 or later needed")
+
+from codecs import BOM_UTF8
+
+from configobj import *
+from configobj.validate import Validator, VdtValueTooSmallError
+
+
+def _test_validate():
+ """
+ >>> val = Validator()
+
+ >>> a = ['foo = fish']
+ >>> b = ['foo = integer(default=3)']
+ >>> c = ConfigObj(a, configspec=b)
+ >>> c
+ ConfigObj({'foo': 'fish'})
+ >>> from configobj.validate import Validator
+ >>> v = Validator()
+ >>> c.validate(v)
+ 0
+ >>> c.default_values
+ {'foo': 3}
+ >>> c.restore_default('foo')
+ 3
+
+    Now testing with repeated sections: BIG TEST
+
+ >>> repeated_1 = '''
+ ... [dogs]
+ ... [[__many__]] # spec for a dog
+ ... fleas = boolean(default=True)
+ ... tail = option(long, short, default=long)
+ ... name = string(default=rover)
+ ... [[[__many__]]] # spec for a puppy
+ ... name = string(default="son of rover")
+ ... age = float(default=0.0)
+ ... [cats]
+ ... [[__many__]] # spec for a cat
+ ... fleas = boolean(default=True)
+ ... tail = option(long, short, default=short)
+ ... name = string(default=pussy)
+ ... [[[__many__]]] # spec for a kitten
+ ... name = string(default="son of pussy")
+ ... age = float(default=0.0)
+ ... '''.split('\\n')
+ >>> repeated_2 = '''
+ ... [dogs]
+ ...
+ ... # blank dogs with puppies
+ ... # should be filled in by the configspec
+ ... [[dog1]]
+ ... [[[puppy1]]]
+ ... [[[puppy2]]]
+ ... [[[puppy3]]]
+ ... [[dog2]]
+ ... [[[puppy1]]]
+ ... [[[puppy2]]]
+ ... [[[puppy3]]]
+ ... [[dog3]]
+ ... [[[puppy1]]]
+ ... [[[puppy2]]]
+ ... [[[puppy3]]]
+ ... [cats]
+ ...
+ ... # blank cats with kittens
+ ... # should be filled in by the configspec
+ ... [[cat1]]
+ ... [[[kitten1]]]
+ ... [[[kitten2]]]
+ ... [[[kitten3]]]
+ ... [[cat2]]
+ ... [[[kitten1]]]
+ ... [[[kitten2]]]
+ ... [[[kitten3]]]
+ ... [[cat3]]
+ ... [[[kitten1]]]
+ ... [[[kitten2]]]
+ ... [[[kitten3]]]
+ ... '''.split('\\n')
+ >>> repeated_3 = '''
+ ... [dogs]
+ ...
+ ... [[dog1]]
+ ... [[dog2]]
+ ... [[dog3]]
+ ... [cats]
+ ...
+ ... [[cat1]]
+ ... [[cat2]]
+ ... [[cat3]]
+ ... '''.split('\\n')
+ >>> repeated_4 = '''
+ ... [__many__]
+ ...
+ ... name = string(default=Michael)
+ ... age = float(default=0.0)
+ ... sex = option(m, f, default=m)
+ ... '''.split('\\n')
+ >>> repeated_5 = '''
+ ... [cats]
+ ... [[__many__]]
+ ... fleas = boolean(default=True)
+ ... tail = option(long, short, default=short)
+ ... name = string(default=pussy)
+ ... [[[description]]]
+ ... height = float(default=3.3)
+ ... weight = float(default=6)
+ ... [[[[coat]]]]
+ ... fur = option(black, grey, brown, "tortoise shell", default=black)
+ ... condition = integer(0,10, default=5)
+ ... '''.split('\\n')
+ >>> val= Validator()
+ >>> repeater = ConfigObj(repeated_2, configspec=repeated_1)
+ >>> repeater.validate(val)
+ 1
+ >>> repeater == {
+ ... 'dogs': {
+ ... 'dog1': {
+ ... 'fleas': True,
+ ... 'tail': 'long',
+ ... 'name': 'rover',
+ ... 'puppy1': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy2': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy3': {'name': 'son of rover', 'age': 0.0},
+ ... },
+ ... 'dog2': {
+ ... 'fleas': True,
+ ... 'tail': 'long',
+ ... 'name': 'rover',
+ ... 'puppy1': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy2': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy3': {'name': 'son of rover', 'age': 0.0},
+ ... },
+ ... 'dog3': {
+ ... 'fleas': True,
+ ... 'tail': 'long',
+ ... 'name': 'rover',
+ ... 'puppy1': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy2': {'name': 'son of rover', 'age': 0.0},
+ ... 'puppy3': {'name': 'son of rover', 'age': 0.0},
+ ... },
+ ... },
+ ... 'cats': {
+ ... 'cat1': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'kitten1': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten2': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten3': {'name': 'son of pussy', 'age': 0.0},
+ ... },
+ ... 'cat2': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'kitten1': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten2': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten3': {'name': 'son of pussy', 'age': 0.0},
+ ... },
+ ... 'cat3': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'kitten1': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten2': {'name': 'son of pussy', 'age': 0.0},
+ ... 'kitten3': {'name': 'son of pussy', 'age': 0.0},
+ ... },
+ ... },
+ ... }
+ 1
+ >>> repeater = ConfigObj(repeated_3, configspec=repeated_1)
+ >>> repeater.validate(val)
+ 1
+ >>> repeater == {
+ ... 'cats': {
+ ... 'cat1': {'fleas': True, 'tail': 'short', 'name': 'pussy'},
+ ... 'cat2': {'fleas': True, 'tail': 'short', 'name': 'pussy'},
+ ... 'cat3': {'fleas': True, 'tail': 'short', 'name': 'pussy'},
+ ... },
+ ... 'dogs': {
+ ... 'dog1': {'fleas': True, 'tail': 'long', 'name': 'rover'},
+ ... 'dog2': {'fleas': True, 'tail': 'long', 'name': 'rover'},
+ ... 'dog3': {'fleas': True, 'tail': 'long', 'name': 'rover'},
+ ... },
+ ... }
+ 1
+ >>> repeater = ConfigObj(configspec=repeated_4)
+ >>> repeater['Michael'] = {}
+ >>> repeater.validate(val)
+ 1
+ >>> repeater == {
+ ... 'Michael': {'age': 0.0, 'name': 'Michael', 'sex': 'm'},
+ ... }
+ 1
+ >>> repeater = ConfigObj(repeated_3, configspec=repeated_5)
+ >>> repeater == {
+ ... 'dogs': {'dog1': {}, 'dog2': {}, 'dog3': {}},
+ ... 'cats': {'cat1': {}, 'cat2': {}, 'cat3': {}},
+ ... }
+ 1
+ >>> repeater.validate(val)
+ 1
+ >>> repeater == {
+ ... 'dogs': {'dog1': {}, 'dog2': {}, 'dog3': {}},
+ ... 'cats': {
+ ... 'cat1': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'description': {
+ ... 'weight': 6.0,
+ ... 'height': 3.2999999999999998,
+ ... 'coat': {'fur': 'black', 'condition': 5},
+ ... },
+ ... },
+ ... 'cat2': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'description': {
+ ... 'weight': 6.0,
+ ... 'height': 3.2999999999999998,
+ ... 'coat': {'fur': 'black', 'condition': 5},
+ ... },
+ ... },
+ ... 'cat3': {
+ ... 'fleas': True,
+ ... 'tail': 'short',
+ ... 'name': 'pussy',
+ ... 'description': {
+ ... 'weight': 6.0,
+ ... 'height': 3.2999999999999998,
+ ... 'coat': {'fur': 'black', 'condition': 5},
+ ... },
+ ... },
+ ... },
+ ... }
+ 1
+
+ Test that interpolation is preserved for validated string values.
+ Also check that interpolation works in configspecs.
+ >>> t = ConfigObj(configspec=['test = string'])
+ >>> t['DEFAULT'] = {}
+ >>> t['DEFAULT']['def_test'] = 'a'
+ >>> t['test'] = '%(def_test)s'
+ >>> t['test']
+ 'a'
+ >>> v = Validator()
+ >>> t.validate(v)
+ 1
+ >>> t.interpolation = False
+ >>> t
+ ConfigObj({'test': '%(def_test)s', 'DEFAULT': {'def_test': 'a'}})
+ >>> specs = [
+ ... 'interpolated string = string(default="fuzzy-%(man)s")',
+ ... '[DEFAULT]',
+ ... 'man = wuzzy',
+ ... ]
+ >>> c = ConfigObj(configspec=specs)
+ >>> c.validate(v)
+ 1
+ >>> c['interpolated string']
+ 'fuzzy-wuzzy'
+
+ Test SimpleVal
+ >>> val = SimpleVal()
+ >>> config = '''
+ ... test1=40
+ ... test2=hello
+ ... test3=3
+ ... test4=5.0
+ ... [section]
+ ... test1=40
+ ... test2=hello
+ ... test3=3
+ ... test4=5.0
+ ... [[sub section]]
+ ... test1=40
+ ... test2=hello
+ ... test3=3
+ ... test4=5.0
+ ... '''.split('\\n')
+ >>> configspec = '''
+ ... test1=''
+ ... test2=''
+ ... test3=''
+ ... test4=''
+ ... [section]
+ ... test1=''
+ ... test2=''
+ ... test3=''
+ ... test4=''
+ ... [[sub section]]
+ ... test1=''
+ ... test2=''
+ ... test3=''
+ ... test4=''
+ ... '''.split('\\n')
+ >>> o = ConfigObj(config, configspec=configspec)
+ >>> o.validate(val)
+ 1
+ >>> o = ConfigObj(configspec=configspec)
+ >>> o.validate(val)
+ 0
+
+ Test Flatten Errors
+ >>> vtor = Validator()
+ >>> my_ini = '''
+ ... option1 = True
+ ... [section1]
+ ... option1 = True
+ ... [section2]
+ ... another_option = Probably
+ ... [section3]
+ ... another_option = True
+ ... [[section3b]]
+ ... value = 3
+ ... value2 = a
+ ... value3 = 11
+ ... '''
+ >>> my_cfg = '''
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section1]
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section2]
+ ... another_option = boolean()
+ ... [section3]
+ ... another_option = boolean()
+ ... [[section3b]]
+ ... value = integer
+ ... value2 = integer
+ ... value3 = integer(0, 10)
+ ... [[[section3b-sub]]]
+ ... value = string
+ ... [section4]
+ ... another_option = boolean()
+ ... '''
+ >>> cs = my_cfg.split('\\n')
+ >>> ini = my_ini.split('\\n')
+ >>> cfg = ConfigObj(ini, configspec=cs)
+ >>> res = cfg.validate(vtor, preserve_errors=True)
+ >>> errors = []
+ >>> for entry in flatten_errors(cfg, res):
+ ... section_list, key, error = entry
+ ... section_list.insert(0, '[root]')
+ ... if key is not None:
+ ... section_list.append(key)
+ ... section_string = ', '.join(section_list)
+ ... errors.append('%s%s%s' % (section_string, ' = ', error or 'missing'))
+ >>> errors.sort()
+ >>> for entry in errors:
+ ... print(entry)
+ [root], option2 = missing
+ [root], option3 = the value "Bad_value" is of the wrong type.
+ [root], section1, option2 = missing
+ [root], section1, option3 = the value "Bad_value" is of the wrong type.
+ [root], section2, another_option = the value "Probably" is of the wrong type.
+ [root], section3, section3b, section3b-sub = missing
+ [root], section3, section3b, value2 = the value "a" is of the wrong type.
+ [root], section3, section3b, value3 = the value "11" is too big.
+ [root], section4 = missing
+ """
+
+
+def _test_errors():
+ """
+ Test the error messages and objects, in normal mode and unrepr mode.
+ >>> bad_syntax = '''
+ ... key = "value"
+ ... key2 = "value
+ ... '''.splitlines()
+ >>> c = ConfigObj(bad_syntax)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 3.
+ >>> c = ConfigObj(bad_syntax, raise_errors=True)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 3.
+ >>> c = ConfigObj(bad_syntax, raise_errors=True, unrepr=True)
+ Traceback (most recent call last):
+ UnreprError: Parse error in value at line 3.
+ >>> try:
+ ... c = ConfigObj(bad_syntax)
+ ... except Exception as exc:
+ ... e = exc
+ >>> assert(isinstance(e, ConfigObjError))
+ >>> print(e)
+ Parse error in value at line 3.
+ >>> len(e.errors) == 1
+ 1
+ >>> try:
+ ... c = ConfigObj(bad_syntax, unrepr=True)
+ ... except Exception as exc:
+ ... e = exc
+ >>> assert(isinstance(e, ConfigObjError))
+ >>> print(e)
+ Parse error from unrepr-ing value at line 3.
+ >>> len(e.errors) == 1
+ 1
+ >>> the_error = e.errors[0]
+ >>> assert(isinstance(the_error, UnreprError))
+
+ >>> multiple_bad_syntax = '''
+ ... key = "value"
+ ... key2 = "value
+ ... key3 = "value2
+ ... '''.splitlines()
+ >>> try:
+ ... c = ConfigObj(multiple_bad_syntax)
+ ... except ConfigObjError as e:
+ ... str(e)
+ 'Parsing failed with several errors.\\nFirst error at line 3.'
+ >>> c = ConfigObj(multiple_bad_syntax, raise_errors=True)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 3.
+ >>> c = ConfigObj(multiple_bad_syntax, raise_errors=True, unrepr=True)
+ Traceback (most recent call last):
+ UnreprError: Parse error in value at line 3.
+ >>> try:
+ ... c = ConfigObj(multiple_bad_syntax)
+ ... except Exception as exc:
+ ... e = exc
+ >>> assert(isinstance(e, ConfigObjError))
+ >>> print(e)
+ Parsing failed with several errors.
+ First error at line 3.
+ >>> len(e.errors) == 2
+ 1
+ >>> try:
+ ... c = ConfigObj(multiple_bad_syntax, unrepr=True)
+ ... except Exception as exc:
+ ... e = exc
+ >>> assert(isinstance(e, ConfigObjError))
+ >>> print(e)
+ Parsing failed with several errors.
+ First error at line 3.
+ >>> len(e.errors) == 2
+ 1
+ >>> the_error = e.errors[1]
+ >>> assert(isinstance(the_error, UnreprError))
+
+ >>> unknown_name = '''
+ ... key = "value"
+ ... key2 = value
+ ... '''.splitlines()
+ >>> c = ConfigObj(unknown_name)
+ >>> c = ConfigObj(unknown_name, unrepr=True)
+ Traceback (most recent call last):
+ UnreprError: Unknown name or type in value at line 3.
+ >>> c = ConfigObj(unknown_name, raise_errors=True, unrepr=True)
+ Traceback (most recent call last):
+ UnreprError: Unknown name or type in value at line 3.
+ """
+
+
+def _test_validate_with_copy_and_many():
+ """
+ >>> spec = '''
+ ... [section]
+ ... [[__many__]]
+ ... value = string(default='nothing')
+ ... '''
+ >>> config = '''
+ ... [section]
+ ... [[something]]
+ ... '''
+ >>> c = ConfigObj(StringIO(config), configspec=StringIO(spec))
+ >>> v = Validator()
+ >>> r = c.validate(v, copy=True)
+ >>> c['section']['something']['value'] == 'nothing'
+ True
+ """
+
+def _test_configspec_with_hash():
+ """
+ >>> spec = ['stuff = string(default="#ff00dd")']
+ >>> c = ConfigObj(spec, _inspec=True)
+ >>> c['stuff']
+ 'string(default="#ff00dd")'
+ >>> c = ConfigObj(configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> c['stuff']
+ '#ff00dd'
+
+
+ >>> spec = ['stuff = string(default="fish") # wooble']
+ >>> c = ConfigObj(spec, _inspec=True)
+ >>> c['stuff']
+ 'string(default="fish") # wooble'
+ """
+
+def _test_many_check():
+ """
+ >>> spec = ['__many__ = integer()']
+ >>> config = ['a = 6', 'b = 7']
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['a'], int)
+ True
+ >>> isinstance(c['b'], int)
+ True
+
+
+ >>> spec = ['[name]', '__many__ = integer()']
+ >>> config = ['[name]', 'a = 6', 'b = 7']
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['name']['a'], int)
+ True
+ >>> isinstance(c['name']['b'], int)
+ True
+
+
+ >>> spec = ['[__many__]', '__many__ = integer()']
+ >>> config = ['[name]', 'hello = 7', '[thing]', 'fish = 0']
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['name']['hello'], int)
+ True
+ >>> isinstance(c['thing']['fish'], int)
+ True
+
+
+ >>> spec = '''
+ ... ___many___ = integer
+ ... [__many__]
+ ... ___many___ = boolean
+ ... [[__many__]]
+ ... __many__ = float
+ ... '''.splitlines()
+ >>> config = '''
+ ... fish = 8
+ ... buggle = 4
+ ... [hi]
+ ... one = true
+ ... two = false
+ ... [[bye]]
+ ... odd = 3
+ ... whoops = 9.0
+ ... [bye]
+ ... one = true
+ ... two = true
+ ... [[lots]]
+ ... odd = 3
+ ... whoops = 9.0
+ ... '''.splitlines()
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['fish'], int)
+ True
+ >>> isinstance(c['buggle'], int)
+ True
+ >>> c['hi']['one']
+ 1
+ >>> c['hi']['two']
+ 0
+ >>> isinstance(c['hi']['bye']['odd'], float)
+ True
+ >>> isinstance(c['hi']['bye']['whoops'], float)
+ True
+ >>> c['bye']['one']
+ 1
+ >>> c['bye']['two']
+ 1
+ >>> isinstance(c['bye']['lots']['odd'], float)
+ True
+ >>> isinstance(c['bye']['lots']['whoops'], float)
+ True
+
+
+ >>> spec = ['___many___ = integer()']
+ >>> config = ['a = 6', 'b = 7']
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['a'], int)
+ True
+ >>> isinstance(c['b'], int)
+ True
+
+
+ >>> spec = '''
+ ... [__many__]
+ ... [[__many__]]
+ ... __many__ = float
+ ... '''.splitlines()
+ >>> config = '''
+ ... [hi]
+ ... [[bye]]
+ ... odd = 3
+ ... whoops = 9.0
+ ... [bye]
+ ... [[lots]]
+ ... odd = 3
+ ... whoops = 9.0
+ ... '''.splitlines()
+ >>> c = ConfigObj(config, configspec=spec)
+ >>> v = Validator()
+ >>> c.validate(v)
+ 1
+ >>> isinstance(c['hi']['bye']['odd'], float)
+ True
+ >>> isinstance(c['hi']['bye']['whoops'], float)
+ True
+ >>> isinstance(c['bye']['lots']['odd'], float)
+ True
+ >>> isinstance(c['bye']['lots']['whoops'], float)
+ True
+
+ >>> s = ['[dog]', '[[cow]]', 'something = boolean', '[[__many__]]',
+ ... 'fish = integer']
+ >>> c = ['[dog]', '[[cow]]', 'something = true', '[[ob]]',
+ ... 'fish = 3', '[[bo]]', 'fish = 6']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v)
+ 1
+ >>> ini['dog']['cow']['something']
+ 1
+ >>> ini['dog']['ob']['fish']
+ 3
+ >>> ini['dog']['bo']['fish']
+ 6
+
+
+ >>> s = ['[cow]', 'something = boolean', '[__many__]',
+ ... 'fish = integer']
+ >>> c = ['[cow]', 'something = true', '[ob]',
+ ... 'fish = 3', '[bo]', 'fish = 6']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v)
+ 1
+ >>> ini['cow']['something']
+ 1
+ >>> ini['ob']['fish']
+ 3
+ >>> ini['bo']['fish']
+ 6
+ """
+
+
+def _unexpected_validation_errors():
+ """
+    Although the input is nonsensical, we should not crash but instead
+    correctly report the failure to validate.
+
+ # section specified, got scalar
+ >>> from configobj.validate import ValidateError
+ >>> s = ['[cow]', 'something = boolean']
+ >>> c = ['cow = true']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v)
+ 0
+
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> res = ini.validate(v, preserve_errors=True)
+ >>> check = flatten_errors(ini, res)
+ >>> for entry in check:
+ ... isinstance(entry[2], ValidateError)
+ ... print(str(entry[2]))
+ True
+ Section 'cow' was provided as a single value
+
+
+ # scalar specified, got section
+ >>> s = ['something = boolean']
+ >>> c = ['[something]', 'cow = true']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v)
+ 0
+
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> res = ini.validate(v, preserve_errors=True)
+ >>> check = flatten_errors(ini, res)
+ >>> for entry in check:
+ ... isinstance(entry[2], ValidateError)
+ ... print(str(entry[2]))
+ True
+ Value 'something' was provided as a section
+
+ # unexpected section
+ >>> s = []
+ >>> c = ['[cow]', 'dog = true']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v)
+ 1
+
+
+ >>> s = ['[cow]', 'dog = boolean']
+ >>> c = ['[cow]', 'dog = true']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> ini.validate(v, preserve_errors=True)
+ 1
+ """
+
+def _test_pickle():
+ """
+ >>> import pickle
+ >>> s = ['[cow]', 'dog = boolean']
+ >>> c = ['[cow]', 'dog = true']
+ >>> ini = ConfigObj(c, configspec=s)
+ >>> v = Validator()
+ >>> string = pickle.dumps(ini)
+ >>> new = pickle.loads(string)
+ >>> new.validate(v)
+ 1
+ """
+
+def _test_as_list():
+ """
+ >>> a = ConfigObj()
+ >>> a['a'] = 1
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = (1,)
+ >>> a.as_list('a')
+ [1]
+ >>> a['a'] = [1]
+ >>> a.as_list('a')
+ [1]
+ """
+
+def _test_list_interpolation():
+ """
+ >>> c = ConfigObj()
+ >>> c['x'] = 'foo'
+ >>> c['list'] = ['%(x)s', 3]
+ >>> c['list']
+ ['foo', 3]
+ """
+
+def _test_extra_values():
+ """
+ >>> spec = ['[section]']
+ >>> infile = ['bar = 3', '[something]', 'foo = fish', '[section]', 'foo=boo']
+ >>> c = ConfigObj(infile, configspec=spec)
+ >>> c.extra_values
+ []
+ >>> c.extra_values = ['bar', 'gosh', 'what']
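+
+    Validation recomputes ``extra_values`` from scratch, so anything assigned
+    by hand beforehand is simply discarded: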
+ >>> c.validate(Validator())
+ 1
+ >>> c.extra_values
+ ['bar', 'something']
+ >>> c['section'].extra_values
+ ['foo']
+ >>> c['something'].extra_values
+ []
+ """
+
+def _test_reset_and_clear_more():
+ """
+ >>> c = ConfigObj()
+ >>> c.extra_values = ['foo']
+ >>> c.defaults = ['bar']
+ >>> c.default_values = {'bar': 'baz'}
+ >>> c.clear()
+ >>> c.defaults
+ []
+ >>> c.extra_values
+ []
+ >>> c.default_values
+ {'bar': 'baz'}
+ >>> c.extra_values = ['foo']
+ >>> c.defaults = ['bar']
+ >>> c.reset()
+ >>> c.defaults
+ []
+ >>> c.extra_values
+ []
+ >>> c.default_values
+ {}
+ """
+
+def _test_invalid_lists():
+ """
+ >>> v = ['string = val, val2, , val3']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = val, val2,, val3']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = val, val2,,']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = val, ,']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = val, , ']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = ,,']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = ,, ']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = ,foo']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ >>> v = ['string = foo, ']
+ >>> c = ConfigObj(v)
+ >>> c['string']
+ ['foo']
+ >>> v = ['string = foo, "']
+ >>> c = ConfigObj(v)
+ Traceback (most recent call last):
+ ParseError: Parse error in value at line 1.
+ """
+
+def _test_validation_with_preserve_errors():
+ """
+ >>> v = Validator()
+ >>> spec = ['[section]', 'foo = integer']
+ >>> c = ConfigObj(configspec=spec)
+ >>> c.validate(v, preserve_errors=True)
+ {'section': False}
+ >>> c = ConfigObj(['[section]'], configspec=spec)
+ >>> c.validate(v)
+ False
+ >>> c.validate(v, preserve_errors=True)
+ {'section': {'foo': False}}
+ """
+
+
+# test _created on Section
+
+# TODO: Test BOM handling (a rough sketch is included below)
+# TODO: Test error code for badly built multiline values
+# TODO: Test handling of StringIO
+# TODO: Test interpolation with writing
+
+
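+# A rough sketch towards the first TODO above; the expected values are an
+# assumption based on the BOM behaviour that test_configobj.py's
+# test_unicode_handling relies on (a UTF-8 BOM on the first line is stripped
+# and recorded on the ``BOM`` attribute):
+def _test_bom_sketch():
+    """
+    >>> lines = [BOM_UTF8 + b'key = value']
+    >>> c = ConfigObj(lines)
+    >>> c.BOM
+    True
+    >>> c == {'key': 'value'}
+    True
+    """
+
+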
+if __name__ == '__main__':
+ # run the code tests in doctest format
+ #
+ testconfig1 = """\
+ key1= val # comment 1
+ key2= val # comment 2
+ # comment 3
+ [lev1a] # comment 4
+ key1= val # comment 5
+ key2= val # comment 6
+ # comment 7
+ [lev1b] # comment 8
+ key1= val # comment 9
+ key2= val # comment 10
+ # comment 11
+ [[lev2ba]] # comment 12
+ key1= val # comment 13
+ # comment 14
+ [[lev2bb]] # comment 15
+ key1= val # comment 16
+ # comment 17
+ [lev1c] # comment 18
+ # comment 19
+ [[lev2c]] # comment 20
+ # comment 21
+ [[[lev3c]]] # comment 22
+ key1 = val # comment 23"""
+ #
+ testconfig2 = b"""\
+ key1 = 'val1'
+ key2 = "val2"
+ key3 = val3
+ ["section 1"] # comment
+ keys11 = val1
+ keys12 = val2
+ keys13 = val3
+ [section 2]
+ keys21 = val1
+ keys22 = val2
+ keys23 = val3
+
+ [['section 2 sub 1']]
+ fish = 3
+ """
+ #
+ testconfig6 = b'''
+ name1 = """ a single line value """ # comment
+ name2 = \''' another single line value \''' # comment
+ name3 = """ a single line value """
+ name4 = \''' another single line value \'''
+ [ "multi section" ]
+ name1 = """
+ Well, this is a
+ multiline value
+ """
+ name2 = \'''
+ Well, this is a
+ multiline value
+ \'''
+ name3 = """
+ Well, this is a
+ multiline value
+ """ # a comment
+ name4 = \'''
+ Well, this is a
+ multiline value
+ \''' # I guess this is a comment too
+ '''
+ #
+ # these cannot be put among the doctests, because the doctest module
+ # does a string.expandtabs() on all of them, sigh
+ # oneTabCfg = ['[sect]', '\t[[sect]]', '\t\tfoo = bar']
+ # twoTabsCfg = ['[sect]', '\t\t[[sect]]', '\t\t\t\tfoo = bar']
+ # tabsAndSpacesCfg = [b'[sect]', b'\t \t [[sect]]', b'\t \t \t \t foo = bar']
+ #
+ import doctest
+ m = sys.modules.get('__main__')
+ globs = m.__dict__.copy()
+ a = ConfigObj(testconfig1.split('\n'), raise_errors=True)
+ b = ConfigObj(testconfig2.split(b'\n'), raise_errors=True)
+ i = ConfigObj(testconfig6.split(b'\n'), raise_errors=True)
+ globs.update({'INTP_VER': INTP_VER, 'a': a, 'b': b, 'i': i})
+ pre_failures, pre_tests = doctest.testmod(
+ m, globs=globs,
+ optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
+
+ import configobj
+ post_failures, post_tests = doctest.testmod(
+ configobj, globs=globs,
+ optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS)
+ assert not (pre_failures or post_failures), (
+ '{} failures out of {} tests'.format(post_failures + pre_failures,
+ post_tests + pre_tests))
+
+
+# Man alive I prefer unittest ;-)
diff --git a/src/tests/conftest.py b/src/tests/conftest.py
new file mode 100644
index 0000000..33b1bc9
--- /dev/null
+++ b/src/tests/conftest.py
@@ -0,0 +1,14 @@
+# coding=utf-8
+import pytest
+
+from configobj import ConfigObj
+from configobj.validate import Validator
+
+@pytest.fixture
+def empty_cfg():
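+    """An empty ConfigObj for tests that build their config programmatically."""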
+ return ConfigObj()
+
+
+@pytest.fixture
+def val():
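+    """A plain Validator instance for tests that need to validate a config."""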
+ return Validator()
diff --git a/src/tests/test_configobj.py b/src/tests/test_configobj.py
new file mode 100644
index 0000000..b9004fd
--- /dev/null
+++ b/src/tests/test_configobj.py
@@ -0,0 +1,1298 @@
+# coding=utf-8
+from __future__ import unicode_literals
+import os
+import re
+
+from codecs import BOM_UTF8
+from warnings import catch_warnings
+from tempfile import NamedTemporaryFile
+
+import pytest
+import six
+
+import configobj as co
+from configobj import ConfigObj, flatten_errors, ReloadError, DuplicateError, MissingInterpolationOption, InterpolationLoopError, ConfigObjError
+from configobj.validate import Validator, VdtValueTooSmallError
+
+
+def cfg_lines(config_string_representation):
+ """
+ :param config_string_representation: string representation of a config
+ file (typically a triple-quoted string)
+ :type config_string_representation: str or unicode
+ :return: a list of lines of that config. Whitespace on the left will be
+ trimmed based on the indentation level to make it a bit saner to assert
+ content of a particular line
+    :rtype: list of byte strings
+ """
+ lines = config_string_representation.splitlines()
+
+ for idx, line in enumerate(lines):
+ if line.strip():
+ line_no_with_content = idx
+ break
+ else:
+ raise ValueError('no content in provided config file: '
+ '{!r}'.format(config_string_representation))
+
+ first_content = lines[line_no_with_content]
+ if isinstance(first_content, six.binary_type):
+ first_content = first_content.decode('utf-8')
+    ws_chars = len(re.search(r'^(\s*)', first_content).group(1))
+
+ def yield_stringified_line():
+ for line in lines:
+ if isinstance(line, six.binary_type):
+ yield line.decode('utf-8')
+ else:
+ yield line
+
+
+    return [re.sub(r'^\s{0,%s}' % ws_chars, '', line).encode('utf-8')
+ for line in yield_stringified_line()]
+
+
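+# A short illustrative sketch of the helper above (the test name and example
+# values here are illustrative only): cfg_lines dedents relative to the first
+# non-blank line and returns UTF-8 encoded lines.
+def test_cfg_lines_sketch():
+    lines = cfg_lines("""
+        key = value
+        [section]
+        """)
+    # the leading blank line survives; content lines are dedented and encoded
+    assert lines == [b'', b'key = value', b'[section]', b'']
+
+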
+@pytest.fixture
+def cfg_contents(request):
+
+ def make_file_with_contents_and_return_name(config_string_representation):
+ """
+ :param config_string_representation: string representation of a config
+ file (typically a triple-quoted string)
+ :type config_string_representation: str or unicode
+        :return: the name of a temporary file containing that config (the
+        contents are dedented via ``cfg_lines``); the file is deleted again
+        once the requesting test has finished
+        :rtype: str
+ """
+
+ lines = cfg_lines(config_string_representation)
+
+ with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
+ for line in lines:
+ if isinstance(line, six.binary_type):
+ cfg_file.write(line + os.linesep.encode('utf-8'))
+ else:
+ cfg_file.write((line + os.linesep).encode('utf-8'))
+ request.addfinalizer(lambda : os.unlink(cfg_file.name))
+
+ return cfg_file.name
+
+ return make_file_with_contents_and_return_name
+
+
+def test_order_preserved():
+ c = ConfigObj()
+ c['a'] = 1
+ c['b'] = 2
+ c['c'] = 3
+ c['section'] = {}
+ c['section']['a'] = 1
+ c['section']['b'] = 2
+ c['section']['c'] = 3
+ c['section']['section'] = {}
+ c['section']['section2'] = {}
+ c['section']['section3'] = {}
+ c['section2'] = {}
+ c['section3'] = {}
+
+ c2 = ConfigObj(c)
+ assert c2.scalars == ['a', 'b', 'c']
+ assert c2.sections == ['section', 'section2', 'section3']
+ assert c2['section'].scalars == ['a', 'b', 'c']
+ assert c2['section'].sections == ['section', 'section2', 'section3']
+
+ assert c['section'] is not c2['section']
+ assert c['section']['section'] is not c2['section']['section']
+
+
+def test_options_deprecation():
+ with catch_warnings(record=True) as log:
+ ConfigObj(options={})
+
+ # unpack the only member of log
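+    # (if the unpacking fails, the assert in the except clause produces a
+    # clearer failure message than the bare ValueError would)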
+ try:
+ warning, = log
+ except ValueError:
+ assert len(log) == 1
+
+ assert warning.category == DeprecationWarning
+
+
+def test_list_members():
+ c = ConfigObj()
+ c['a'] = []
+ c['a'].append('foo')
+ assert c['a'] == ['foo']
+
+
+def test_list_interpolation_with_pop():
+ c = ConfigObj()
+ c['a'] = []
+ c['a'].append('%(b)s')
+ c['b'] = 'bar'
+ assert c.pop('a') == ['bar']
+
+
+def test_with_default():
+ c = ConfigObj()
+ c['a'] = 3
+
+ assert c.pop('a') == 3
+ assert c.pop('b', 3) == 3
+ with pytest.raises(KeyError):
+ c.pop('c')
+
+
+def test_interpolation_with_section_names(cfg_contents):
+ cfg = cfg_contents("""
+item1 = 1234
+[section]
+ [[item1]]
+ foo='bar'
+ [[DEFAULT]]
+ [[[item1]]]
+ why = would you do this?
+ [[other-subsection]]
+ item2 = '$item1'""")
+ c = ConfigObj(cfg, interpolation='Template')
+
+ # This raises an exception in 4.7.1 and earlier due to the section
+ # being found as the interpolation value
+ repr(c)
+
+
+def test_interoplation_repr():
+ c = ConfigObj(['foo = $bar'], interpolation='Template')
+ c['baz'] = {}
+ c['baz']['spam'] = '%(bar)s'
+
+ # This raises a MissingInterpolationOption exception in 4.7.1 and earlier
+ repr(c)
+
+
+class TestEncoding(object):
+ @pytest.fixture
+ def ant_cfg(self):
+ return """
+ [tags]
+ [[bug]]
+ translated = \U0001f41c
+ """
+
+ #issue #18
+ def test_unicode_conversion_when_encoding_is_set(self, cfg_contents):
+ cfg = cfg_contents(b"test = some string")
+
+ c = ConfigObj(cfg, encoding='utf8')
+
+ if six.PY2:
+ assert not isinstance(c['test'], str)
+ assert isinstance(c['test'], unicode)
+ else:
+ assert isinstance(c['test'], str)
+
+
+ #issue #18
+ def test_no_unicode_conversion_when_encoding_is_omitted(self, cfg_contents):
+ cfg = cfg_contents(b"test = some string")
+
+ c = ConfigObj(cfg)
+ if six.PY2:
+ assert isinstance(c['test'], str)
+ assert not isinstance(c['test'], unicode)
+ else:
+ assert isinstance(c['test'], str)
+
+ #issue #44
+ def test_that_encoding_using_list_of_strings(self):
+ cfg = [b'test = \xf0\x9f\x90\x9c']
+
+ c = ConfigObj(cfg, encoding='utf8')
+
+ if six.PY2:
+ assert isinstance(c['test'], unicode)
+ assert not isinstance(c['test'], str)
+ else:
+ assert isinstance(c['test'], str)
+
+ assert c['test'] == '\U0001f41c'
+
+ #issue #44
+ def test_encoding_in_subsections(self, ant_cfg, cfg_contents):
+ c = cfg_contents(ant_cfg)
+ cfg = ConfigObj(c, encoding='utf-8')
+
+ assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
+
+ #issue #44 and #55
+ def test_encoding_in_config_files(self, request, ant_cfg):
+ # the cfg_contents fixture is doing this too, but be explicit
+ with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
+ cfg_file.write(ant_cfg.encode('utf-8'))
+ request.addfinalizer(lambda : os.unlink(cfg_file.name))
+
+ cfg = ConfigObj(cfg_file.name, encoding='utf-8')
+ assert isinstance(cfg['tags']['bug']['translated'], six.text_type)
+ cfg.write()
+
+@pytest.fixture
+def testconfig1():
+ """
+ copied from the main doctest
+ """
+ return """\
+ key1= val # comment 1
+ key2= val # comment 2
+ # comment 3
+ [lev1a] # comment 4
+ key1= val # comment 5
+ key2= val # comment 6
+ # comment 7
+ [lev1b] # comment 8
+ key1= val # comment 9
+ key2= val # comment 10
+ # comment 11
+ [[lev2ba]] # comment 12
+ key1= val # comment 13
+ # comment 14
+ [[lev2bb]] # comment 15
+ key1= val # comment 16
+ # comment 17
+ [lev1c] # comment 18
+ # comment 19
+ [[lev2c]] # comment 20
+ # comment 21
+ [[[lev3c]]] # comment 22
+ key1 = val # comment 23"""
+
+
+@pytest.fixture
+def testconfig2():
+ return """\
+ key1 = 'val1'
+ key2 = "val2"
+ key3 = val3
+ ["section 1"] # comment
+ keys11 = val1
+ keys12 = val2
+ keys13 = val3
+ [section 2]
+ keys21 = val1
+ keys22 = val2
+ keys23 = val3
+
+ [['section 2 sub 1']]
+ fish = 3
+ """
+
+
+@pytest.fixture
+def testconfig6():
+ return b'''
+ name1 = """ a single line value """ # comment
+ name2 = \''' another single line value \''' # comment
+ name3 = """ a single line value """
+ name4 = \''' another single line value \'''
+ [ "multi section" ]
+ name1 = """
+ Well, this is a
+ multiline value
+ """
+ name2 = \'''
+ Well, this is a
+ multiline value
+ \'''
+ name3 = """
+ Well, this is a
+ multiline value
+ """ # a comment
+ name4 = \'''
+ Well, this is a
+ multiline value
+ \''' # I guess this is a comment too
+ '''
+
+
+@pytest.fixture
+def a(testconfig1, cfg_contents):
+ """
+ also copied from main doc tests
+ """
+ return ConfigObj(cfg_contents(testconfig1), raise_errors=True)
+
+
+@pytest.fixture
+def b(testconfig2, cfg_contents):
+ """
+ also copied from main doc tests
+ """
+ return ConfigObj(cfg_contents(testconfig2), raise_errors=True)
+
+
+@pytest.fixture
+def i(testconfig6, cfg_contents):
+ """
+ also copied from main doc tests
+ """
+ return ConfigObj(cfg_contents(testconfig6), raise_errors=True)
+
+
+def test_configobj_dict_representation(a, b, cfg_contents):
+
+ assert a.depth == 0
+ assert a == {
+ 'key2': 'val',
+ 'key1': 'val',
+ 'lev1c': {
+ 'lev2c': {
+ 'lev3c': {
+ 'key1': 'val',
+ },
+ },
+ },
+ 'lev1b': {
+ 'key2': 'val',
+ 'key1': 'val',
+ 'lev2ba': {
+ 'key1': 'val',
+ },
+ 'lev2bb': {
+ 'key1': 'val',
+ },
+ },
+ 'lev1a': {
+ 'key2': 'val',
+ 'key1': 'val',
+ },
+ }
+
+ assert b.depth == 0
+ assert b == {
+ 'key3': 'val3',
+ 'key2': 'val2',
+ 'key1': 'val1',
+ 'section 1': {
+ 'keys11': 'val1',
+ 'keys13': 'val3',
+ 'keys12': 'val2',
+ },
+ 'section 2': {
+ 'section 2 sub 1': {
+ 'fish': '3',
+ },
+ 'keys21': 'val1',
+ 'keys22': 'val2',
+ 'keys23': 'val3',
+ },
+ }
+
+ t = cfg_lines("""
+ 'a' = b # !"$%^&*(),::;'@~#= 33
+ "b" = b #= 6, 33
+ """)
+ t2 = ConfigObj(t)
+ assert t2 == {'a': 'b', 'b': 'b'}
+ t2.inline_comments['b'] = ''
+ del t2['a']
+ assert t2.write() == ['','b = b', '']
+
+
+def test_behavior_when_list_values_is_false():
+ c = '''
+ key1 = no quotes
+ key2 = 'single quotes'
+ key3 = "double quotes"
+ key4 = "list", 'with', several, "quotes"
+ '''
+ cfg = ConfigObj(cfg_lines(c), list_values=False)
+ assert cfg == {
+ 'key1': 'no quotes',
+ 'key2': "'single quotes'",
+ 'key3': '"double quotes"',
+ 'key4': '"list", \'with\', several, "quotes"'
+ }
+
+ cfg2 = ConfigObj(list_values=False)
+ cfg2['key1'] = 'Multiline\nValue'
+ cfg2['key2'] = '''"Value" with 'quotes' !'''
+ assert cfg2.write() == [
+ "key1 = '''Multiline\nValue'''",
+ 'key2 = "Value" with \'quotes\' !'
+ ]
+
+ cfg2.list_values = True
+ assert cfg2.write() == [
+ "key1 = '''Multiline\nValue'''",
+ 'key2 = \'\'\'"Value" with \'quotes\' !\'\'\''
+ ]
+
+
+def test_flatten_errors(val, cfg_contents):
+ config = cfg_contents("""
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [section]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [[sub section]]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ """)
+ configspec = cfg_contents("""
+ test1= integer(30,50)
+ test2= string
+ test3=integer
+ test4=float(6.0)
+ [section]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(6.0)
+ [[sub section]]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(6.0)
+ """)
+ c1 = ConfigObj(config, configspec=configspec)
+ res = c1.validate(val)
+ assert flatten_errors(c1, res) == [([], 'test4', False), (['section'], 'test4', False), (['section', 'sub section'], 'test4', False)]
+ res = c1.validate(val, preserve_errors=True)
+ check = flatten_errors(c1, res)
+ assert check[0][:2] == ([], 'test4')
+ assert check[1][:2] == (['section'], 'test4')
+ assert check[2][:2] == (['section', 'sub section'], 'test4')
+ for entry in check:
+ assert isinstance(entry[2], VdtValueTooSmallError)
+ assert str(entry[2]) == 'the value "5.0" is too small.'
+
+
+def test_unicode_handling():
+ u_base = '''
+ # initial comment
+    # initial comment 2
+ test1 = some value
+ # comment
+ test2 = another value # inline comment
+ # section comment
+ [section] # inline comment
+ test = test # another inline comment
+ test2 = test2
+ # final comment
+ # final comment2
+ '''
+
+ # needing to keep line endings means this isn't a good candidate
+ # for the cfg_lines utility method
+ u = u_base.encode('utf_8').splitlines(True)
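+    # prepend a UTF-8 BOM to the first line to exercise BOM detection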
+ u[0] = BOM_UTF8 + u[0]
+ uc = ConfigObj(u)
+ uc.encoding = None
+ assert uc.BOM
+ assert uc == {'test1': 'some value', 'test2': 'another value',
+ 'section': {'test': 'test', 'test2': 'test2'}}
+ uc = ConfigObj(u, encoding='utf_8', default_encoding='latin-1')
+ assert uc.BOM
+ assert isinstance(uc['test1'], six.text_type)
+ assert uc.encoding == 'utf_8'
+ assert uc.newlines == '\n'
+ assert len(uc.write()) == 13
+ uc['latin1'] = "This costs lot's of "
+ a_list = uc.write()
+ assert 'latin1' in str(a_list)
+ assert len(a_list) == 14
+ assert isinstance(a_list[0], six.binary_type)
+ assert a_list[0].startswith(BOM_UTF8)
+
+ u = u_base.replace('\n', '\r\n').encode('utf-8').splitlines(True)
+ uc = ConfigObj(u)
+ assert uc.newlines == '\r\n'
+ uc.newlines = '\r'
+ file_like = six.BytesIO()
+ uc.write(file_like)
+ file_like.seek(0)
+ uc2 = ConfigObj(file_like)
+ assert uc2 == uc
+ assert uc2.filename == None
+ assert uc2.newlines == '\r'
+
+
+class TestWritingConfigs(object):
+ def test_validate(self, val):
+ spec = [
+ '# Initial Comment',
+ '',
+ 'key1 = string(default=Hello)',
+ '',
+ '# section comment',
+ '[section] # inline comment',
+ '# key1 comment',
+ 'key1 = integer(default=6)',
+ '# key2 comment',
+ 'key2 = boolean(default=True)',
+ '# subsection comment',
+ '[[sub-section]] # inline comment',
+ '# another key1 comment',
+ 'key1 = float(default=3.0)'
+ ]
+ blank_config = ConfigObj(configspec=spec)
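+        # copy=True fills the otherwise empty config with the defaults (and
+        # comments) taken from the configspec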
+ assert blank_config.validate(val, copy=True)
+ assert blank_config.dict() == {
+ 'key1': 'Hello',
+ 'section': {'key1': 6, 'key2': True, 'sub-section': {'key1': 3.0}}
+ }
+ assert blank_config.write() == [
+ '# Initial Comment',
+ '',
+ 'key1 = Hello',
+ '',
+ '# section comment',
+ '[section]# inline comment',
+ '# key1 comment',
+ 'key1 = 6',
+ '# key2 comment',
+ 'key2 = True',
+ '# subsection comment',
+ '[[sub-section]]# inline comment',
+ '# another key1 comment',
+ 'key1 = 3.0'
+ ]
+
+ def test_writing_empty_values(self):
+ config_with_empty_values = [
+ '',
+ 'key1 =',
+ 'key2 =# a comment',
+ ]
+ cfg = ConfigObj(config_with_empty_values)
+ assert cfg.write() == ['', 'key1 = ""', 'key2 = ""# a comment']
+ cfg.write_empty_values = True
+ assert cfg.write() == ['', 'key1 = ', 'key2 = # a comment']
+
+
+class TestUnrepr(object):
+ def test_in_reading(self):
+ config_to_be_unreprd = cfg_lines("""
+ key1 = (1, 2, 3) # comment
+ key2 = True
+ key3 = 'a string'
+ key4 = [1, 2, 3, 'a mixed list']
+ """)
+ cfg = ConfigObj(config_to_be_unreprd, unrepr=True)
+ assert cfg == {
+ 'key1': (1, 2, 3),
+ 'key2': True,
+ 'key3': 'a string',
+ 'key4': [1, 2, 3, 'a mixed list']
+ }
+
+ assert cfg == ConfigObj(cfg.write(), unrepr=True)
+
+ def test_in_multiline_values(self, cfg_contents):
+ config_with_multiline_value = cfg_contents('''
+ k = \"""{
+ 'k1': 3,
+ 'k2': 6.0}\"""
+ ''')
+ cfg = ConfigObj(config_with_multiline_value, unrepr=True)
+ assert cfg == {'k': {'k1': 3, 'k2': 6.0}}
+
+ def test_with_a_dictionary(self):
+ config_with_dict_value = ['k = {"a": 1}']
+ cfg = ConfigObj(config_with_dict_value, unrepr=True)
+ assert isinstance(cfg['k'], dict)
+
+ def test_with_hash(self):
+ config_with_a_hash_in_a_list = [
+ 'key1 = (1, 2, 3) # comment',
+ 'key2 = True',
+ "key3 = 'a string'",
+ "key4 = [1, 2, 3, 'a mixed list#']"
+ ]
+ cfg = ConfigObj(config_with_a_hash_in_a_list, unrepr=True)
+ assert cfg == {
+ 'key1': (1, 2, 3),
+ 'key2': True,
+ 'key3': 'a string',
+ 'key4': [1, 2, 3, 'a mixed list#']
+ }
+
+
+class TestValueErrors(object):
+ def test_bool(self, empty_cfg):
+ empty_cfg['a'] = 'fish'
+ with pytest.raises(ValueError) as excinfo:
+ empty_cfg.as_bool('a')
+ assert str(excinfo.value) == 'Value "fish" is neither True nor False'
+ empty_cfg['b'] = 'True'
+ assert empty_cfg.as_bool('b') is True
+ empty_cfg['b'] = 'off'
+ assert empty_cfg.as_bool('b') is False
+
+ def test_int(self, empty_cfg):
+ for bad in ('fish', '3.2'):
+ empty_cfg['a'] = bad
+ with pytest.raises(ValueError) as excinfo:
+ empty_cfg.as_int('a')
+ assert str(excinfo.value).startswith('invalid literal for int()')
+
+ empty_cfg['b'] = '1'
+ assert empty_cfg.as_bool('b') is True
+ empty_cfg['b'] = '3.2'
+
+ def test_float(self, empty_cfg):
+ empty_cfg['a'] = 'fish'
+ with pytest.raises(ValueError):
+ empty_cfg.as_float('a')
+
+ empty_cfg['b'] = '1'
+ assert empty_cfg.as_float('b') == 1
+ empty_cfg['b'] = '3.2'
+ assert empty_cfg.as_float('b') == 3.2
+
+
+
+def test_error_types():
+ # errors that don't have interesting messages
+ test_value = 'what'
+ for ErrorClass in (co.ConfigObjError, co.NestingError, co.ParseError,
+ co.DuplicateError, co.ConfigspecError,
+ co.RepeatSectionError):
+ with pytest.raises(ErrorClass) as excinfo:
+ # TODO: assert more interesting things
+ # now that we're not using doctest
+ raise ErrorClass(test_value)
+ assert str(excinfo.value) == test_value
+
+ for ErrorClassWithMessage, msg in (
+ (co.InterpolationLoopError,
+ 'interpolation loop detected in value "{0}".'),
+ (co.MissingInterpolationOption,
+ 'missing option "{0}" in interpolation.'),
+ ):
+ with pytest.raises(ErrorClassWithMessage) as excinfo:
+ raise ErrorClassWithMessage(test_value)
+ assert str(excinfo.value) == msg.format(test_value)
+
+ # ReloadError is raised as IOError
+ with pytest.raises(IOError):
+ raise co.ReloadError()
+
+
+class TestSectionBehavior(object):
+ def test_dictionary_representation(self, a):
+
+ n = a.dict()
+ assert n == a
+ assert n is not a
+
+ def test_merging(self, cfg_contents):
+ config_with_subsection = cfg_contents("""
+ [section1]
+ option1 = True
+ [[subsection]]
+ more_options = False
+ # end of file
+ """)
+ config_that_overwrites_parameter = cfg_contents("""
+ # File is user.ini
+ [section1]
+ option1 = False
+ # end of file
+ """)
+ c1 = ConfigObj(config_that_overwrites_parameter)
+ c2 = ConfigObj(config_with_subsection)
+ c2.merge(c1)
+ assert c2.dict() == {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
+
+ def test_walking_with_in_place_updates(self, cfg_contents):
+ config = cfg_contents("""
+ [XXXXsection]
+ XXXXkey = XXXXvalue
+ """)
+ cfg = ConfigObj(config)
+ assert cfg.dict() == {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
+ def transform(section, key):
+ val = section[key]
+ newkey = key.replace('XXXX', 'CLIENT1')
+ section.rename(key, newkey)
+ if isinstance(val, six.string_types):
+ val = val.replace('XXXX', 'CLIENT1')
+ section[newkey] = val
+
+ assert cfg.walk(transform, call_on_sections=True) == {
+ 'CLIENT1section': {'CLIENT1key': None}
+ }
+ assert cfg.dict() == {
+ 'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}
+ }
+
+
+def test_reset_a_configobj():
+
+ something = object()
+ cfg = ConfigObj()
+ cfg['something'] = something
+ cfg['section'] = {'something': something}
+ cfg.filename = 'fish'
+ cfg.raise_errors = something
+ cfg.list_values = something
+ cfg.create_empty = something
+ cfg.file_error = something
+ cfg.stringify = something
+ cfg.indent_type = something
+ cfg.encoding = something
+ cfg.default_encoding = something
+ cfg.BOM = something
+ cfg.newlines = something
+ cfg.write_empty_values = something
+ cfg.unrepr = something
+ cfg.initial_comment = something
+ cfg.final_comment = something
+ cfg.configspec = something
+ cfg.inline_comments = something
+ cfg.comments = something
+ cfg.defaults = something
+ cfg.default_values = something
+ cfg.reset()
+
+ assert cfg.filename is None
+ assert cfg.raise_errors is False
+ assert cfg.list_values is True
+ assert cfg.create_empty is False
+ assert cfg.file_error is False
+ assert cfg.interpolation is True
+ assert cfg.configspec is None
+ assert cfg.stringify is True
+ assert cfg.indent_type is None
+ assert cfg.encoding is None
+ assert cfg.default_encoding is None
+ assert cfg.unrepr is False
+ assert cfg.write_empty_values is False
+ assert cfg.inline_comments == {}
+ assert cfg.comments == {}
+ assert cfg.defaults == []
+ assert cfg.default_values == {}
+ assert cfg == ConfigObj()
+ assert repr(cfg) == 'ConfigObj({})'
+
+
+class TestReloading(object):
+ @pytest.fixture
+ def reloadable_cfg_content(self):
+ content = '''
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [section]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [[sub section]]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [section2]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ '''
+ return content
+
+ def test_handle_no_filename(self):
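+        # a file-like object, no arguments at all, and an in-memory list of
+        # lines all leave ``filename`` unset, so reload() has nothing to re-read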
+ for bad_args in ([six.BytesIO()], [], [[]]):
+ cfg = ConfigObj(*bad_args)
+ with pytest.raises(ReloadError) as excinfo:
+ cfg.reload()
+ assert str(excinfo.value) == 'reload failed, filename is not set.'
+
+ def test_reloading_with_an_actual_file(self, request,
+ reloadable_cfg_content,
+ cfg_contents):
+
+ with NamedTemporaryFile(delete=False, mode='wb') as cfg_file:
+ cfg_file.write(reloadable_cfg_content.encode('utf-8'))
+ request.addfinalizer(lambda : os.unlink(cfg_file.name))
+
+ configspec = cfg_contents("""
+ test1= integer(30,50)
+ test2= string
+ test3=integer
+ test4=float(4.5)
+ [section]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(4.5)
+ [[sub section]]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(4.5)
+ [section2]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(4.5)
+ """)
+
+ cfg = ConfigObj(cfg_file.name, configspec=configspec)
+ cfg.configspec['test1'] = 'integer(50,60)'
+ backup = ConfigObj(cfg_file.name)
+ del cfg['section']
+ del cfg['test1']
+ cfg['extra'] = '3'
+ cfg['section2']['extra'] = '3'
+ cfg.reload()
+ assert cfg == backup
+ assert cfg.validate(Validator())
+
+
+class TestDuplicates(object):
+ def test_duplicate_section(self):
+ cfg = '''
+ [hello]
+ member = value
+ [hello again]
+ member = value
+ [ "hello" ]
+ member = value
+ '''
+ with pytest.raises(DuplicateError) as excinfo:
+ ConfigObj(cfg.splitlines(), raise_errors=True)
+ assert str(excinfo.value) == 'Duplicate section name at line 6.'
+
+ def test_duplicate_members(self):
+ d = '''
+ [hello]
+ member=value
+ [helloagain]
+ member1=value
+ member2=value
+ 'member1'=value
+ ["andagain"]
+ member=value
+ '''
+ with pytest.raises(DuplicateError) as excinfo:
+ ConfigObj(d.splitlines(),raise_errors=True)
+ assert str(excinfo.value) == 'Duplicate keyword name at line 7.'
+
+
+class TestInterpolation(object):
+ """
+    tests various interpolation behaviors using ConfigParser-style
+    and Template-style configs
+ """
+ @pytest.fixture
+ def config_parser_cfg(self):
+ cfg = ConfigObj()
+ cfg['DEFAULT'] = {
+ 'b': 'goodbye',
+ 'userdir': r'c:\\home',
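+            # 'c' and 'd' refer to each other (an interpolation loop)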
+ 'c': '%(d)s',
+ 'd': '%(c)s'
+ }
+ cfg['section'] = {
+ 'a': r'%(datadir)s\\some path\\file.py',
+ 'b': r'%(userdir)s\\some path\\file.py',
+ 'c': 'Yo %(a)s',
+ 'd': '%(not_here)s',
+ 'e': '%(e)s',
+ }
+ cfg['section']['DEFAULT'] = {
+ 'datadir': r'c:\\silly_test',
+ 'a': 'hello - %(b)s',
+ }
+ return cfg
+
+ @pytest.fixture
+ def template_cfg(self, cfg_contents):
+ interp_cfg = '''
+ [DEFAULT]
+ keyword1 = value1
+ 'keyword 2' = 'value 2'
+ reference = ${keyword1}
+ foo = 123
+
+ [ section ]
+ templatebare = $keyword1/foo
+ bar = $$foo
+ dollar = $$300.00
+ stophere = $$notinterpolated
+ with_braces = ${keyword1}s (plural)
+ with_spaces = ${keyword 2}!!!
+ with_several = $keyword1/$reference/$keyword1
+ configparsersample = %(keyword 2)sconfig
+ deep = ${reference}
+
+ [[DEFAULT]]
+ baz = $foo
+
+ [[ sub-section ]]
+ quux = '$baz + $bar + $foo'
+
+ [[[ sub-sub-section ]]]
+ convoluted = "$bar + $baz + $quux + $bar"
+ '''
+ return ConfigObj(cfg_contents(interp_cfg), interpolation='Template')
+
+ def test_interpolation(self, config_parser_cfg):
+ test_section = config_parser_cfg['section']
+ assert test_section['a'] == r'c:\\silly_test\\some path\\file.py'
+ assert test_section['b'] == r'c:\\home\\some path\\file.py'
+ assert test_section['c'] == r'Yo c:\\silly_test\\some path\\file.py'
+
+ def test_interpolation_turned_off(self, config_parser_cfg):
+ config_parser_cfg.interpolation = False
+ test_section = config_parser_cfg['section']
+ assert test_section['a'] == r'%(datadir)s\\some path\\file.py'
+ assert test_section['b'] == r'%(userdir)s\\some path\\file.py'
+ assert test_section['c'] == r'Yo %(a)s'
+
+ def test_handle_errors(self, config_parser_cfg):
+
+ with pytest.raises(MissingInterpolationOption) as excinfo:
+ print(config_parser_cfg['section']['d'])
+ assert (str(excinfo.value) ==
+ 'missing option "not_here" in interpolation.')
+
+ with pytest.raises(InterpolationLoopError) as excinfo:
+ print(config_parser_cfg['section']['e'])
+ assert (str(excinfo.value) ==
+ 'interpolation loop detected in value "e".')
+
+ def test_template_interpolation(self, template_cfg):
+ test_sec = template_cfg['section']
+ assert test_sec['templatebare'] == 'value1/foo'
+ assert test_sec['dollar'] == '$300.00'
+ assert test_sec['stophere'] == '$notinterpolated'
+ assert test_sec['with_braces'] == 'value1s (plural)'
+ assert test_sec['with_spaces'] == 'value 2!!!'
+ assert test_sec['with_several'] == 'value1/value1/value1'
+ assert test_sec['configparsersample'] == '%(keyword 2)sconfig'
+ assert test_sec['deep'] == 'value1'
+ assert test_sec['sub-section']['quux'] == '123 + $foo + 123'
+ assert (test_sec['sub-section']['sub-sub-section']['convoluted'] ==
+ '$foo + 123 + 123 + $foo + 123 + $foo')
+
+
+class TestQuotes(object):
+ """
+    tests what happens when dealing with quotes
+ """
+ def assert_bad_quote_message(self, empty_cfg, to_quote, **kwargs):
+        # TODO: this should use repr instead of str
+ message = 'Value "{0}" cannot be safely quoted.'
+ with pytest.raises(ConfigObjError) as excinfo:
+ empty_cfg._quote(to_quote, **kwargs)
+ assert str(excinfo.value) == message.format(to_quote)
+
+ def test_handle_unbalanced(self, i):
+ self.assert_bad_quote_message(i, '"""\'\'\'')
+
+ def test_handle_unallowed_newline(self, i):
+ newline = '\n'
+ self.assert_bad_quote_message(i, newline, multiline=False)
+
+ def test_handle_unallowed_open_quote(self, i):
+ open_quote = ' "\' '
+ self.assert_bad_quote_message(i, open_quote, multiline=False)
+
+ def test_handle_multiple_bad_quote_values(self):
+ testconfig5 = '''
+ config = "hello # comment
+ test = 'goodbye
+ fish = 'goodbye # comment
+ dummy = "hello again
+ '''
+ with pytest.raises(ConfigObjError) as excinfo:
+ ConfigObj(testconfig5.splitlines())
+ assert len(excinfo.value.errors) == 4
+
+
+
+def test_handle_stringify_off():
+ c = ConfigObj()
+ c.stringify = False
+
+ with pytest.raises(TypeError) as excinfo:
+ c['test'] = 1
+ assert str(excinfo.value) == 'Value is not a string "1".'
+
+
+class TestValues(object):
+ """
+    Tests specific behaviors for different types of values
+ """
+ @pytest.fixture
+ def testconfig3(self, cfg_contents):
+ return cfg_contents("""
+ a = ,
+ b = test,
+ c = test1, test2 , test3
+ d = test1, test2, test3,
+ """)
+
+ def test_empty_values(self, cfg_contents):
+ cfg_with_empty = cfg_contents("""
+ k =
+ k2 =# comment test
+ val = test
+ val2 = ,
+ val3 = 1,
+ val4 = 1, 2
+ val5 = 1, 2, """)
+ cwe = ConfigObj(cfg_with_empty)
+ # see a comma? it's a list
+ assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': [],
+ 'val3': ['1'], 'val4': ['1', '2'], 'val5': ['1', '2']}
+ # not any more
+ cwe = ConfigObj(cfg_with_empty, list_values=False)
+ assert cwe == {'k': '', 'k2': '', 'val': 'test', 'val2': ',',
+ 'val3': '1,', 'val4': '1, 2', 'val5': '1, 2,'}
+
+ def test_list_values(self, testconfig3):
+ cfg = ConfigObj(testconfig3, raise_errors=True)
+ assert cfg['a'] == []
+ assert cfg['b'] == ['test']
+ assert cfg['c'] == ['test1', 'test2', 'test3']
+ assert cfg['d'] == ['test1', 'test2', 'test3']
+
+ def test_list_values_off(self, testconfig3):
+ cfg = ConfigObj(testconfig3, raise_errors=True, list_values=False)
+ assert cfg['a'] == ','
+ assert cfg['b'] == 'test,'
+ assert cfg['c'] == 'test1, test2 , test3'
+ assert cfg['d'] == 'test1, test2, test3,'
+
+ def test_handle_multiple_list_value_errors(self):
+ testconfig4 = '''
+ config = 3,4,,
+ test = 3,,4
+ fish = ,,
+ dummy = ,,hello, goodbye
+ '''
+ with pytest.raises(ConfigObjError) as excinfo:
+ ConfigObj(testconfig4.splitlines())
+ assert len(excinfo.value.errors) == 4
+
+
+
+def test_creating_with_a_dictionary():
+ dictionary_cfg_content = {
+ 'key1': 'val1',
+ 'key2': 'val2',
+ 'section 1': {
+ 'key1': 'val1',
+ 'key2': 'val2',
+ 'section 1b': {
+ 'key1': 'val1',
+ 'key2': 'val2',
+ },
+ },
+ 'section 2': {
+ 'key1': 'val1',
+ 'key2': 'val2',
+ 'section 2b': {
+ 'key1': 'val1',
+ 'key2': 'val2',
+ },
+ },
+ 'key3': 'val3',
+ }
+ cfg = ConfigObj(dictionary_cfg_content)
+ assert dictionary_cfg_content == cfg
+ assert dictionary_cfg_content is not cfg
+ assert dictionary_cfg_content == cfg.dict()
+ assert dictionary_cfg_content is not cfg.dict()
+
+
+class TestComments(object):
+ @pytest.fixture
+ def comment_filled_cfg(self, cfg_contents):
+ return cfg_contents("""
+ # initial comments
+ # with two lines
+ key = "value"
+ # section comment
+ [section] # inline section comment
+ # key comment
+ key = "value"
+
+ # final comment
+ # with two lines"""
+ )
+
+ def test_multiline_comments(self, i):
+
+ expected_multiline_value = '\nWell, this is a\nmultiline value\n'
+ assert i == {
+ 'name4': ' another single line value ',
+ 'multi section': {
+ 'name4': expected_multiline_value,
+ 'name2': expected_multiline_value,
+ 'name3': expected_multiline_value,
+ 'name1': expected_multiline_value,
+ },
+ 'name2': ' another single line value ',
+ 'name3': ' a single line value ',
+ 'name1': ' a single line value ',
+ }
+
+ def test_starting_and_ending_comments(self, a, testconfig1, cfg_contents):
+
+ filename = a.filename
+ a.filename = None
+ values = a.write()
+ index = 0
+ while index < 23:
+ index += 1
+ line = values[index-1]
+ assert line.endswith('# comment ' + str(index))
+ a.filename = filename
+
+ start_comment = ['# Initial Comment', '', '#']
+ end_comment = ['', '#', '# Final Comment']
+ newconfig = start_comment + testconfig1.splitlines() + end_comment
+ nc = ConfigObj(newconfig)
+ assert nc.initial_comment == ['# Initial Comment', '', '#']
+ assert nc.final_comment == ['', '#', '# Final Comment']
+ assert nc.initial_comment == start_comment
+ assert nc.final_comment == end_comment
+
+ def test_inline_comments(self):
+ c = ConfigObj()
+ c['foo'] = 'bar'
+ c.inline_comments['foo'] = 'Nice bar'
+ assert c.write() == ['foo = bar # Nice bar']
+
+ def test_unrepr_comments(self, comment_filled_cfg):
+ c = ConfigObj(comment_filled_cfg, unrepr=True)
+ assert c == { 'key': 'value', 'section': { 'key': 'value'}}
+ assert c.initial_comment == [
+ '', '# initial comments', '# with two lines'
+ ]
+ assert c.comments == {'section': ['# section comment'], 'key': []}
+ assert c.inline_comments == {
+ 'section': '# inline section comment', 'key': ''
+ }
+ assert c['section'].comments == { 'key': ['# key comment']}
+ assert c.final_comment == ['', '# final comment', '# with two lines']
+
+ def test_comments(self, comment_filled_cfg):
+ c = ConfigObj(comment_filled_cfg)
+ assert c == { 'key': 'value', 'section': { 'key': 'value'}}
+ assert c.initial_comment == [
+ '', '# initial comments', '# with two lines'
+ ]
+ assert c.comments == {'section': ['# section comment'], 'key': []}
+ assert c.inline_comments == {
+ 'section': '# inline section comment', 'key': None
+ }
+ assert c['section'].comments == { 'key': ['# key comment']}
+ assert c.final_comment == ['', '# final comment', '# with two lines']
+
+
+
+def test_overwriting_filenames(a, b, i):
+    # TODO: I'm not entirely sure what this test is actually asserting
+ filename = a.filename
+ a.filename = 'test.ini'
+ a.write()
+ a.filename = filename
+ assert a == ConfigObj('test.ini', raise_errors=True)
+ os.remove('test.ini')
+ b.filename = 'test.ini'
+ b.write()
+ assert b == ConfigObj('test.ini', raise_errors=True)
+ os.remove('test.ini')
+ i.filename = 'test.ini'
+ i.write()
+ assert i == ConfigObj('test.ini', raise_errors=True)
+ os.remove('test.ini')
+
+
+def test_interpolation_using_default_sections():
+ c = ConfigObj()
+ c['DEFAULT'] = {'a' : 'fish'}
+ c['a'] = '%(a)s'
+ assert c.write() == ['a = %(a)s', '[DEFAULT]', 'a = fish']
+
+
+class TestIndentation(object):
+ @pytest.fixture
+ def max_tabbed_cfg(self):
+ return ['[sect]', ' [[sect]]', ' foo = bar']
+
+ def test_write_dictionary(self):
+ assert ConfigObj({'sect': {'sect': {'foo': 'bar'}}}).write() == [
+ '[sect]', ' [[sect]]', ' foo = bar'
+ ]
+
+ def test_indentation_preserved(self, max_tabbed_cfg):
+ for cfg_content in (
+ ['[sect]', '[[sect]]', 'foo = bar'],
+ ['[sect]', ' [[sect]]', ' foo = bar'],
+ max_tabbed_cfg
+ ):
+ assert ConfigObj(cfg_content).write() == cfg_content
+
+ def test_handle_tabs_vs_spaces(self, max_tabbed_cfg):
+ one_tab = ['[sect]', '\t[[sect]]', '\t\tfoo = bar']
+ two_tabs = ['[sect]', '\t\t[[sect]]', '\t\t\t\tfoo = bar']
+ tabs_and_spaces = [b'[sect]', b'\t \t [[sect]]',
+ b'\t \t \t \t foo = bar']
+
+ assert ConfigObj(one_tab).write() == one_tab
+ assert ConfigObj(two_tabs).write() == two_tabs
+ assert ConfigObj(tabs_and_spaces).write() == [s.decode('utf-8') for s in tabs_and_spaces]
+ assert ConfigObj(max_tabbed_cfg, indent_type=chr(9)).write() == one_tab
+ assert ConfigObj(one_tab, indent_type=' ').write() == max_tabbed_cfg
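+ # The last two asserts show that indent_type rewrites indentation on output:
+ # one tab per nesting level for chr(9), or the space-based indentation of
+ # max_tabbed_cfg when a space indent_type is given.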
+
+
+class TestEdgeCasesWhenWritingOut(object):
+ def test_newline_terminated(self, empty_cfg):
+ empty_cfg.newlines = '\n'
+ empty_cfg['a'] = 'b'
+ collector = six.BytesIO()
+ empty_cfg.write(collector)
+ assert collector.getvalue() == b'a = b\n'
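+ # write() accepts any file-like object and then emits encoded bytes instead
+ # of returning a list of lines; `newlines` controls the line terminator.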
+
+ def test_hash_escaping(self, empty_cfg):
+ empty_cfg.newlines = '\n'
+ empty_cfg['#a'] = 'b # something'
+ collector = six.BytesIO()
+ empty_cfg.write(collector)
+ assert collector.getvalue() == b'"#a" = "b # something"\n'
+
+ empty_cfg = ConfigObj()
+ empty_cfg.newlines = '\n'
+ empty_cfg['a'] = 'b # something', 'c # something'
+ collector = six.BytesIO()
+ empty_cfg.write(collector)
+ assert collector.getvalue() == b'a = "b # something", "c # something"\n'
+
+ def test_detecting_line_endings_from_existing_files(self):
+ for expected_line_ending in ('\r\n', '\n'):
+ with open('temp', 'w') as h:
+ h.write(expected_line_ending)
+ c = ConfigObj('temp')
+ assert c.newlines == expected_line_ending
+ os.remove('temp')
+
+ def test_writing_out_dict_value_with_unrepr(self):
+ # issue #42
+ cfg = ['thing = {"a": 1}']
+ c = ConfigObj(cfg, unrepr=True)
+ assert repr(c) == "ConfigObj({'thing': {'a': 1}})"
+ assert c.write() == ["thing = {'a': 1}"]
diff --git a/src/tests/test_validate.py b/src/tests/test_validate.py
new file mode 100644
index 0000000..07c1de7
--- /dev/null
+++ b/src/tests/test_validate.py
@@ -0,0 +1,163 @@
+# coding=utf-8
+
+from configobj import ConfigObj
+import pytest
+from configobj.validate import Validator, VdtValueTooSmallError
+
+
+class TestBasic(object):
+ def test_values_too_small(self, val):
+ config = '''
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [section]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ [[sub section]]
+ test1=40
+ test2=hello
+ test3=3
+ test4=5.0
+ '''.splitlines()
+ configspec = '''
+ test1= integer(30,50)
+ test2= string
+ test3=integer
+ test4=float(6.0)
+ [section ]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(6.0)
+ [[sub section]]
+ test1=integer(30,50)
+ test2=string
+ test3=integer
+ test4=float(6.0)
+ '''.splitlines()
+ c1 = ConfigObj(config, configspec=configspec)
+ test = c1.validate(val)
+ assert test == {
+ 'test1': True,
+ 'test2': True,
+ 'test3': True,
+ 'test4': False,
+ 'section': {
+ 'test1': True,
+ 'test2': True,
+ 'test3': True,
+ 'test4': False,
+ 'sub section': {
+ 'test1': True,
+ 'test2': True,
+ 'test3': True,
+ 'test4': False,
+ },
+ },
+ }
+
+ with pytest.raises(VdtValueTooSmallError) as excinfo:
+ val.check(c1.configspec['test4'], c1['test4'])
+ assert str(excinfo.value) == 'the value "5.0" is too small.'
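+ # validate() marks each failing key with False, section by section; calling
+ # val.check() directly with the same spec and value raises the underlying
+ # VdtValueTooSmallError whose message is asserted above.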
+
+ def test_values(self, val):
+ val_test_config = '''
+ key = 0
+ key2 = 1.1
+ [section]
+ key = some text
+ key2 = 1.1, 3.0, 17, 6.8
+ [[sub-section]]
+ key = option1
+ key2 = True'''.splitlines()
+ val_test_configspec = '''
+ key = integer
+ key2 = float
+ [section]
+ key = string
+ key2 = float_list(4)
+ [[sub-section]]
+ key = option(option1, option2)
+ key2 = boolean'''.splitlines()
+ val_test = ConfigObj(val_test_config, configspec=val_test_configspec)
+ assert val_test.validate(val)
+ val_test['key'] = 'text not a digit'
+ val_res = val_test.validate(val)
+ assert val_res == {'key2': True, 'section': True, 'key': False}
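+ # When every check passes, validate() simply returns True; once 'key' is
+ # made invalid it returns a per-entry result dict instead, with False for
+ # the failing key and True for entries and sections that still pass.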
+
+ def test_defaults(self, val):
+ configspec = '''
+ test1=integer(30,50, default=40)
+ test2=string(default="hello")
+ test3=integer(default=3)
+ test4=float(6.0, default=6.0)
+ [section ]
+ test1=integer(30,50, default=40)
+ test2=string(default="hello")
+ test3=integer(default=3)
+ test4=float(6.0, default=6.0)
+ [[sub section]]
+ test1=integer(30,50, default=40)
+ test2=string(default="hello")
+ test3=integer(default=3)
+ test4=float(6.0, default=6.0)
+ '''.splitlines()
+ default_test = ConfigObj(['test1=30'], configspec=configspec)
+ assert repr(default_test) == "ConfigObj({'test1': '30'})"
+ assert default_test.defaults == []
+ assert default_test.default_values == {}
+ assert default_test.validate(val)
+ assert default_test == {
+ 'test1': 30,
+ 'test2': 'hello',
+ 'test3': 3,
+ 'test4': 6.0,
+ 'section': {
+ 'test1': 40,
+ 'test2': 'hello',
+ 'test3': 3,
+ 'test4': 6.0,
+ 'sub section': {
+ 'test1': 40,
+ 'test3': 3,
+ 'test2': 'hello',
+ 'test4': 6.0,
+ },
+ },
+ }
+
+ assert default_test.defaults == ['test2', 'test3', 'test4']
+ assert default_test.default_values == {
+ 'test1': 40, 'test2': 'hello',
+ 'test3': 3, 'test4': 6.0
+ }
+ assert default_test.restore_default('test1') == 40
+ assert default_test['test1'] == 40
+ assert 'test1' in default_test.defaults
+
+ def change(section, key):
+ section[key] = 3
+ default_test.walk(change)
+ assert default_test['section']['sub section']['test4'] == 3
+
+ default_test.restore_defaults()
+ assert default_test == {
+ 'test1': 40,
+ 'test2': "hello",
+ 'test3': 3,
+ 'test4': 6.0,
+ 'section': {
+ 'test1': 40,
+ 'test2': "hello",
+ 'test3': 3,
+ 'test4': 6.0,
+ 'sub section': {
+ 'test1': 40,
+ 'test2': "hello",
+ 'test3': 3,
+ 'test4': 6.0
+ }}}
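+ # `defaults` lists the keys whose values were filled in from the spec's
+ # default= arguments, `default_values` maps each such key to its default,
+ # and restore_default()/restore_defaults() reinstate those values after the
+ # walk() above overwrote them.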
diff --git a/src/tests/test_validate_errors.py b/src/tests/test_validate_errors.py
new file mode 100644
index 0000000..399daa8
--- /dev/null
+++ b/src/tests/test_validate_errors.py
@@ -0,0 +1,79 @@
+import os
+
+import pytest
+
+from configobj import ConfigObj, get_extra_values, ParseError, NestingError
+from configobj.validate import Validator
+
+@pytest.fixture()
+def thisdir():
+ return os.path.dirname(os.path.join(os.getcwd(), __file__))
+
+
+@pytest.fixture()
+def inipath(thisdir):
+ return os.path.join(thisdir, 'conf.ini')
+
+
+@pytest.fixture()
+def specpath(thisdir):
+ return os.path.join(thisdir, 'conf.spec')
+
+
+@pytest.fixture()
+def conf(inipath, specpath):
+ return ConfigObj(inipath, configspec=specpath)
+
+
+def test_validate_no_valid_entries(conf):
+ validator = Validator()
+ result = conf.validate(validator)
+ assert not result
+
+
+def test_validate_preserve_errors(conf):
+ validator = Validator()
+ result = conf.validate(validator, preserve_errors=True)
+
+ assert not result['value']
+ assert not result['missing-section']
+
+ section = result['section']
+ assert not section['value']
+ assert not section['sub-section']['value']
+ assert not section['missing-subsection']
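+ # With preserve_errors=True the result keeps the config's nesting, so each
+ # failing key and each (sub-)section required by the spec but absent from
+ # the file can be inspected individually.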
+
+
+def test_validate_extra_values(conf):
+ conf.validate(Validator(), preserve_errors=True)
+
+ assert conf.extra_values == ['extra', 'extra-section']
+ assert conf['section'].extra_values == ['extra-sub-section']
+ assert conf['section']['sub-section'].extra_values == ['extra']
+
+
+def test_get_extra_values(conf):
+ conf.validate(Validator(), preserve_errors=True)
+ extra_values = get_extra_values(conf)
+
+ expected = sorted([
+ ((), 'extra'),
+ ((), 'extra-section'),
+ (('section', 'sub-section'), 'extra'),
+ (('section',), 'extra-sub-section'),
+ ])
+ assert sorted(extra_values) == expected
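+ # get_extra_values() flattens the per-section extra_values attributes from
+ # the previous test into (section_path, key) pairs, with () denoting the
+ # top level.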
+
+
+def test_invalid_lines_with_percents(tmpdir, specpath):
+ ini = tmpdir.join('config.ini')
+ ini.write('extra: %H:%M\n')
+ with pytest.raises(ParseError):
+ conf = ConfigObj(str(ini), configspec=specpath, file_error=True)
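+ # The written line uses ':' rather than ConfigObj's 'key = value' syntax, so
+ # constructing the ConfigObj raises ParseError; file_error=True only turns a
+ # missing file into an error, it does not suppress parse errors.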
+
+
+def test_no_parent(tmpdir, specpath):
+ ini = tmpdir.join('config.ini')
+ ini.write('[[haha]]')
+ with pytest.raises(NestingError):
+ conf = ConfigObj(str(ini), configspec=specpath, file_error=True)
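+ # A [[sub-section]] marker with no enclosing [section] has no parent to
+ # attach to, so the parser raises NestingError.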