author     Federico Caselli <cfederico87@gmail.com>           2021-10-25 19:30:25 +0000
committer  Gerrit Code Review <gerrit@ci3.zzzcomputing.com>   2021-10-25 19:30:25 +0000
commit     a6c37d64d434c84c853ce2a297bd8b2363b4e374 (patch)
tree       0229ce673dd35e4e03de18a95512e8ad60d917af
parent     eacf04b14d8a8f6aa6a8a0af2847558f98fd44b1 (diff)
parent     c47d172e5c6524c37ec91e5304c8279cc0132e54 (diff)
download   mako-a6c37d64d434c84c853ce2a297bd8b2363b4e374.tar.gz
Merge "Refactoring Code" into main
-rw-r--r--  .github/workflows/run-test.yaml     1
-rw-r--r--  mako/ast.py                          2
-rw-r--r--  mako/exceptions.py                  13
-rw-r--r--  mako/ext/babelplugin.py              3
-rw-r--r--  mako/ext/extract.py                  6
-rw-r--r--  mako/ext/linguaplugin.py            14
-rw-r--r--  mako/lexer.py                      192
-rw-r--r--  mako/lookup.py                      22
-rw-r--r--  mako/parsetree.py                   21
-rw-r--r--  mako/pygen.py                       41
-rw-r--r--  mako/pyparser.py                    19
-rw-r--r--  mako/runtime.py                     17
-rw-r--r--  mako/template.py                    26
-rw-r--r--  mako/util.py                        34
-rw-r--r--  setup.cfg                            3
15 files changed, 187 insertions, 227 deletions
diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml
index 5224c06..60307ea 100644
--- a/.github/workflows/run-test.yaml
+++ b/.github/workflows/run-test.yaml
@@ -23,7 +23,6 @@ jobs:
- "windows-latest"
- "macos-latest"
python-version:
- - "3.6"
- "3.7"
- "3.8"
- "3.9"
diff --git a/mako/ast.py b/mako/ast.py
index 1b017e1..6023ede 100644
--- a/mako/ast.py
+++ b/mako/ast.py
@@ -96,7 +96,7 @@ class PythonFragment(PythonCode):
             code = code + "pass"
         elif keyword == "try":
             code = code + "pass\nexcept:pass"
-        elif keyword == "elif" or keyword == "else":
+        elif keyword in ["elif", "else"]:
             code = "if False:pass\n" + code + "pass"
         elif keyword == "except":
             code = "try:pass\n" + code + "pass"
diff --git a/mako/exceptions.py b/mako/exceptions.py
index c8c77a5..ec2de81 100644
--- a/mako/exceptions.py
+++ b/mako/exceptions.py
@@ -225,13 +225,12 @@ class RichTraceback:
         if new_trcback:
             try:
                 # A normal .py file (not a Template)
-                fp = open(new_trcback[-1][0], "rb")
-                encoding = util.parse_encoding(fp)
-                if not encoding:
-                    encoding = "utf-8"
-                fp.seek(0)
-                self.source = fp.read()
-                fp.close()
+                with open(new_trcback[-1][0], "rb") as fp:
+                    encoding = util.parse_encoding(fp)
+                    if not encoding:
+                        encoding = "utf-8"
+                    fp.seek(0)
+                    self.source = fp.read()
                 if encoding:
                     self.source = self.source.decode(encoding)
             except IOError:
diff --git a/mako/ext/babelplugin.py b/mako/ext/babelplugin.py
index 8f8e7d9..84d8478 100644
--- a/mako/ext/babelplugin.py
+++ b/mako/ext/babelplugin.py
@@ -54,5 +54,4 @@ def extract(fileobj, keywords, comment_tags, options):
     :rtype: ``iterator``
     """
     extractor = BabelMakoExtractor(keywords, comment_tags, options)
-    for message in extractor(fileobj):
-        yield message
+    yield from extractor(fileobj)
diff --git a/mako/ext/extract.py b/mako/ext/extract.py
index 9e14a0a..74d067d 100644
--- a/mako/ext/extract.py
+++ b/mako/ext/extract.py
@@ -19,8 +19,7 @@ class MessageExtractor:
         template_node = lexer.Lexer(
             fileobj.read(), input_encoding=self.config["encoding"]
         ).parse()
-        for extracted in self.extract_nodes(template_node.get_children()):
-            yield extracted
+        yield from self.extract_nodes(template_node.get_children())

     def extract_nodes(self, nodes):
         translator_comments = []
@@ -118,8 +117,7 @@ class MessageExtractor:
             in_translator_comments = False

             if child_nodes:
-                for extracted in self.extract_nodes(child_nodes):
-                    yield extracted
+                yield from self.extract_nodes(child_nodes)

     @staticmethod
     def _split_comment(lineno, comment):
diff --git a/mako/ext/linguaplugin.py b/mako/ext/linguaplugin.py
index e50151e..4cce626 100644
--- a/mako/ext/linguaplugin.py
+++ b/mako/ext/linguaplugin.py
@@ -4,6 +4,7 @@
 # This module is part of Mako and is released under
 # the MIT License: http://www.opensource.org/licenses/mit-license.php

+import contextlib
 import io

 from lingua.extractors import Extractor
@@ -25,16 +26,11 @@ class LinguaMakoExtractor(Extractor, MessageExtractor):
         self.filename = filename
         self.python_extractor = get_extractor("x.py")
         if fileobj is None:
-            fileobj = open(filename, "r")
-            must_close = True
+            ctx = open(filename, "r")
         else:
-            must_close = False
-        try:
-            for message in self.process_file(fileobj):
-                yield message
-        finally:
-            if must_close:
-                fileobj.close()
+            ctx = contextlib.nullcontext(fileobj)
+        with ctx as file_:
+            yield from self.process_file(file_)

     def process_python(self, code, code_lineno, translator_strings):
         source = code.getvalue().strip()
diff --git a/mako/lexer.py b/mako/lexer.py
index 74fafa1..306ae4b 100644
--- a/mako/lexer.py
+++ b/mako/lexer.py
@@ -54,10 +54,7 @@ class Lexer:
try:
reg = _regexp_cache[(regexp, flags)]
except KeyError:
- if flags:
- reg = re.compile(regexp, flags)
- else:
- reg = re.compile(regexp)
+ reg = re.compile(regexp, flags) if flags else re.compile(regexp)
_regexp_cache[(regexp, flags)] = reg
return self.match_reg(reg)
@@ -75,10 +72,7 @@ class Lexer:
match = reg.match(self.text, self.match_position)
if match:
(start, end) = match.span()
- if end == start:
- self.match_position = end + 1
- else:
- self.match_position = end
+ self.match_position = end + 1 if end == start else end
self.matched_lineno = self.lineno
lines = re.findall(r"\n", self.text[mp : self.match_position])
cp = mp - 1
@@ -86,10 +80,6 @@ class Lexer:
cp -= 1
self.matched_charpos = mp - cp
self.lineno += len(lines)
- # print "MATCHED:", match.group(0), "LINE START:",
- # self.matched_lineno, "LINE END:", self.lineno
- # print "MATCH:", regexp, "\n", self.text[mp : mp + 15], \
- # (match and "TRUE" or "FALSE")
return match
def parse_until_text(self, watch_nesting, *text):
@@ -149,12 +139,15 @@ class Lexer:
if self.control_line:
control_frame = self.control_line[-1]
control_frame.nodes.append(node)
- if not (
- isinstance(node, parsetree.ControlLine)
- and control_frame.is_ternary(node.keyword)
+ if (
+ not (
+ isinstance(node, parsetree.ControlLine)
+ and control_frame.is_ternary(node.keyword)
+ )
+ and self.ternary_stack
+ and self.ternary_stack[-1]
):
- if self.ternary_stack and self.ternary_stack[-1]:
- self.ternary_stack[-1][-1].nodes.append(node)
+ self.ternary_stack[-1][-1].nodes.append(node)
if isinstance(node, parsetree.Tag):
if len(self.tag):
node.parent = self.tag[-1]
@@ -207,11 +200,7 @@ class Lexer:
)
else:
m = self._coding_re.match(text.decode("utf-8", "ignore"))
- if m:
- parsed_encoding = m.group(1)
- else:
- parsed_encoding = known_encoding or "utf-8"
-
+ parsed_encoding = m.group(1) if m else known_encoding or "utf-8"
if decode_raw:
try:
text = text.decode(parsed_encoding)
@@ -301,35 +290,34 @@ class Lexer:
re.I | re.S | re.X,
)
- if match:
- keyword, attr, isend = match.groups()
- self.keyword = keyword
- attributes = {}
- if attr:
- for att in re.findall(
- r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr
- ):
- key, val1, val2 = att
- text = val1 or val2
- text = text.replace("\r\n", "\n")
- attributes[key] = text
- self.append_node(parsetree.Tag, keyword, attributes)
- if isend:
- self.tag.pop()
- else:
- if keyword == "text":
- match = self.match(r"(.*?)(?=\</%text>)", re.S)
- if not match:
- raise exceptions.SyntaxException(
- "Unclosed tag: <%%%s>" % self.tag[-1].keyword,
- **self.exception_kwargs,
- )
- self.append_node(parsetree.Text, match.group(1))
- return self.match_tag_end()
- return True
- else:
+ if not match:
return False
+ keyword, attr, isend = match.groups()
+ self.keyword = keyword
+ attributes = {}
+ if attr:
+ for att in re.findall(
+ r"\s*(\w+)\s*=\s*(?:'([^']*)'|\"([^\"]*)\")", attr
+ ):
+ key, val1, val2 = att
+ text = val1 or val2
+ text = text.replace("\r\n", "\n")
+ attributes[key] = text
+ self.append_node(parsetree.Tag, keyword, attributes)
+ if isend:
+ self.tag.pop()
+ elif keyword == "text":
+ match = self.match(r"(.*?)(?=\</%text>)", re.S)
+ if not match:
+ raise exceptions.SyntaxException(
+ "Unclosed tag: <%%%s>" % self.tag[-1].keyword,
+ **self.exception_kwargs
+ )
+ self.append_node(parsetree.Text, match.group(1))
+ return self.match_tag_end()
+ return True
+
def match_tag_end(self):
match = self.match(r"\</%[\t ]*(.+?)[\t ]*>")
if match:
@@ -352,15 +340,15 @@ class Lexer:
def match_end(self):
match = self.match(r"\Z", re.S)
- if match:
- string = match.group()
- if string:
- return string
- else:
- return True
- else:
+ if not match:
return False
+ string = match.group()
+ if string:
+ return string
+ else:
+ return True
+
def match_text(self):
match = self.match(
r"""
@@ -411,63 +399,63 @@ class Lexer:
def match_expression(self):
match = self.match(r"\${")
- if match:
- line, pos = self.matched_lineno, self.matched_charpos
- text, end = self.parse_until_text(True, r"\|", r"}")
- if end == "|":
- escapes, end = self.parse_until_text(True, r"}")
- else:
- escapes = ""
- text = text.replace("\r\n", "\n")
- self.append_node(
- parsetree.Expression,
- text,
- escapes.strip(),
- lineno=line,
- pos=pos,
- )
- return True
- else:
+ if not match:
return False
+ line, pos = self.matched_lineno, self.matched_charpos
+ text, end = self.parse_until_text(True, r"\|", r"}")
+ if end == "|":
+ escapes, end = self.parse_until_text(True, r"}")
+ else:
+ escapes = ""
+ text = text.replace("\r\n", "\n")
+ self.append_node(
+ parsetree.Expression,
+ text,
+ escapes.strip(),
+ lineno=line,
+ pos=pos,
+ )
+ return True
+
def match_control_line(self):
match = self.match(
r"(?<=^)[\t ]*(%(?!%)|##)[\t ]*((?:(?:\\r?\n)|[^\r\n])*)"
r"(?:\r?\n|\Z)",
re.M,
)
- if match:
- operator = match.group(1)
- text = match.group(2)
- if operator == "%":
- m2 = re.match(r"(end)?(\w+)\s*(.*)", text)
- if not m2:
+ if not match:
+ return False
+
+ operator = match.group(1)
+ text = match.group(2)
+ if operator == "%":
+ m2 = re.match(r"(end)?(\w+)\s*(.*)", text)
+ if not m2:
+ raise exceptions.SyntaxException(
+ "Invalid control line: '%s'" % text,
+ **self.exception_kwargs
+ )
+ isend, keyword = m2.group(1, 2)
+ isend = isend is not None
+
+ if isend:
+ if not len(self.control_line):
raise exceptions.SyntaxException(
- "Invalid control line: '%s'" % text,
- **self.exception_kwargs,
+ "No starting keyword '%s' for '%s'"
+ % (keyword, text),
+ **self.exception_kwargs
)
- isend, keyword = m2.group(1, 2)
- isend = isend is not None
-
- if isend:
- if not len(self.control_line):
- raise exceptions.SyntaxException(
- "No starting keyword '%s' for '%s'"
- % (keyword, text),
- **self.exception_kwargs,
- )
- elif self.control_line[-1].keyword != keyword:
- raise exceptions.SyntaxException(
- "Keyword '%s' doesn't match keyword '%s'"
- % (text, self.control_line[-1].keyword),
- **self.exception_kwargs,
- )
- self.append_node(parsetree.ControlLine, keyword, isend, text)
- else:
- self.append_node(parsetree.Comment, text)
- return True
+ elif self.control_line[-1].keyword != keyword:
+ raise exceptions.SyntaxException(
+ "Keyword '%s' doesn't match keyword '%s'"
+ % (text, self.control_line[-1].keyword),
+ **self.exception_kwargs
+ )
+ self.append_node(parsetree.ControlLine, keyword, isend, text)
else:
- return False
+ self.append_node(parsetree.Comment, text)
+ return True
def match_comment(self):
"""matches the multiline version of a comment"""
diff --git a/mako/lookup.py b/mako/lookup.py
index b048f2a..06dee89 100644
--- a/mako/lookup.py
+++ b/mako/lookup.py
@@ -262,15 +262,14 @@ class TemplateLookup(TemplateCollection):
         if key in self._uri_cache:
             return self._uri_cache[key]

-        if uri[0] != "/":
-            if relativeto is not None:
-                v = self._uri_cache[key] = posixpath.join(
-                    posixpath.dirname(relativeto), uri
-                )
-            else:
-                v = self._uri_cache[key] = "/" + uri
-        else:
+        if uri[0] == "/":
             v = self._uri_cache[key] = uri
+        elif relativeto is not None:
+            v = self._uri_cache[key] = posixpath.join(
+                posixpath.dirname(relativeto), uri
+            )
+        else:
+            v = self._uri_cache[key] = "/" + uri
         return v

     def filename_to_uri(self, filename):
@@ -334,11 +333,10 @@ class TemplateLookup(TemplateCollection):
         try:
             template_stat = os.stat(template.filename)

-            if template.module._modified_time < template_stat[stat.ST_MTIME]:
-                self._collection.pop(uri, None)
-                return self._load(template.filename, uri)
-            else:
+            if template.module._modified_time >= template_stat[stat.ST_MTIME]:
                 return template
+            self._collection.pop(uri, None)
+            return self._load(template.filename, uri)
         except OSError:
             self._collection.pop(uri, None)
             raise exceptions.TemplateLookupException(
diff --git a/mako/parsetree.py b/mako/parsetree.py
index 4501b49..7ec0c1c 100644
--- a/mako/parsetree.py
+++ b/mako/parsetree.py
@@ -298,10 +298,13 @@ class Tag(Node, metaclass=_TagMeta):
missing = [r for r in required if r not in self.parsed_attributes]
if len(missing):
raise exceptions.CompileException(
- "Missing attribute(s): %s"
- % ",".join([repr(m) for m in missing]),
+ (
+ "Missing attribute(s): %s"
+ % ",".join(repr(m) for m in missing)
+ ),
**self.exception_kwargs,
)
+
self.parent = None
self.nodes = []
@@ -333,9 +336,8 @@ class Tag(Node, metaclass=_TagMeta):
code.undeclared_identifiers
)
expr.append("(%s)" % m.group(1))
- else:
- if x:
- expr.append(repr(x))
+ elif x:
+ expr.append(repr(x))
self.parsed_attributes[key] = " + ".join(expr) or repr("")
elif key in nonexpressions:
if re.search(r"\${.+?}", self.attributes[key]):
@@ -604,13 +606,12 @@ class CallNamespaceTag(Tag):
namespace,
defname,
",".join(
- [
- "%s=%s" % (k, v)
- for k, v in self.parsed_attributes.items()
- if k != "args"
- ]
+ "%s=%s" % (k, v)
+ for k, v in self.parsed_attributes.items()
+ if k != "args"
),
)
+
self.code = ast.PythonCode(self.expression, **self.exception_kwargs)
self.body_decl = ast.FunctionArgs(
attributes.get("args", ""), **self.exception_kwargs
diff --git a/mako/pygen.py b/mako/pygen.py
index b9d4c2e..bf8811c 100644
--- a/mako/pygen.py
+++ b/mako/pygen.py
@@ -96,19 +96,21 @@ class PythonPrinter:
is_comment = line and len(line) and line[0] == "#"
# see if this line should decrease the indentation level
- if not is_comment and (not hastext or self._is_unindentor(line)):
-
- if self.indent > 0:
- self.indent -= 1
- # if the indent_detail stack is empty, the user
- # probably put extra closures - the resulting
- # module wont compile.
- if len(self.indent_detail) == 0:
- # TODO: no coverage here
- raise exceptions.MakoException(
- "Too many whitespace closures"
- )
- self.indent_detail.pop()
+ if (
+ not is_comment
+ and (not hastext or self._is_unindentor(line))
+ and self.indent > 0
+ ):
+ self.indent -= 1
+ # if the indent_detail stack is empty, the user
+ # probably put extra closures - the resulting
+ # module wont compile.
+ if len(self.indent_detail) == 0:
+ # TODO: no coverage here
+ raise exceptions.MakoException(
+ "Too many whitespace closures"
+ )
+ self.indent_detail.pop()
if line is None:
return
@@ -168,13 +170,10 @@ class PythonPrinter:
# if the current line doesnt have one of the "unindentor" keywords,
# return False
match = re.match(r"^\s*(else|elif|except|finally).*\:", line)
- if not match:
- return False
-
- # whitespace matches up, we have a compound indentor,
+ # if True, whitespace matches up, we have a compound indentor,
# and this line has an unindentor, this
# is probably good enough
- return True
+ return bool(match)
# should we decide that its not good enough, heres
# more stuff to check.
@@ -219,11 +218,7 @@ class PythonPrinter:
current_state = self.backslashed or self.triplequoted
- if re.search(r"\\$", line):
- self.backslashed = True
- else:
- self.backslashed = False
-
+ self.backslashed = bool(re.search(r"\\$", line))
triples = len(re.findall(r"\"\"\"|\'\'\'", line))
if triples == 1 or triples % 2 != 0:
self.triplequoted = not self.triplequoted
diff --git a/mako/pyparser.py b/mako/pyparser.py
index 411b1b5..c626622 100644
--- a/mako/pyparser.py
+++ b/mako/pyparser.py
@@ -94,8 +94,7 @@ class FindIdentifiers(_ast_util.NodeVisitor):
     def _expand_tuples(self, args):
         for arg in args:
             if isinstance(arg, _ast.Tuple):
-                for n in arg.elts:
-                    yield n
+                yield from arg.elts
             else:
                 yield arg
@@ -156,15 +155,15 @@ class FindIdentifiers(_ast_util.NodeVisitor):
for name in node.names:
if name.asname is not None:
self._add_declared(name.asname)
+ elif name.name == "*":
+ raise exceptions.CompileException(
+ "'import *' is not supported, since all identifier "
+ "names must be explicitly declared. Please use the "
+ "form 'from <modulename> import <name1>, <name2>, "
+ "...' instead.",
+ **self.exception_kwargs
+ )
else:
- if name.name == "*":
- raise exceptions.CompileException(
- "'import *' is not supported, since all identifier "
- "names must be explicitly declared. Please use the "
- "form 'from <modulename> import <name1>, <name2>, "
- "...' instead.",
- **self.exception_kwargs,
- )
self._add_declared(name.name)
diff --git a/mako/runtime.py b/mako/runtime.py
index f9ce630..28186a9 100644
--- a/mako/runtime.py
+++ b/mako/runtime.py
@@ -482,15 +482,14 @@ class Namespace:
         key = (self, uri)
         if key in self.context.namespaces:
             return self.context.namespaces[key]
-        else:
-            ns = TemplateNamespace(
-                uri,
-                self.context._copy(),
-                templateuri=uri,
-                calling_uri=self._templateuri,
-            )
-            self.context.namespaces[key] = ns
-            return ns
+        ns = TemplateNamespace(
+            uri,
+            self.context._copy(),
+            templateuri=uri,
+            calling_uri=self._templateuri,
+        )
+        self.context.namespaces[key] = ns
+        return ns

     def get_template(self, uri):
         """Return a :class:`.Template` from the given ``uri``.
diff --git a/mako/template.py b/mako/template.py
index 58b5e0f..df2a7ac 100644
--- a/mako/template.py
+++ b/mako/template.py
@@ -357,11 +357,7 @@ class Template:
     ):
         self.cache_impl = cache_impl
         self.cache_enabled = cache_enabled
-        if cache_args:
-            self.cache_args = cache_args
-        else:
-            self.cache_args = {}
-
+        self.cache_args = cache_args or {}
         # transfer deprecated cache_* args
         if cache_type:
             self.cache_args["type"] = cache_type
@@ -636,22 +632,22 @@ class ModuleInfo:
@property
def source(self):
- if self.template_source is not None:
- if self.module._source_encoding and not isinstance(
- self.template_source, str
- ):
- return self.template_source.decode(
- self.module._source_encoding
- )
- else:
- return self.template_source
- else:
+ if self.template_source is None:
data = util.read_file(self.template_filename)
if self.module._source_encoding:
return data.decode(self.module._source_encoding)
else:
return data
+ elif self.module._source_encoding and not isinstance(
+ self.template_source, str
+ ):
+ return self.template_source.decode(
+ self.module._source_encoding
+ )
+ else:
+ return self.template_source
+
def _compile(template, text, filename, generate_magic_comment):
lexer = template.lexer_cls(
diff --git a/mako/util.py b/mako/util.py
index 2c927bf..952655c 100644
--- a/mako/util.py
+++ b/mako/util.py
@@ -26,18 +26,17 @@ class PluginLoader:
     def load(self, name):
         if name in self.impls:
             return self.impls[name]()
-        else:
-            import pkg_resources
+        import pkg_resources

-            for impl in pkg_resources.iter_entry_points(self.group, name):
-                self.impls[name] = impl.load
-                return impl.load()
-            else:
-                from mako import exceptions
+        for impl in pkg_resources.iter_entry_points(self.group, name):
+            self.impls[name] = impl.load
+            return impl.load()
+        else:
+            from mako import exceptions

-                raise exceptions.RuntimeException(
-                    "Can't load plugin %s %s" % (self.group, name)
-                )
+            raise exceptions.RuntimeException(
+                "Can't load plugin %s %s" % (self.group, name)
+            )

     def register(self, name, modulepath, objname):
         def load():
@@ -195,9 +194,8 @@ class LRUCache(dict):
     def setdefault(self, key, value):
         if key in self:
             return self[key]
-        else:
-            self[key] = value
-            return value
+        self[key] = value
+        return value

     def __setitem__(self, key, value):
         item = dict.get(self, key)
@@ -287,7 +285,7 @@ def sorted_dict_repr(d):
"""
keys = list(d.keys())
keys.sort()
- return "{" + ", ".join(["%r: %r" % (k, d[k]) for k in keys]) + "}"
+ return "{" + ", ".join("%r: %r" % (k, d[k]) for k in keys) + "}"
def restore__ast(_ast):
@@ -372,12 +370,8 @@ mako in baz not in mako""",
 def read_file(path, mode="rb"):
-    fp = open(path, mode)
-    try:
-        data = fp.read()
-        return data
-    finally:
-        fp.close()
+    with open(path, mode) as fp:
+        return fp.read()


 def read_python_file(path):
diff --git a/setup.cfg b/setup.cfg
index 63d50ba..f9d0978 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,7 +17,6 @@ classifiers =
     Intended Audience :: Developers
     Programming Language :: Python
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
@@ -31,7 +30,7 @@ project_urls =
 [options]
 packages = find:
-python_requires = >=3.6
+python_requires = >=3.7
 zip_safe = false
 install_requires =