author     Matthäus G. Chajdas <Anteru@users.noreply.github.com>    2021-08-17 18:16:58 +0200
committer  GitHub <noreply@github.com>                              2021-08-17 18:16:58 +0200
commit     1a7994eef7ff3ac63875f02348e07f4c13278359 (patch)
tree       31cde32ce24b9fb67a12eb42dd43e4002bb6ebac /scripts
parent     ed7d2aa947eae50fbaf7b60bbdbdb05f8bbd2c4b (diff)
download   pygments-git-1a7994eef7ff3ac63875f02348e07f4c13278359.tar.gz
Improve checks. (#1884)
* Fix lots of small errors.
* Remove the line length check.
* Add an option to skip lexers with no aliases.
* Run checks in make check.
* Add a new CI target.
Diffstat (limited to 'scripts')
-rwxr-xr-x  scripts/check_sources.py                  4
-rwxr-xr-x  scripts/count_token_references.py         2
-rw-r--r--  scripts/detect_missing_analyse_text.py   14
3 files changed, 13 insertions, 7 deletions
diff --git a/scripts/check_sources.py b/scripts/check_sources.py
index 98984208..cd3409ff 100755
--- a/scripts/check_sources.py
+++ b/scripts/check_sources.py
@@ -59,8 +59,6 @@ def check_syntax(fn, lines):
 @checker('.py')
 def check_style_and_encoding(fn, lines):
     for lno, line in enumerate(lines):
-        if len(line) > 110:
-            yield lno+1, "line too long"
         if is_const_re.search(line):
             yield lno+1, 'using == None/True/False'
@@ -79,7 +77,7 @@ def check_fileheader(fn, lines):
         llist.append(line)
         if lno == 0:
             if line != '"""' and line != 'r"""':
-                yield 2, 'missing docstring begin (""")'
+                yield 2, f'missing docstring begin ("""), found {line!r}'
             else:
                 docopen = True
         elif docopen:
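
The checkers touched above follow one pattern: a generator that yields (line number, message) pairs for a single file. A minimal, self-contained sketch of that pattern, using a hypothetical driver and a simplified is_const_re rather than the actual check_sources.py harness:

import re

# Simplified stand-in for the is_const_re referenced above (an assumption,
# not the script's actual pattern): flag comparisons against None/True/False.
is_const_re = re.compile(r'(==|!=)\s*(None|True|False)\b')

def check_style_and_encoding(fn, lines):
    # Yield (line_number, message) pairs, mirroring the checkers in the diff.
    for lno, line in enumerate(lines):
        if is_const_re.search(line):
            yield lno + 1, 'using == None/True/False'

# Illustrative driver:
sample = ['x = 1\n', 'if x == None:\n', '    pass\n']
for lno, msg in check_style_and_encoding('sample.py', sample):
    print(f'sample.py:{lno}: {msg}')   # -> sample.py:2: using == None/True/False
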
diff --git a/scripts/count_token_references.py b/scripts/count_token_references.py
index ee4f8fab..85fcdbe3 100755
--- a/scripts/count_token_references.py
+++ b/scripts/count_token_references.py
@@ -153,7 +153,7 @@ def find_token_references(lexer_sources, args):
     it searches for the regular expression ``\\bInteger.Long\\b``. This
     won't work reliably for top level token like ``Token.String`` since this
     is often referred to as ``String``, but searching for ``\\bString\\b``
-    yields to many false positives.
+    yields too many false positives.
     """

     # Maps token to :class:`TokenCount` objects.
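
The docstring edited above describes searching lexer sources for a word-bounded token name. A rough illustration of that kind of search, not taken from the script itself:

import re

# A sub-token such as Integer.Long can be located with a word-boundary
# regex, as the docstring describes.
source = "(r'\\d+[lL]', Number.Integer.Long),"
print(bool(re.search(r'\bInteger.Long\b', source)))              # True

# A top-level name such as String matches far more often, hence the
# docstring's warning about false positives.
print(bool(re.search(r'\bString\b', "a plain String in prose")))  # True
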
diff --git a/scripts/detect_missing_analyse_text.py b/scripts/detect_missing_analyse_text.py
index de925c14..8b0d14b3 100644
--- a/scripts/detect_missing_analyse_text.py
+++ b/scripts/detect_missing_analyse_text.py
@@ -11,13 +11,15 @@ import sys
 from pygments.lexers import get_all_lexers, find_lexer_class
 from pygments.lexer import Lexer
+import argparse

-def main():
+
+def main(args):
     uses = {}
     for name, aliases, filenames, mimetypes in get_all_lexers():
         cls = find_lexer_class(name)
-        if not cls.aliases:
+        if not cls.aliases and not args.skip_no_aliases:
             print(cls, "has no aliases")
         for f in filenames:
             if f not in uses:
@@ -39,4 +41,10 @@ def main():
 if __name__ == '__main__':
-    sys.exit(main())
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--skip-no-aliases',
+                        help='Skip checks for a lexer with no aliases',
+                        action='store_true',
+                        default=False)
+    args = parser.parse_args()
+    sys.exit(main(args))
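
With this change, the script still runs without arguments, and the new flag suppresses the "has no aliases" warnings; the invocation would presumably look like this (paths assume the repository root):

python scripts/detect_missing_analyse_text.py
python scripts/detect_missing_analyse_text.py --skip-no-aliases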