 35 files changed, 52 insertions(+), 52 deletions(-)
@@ -788,7 +788,7 @@ Version 2.1.2
 
 Version 2.1.1
 -------------
-(relased Feb 14, 2016)
+(released Feb 14, 2016)
 
 - Fixed Jython compatibility (#1205)
 - Fixed HTML formatter output with leading empty lines (#1111)
diff --git a/doc/docs/api.rst b/doc/docs/api.rst
index 5bbfe19e..4d330bf8 100644
--- a/doc/docs/api.rst
+++ b/doc/docs/api.rst
@@ -335,7 +335,7 @@ converted to the expected type if possible.
    Intuitively, this is `options.get(optname, default)`, but restricted to
    Boolean value. The Booleans can be represented as string, in order to accept
    Boolean value from the command line arguments. If the key `optname` is
-   present in the dictionnary `options` and is not associated with a Boolean,
+   present in the dictionary `options` and is not associated with a Boolean,
    raise an `OptionError`. If it is absent, `default` is returned instead.
 
    The valid string values for ``True`` are ``1``, ``yes``, ``true`` and
diff --git a/doc/docs/filters.rst b/doc/docs/filters.rst
index ca4942be..5cdcb4c6 100644
--- a/doc/docs/filters.rst
+++ b/doc/docs/filters.rst
@@ -11,7 +11,7 @@ done by filters. The most common example of filters transform each token by
 applying a simple rules such as highlighting the token if it is a TODO or
 another special word, or converting keywords to uppercase to enforce a style
 guide. More complex filters can transform the stream of tokens, such as removing
-the line identation or merging tokens together. It should be noted that pygments
+the line indentation or merging tokens together. It should be noted that pygments
 filters are entirely unrelated to Python's `filter
 <https://docs.python.org/3/library/functions.html#filter>`_.
 
diff --git a/external/rst-directive.py b/external/rst-directive.py
index a82ecfaf..77943be4 100644
--- a/external/rst-directive.py
+++ b/external/rst-directive.py
@@ -58,7 +58,7 @@ from pygments import highlight
 from pygments.lexers import get_lexer_by_name, TextLexer
 
 class Pygments(Directive):
-    """ Source code syntax hightlighting.
+    """ Source code syntax highlighting.
     """
     required_arguments = 1
     optional_arguments = 0
diff --git a/pygments/formatters/_mapping.py b/pygments/formatters/_mapping.py
index 8b5e478e..95f8e566 100755
--- a/pygments/formatters/_mapping.py
+++ b/pygments/formatters/_mapping.py
@@ -2,7 +2,7 @@
     pygments.formatters._mapping
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Formatter mapping definitions. This file is generated by itself. Everytime
+    Formatter mapping definitions. This file is generated by itself. Every time
     you change something on a builtin formatter definition, run this script from
     the formatters folder to update it.
 
@@ -67,7 +67,7 @@ if __name__ == '__main__':  # pragma: no cover
         content = fp.read()
         # replace crnl to nl for Windows.
         #
-        # Note that, originally, contributers should keep nl of master
+        # Note that, originally, contributors should keep nl of master
         # repository, for example by using some kind of automatic
         # management EOL, like `EolExtension
         # <https://www.mercurial-scm.org/wiki/EolExtension>`.
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 33d738a8..dcbfcb8e 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -618,7 +618,7 @@ class RegexLexer(Lexer, metaclass=RegexLexerMeta):
         """
         Split ``text`` into (tokentype, text) pairs.
 
-        ``stack`` is the inital stack (default: ``['root']``)
+        ``stack`` is the initial stack (default: ``['root']``)
         """
         pos = 0
         tokendefs = self._tokens
@@ -792,7 +792,7 @@ def do_insertions(insertions, tokens):
     # iterate over the token stream where we want to insert
     # the tokens from the insertion list.
     for i, t, v in tokens:
-        # first iteration. store the postition of first item
+        # first iteration. store the position of first item
         if realpos is None:
             realpos = i
             oldi = 0
diff --git a/pygments/lexers/_cocoa_builtins.py b/pygments/lexers/_cocoa_builtins.py
index 72d86db1..5f2a5223 100644
--- a/pygments/lexers/_cocoa_builtins.py
+++ b/pygments/lexers/_cocoa_builtins.py
@@ -5,7 +5,7 @@
     This file defines a set of types used across Cocoa frameworks from Apple.
     There is a list of @interfaces, @protocols and some other (structs, unions)
 
-    File may be also used as standalone generator for aboves.
+    File may be also used as standalone generator for above.
 
     :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
     :license: BSD, see LICENSE for details.
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index cccf58f2..0230d4e5 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -2,7 +2,7 @@
     pygments.lexers._mapping
     ~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Lexer mapping definitions. This file is generated by itself. Everytime
+    Lexer mapping definitions. This file is generated by itself. Every time
     you change something on a builtin lexer definition, run this script from
     the lexers folder to update it.
 
@@ -563,7 +563,7 @@ if __name__ == '__main__':  # pragma: no cover
         content = fp.read()
         # replace crnl to nl for Windows.
         #
-        # Note that, originally, contributers should keep nl of master
+        # Note that, originally, contributors should keep nl of master
         # repository, for example by using some kind of automatic
         # management EOL, like `EolExtension
         # <https://www.mercurial-scm.org/wiki/EolExtension>`.
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index e5f795f4..37052123 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -861,7 +861,7 @@ class TasmLexer(RegexLexer):
             include('punctuation'),
             (register, Name.Builtin),
             (identifier, Name.Variable),
-            # Do not match newline when it's preceeded by a backslash
+            # Do not match newline when it's preceded by a backslash
             (r'(\\)(\s*)(;.*)([\r\n])', bygroups(Text, Whitespace, Comment.Single, Whitespace)),
             (r'[\r\n]+', Whitespace, '#pop'),
             include('whitespace')
diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py
index 4adc2b3f..168c15da 100644
--- a/pygments/lexers/c_cpp.py
+++ b/pygments/lexers/c_cpp.py
@@ -59,7 +59,7 @@ class CFamilyLexer(RegexLexer):
             (r'\\\n', Text),  # line continuation
             (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
             (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
-            # Open until EOF, so no ending delimeter
+            # Open until EOF, so no ending delimiter
             (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
         ],
         'statements': [
diff --git a/pygments/lexers/cddl.py b/pygments/lexers/cddl.py
index 5c949802..de603769 100644
--- a/pygments/lexers/cddl.py
+++ b/pygments/lexers/cddl.py
@@ -122,7 +122,7 @@ class CddlLexer(RegexLexer):
             include("commentsandwhitespace"),
             # tag types
             (r"#(\d\.{uint})?".format(uint=_re_uint), Keyword.Type),  # type or any
-            # occurence
+            # occurrence
             (
                 r"({uint})?(\*)({uint})?".format(uint=_re_uint),
                 bygroups(Number, Operator, Number),
diff --git a/pygments/lexers/devicetree.py b/pygments/lexers/devicetree.py
index cb25a330..68778a7a 100644
--- a/pygments/lexers/devicetree.py
+++ b/pygments/lexers/devicetree.py
@@ -51,7 +51,7 @@ class DevicetreeLexer(RegexLexer):
             (r'\\\n', Text),  # line continuation
             (r'//(\n|[\w\W]*?[^\\]\n)', Comment.Single),
             (r'/(\\\n)?[*][\w\W]*?[*](\\\n)?/', Comment.Multiline),
-            # Open until EOF, so no ending delimeter
+            # Open until EOF, so no ending delimiter
             (r'/(\\\n)?[*][\w\W]*', Comment.Multiline),
         ],
         'statements': [
diff --git a/pygments/lexers/ecl.py b/pygments/lexers/ecl.py
index 47cad222..bca852ea 100644
--- a/pygments/lexers/ecl.py
+++ b/pygments/lexers/ecl.py
@@ -124,7 +124,7 @@ class ECLLexer(RegexLexer):
 
     def analyse_text(text):
         """This is very difficult to guess relative to other business languages.
-           -> in conjuction with BEGIN/END seems relatively rare though."""
+           -> in conjunction with BEGIN/END seems relatively rare though."""
         result = 0
         if '->' in text:
diff --git a/pygments/lexers/grammar_notation.py b/pygments/lexers/grammar_notation.py
index ff57c999..e7ff2b24 100644
--- a/pygments/lexers/grammar_notation.py
+++ b/pygments/lexers/grammar_notation.py
@@ -39,7 +39,7 @@ class BnfLexer(RegexLexer):
     * We don't distinguish any operators and punctuation except
      `::=`.
 
-    Though these desision making might cause too minimal highlighting
+    Though these decision making might cause too minimal highlighting
     and you might be disappointed, but it is reasonable for us.
 
     .. versionadded:: 2.1
diff --git a/pygments/lexers/haxe.py b/pygments/lexers/haxe.py
index ee587e99..2fdcfb30 100644
--- a/pygments/lexers/haxe.py
+++ b/pygments/lexers/haxe.py
@@ -906,7 +906,7 @@ class HxmlLexer(RegexLexer):
 
     tokens = {
         'root': [
-            # Seperator
+            # Separator
             (r'(--)(next)', bygroups(Punctuation, Generic.Heading)),
             # Compiler switches with one dash
             (r'(-)(prompt|debug|v)', bygroups(Punctuation, Keyword.Keyword)),
diff --git a/pygments/lexers/html.py b/pygments/lexers/html.py
index 2e29f453..706cda39 100644
--- a/pygments/lexers/html.py
+++ b/pygments/lexers/html.py
@@ -285,7 +285,7 @@ class HamlLexer(ExtendedRegexLexer):
     flags = re.IGNORECASE
     # Haml can include " |\n" anywhere,
     # which is ignored and used to wrap long lines.
-    # To accomodate this, use this custom faux dot instead.
+    # To accommodate this, use this custom faux dot instead.
     _dot = r'(?: \|\n(?=.* \|)|.)'
 
     # In certain places, a comma at the end of the line
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 4ffc5c7f..20ac9d65 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -334,7 +334,7 @@ class ScalaLexer(RegexLexer):
             (r'(\.)(type)\b', bygroups(Punctuation, Keyword)),
         ],
         'inline': [
-            # inline is a soft modifer, only highlighted if followed by if,
+            # inline is a soft modifier, only highlighted if followed by if,
             # match or parameters.
             (r'\b(inline)(?=\s+(%s|%s)\s*:)' % (plainid, backQuotedId),
              Keyword),
diff --git a/pygments/lexers/lilypond.py b/pygments/lexers/lilypond.py
index 9705cbad..302d6557 100644
--- a/pygments/lexers/lilypond.py
+++ b/pygments/lexers/lilypond.py
@@ -99,7 +99,7 @@ class LilyPondLexer(SchemeLexer):
             # - chord: < >,
             # - bar check: |,
             # - dot in nested properties: \revert NoteHead.color,
-            # - equals sign in assignemnts and lists for various commands:
+            # - equals sign in assignments and lists for various commands:
             #   \override Stem.color = red,
             # - comma as alternative syntax for lists: \time 3,3,2 4/4,
             # - colon in tremolos: c:32,
diff --git a/pygments/lexers/lisp.py b/pygments/lexers/lisp.py
index 5628e336..bf59ae77 100644
--- a/pygments/lexers/lisp.py
+++ b/pygments/lexers/lisp.py
@@ -119,7 +119,7 @@ class SchemeLexer(RegexLexer):
             (r';.*?$', Comment.Single),
             # multi-line comment
             (r'#\|', Comment.Multiline, 'multiline-comment'),
-            # commented form (entire sexpr folliwng)
+            # commented form (entire sexpr following)
             (r'#;\s*\(', Comment, 'commented-form'),
             # signifies that the program text that follows is written with the
             # lexical and datum syntax described in r6rs
diff --git a/pygments/lexers/mime.py b/pygments/lexers/mime.py
index 6dcc8155..c9f6d442 100644
--- a/pygments/lexers/mime.py
+++ b/pygments/lexers/mime.py
@@ -88,7 +88,7 @@ class MIMELexer(RegexLexer):
             pos_body_start = pos_body_start + 1
             entire_body = entire_body[1:]
 
-        # if it is not a mulitpart
+        # if it is not a multipart
         if not self.content_type.startswith("multipart") or not self.boundary:
             for i, t, v in self.get_bodypart_tokens(entire_body):
                 yield pos_body_start + i, t, v
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index 60bd8b9d..5fc9f4be 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -55,7 +55,7 @@ class SMLLexer(RegexLexer):
     # A character constant is a sequence of the form #s, where s is a string
    # constant denoting a string of size one character. This setup just parses
     # the entire string as either a String.Double or a String.Char (depending
-    # on the argument), even if the String.Char is an erronous
+    # on the argument), even if the String.Char is an erroneous
     # multiple-character string.
     def stringy(whatkind):
         return [
diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py
index 0d1ac3fd..1b2d2a95 100644
--- a/pygments/lexers/pascal.py
+++ b/pygments/lexers/pascal.py
@@ -363,7 +363,7 @@ class DelphiLexer(Lexer):
                 elif lowercase_name in self.keywords:
                     token = Keyword
                     # if we are in a special block and a
-                    # block ending keyword occours (and the parenthesis
+                    # block ending keyword occurs (and the parenthesis
                     # is balanced) we end the current block context
                     if (in_function_block or in_property_block) and \
                             lowercase_name in self.BLOCK_KEYWORDS and \
diff --git a/pygments/lexers/praat.py b/pygments/lexers/praat.py
index 8fbae8c5..c76ea306 100644
--- a/pygments/lexers/praat.py
+++ b/pygments/lexers/praat.py
@@ -287,7 +287,7 @@ class PraatLexer(RegexLexer):
             (r'(boolean)(\s+\S+\s*)(0|1|"?(?:yes|no)"?)',
              bygroups(Keyword, Text, Name.Variable)),
 
-            # Ideally processing of the number would happend in the 'number'
+            # Ideally processing of the number would happen in the 'number'
             # but that doesn't seem to work
             (r'(real|natural|positive|integer)([ \t]+\S+[ \t]*)([+-]?)(\d+(?:\.\d*)?'
             r'(?:[eE][-+]?\d+)?%?)',
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
index 21c81362..7d255813 100644
--- a/pygments/lexers/prolog.py
+++ b/pygments/lexers/prolog.py
@@ -200,9 +200,9 @@ class LogtalkLexer(RegexLexer):
             (r'(>>|<<|/\\|\\\\|\\)', Operator),
             # Predicate aliases
             (r'\bas\b', Operator),
-            # Arithemtic evaluation
+            # Arithmetic evaluation
             (r'\bis\b', Keyword),
-            # Arithemtic comparison
+            # Arithmetic comparison
             (r'(=:=|=\\=|<|=<|>=|>)', Operator),
             # Term creation and decomposition
             (r'=\.\.', Operator),
@@ -213,7 +213,7 @@ class LogtalkLexer(RegexLexer):
             # Evaluable functors
             (r'(//|[-+*/])', Operator),
             (r'\b(e|pi|div|mod|rem)\b', Operator),
-            # Other arithemtic functors
+            # Other arithmetic functors
             (r'\b\*\*\b', Operator),
             # DCG rules
             (r'-->', Operator),
diff --git a/pygments/lexers/rust.py b/pygments/lexers/rust.py
index d01f73e4..06c5cd65 100644
--- a/pygments/lexers/rust.py
+++ b/pygments/lexers/rust.py
@@ -104,7 +104,7 @@ class RustLexer(RegexLexer):
             # Prelude (taken from Rust's src/libstd/prelude.rs)
             builtin_funcs_types,
             builtin_macros,
-            # Path seperators, so types don't catch them.
+            # Path separators, so types don't catch them.
             (r'::\b', Text),
             # Types in positions.
             (r'(?::|->)', Text, 'typename'),
diff --git a/pygments/lexers/scripting.py b/pygments/lexers/scripting.py
index 9a1e63d6..ad089e5d 100644
--- a/pygments/lexers/scripting.py
+++ b/pygments/lexers/scripting.py
@@ -798,7 +798,7 @@ class RexxLexer(RegexLexer):
 
     def analyse_text(text):
         """
-        Check for inital comment and patterns that distinguish Rexx from other
+        Check for initial comment and patterns that distinguish Rexx from other
         C-like languages.
         """
         if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
@@ -975,7 +975,7 @@ class EasytrieveLexer(RegexLexer):
     #   * apostrophe (')
     #   * period (.)
     #   * comma (,)
-    #   * paranthesis ( and )
+    #   * parenthesis ( and )
     #   * colon (:)
     #
     # Additionally words end once a '*' appears, indicatins a comment.
diff --git a/pygments/lexers/textedit.py b/pygments/lexers/textedit.py
index 0e567bca..169fdae9 100644
--- a/pygments/lexers/textedit.py
+++ b/pygments/lexers/textedit.py
@@ -83,7 +83,7 @@ class SedLexer(RegexLexer):
     mimetypes = ['text/x-sed']
     flags = re.MULTILINE
 
-    # Match the contents within delimeters such as /<contents>/
+    # Match the contents within delimiters such as /<contents>/
     _inside_delims = r'((?:(?:\\[^\n]|[^\\])*?\\\n)*?(?:\\.|[^\\])*?)'
 
     tokens = {
diff --git a/pygments/lexers/urbi.py b/pygments/lexers/urbi.py
index d9c1c9f8..25ad3cd0 100644
--- a/pygments/lexers/urbi.py
+++ b/pygments/lexers/urbi.py
@@ -51,7 +51,7 @@ class UrbiscriptLexer(ExtendedRegexLexer):
             ctx.pos += len(result)
             return
 
-        # if blob is well formated, yield as Escape
+        # if blob is well formatted, yield as Escape
         blob_text = blob_start + ctx.text[match.end():match.end()+blob_size] + ")"
         yield match.start(), String.Escape, blob_text
         ctx.pos = match.end() + blob_size + 1  # +1 is the ending ")"
@@ -78,9 +78,9 @@ class UrbiscriptLexer(ExtendedRegexLexer):
                 'struct', 'template', 'typedef', 'typeid', 'typename',
                 'union', 'unsigned', 'using', 'virtual', 'volatile',
                 'wchar_t'), suffix=r'\b'), Keyword.Reserved),
-            # deprecated keywords, use a meaningfull token when available
+            # deprecated keywords, use a meaningful token when available
             (r'(emit|foreach|internal|loopn|static)\b', Keyword),
-            # ignored keywords, use a meaningfull token when available
+            # ignored keywords, use a meaningful token when available
             (r'(private|protected|public)\b', Keyword),
             (r'(var|do|const|function|class)\b', Keyword.Declaration),
             (r'(true|false|nil|void)\b', Keyword.Constant),
diff --git a/pygments/scanner.py b/pygments/scanner.py
index 5f32a22c..a17b7944 100644
--- a/pygments/scanner.py
+++ b/pygments/scanner.py
@@ -72,7 +72,7 @@ class Scanner:
     def scan(self, pattern):
         """
         Scan the text for the given pattern and update pos/match
-        and related fields. The return value is a boolen that
+        and related fields. The return value is a boolean that
         indicates if the pattern matched. The matched value is
         stored on the instance as ``match``, the last value is
         stored as ``last``. ``start_pos`` is the position of the
diff --git a/pygments/styles/arduino.py b/pygments/styles/arduino.py
index 4dfe0f3c..a8cc03cc 100644
--- a/pygments/styles/arduino.py
+++ b/pygments/styles/arduino.py
@@ -16,7 +16,7 @@ from pygments.token import Keyword, Name, Comment, String, Error, \
 class ArduinoStyle(Style):
     """
     The Arduino® language style. This style is designed to highlight the
-    Arduino source code, so exepect the best results with it.
+    Arduino source code, so expect the best results with it.
     """
 
     background_color = "#ffffff"
diff --git a/pygments/unistring.py b/pygments/unistring.py
index 2872985c..7dfd2646 100644
--- a/pygments/unistring.py
+++ b/pygments/unistring.py
@@ -122,7 +122,7 @@ if __name__ == '__main__':  # pragma: no cover
             c = chr(code)
             cat = unicodedata.category(c)
             if ord(c) == 0xdc00:
-                # Hack to avoid combining this combining with the preceeding high
+                # Hack to avoid combining this combining with the preceding high
                 # surrogate, 0xdbff, when doing a repr.
                 c = '\\' + c
             elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
diff --git a/scripts/check_crlf.py b/scripts/check_crlf.py
index 055446cf..7171978f 100644
--- a/scripts/check_crlf.py
+++ b/scripts/check_crlf.py
@@ -3,7 +3,7 @@
     Checker for line endings
     ~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Make sure Python (.py) and Bash completition (.bashcomp) files do not
+    Make sure Python (.py) and Bash completion (.bashcomp) files do not
     contain CR/LF newlines.
 
     :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
diff --git a/scripts/count_token_references.py b/scripts/count_token_references.py
index 85fcdbe3..e235cc40 100755
--- a/scripts/count_token_references.py
+++ b/scripts/count_token_references.py
@@ -32,8 +32,8 @@ be used to find typos in token names, as those tokens are only used by one lexer
 .. option:: -s, --subtokens
 
     When ``--subtoken`` is given each token is also counted for each of its
-    parent tokens. I.e. if we have 10 occurences of the token
-    ``Token.Literal.Number.Integer`` and 10 occurences of the token
+    parent tokens. I.e. if we have 10 occurrences of the token
+    ``Token.Literal.Number.Integer`` and 10 occurrences of the token
     ``Token.Literal.Number.Hex`` but none for ``Token.Literal.Number``,
     with ``--subtoken`` ``Token.Literal.Number`` would be counted as
     having 20 references.
diff --git a/scripts/debug_lexer.py b/scripts/debug_lexer.py
index 5cc0ef6f..0633dd70 100755
--- a/scripts/debug_lexer.py
+++ b/scripts/debug_lexer.py
@@ -36,7 +36,7 @@ class DebuggingRegexLexer(ExtendedRegexLexer):
         """
         Split ``text`` into (tokentype, text) pairs.
 
-        ``stack`` is the inital stack (default: ``['root']``)
+        ``stack`` is the initial stack (default: ``['root']``)
         """
         tokendefs = self._tokens
         self.ctx = ctx = LexerContext(text, 0)
diff --git a/scripts/pylintrc b/scripts/pylintrc
index aa04e12e..b602eaae 100644
--- a/scripts/pylintrc
+++ b/scripts/pylintrc
@@ -1,6 +1,6 @@
 # lint Python modules using external checkers.
 #
-# This is the main checker controling the other ones and the reports
+# This is the main checker controlling the other ones and the reports
 # generation. It is itself both a raw checker and an astng checker in order
 # to:
 # * handle message activation / deactivation at the module level
@@ -66,12 +66,12 @@ include-ids=yes
 # written in a file name "pylint_global.[txt|html]".
 files-output=no
 
-# Tells wether to display a full report or only the messages
+# Tells whether to display a full report or only the messages
 reports=yes
 
 # Python expression which should return a note less than 10 (10 is the highest
 # note).You have access to the variables errors warning, statement which
-# respectivly contain the number of errors / warnings messages and the total
+# respectively contain the number of errors / warnings messages and the total
 # number of statements analyzed. This is used by the global evaluation report
 # (R0004).
 evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
@@ -91,11 +91,11 @@ comment=no
 # * unused variables / imports
 # * undefined variables
 # * redefinition of variable from builtins or from an outer scope
-# * use of variable before assigment
+# * use of variable before assignment
 #
 [VARIABLES]
 
-# Tells wether we should check for unused import in __init__ files.
+# Tells whether we should check for unused import in __init__ files.
 init-import=no
 
 # A regular expression matching names used for dummy variables (i.e. not used).
@@ -110,7 +110,7 @@ additional-builtins=
 #
 [TYPECHECK]
 
-# Tells wether missing members accessed in mixin class should be ignored. A
+# Tells whether missing members accessed in mixin class should be ignored. A
 # mixin class is detected if its name ends with "mixin" (case insensitive).
 ignore-mixin-members=yes
 
@@ -118,7 +118,7 @@ ignore-mixin-members=yes
 # access to some undefined attributes.
 zope=no
 
-# List of members which are usually get through zope's acquisition mecanism and
+# List of members which are usually get through zope's acquisition mechanism and
 # so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
 acquired-members=REQUEST,acl_users,aq_parent
 
@@ -126,7 +126,7 @@ acquired-members=REQUEST,acl_users,aq_parent
 # checks for :
 # * doc strings
 # * modules / classes / functions / methods / arguments / variables name
-# * number of arguments, local variables, branchs, returns and statements in
+# * number of arguments, local variables, branches, returns and statements in
 # functions, methods
 # * required module attributes
 # * dangerous default values as arguments
@@ -241,7 +241,7 @@ int-import-graph=
 # checks for :
 # * methods without self as first argument
 # * overridden methods signature
-# * access only to existant members via self
+# * access only to existent members via self
 # * attributes not defined in the __init__ method
 # * supported interfaces implementation
 # * unreachable code
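
Note: the doc/docs/api.rst hunk above documents the behaviour of `pygments.util.get_bool_opt`. A minimal sketch of that behaviour, based only on what the patched docstring states (the option name `stripnl` is just an arbitrary illustration, not part of the patch):

    from pygments.util import OptionError, get_bool_opt

    # A real boolean, or a boolean-like string from the command line, is accepted.
    assert get_bool_opt({'stripnl': True}, 'stripnl') is True
    assert get_bool_opt({'stripnl': 'yes'}, 'stripnl') is True

    # An absent key falls back to the default, like options.get(optname, default).
    assert get_bool_opt({}, 'stripnl', False) is False

    # A key that is present but not associated with a boolean-like value
    # raises OptionError, as the docstring describes.
    try:
        get_bool_opt({'stripnl': 'maybe'}, 'stripnl')
    except OptionError as err:
        print('rejected:', err)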