Diffstat:
 pygments/formatters/img.py          |  2 +-
 pygments/lexers/_cocoa_builtins.py  | 10 +++++-----
 pygments/lexers/_php_builtins.py    |  2 +-
 pygments/lexers/apl.py              |  8 ++++----
 pygments/lexers/asm.py              |  6 +++---
 pygments/lexers/bibtex.py           |  6 +++---
 pygments/lexers/c_cpp.py            |  8 ++++----
 pygments/lexers/c_like.py           |  4 ++--
 pygments/lexers/dotnet.py           | 20 ++++++++++----------
 pygments/lexers/dylan.py            |  6 +++---
 pygments/lexers/erlang.py           |  4 ++--
 pygments/lexers/grammar_notation.py |  2 +-
 pygments/lexers/haskell.py          |  4 ++--
 pygments/lexers/haxe.py             | 10 +++++-----
 pygments/lexers/idl.py              | 12 ++++++------
 pygments/lexers/inferno.py          |  2 +-
 pygments/lexers/j.py                |  4 ++--
 pygments/lexers/matlab.py           |  4 ++--
 pygments/lexers/ml.py               |  2 +-
 pygments/lexers/objective.py        | 28 ++++++++++++++--------------
 pygments/lexers/parsers.py          | 12 ++++++------
 pygments/lexers/pascal.py           | 10 +++++-----
 pygments/lexers/pawn.py             |  8 ++++----
 pygments/lexers/prolog.py           |  8 ++++----
 pygments/lexers/qvt.py              | 10 +++++-----
 pygments/lexers/rebol.py            | 12 ++++++------
 pygments/lexers/robotframework.py   |  2 +-
 pygments/lexers/testing.py          |  2 +-
 pygments/lexers/textfmts.py         | 10 +++++-----
 pygments/lexers/varnish.py          |  4 ++--
 tests/run.py                        | 13 +++++++------
 31 files changed, 118 insertions(+), 117 deletions(-)
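
This commit is one mechanical change applied across the tree: string literals that are used as regular expressions gain an `r` prefix. Python 3.6 began emitting DeprecationWarning for unrecognised escape sequences such as `\w` or `\s` in ordinary string literals; the old spellings worked only because Python leaves an unknown escape in place (so `'\w'` and `r'\w'` compare equal), and that fallback is exactly what is being deprecated. A minimal sketch of the warning, assuming CPython 3.6-3.11 (3.12 issues SyntaxWarning instead):

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # The compiled source reads: pattern = '\w+'
        compile("pattern = '\\w+'", "<demo>", "exec")
    print([str(w.message) for w in caught])  # invalid escape sequence '\w'

    # The raw-string spelling is warning-free and denotes the same text:
    compile("pattern = r'\\w+'", "<demo>", "exec")
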
diff --git a/pygments/formatters/img.py b/pygments/formatters/img.py
index 2fb0dea5..6fafc476 100644
--- a/pygments/formatters/img.py
+++ b/pygments/formatters/img.py
@@ -237,7 +237,7 @@ class ImageFormatter(Formatter):
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
- Default: "Bitstream Vera Sans Mono" on Windows, Courier New on \*nix
+ Default: "Bitstream Vera Sans Mono" on Windows, Courier New on \\*nix
`font_size`
The font size in points to be used.
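
The img.py hunk is the one fix that lives in documentation rather than a regex: `\*` in the class docstring (apparently there to stop reST from reading `*nix` as markup) is just as much an invalid escape as `\w`, and doubling the backslash keeps the rendered text identical. A one-line check:

    # Both spellings contain a literal backslash before the asterisk:
    assert "Courier New on \\*nix" == r"Courier New on \*nix"
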
diff --git a/pygments/lexers/_cocoa_builtins.py b/pygments/lexers/_cocoa_builtins.py
index 064167ff..f55e9dd7 100644
--- a/pygments/lexers/_cocoa_builtins.py
+++ b/pygments/lexers/_cocoa_builtins.py
@@ -41,23 +41,23 @@ if __name__ == '__main__': # pragma: no cover
headerFilePath = frameworkHeadersDir + f
content = open(headerFilePath).read()
- res = re.findall('(?<=@interface )\w+', content)
+ res = re.findall(r'(?<=@interface )\w+', content)
for r in res:
all_interfaces.add(r)
- res = re.findall('(?<=@protocol )\w+', content)
+ res = re.findall(r'(?<=@protocol )\w+', content)
for r in res:
all_protocols.add(r)
- res = re.findall('(?<=typedef enum )\w+', content)
+ res = re.findall(r'(?<=typedef enum )\w+', content)
for r in res:
all_primitives.add(r)
- res = re.findall('(?<=typedef struct )\w+', content)
+ res = re.findall(r'(?<=typedef struct )\w+', content)
for r in res:
all_primitives.add(r)
- res = re.findall('(?<=typedef const struct )\w+', content)
+ res = re.findall(r'(?<=typedef const struct )\w+', content)
for r in res:
all_primitives.add(r)
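
_cocoa_builtins.py is the self-updating script that scrapes interface, protocol and typedef names out of the Cocoa framework headers. A toy version of the same scrape, run on a made-up header snippet, shows why the raw `\w` matters:

    import re

    header = """
    @interface NSString : NSObject
    @protocol NSCopying
    """

    # The fixed-width look-behind anchors each match to its declaration
    # keyword without consuming it:
    print(re.findall(r'(?<=@interface )\w+', header))  # ['NSString']
    print(re.findall(r'(?<=@protocol )\w+', header))   # ['NSCopying']
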
diff --git a/pygments/lexers/_php_builtins.py b/pygments/lexers/_php_builtins.py
index fec3286a..bd4b7d99 100644
--- a/pygments/lexers/_php_builtins.py
+++ b/pygments/lexers/_php_builtins.py
@@ -4688,7 +4688,7 @@ if __name__ == '__main__': # pragma: no cover
PHP_MANUAL_URL = 'http://us3.php.net/distributions/manual/php_manual_en.tar.gz'
PHP_MANUAL_DIR = './php-chunked-xhtml/'
PHP_REFERENCE_GLOB = 'ref.*'
- PHP_FUNCTION_RE = '<a href="function\..*?\.html">(.*?)</a>'
+ PHP_FUNCTION_RE = r'<a href="function\..*?\.html">(.*?)</a>'
PHP_MODULE_RE = '<title>(.*?) Functions</title>'
def get_php_functions():
diff --git a/pygments/lexers/apl.py b/pygments/lexers/apl.py
index b3414cc0..4bb88ae3 100644
--- a/pygments/lexers/apl.py
+++ b/pygments/lexers/apl.py
@@ -71,14 +71,14 @@ class APLLexer(RegexLexer):
#
# Numbers
# =======
- (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
- u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
+ (u'¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞)'
+ u'([Jj]¯?(0[Xx][0-9A-Fa-f]+|[0-9]*\\.?[0-9]+([Ee][+¯]?[0-9]+)?|¯|∞))?',
Number),
#
# Operators
# ==========
- (u'[\.\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
- (u'[+\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
+ (u'[\\.\\\\\\/⌿⍀¨⍣⍨⍠⍤∘]', Name.Attribute), # closest token type
+ (u'[+\\-×÷⌈⌊∣|⍳?*⍟○!⌹<≤=>≥≠≡≢∊⍷∪∩~∨∧⍱⍲⍴,⍪⌽⊖⍉↑↓⊂⊃⌷⍋⍒⊤⊥⍕⍎⊣⊢⍁⍂≈⌸⍯↗]',
Operator),
#
# Constant
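
apl.py takes the other route: rather than adding `r` prefixes, the backslashes inside the `u'...'` literals are doubled, presumably because the codebase still supported Python 2, where there is no spelling of a raw unicode literal that also parses on Python 3 (`ur'...'` is a syntax error there). Either spelling produces the same pattern:

    import re

    # Six backslashes in the source collapse to three in the value,
    # i.e. the character class [\.\\\/] matching '.', '\' and '/':
    assert u'[\\.\\\\\\/]' == r'[\.\\\/]'
    assert re.match(u'[\\.\\\\\\/]', '\\')  # a lone backslash matches
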
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 5b8cab80..9e854581 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -35,7 +35,7 @@ class GasLexer(RegexLexer):
#: optional Comment or Whitespace
string = r'"(\\"|[^"])*"'
char = r'[\w$.@-]'
- identifier = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'
+ identifier = r'(?:[a-zA-Z$_]' + char + r'*|\.' + char + '+)'
number = r'(?:0[xX][a-zA-Z0-9]+|\d+)'
tokens = {
@@ -256,7 +256,7 @@ class HsailLexer(RegexLexer):
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(ieeefloat, Number.Float),
(float, Number.Float),
- ('\d+', Number.Integer),
+ (r'\d+', Number.Integer),
(r'[=<>{}\[\]()*.,:;!]|x\b', Punctuation)
],
@@ -350,7 +350,7 @@ class LlvmLexer(RegexLexer):
include('whitespace'),
# Before keywords, because keywords are valid label names :(...
- (identifier + '\s*:', Name.Label),
+ (identifier + r'\s*:', Name.Label),
include('keyword'),
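
asm.py introduces a pattern that recurs throughout the diff: regexes concatenated from several literals, where the `r` prefix applies to each literal separately. Here only the piece containing `\.` needs the prefix; the escape-free `'+)'` can stay plain. A sketch of the per-literal scope, assuming CPython's compile-time warning:

    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # The middle piece holds '\.' without a raw prefix:
        compile(r"ident = r'(?:[a-zA-Z$_]' + char + '*|\.' + char + '+)'",
                "<demo>", "exec")
    print([str(w.message) for w in caught])  # one warning, for '\.'
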
diff --git a/pygments/lexers/bibtex.py b/pygments/lexers/bibtex.py
index a6159f81..7244ef2f 100644
--- a/pygments/lexers/bibtex.py
+++ b/pygments/lexers/bibtex.py
@@ -101,12 +101,12 @@ class BibTeXLexer(ExtendedRegexLexer):
'quoted-string': [
(r'\{', String, 'braced-string'),
('"', String, '#pop'),
- ('[^\{\"]+', String),
+ (r'[^\{\"]+', String),
],
'braced-string': [
(r'\{', String, '#push'),
(r'\}', String, '#pop'),
- ('[^\{\}]+', String),
+ (r'[^\{\}]+', String),
],
'whitespace': [
(r'\s+', Text),
@@ -154,7 +154,7 @@ class BSTLexer(RegexLexer):
default('#pop'),
],
'whitespace': [
- ('\s+', Text),
+ (r'\s+', Text),
('%.*?$', Comment.SingleLine),
],
}
diff --git a/pygments/lexers/c_cpp.py b/pygments/lexers/c_cpp.py
index 691f5ab4..38f425db 100644
--- a/pygments/lexers/c_cpp.py
+++ b/pygments/lexers/c_cpp.py
@@ -36,7 +36,7 @@ class CFamilyLexer(RegexLexer):
tokens = {
'whitespace': [
# preprocessor directives: without whitespace
- ('^#if\s+0', Comment.Preproc, 'if0'),
+ (r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^(' + _ws1 + r')(#if\s+0)',
@@ -84,7 +84,7 @@ class CFamilyLexer(RegexLexer):
prefix=r'__', suffix=r'\b'), Keyword.Reserved),
(r'(true|false|NULL)\b', Name.Builtin),
(r'([a-zA-Z_]\w*)(\s*)(:)(?!:)', bygroups(Name.Label, Text, Punctuation)),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
@@ -190,9 +190,9 @@ class CLexer(CFamilyLexer):
priority = 0.1
def analyse_text(text):
- if re.search('^\s*#include [<"]', text, re.MULTILINE):
+ if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
return 0.1
- if re.search('^\s*#ifn?def ', text, re.MULTILINE):
+ if re.search(r'^\s*#ifn?def ', text, re.MULTILINE):
return 0.1
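
The `analyse_text` fixes are worth a closer look: these heuristics feed `pygments.lexers.guess_lexer`, so they run against arbitrary user input. A hedged stand-alone sketch of the C heuristic as fixed here:

    import re

    def looks_like_c(text):
        # Mirrors CLexer.analyse_text: preprocessor lines near the top
        # are weak (0.1) evidence of C source.
        if re.search(r'^\s*#include [<"]', text, re.MULTILINE):
            return 0.1
        if re.search(r'^\s*#ifn?def ', text, re.MULTILINE):
            return 0.1
        return 0.0

    print(looks_like_c('#include <stdio.h>\nint main(void) {}\n'))  # 0.1
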
diff --git a/pygments/lexers/c_like.py b/pygments/lexers/c_like.py
index 38827219..fd147a8a 100644
--- a/pygments/lexers/c_like.py
+++ b/pygments/lexers/c_like.py
@@ -245,7 +245,7 @@ class ValaLexer(RegexLexer):
'ulong', 'unichar', 'ushort'), suffix=r'\b'),
Keyword.Type),
(r'(true|false|null)\b', Name.Builtin),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
@@ -344,7 +344,7 @@ class SwigLexer(CppLexer):
# SWIG directives
(r'(%[a-z_][a-z0-9_]*)', Name.Function),
# Special variables
- ('\$\**\&?\w+', Name),
+ (r'\$\**\&?\w+', Name),
# Stringification / additional preprocessor directives
(r'##*[a-zA-Z_]\w*', Comment.Preproc),
inherit,
diff --git a/pygments/lexers/dotnet.py b/pygments/lexers/dotnet.py
index 4e2bc8ab..27ae77c5 100644
--- a/pygments/lexers/dotnet.py
+++ b/pygments/lexers/dotnet.py
@@ -58,7 +58,7 @@ class CSharpLexer(RegexLexer):
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
- 'none': '@?[_a-zA-Z]\w*',
+ 'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
@@ -171,7 +171,7 @@ class NemerleLexer(RegexLexer):
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
- 'none': '@?[_a-zA-Z]\w*',
+ 'none': r'@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
@@ -352,13 +352,13 @@ class BooLexer(RegexLexer):
('[*/]', Comment.Multiline)
],
'funcname': [
- ('[a-zA-Z_]\w*', Name.Function, '#pop')
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
- ('[a-zA-Z_]\w*', Name.Class, '#pop')
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
- ('[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
+ (r'[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
@@ -413,7 +413,7 @@ class VbNetLexer(RegexLexer):
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
- prefix='(?<!\.)', suffix=r'\b'), Keyword),
+ prefix=r'(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
@@ -574,10 +574,10 @@ class FSharpLexer(RegexLexer):
'virtual', 'volatile',
]
keyopts = [
- '!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
- '->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
- '<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
- '_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>',
+ '!=', '#', '&&', '&', r'\(', r'\)', r'\*', r'\+', ',', r'-\.',
+ '->', '-', r'\.\.', r'\.', '::', ':=', ':>', ':', ';;', ';', '<-',
+ r'<\]', '<', r'>\]', '>', r'\?\?', r'\?', r'\[<', r'\[\|', r'\[', r'\]',
+ '_', '`', r'\{', r'\|\]', r'\|', r'\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
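
In the FSharpLexer only the `keyopts` entries that actually contain a backslash get the prefix; entries like `'::'` are left alone. The list is later folded into one alternation, so all that matters is the final pattern text. A reduced sketch of that folding:

    import re

    keyopts = ['!=', '&&', '&', r'\(', r'\)', r'\*', r'\+', '::', ':=']
    keyopt_re = '|'.join(keyopts)  # longer alternatives listed first
    print(re.findall(keyopt_re, 'f := g (x + y)'))  # [':=', '(', '+', ')']
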
diff --git a/pygments/lexers/dylan.py b/pygments/lexers/dylan.py
index f61bb60d..30318f38 100644
--- a/pygments/lexers/dylan.py
+++ b/pygments/lexers/dylan.py
@@ -179,10 +179,10 @@ class DylanLexer(RegexLexer):
(valid_name + ':', Keyword),
# class names
- (r'<' + valid_name + '>', Name.Class),
+ ('<' + valid_name + '>', Name.Class),
# define variable forms.
- (r'\*' + valid_name + '\*', Name.Variable.Global),
+ (r'\*' + valid_name + r'\*', Name.Variable.Global),
# define constant forms.
(r'\$' + valid_name, Name.Constant),
@@ -260,7 +260,7 @@ class DylanConsoleLexer(Lexer):
mimetypes = ['text/x-dylan-console']
_line_re = re.compile('.*?\n')
- _prompt_re = re.compile('\?| ')
+ _prompt_re = re.compile(r'\?| ')
def get_tokens_unprocessed(self, text):
dylexer = DylanLexer(**self.options)
diff --git a/pygments/lexers/erlang.py b/pygments/lexers/erlang.py
index 9e7f85c1..0d0d0798 100644
--- a/pygments/lexers/erlang.py
+++ b/pygments/lexers/erlang.py
@@ -344,7 +344,7 @@ class ElixirLexer(RegexLexer):
op1_re = "|".join(re.escape(s) for s in OPERATORS1)
ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)
punctuation_re = "|".join(re.escape(s) for s in PUNCTUATION)
- alnum = '\w'
+ alnum = r'\w'
name_re = r'(?:\.\.\.|[a-z_]%s*[!?]?)' % alnum
modname_re = r'[A-Z]%(alnum)s*(?:\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}
complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)
@@ -495,7 +495,7 @@ class ElixirConsoleLexer(Lexer):
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
- _prompt_re = re.compile('(iex|\.{3})(\(\d+\))?> ')
+ _prompt_re = re.compile(r'(iex|\.{3})(\(\d+\))?> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
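
ElixirConsoleLexer uses `_prompt_re` to split a transcript into prompt and payload. What the raw pattern accepts, sketched:

    import re

    _prompt_re = re.compile(r'(iex|\.{3})(\(\d+\))?> ')

    for line in ['iex(1)> 1 + 1', '...(1)> end', 'plain output']:
        m = _prompt_re.match(line)
        print(repr(line), '->', m.group(0) if m else None)
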
diff --git a/pygments/lexers/grammar_notation.py b/pygments/lexers/grammar_notation.py
index 076249d3..bc715ffa 100644
--- a/pygments/lexers/grammar_notation.py
+++ b/pygments/lexers/grammar_notation.py
@@ -158,7 +158,7 @@ class JsgfLexer(RegexLexer):
(r'//.*', Comment.Single),
],
'non-comments': [
- ('\A#JSGF[^;]*', Comment.Preproc),
+ (r'\A#JSGF[^;]*', Comment.Preproc),
(r'\s+', Text),
(r';', Punctuation),
(r'[=|()\[\]*+]', Operator),
diff --git a/pygments/lexers/haskell.py b/pygments/lexers/haskell.py
index 1a2f2217..e3604ed8 100644
--- a/pygments/lexers/haskell.py
+++ b/pygments/lexers/haskell.py
@@ -677,10 +677,10 @@ class KokaLexer(RegexLexer):
symbols = r'[$%&*+@!/\\^~=.:\-?|<>]+'
# symbol boundary: an operator keyword should not be followed by any of these
- sboundary = '(?!'+symbols+')'
+ sboundary = '(?!' + symbols + ')'
# name boundary: a keyword should not be followed by any of these
- boundary = '(?![\w/])'
+ boundary = r'(?![\w/])'
# koka token abstractions
tokenType = Name.Attribute
diff --git a/pygments/lexers/haxe.py b/pygments/lexers/haxe.py
index 6f5c3599..364ad344 100644
--- a/pygments/lexers/haxe.py
+++ b/pygments/lexers/haxe.py
@@ -43,7 +43,7 @@ class HaxeLexer(ExtendedRegexLexer):
typeid = r'_*[A-Z]\w*'
# combined ident and dollar and idtype
- ident = r'(?:_*[a-z]\w*|_+[0-9]\w*|' + typeid + '|_+|\$\w+)'
+ ident = r'(?:_*[a-z]\w*|_+[0-9]\w*|' + typeid + r'|_+|\$\w+)'
binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|'
r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|'
@@ -182,7 +182,7 @@ class HaxeLexer(ExtendedRegexLexer):
(r'[0-9]+[eE][+\-]?[0-9]+', Number.Float),
(r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float),
(r'[0-9]+\.[0-9]+', Number.Float),
- (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex),
@@ -219,7 +219,7 @@ class HaxeLexer(ExtendedRegexLexer):
(r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
(r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
(r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'preproc-expr-chain')),
- (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'preproc-expr-chain')),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, ('#pop', 'preproc-expr-chain')),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'preproc-expr-chain')),
@@ -456,7 +456,7 @@ class HaxeLexer(ExtendedRegexLexer):
(r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
- (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, ('#pop', 'expr-chain')),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')),
@@ -711,7 +711,7 @@ class HaxeLexer(ExtendedRegexLexer):
(r'[0-9]+[eE][+\-]?[0-9]+', Number.Float, '#pop'),
(r'[0-9]+\.[0-9]*[eE][+\-]?[0-9]+', Number.Float, '#pop'),
(r'[0-9]+\.[0-9]+', Number.Float, '#pop'),
- (r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'),
+ (r'[0-9]+\.(?!' + ident + r'|\.\.)', Number.Float, '#pop'),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex, '#pop'),
diff --git a/pygments/lexers/idl.py b/pygments/lexers/idl.py
index 2fc39318..87cafe6a 100644
--- a/pygments/lexers/idl.py
+++ b/pygments/lexers/idl.py
@@ -53,7 +53,7 @@ class IDLLexer(RegexLexer):
'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder',
'bytscl', 'caldat', 'calendar', 'call_external',
'call_function', 'call_method', 'call_procedure', 'canny',
- 'catch', 'cd', 'cdf_\w*', 'ceil', 'chebyshev',
+ 'catch', 'cd', r'cdf_\w*', 'ceil', 'chebyshev',
'check_math',
'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen',
'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts',
@@ -87,7 +87,7 @@ class IDLLexer(RegexLexer):
'dlm_load', 'dlm_register', 'doc_library', 'double',
'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec',
'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn',
- 'eof', 'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
+ 'eof', r'eos_\w*', 'erase', 'erf', 'erfc', 'erfcx',
'erode', 'errorplot', 'errplot', 'estimator_filter',
'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint',
'extrac', 'extract_slice', 'factorial', 'fft', 'filepath',
@@ -104,11 +104,11 @@ class IDLLexer(RegexLexer):
'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv',
'getwindows', 'get_drive_list', 'get_dxf_objects',
'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size',
- 'greg2jul', 'grib_\w*', 'grid3', 'griddata',
+ 'greg2jul', r'grib_\w*', 'grid3', 'griddata',
'grid_input', 'grid_tps', 'gs_iter',
- 'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
+ r'h5[adfgirst]_\w*', 'h5_browser', 'h5_close',
'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse',
- 'hanning', 'hash', 'hdf_\w*', 'heap_free',
+ 'hanning', 'hash', r'hdf_\w*', 'heap_free',
'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save',
'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal',
'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int',
@@ -156,7 +156,7 @@ class IDLLexer(RegexLexer):
'modifyct', 'moment', 'morph_close', 'morph_distance',
'morph_gradient', 'morph_hitormiss', 'morph_open',
'morph_thin', 'morph_tophat', 'multi', 'm_correlate',
- 'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
+ r'ncdf_\w*', 'newton', 'noise_hurl', 'noise_pick',
'noise_scatter', 'noise_slur', 'norm', 'n_elements',
'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy',
'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid',
diff --git a/pygments/lexers/inferno.py b/pygments/lexers/inferno.py
index 5fc5a0ba..0d68856d 100644
--- a/pygments/lexers/inferno.py
+++ b/pygments/lexers/inferno.py
@@ -64,7 +64,7 @@ class LimboLexer(RegexLexer):
(r'(byte|int|big|real|string|array|chan|list|adt'
r'|fn|ref|of|module|self|type)\b', Keyword.Type),
(r'(con|iota|nil)\b', Keyword.Constant),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'statement' : [
include('whitespace'),
diff --git a/pygments/lexers/j.py b/pygments/lexers/j.py
index 434964fe..46037820 100644
--- a/pygments/lexers/j.py
+++ b/pygments/lexers/j.py
@@ -52,13 +52,13 @@ class JLexer(RegexLexer):
Name.Function, 'explicitDefinition'),
# Flow Control
- (words(('for_', 'goto_', 'label_'), suffix=validName+'\.'), Name.Label),
+ (words(('for_', 'goto_', 'label_'), suffix=validName+r'\.'), Name.Label),
(words((
'assert', 'break', 'case', 'catch', 'catchd',
'catcht', 'continue', 'do', 'else', 'elseif',
'end', 'fcase', 'for', 'if', 'return',
'select', 'throw', 'try', 'while', 'whilst',
- ), suffix='\.'), Name.Label),
+ ), suffix=r'\.'), Name.Label),
# Variable Names
(validName, Name.Variable),
diff --git a/pygments/lexers/matlab.py b/pygments/lexers/matlab.py
index 56a0f6d6..1c77b60c 100644
--- a/pygments/lexers/matlab.py
+++ b/pygments/lexers/matlab.py
@@ -134,9 +134,9 @@ class MatlabLexer(RegexLexer):
}
def analyse_text(text):
- if re.match('^\s*%', text, re.M): # comment
+ if re.match(r'^\s*%', text, re.M): # comment
return 0.2
- elif re.match('^!\w+', text, re.M): # system cmd
+ elif re.match(r'^!\w+', text, re.M): # system cmd
return 0.2
diff --git a/pygments/lexers/ml.py b/pygments/lexers/ml.py
index f80d5bfa..0bff9816 100644
--- a/pygments/lexers/ml.py
+++ b/pygments/lexers/ml.py
@@ -43,7 +43,7 @@ class SMLLexer(RegexLexer):
symbolicid_reserved = set((
# Core
- ':', '\|', '=', '=>', '->', '#',
+ ':', r'\|', '=', '=>', '->', '#',
# Modules
':>',
))
diff --git a/pygments/lexers/objective.py b/pygments/lexers/objective.py
index 7807255e..179928e9 100644
--- a/pygments/lexers/objective.py
+++ b/pygments/lexers/objective.py
@@ -87,26 +87,26 @@ def objective(baselexer):
],
'oc_classname': [
# interface definition that inherits
- ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)',
+ (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?(\s*)(\{)',
bygroups(Name.Class, Text, Name.Class, Text, Punctuation),
('#pop', 'oc_ivars')),
- ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
+ (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
# interface definition for a category
- ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)',
+ (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))(\s*)(\{)',
bygroups(Name.Class, Text, Name.Label, Text, Punctuation),
('#pop', 'oc_ivars')),
- ('([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
+ (r'([a-zA-Z$_][\w$]*)(\s*)(\([a-zA-Z$_][\w$]*\))',
bygroups(Name.Class, Text, Name.Label), '#pop'),
# simple interface / implementation
- ('([a-zA-Z$_][\w$]*)(\s*)(\{)',
+ (r'([a-zA-Z$_][\w$]*)(\s*)(\{)',
bygroups(Name.Class, Text, Punctuation), ('#pop', 'oc_ivars')),
- ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
+ (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
],
'oc_forward_classname': [
- ('([a-zA-Z$_][\w$]*)(\s*,\s*)',
+ (r'([a-zA-Z$_][\w$]*)(\s*,\s*)',
bygroups(Name.Class, Text), 'oc_forward_classname'),
- ('([a-zA-Z$_][\w$]*)(\s*;?)',
+ (r'([a-zA-Z$_][\w$]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop')
],
'oc_ivars': [
@@ -244,17 +244,17 @@ class LogosLexer(ObjectiveCppLexer):
inherit,
],
'logos_init_directive': [
- ('\s+', Text),
+ (r'\s+', Text),
(',', Punctuation, ('logos_init_directive', '#pop')),
- ('([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)',
+ (r'([a-zA-Z$_][\w$]*)(\s*)(=)(\s*)([^);]*)',
bygroups(Name.Class, Text, Punctuation, Text, Text)),
- ('([a-zA-Z$_][\w$]*)', Name.Class),
- ('\)', Punctuation, '#pop'),
+ (r'([a-zA-Z$_][\w$]*)', Name.Class),
+ (r'\)', Punctuation, '#pop'),
],
'logos_classname': [
- ('([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
+ (r'([a-zA-Z$_][\w$]*)(\s*:\s*)([a-zA-Z$_][\w$]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
- ('([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
+ (r'([a-zA-Z$_][\w$]*)', Name.Class, '#pop')
],
'root': [
(r'(%subclass)(\s+)', bygroups(Keyword, Text),
diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py
index 1f3c9b4d..43eb6c1f 100644
--- a/pygments/lexers/parsers.py
+++ b/pygments/lexers/parsers.py
@@ -364,13 +364,13 @@ class AntlrLexer(RegexLexer):
# tokensSpec
(r'tokens\b', Keyword, 'tokens'),
# attrScope
- (r'(scope)(\s*)(' + _id + ')(\s*)(\{)',
+ (r'(scope)(\s*)(' + _id + r')(\s*)(\{)',
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation), 'action'),
# exception
(r'(catch|finally)\b', Keyword, 'exception'),
# action
- (r'(@' + _id + ')(\s*)(::)?(\s*)(' + _id + ')(\s*)(\{)',
+ (r'(@' + _id + r')(\s*)(::)?(\s*)(' + _id + r')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
Name.Label, Whitespace, Punctuation), 'action'),
# rule
@@ -405,10 +405,10 @@ class AntlrLexer(RegexLexer):
# L173 ANTLRv3.g from ANTLR book
(r'(scope)(\s+)(\{)', bygroups(Keyword, Whitespace, Punctuation),
'action'),
- (r'(scope)(\s+)(' + _id + ')(\s*)(;)',
+ (r'(scope)(\s+)(' + _id + r')(\s*)(;)',
bygroups(Keyword, Whitespace, Name.Label, Whitespace, Punctuation)),
# ruleAction
- (r'(@' + _id + ')(\s*)(\{)',
+ (r'(@' + _id + r')(\s*)(\{)',
bygroups(Name.Label, Whitespace, Punctuation), 'action'),
# finished prelims, go to rule alts!
(r':', Punctuation, '#pop')
@@ -442,7 +442,7 @@ class AntlrLexer(RegexLexer):
include('comments'),
(r'\{', Punctuation),
(r'(' + _TOKEN_REF + r')(\s*)(=)?(\s*)(' + _STRING_LITERAL
- + ')?(\s*)(;)',
+ + r')?(\s*)(;)',
bygroups(Name.Label, Whitespace, Punctuation, Whitespace,
String, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
@@ -452,7 +452,7 @@ class AntlrLexer(RegexLexer):
include('comments'),
(r'\{', Punctuation),
(r'(' + _id + r')(\s*)(=)(\s*)(' +
- '|'.join((_id, _STRING_LITERAL, _INT, '\*')) + ')(\s*)(;)',
+ '|'.join((_id, _STRING_LITERAL, _INT, r'\*')) + r')(\s*)(;)',
bygroups(Name.Variable, Whitespace, Punctuation, Whitespace,
Text, Whitespace, Punctuation)),
(r'\}', Punctuation, '#pop'),
diff --git a/pygments/lexers/pascal.py b/pygments/lexers/pascal.py
index 9aa1ac8f..467a0b2c 100644
--- a/pygments/lexers/pascal.py
+++ b/pygments/lexers/pascal.py
@@ -593,8 +593,8 @@ class AdaLexer(RegexLexer):
],
'end': [
('(if|case|record|loop|select)', Keyword.Reserved),
- ('"[^"]+"|[\w.]+', Name.Function),
- ('\s+', Text),
+ (r'"[^"]+"|[\w.]+', Name.Function),
+ (r'\s+', Text),
(';', Punctuation, '#pop'),
],
'type_def': [
@@ -628,11 +628,11 @@ class AdaLexer(RegexLexer):
],
'package': [
('body', Keyword.Declaration),
- ('is\s+new|renames', Keyword.Reserved),
+ (r'is\s+new|renames', Keyword.Reserved),
('is', Keyword.Reserved, '#pop'),
(';', Punctuation, '#pop'),
- ('\(', Punctuation, 'package_instantiation'),
- ('([\w.]+)', Name.Class),
+ (r'\(', Punctuation, 'package_instantiation'),
+ (r'([\w.]+)', Name.Class),
include('root'),
],
'package_instantiation': [
diff --git a/pygments/lexers/pawn.py b/pygments/lexers/pawn.py
index f462a883..0ef28175 100644
--- a/pygments/lexers/pawn.py
+++ b/pygments/lexers/pawn.py
@@ -36,7 +36,7 @@ class SourcePawnLexer(RegexLexer):
tokens = {
'root': [
# preprocessor directives: without whitespace
- ('^#if\s+0', Comment.Preproc, 'if0'),
+ (r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
@@ -62,7 +62,7 @@ class SourcePawnLexer(RegexLexer):
r'public|return|sizeof|static|decl|struct|switch)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
@@ -148,7 +148,7 @@ class PawnLexer(RegexLexer):
tokens = {
'root': [
# preprocessor directives: without whitespace
- ('^#if\s+0', Comment.Preproc, 'if0'),
+ (r'^#if\s+0', Comment.Preproc, 'if0'),
('^#', Comment.Preproc, 'macro'),
# or with whitespace
('^' + _ws1 + r'#if\s+0', Comment.Preproc, 'if0'),
@@ -174,7 +174,7 @@ class PawnLexer(RegexLexer):
r'public|return|sizeof|tagof|state|goto)\b', Keyword),
(r'(bool|Float)\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
diff --git a/pygments/lexers/prolog.py b/pygments/lexers/prolog.py
index 90f9529c..58e762b0 100644
--- a/pygments/lexers/prolog.py
+++ b/pygments/lexers/prolog.py
@@ -57,15 +57,15 @@ class PrologLexer(RegexLexer):
(r'_', Keyword), # The don't-care variable
(r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
- u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
+ u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(:-|-->)',
bygroups(Name.Function, Text, Operator)), # function defn
(u'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
- u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
+ u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
u'(\\s*)(\\()',
bygroups(Name.Function, Text, Punctuation)),
(u'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
- u'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
+ u'[\\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
String.Atom), # atom, characters
# This one includes !
(u'[#&*+\\-./:<=>?@\\\\^~\u00a1-\u00bf\u2010-\u303f]+',
@@ -300,7 +300,7 @@ class LogtalkLexer(RegexLexer):
return 1.0
elif ':- category(' in text:
return 1.0
- elif re.search('^:-\s[a-z]', text, re.M):
+ elif re.search(r'^:-\s[a-z]', text, re.M):
return 0.9
else:
return 0.0
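
prolog.py doubles backslashes inside its `u'...'` atom patterns (the same Python 2 portability concern as in apl.py) and raw-prefixes the Logtalk heuristic. That heuristic line on its own:

    import re

    sample = '% a comment\n:- object(point).\n'
    # A ':-' directive at the start of any line scores 0.9 in
    # LogtalkLexer.analyse_text:
    print(bool(re.search(r'^:-\s[a-z]', sample, re.M)))  # True
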
diff --git a/pygments/lexers/qvt.py b/pygments/lexers/qvt.py
index f496d600..af091a65 100644
--- a/pygments/lexers/qvt.py
+++ b/pygments/lexers/qvt.py
@@ -126,7 +126,7 @@ class QVToLexer(RegexLexer):
(r'[^\\\'"\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
- ],
+ ],
'stringescape': [
(r'\\([\\btnfr"\']|u[0-3][0-7]{2}|u[0-7]{1,2})', String.Escape)
],
@@ -134,15 +134,15 @@ class QVToLexer(RegexLexer):
(r'"', String, '#pop'),
(r'\\\\|\\"', String.Escape),
include('strings')
- ],
+ ],
'sqs': [ # single-quoted string
(r"'", String, '#pop'),
(r"\\\\|\\'", String.Escape),
include('strings')
- ],
+ ],
'name': [
- ('[a-zA-Z_]\w*', Name),
- ],
+ (r'[a-zA-Z_]\w*', Name),
+ ],
# numbers: excerpt taken from the python lexer
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
diff --git a/pygments/lexers/rebol.py b/pygments/lexers/rebol.py
index f3d00200..4d24daaa 100644
--- a/pygments/lexers/rebol.py
+++ b/pygments/lexers/rebol.py
@@ -102,12 +102,12 @@ class RebolLexer(RegexLexer):
yield match.start(), Generic.Heading, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
- elif re.match('(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
+ elif re.match(r'(\+|-|\*|/|//|\*\*|and|or|xor|=\?|=|==|<>|<|>|<=|>=)$',
word):
yield match.start(), Operator, word
- elif re.match(".*\?$", word):
+ elif re.match(r".*\?$", word):
yield match.start(), Keyword, word
- elif re.match(".*\!$", word):
+ elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
@@ -297,10 +297,10 @@ class RedLexer(RegexLexer):
yield match.start(), Keyword.Namespace, word
elif re.match("to-.*", word):
yield match.start(), Keyword, word
- elif re.match('(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
- '<<<|>>>|<<|>>|<|>%)$', word):
+ elif re.match(r'(\+|-\*\*|-|\*\*|//|/|\*|and|or|xor|=\?|===|==|=|<>|<=|>=|'
+ r'<<<|>>>|<<|>>|<|>%)$', word):
yield match.start(), Operator, word
- elif re.match(".*\!$", word):
+ elif re.match(r".*\!$", word):
yield match.start(), Keyword.Type, word
elif re.match("'.*", word):
yield match.start(), Name.Variable.Instance, word # lit-word
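
Note that the rebol.py patterns keep `\!` even though `!` is not a regex metacharacter: escaping ASCII punctuation is redundant but legal in `re`, unlike escaping an unknown ASCII letter, which modern Pythons reject outright. A quick check:

    import re

    assert re.match(r".*\!$", "integer!")  # redundant escape, still fine
    # re.compile(r"\q") would instead raise re.error on Python 3.7+.
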
diff --git a/pygments/lexers/robotframework.py b/pygments/lexers/robotframework.py
index e868127b..5bacffa3 100644
--- a/pygments/lexers/robotframework.py
+++ b/pygments/lexers/robotframework.py
@@ -161,7 +161,7 @@ class RowTokenizer(object):
class RowSplitter(object):
_space_splitter = re.compile('( {2,})')
- _pipe_splitter = re.compile('((?:^| +)\|(?: +|$))')
+ _pipe_splitter = re.compile(r'((?:^| +)\|(?: +|$))')
def split(self, row):
splitter = (row.startswith('| ') and self._split_from_pipes
diff --git a/pygments/lexers/testing.py b/pygments/lexers/testing.py
index 1e0795b1..86e60f25 100644
--- a/pygments/lexers/testing.py
+++ b/pygments/lexers/testing.py
@@ -29,7 +29,7 @@ class GherkinLexer(RegexLexer):
feature_keywords = u'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функціонал|Функционалност|Функционал|Фича|Особина|Могућност|Özellik|Właściwość|Tính năng|Trajto|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalitat|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
feature_element_keywords = u'^(\\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|剧本大纲|剧本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарій|Сценарио|Сценарий структураси|Сценарий|Структура сценарію|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Передумова|Основа|Концепт|Контекст|Założenia|Wharrimean is|Tình huống|The thing of it is|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenaro|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Konturo de la scenaro|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Fono|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dis is what went down|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
examples_keywords = u'^(\\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Приклади|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Ekzemploj|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
- step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假设|假如|假定|但是|但し|並且|并且|同時|同时|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
+ step_keywords = u'^(\\s*)(하지만|조건|먼저|만일|만약|단|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假设|假如|假定|但是|但し|並且|并且|同時|同时|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Якщо |Унда |То |Припустимо, що |Припустимо |Онда |Но |Нехай |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Але |Агар |А |І |Și |És |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Youse know when youse got |Youse know like when |Yna |Ya know how |Ya gotta |Y |Wun |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Sed |Se |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kaj |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givun |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Dun |Donitaĵo |Donat |Donada |Do |Diyelim ki |Dengan |Den youse gotta |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |Cal |But y\'all |But |Buh |Biết |Bet |BUT |Atès |Atunci |Atesa |Anrhegedig a |Angenommen |And y\'all |And |An |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\\* )'
tokens = {
'comments': [
diff --git a/pygments/lexers/textfmts.py b/pygments/lexers/textfmts.py
index bb8124ef..b70c2ad6 100644
--- a/pygments/lexers/textfmts.py
+++ b/pygments/lexers/textfmts.py
@@ -266,7 +266,7 @@ class TodotxtLexer(RegexLexer):
# 5. Leading project
(project_regex, Project, 'incomplete'),
# 6. Non-whitespace catch-all
- ('\S+', IncompleteTaskText, 'incomplete'),
+ (r'\S+', IncompleteTaskText, 'incomplete'),
],
# Parse a complete task
@@ -277,9 +277,9 @@ class TodotxtLexer(RegexLexer):
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
- ('\S+', CompleteTaskText),
+ (r'\S+', CompleteTaskText),
# Tokenize whitespace not containing a newline
- ('\s+', CompleteTaskText),
+ (r'\s+', CompleteTaskText),
],
# Parse an incomplete task
@@ -290,8 +290,8 @@ class TodotxtLexer(RegexLexer):
(context_regex, Context),
(project_regex, Project),
# Tokenize non-whitespace text
- ('\S+', IncompleteTaskText),
+ (r'\S+', IncompleteTaskText),
# Tokenize whitespace not containing a newline
- ('\s+', IncompleteTaskText),
+ (r'\s+', IncompleteTaskText),
],
}
diff --git a/pygments/lexers/varnish.py b/pygments/lexers/varnish.py
index 44521422..f3b37d60 100644
--- a/pygments/lexers/varnish.py
+++ b/pygments/lexers/varnish.py
@@ -36,7 +36,7 @@ class VCLLexer(RegexLexer):
# Skip over comments and blank lines
# This is accurate enough that returning 0.9 is reasonable.
# Almost no VCL files start without some comments.
- elif '\nvcl 4\.0;' in text[:1000]:
+ elif '\nvcl 4.0;' in text[:1000]:
return 0.9
tokens = {
@@ -120,7 +120,7 @@ class VCLLexer(RegexLexer):
r'([a-zA-Z_]\w*)'
r'(\s*\(.*\))',
bygroups(Name.Function, Punctuation, Name.Function, using(this))),
- ('[a-zA-Z_]\w*', Name),
+ (r'[a-zA-Z_]\w*', Name),
],
'comment': [
(r'[^*/]+', Comment.Multiline),
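
The varnish.py hunk is the one place where the old spelling was an outright bug rather than a warning: the pattern is used with `in`, a plain substring test, so the literal backslash left by `\.` meant the VCL 4.0 sniff could never fire. Dropping the escape, not adding `r`, is the right fix:

    text = 'vcl 4.0;\nbackend default { .host = "127.0.0.1"; }\n'

    # The old test required a literal backslash in the source file:
    assert '\nvcl 4\\.0;' not in '\n' + text
    # The fixed test matches real VCL (the leading newline mimics the
    # lexer's requirement that the declaration start a line):
    assert '\nvcl 4.0;' in '\n' + text
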
diff --git a/tests/run.py b/tests/run.py
index 161e5c04..2e962f2f 100644
--- a/tests/run.py
+++ b/tests/run.py
@@ -22,6 +22,13 @@ import warnings
if os.path.dirname(__file__):
os.chdir(os.path.dirname(__file__))
+# make FutureWarnings (coming from Regex syntax most likely) and
+# DeprecationWarnings due to non-raw strings an error
+warnings.filterwarnings("error", module=r"pygments\..*",
+ category=FutureWarning)
+warnings.filterwarnings("error", module=r".*pygments.*",
+ category=DeprecationWarning)
+
try:
import nose
@@ -32,12 +39,6 @@ except ImportError:
# make sure the current source is first on sys.path
sys.path.insert(0, '..')
-# make FutureWarnings (coming from Regex syntax most likely) and
-# DeprecationWarnings (coming from invalid escapes due to non-raw strings)
-# an error
-warnings.filterwarnings("error", category=FutureWarning)
-warnings.filterwarnings("error", category=DeprecationWarning)
-
if '--with-coverage' not in sys.argv:
# if running with coverage, pygments should not be imported before coverage
# is started, otherwise it will count already executed lines as uncovered
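
Finally, tests/run.py moves the warning filters ahead of the nose import and scopes them with `module=` regexes, so only FutureWarnings (the `re` module emits these for constructs whose meaning may change, such as an unescaped `[` inside a character class) and DeprecationWarnings attributed to pygments' own code become errors; warnings from third-party packages no longer abort the run. A sketch of how the scoping behaves, using `warn_explicit` to fake the warning's origin:

    import warnings

    with warnings.catch_warnings():
        warnings.filterwarnings("error", module=r"pygments\..*",
                                category=FutureWarning)
        try:
            warnings.warn_explicit("demo", FutureWarning, "x.py", 1,
                                   module="pygments.lexers.x")
        except FutureWarning:
            print("fatal inside pygments.*")
        # The same warning from any other module is merely printed:
        warnings.warn_explicit("demo", FutureWarning, "y.py", 1,
                               module="some.other.module")
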