Diffstat (limited to 'pygments')
-rw-r--r--  pygments/cmdline.py                  2
-rw-r--r--  pygments/lexer.py                    4
-rw-r--r--  pygments/lexers/_stan_builtins.py  111
-rw-r--r--  pygments/lexers/agile.py             6
-rw-r--r--  pygments/lexers/asm.py              20
-rw-r--r--  pygments/lexers/jvm.py             144
-rw-r--r--  pygments/lexers/math.py              8
-rw-r--r--  pygments/lexers/other.py            42
-rw-r--r--  pygments/lexers/web.py              10
-rw-r--r--  pygments/styles/monokai.py           6
10 files changed, 216 insertions(+), 137 deletions(-)
diff --git a/pygments/cmdline.py b/pygments/cmdline.py
index c25204bf..f260f76a 100644
--- a/pygments/cmdline.py
+++ b/pygments/cmdline.py
@@ -92,7 +92,7 @@ def _parse_options(o_strs):
for o_arg in o_args:
o_arg = o_arg.strip()
try:
- o_key, o_val = o_arg.split('=')
+ o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
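
Note: the maxsplit argument matters because -O option values can themselves
contain '='. With plain split('='), a value like the hypothetical one below
unpacks to three items and lands in the except branch. A quick sketch:

    o_arg = 'cssfile=print=media.css'    # hypothetical -O argument
    # old: o_key, o_val = o_arg.split('=')  -> ValueError (3 values)
    o_key, o_val = o_arg.split('=', 1)
    print(o_key, o_val)                  # cssfile print=media.css
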
diff --git a/pygments/lexer.py b/pygments/lexer.py
index 662c059e..8538fe7c 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -192,7 +192,9 @@ class Lexer(object):
def get_tokens_unprocessed(self, text):
"""
- Return an iterable of (tokentype, value) pairs.
+ Return an iterable of (index, tokentype, value) pairs where "index"
+ is the starting position of the token within the input text.
+
In subclasses, implement this method as a generator to
maximize effectiveness.
"""
diff --git a/pygments/lexers/_stan_builtins.py b/pygments/lexers/_stan_builtins.py
index 637072e4..1cea6ba4 100644
--- a/pygments/lexers/_stan_builtins.py
+++ b/pygments/lexers/_stan_builtins.py
@@ -10,9 +10,10 @@ This file contains the names of functions for Stan used by
:license: BSD, see LICENSE for details.
"""
-KEYWORDS = ['else', 'for', 'if', 'in', 'lower', 'lp__', 'print', 'upper', 'while']
+KEYWORDS = ['else', 'for', 'if', 'in', 'lp__', 'print', 'while']
-TYPES = [ 'corr_matrix',
+TYPES = [ 'cholesky_factor_cov',
+ 'corr_matrix',
'cov_matrix',
'int',
'matrix',
@@ -34,35 +35,53 @@ FUNCTIONS = [ 'Phi',
'atan',
'atan2',
'atanh',
+ 'bernoulli_ccdf_log',
'bernoulli_cdf',
+ 'bernoulli_cdf_log',
'bernoulli_log',
'bernoulli_logit_log',
'bernoulli_rng',
+ 'bessel_first_kind',
+ 'bessel_second_kind',
+ 'beta_binomial_ccdf_log',
'beta_binomial_cdf',
+ 'beta_binomial_cdf_log',
'beta_binomial_log',
'beta_binomial_rng',
+ 'beta_ccdf_log',
'beta_cdf',
+ 'beta_cdf_log',
'beta_log',
'beta_rng',
'binary_log_loss',
+ 'binomial_ccdf_log',
'binomial_cdf',
+ 'binomial_cdf_log',
'binomial_coefficient_log',
'binomial_log',
'binomial_logit_log',
'binomial_rng',
'block',
'categorical_log',
+ 'categorical_logit_log',
'categorical_rng',
+ 'cauchy_ccdf_log',
'cauchy_cdf',
+ 'cauchy_cdf_log',
'cauchy_log',
'cauchy_rng',
'cbrt',
'ceil',
+ 'chi_square_ccdf_log',
+ 'chi_square_cdf',
+ 'chi_square_cdf_log',
'chi_square_log',
'chi_square_rng',
'cholesky_decompose',
'col',
'cols',
+ 'columns_dot_product',
+ 'columns_dot_self',
'cos',
'cosh',
'crossprod',
@@ -77,55 +96,78 @@ FUNCTIONS = [ 'Phi',
'dirichlet_rng',
'dot_product',
'dot_self',
+ 'double_exponential_ccdf_log',
+ 'double_exponential_cdf',
+ 'double_exponential_cdf_log',
'double_exponential_log',
'double_exponential_rng',
'e',
'eigenvalues_sym',
'eigenvectors_sym',
- 'epsilon',
'erf',
'erfc',
'exp',
'exp2',
+ 'exp_mod_normal_ccdf_log',
'exp_mod_normal_cdf',
+ 'exp_mod_normal_cdf_log',
'exp_mod_normal_log',
'exp_mod_normal_rng',
'expm1',
+ 'exponential_ccdf_log',
'exponential_cdf',
+ 'exponential_cdf_log',
'exponential_log',
'exponential_rng',
'fabs',
+ 'falling_factorial',
'fdim',
'floor',
'fma',
'fmax',
'fmin',
'fmod',
+ 'gamma_ccdf_log',
+ 'gamma_cdf',
+ 'gamma_cdf_log',
'gamma_log',
+ 'gamma_p',
+ 'gamma_q',
'gamma_rng',
+ 'gaussian_dlm_obs_log',
+ 'gumbel_ccdf_log',
'gumbel_cdf',
+ 'gumbel_cdf_log',
'gumbel_log',
'gumbel_rng',
+ 'head',
'hypergeometric_log',
'hypergeometric_rng',
'hypot',
'if_else',
+ 'increment_log_prob',
'int_step',
+ 'inv',
+ 'inv_chi_square_ccdf_log',
'inv_chi_square_cdf',
+ 'inv_chi_square_cdf_log',
'inv_chi_square_log',
'inv_chi_square_rng',
'inv_cloglog',
+ 'inv_gamma_ccdf_log',
'inv_gamma_cdf',
+ 'inv_gamma_cdf_log',
'inv_gamma_log',
'inv_gamma_rng',
'inv_logit',
+ 'inv_sqrt',
+ 'inv_square',
'inv_wishart_log',
'inv_wishart_rng',
'inverse',
+ 'inverse_spd',
'lbeta',
'lgamma',
- 'lkj_corr_cholesky_log',
- 'lkj_corr_cholesky_rng',
'lkj_corr_log',
'lkj_corr_rng',
'lkj_cov_log',
@@ -133,96 +175,145 @@ FUNCTIONS = [ 'Phi',
'log',
'log10',
'log1m',
+ 'log1m_exp',
'log1m_inv_logit',
'log1p',
'log1p_exp',
'log2',
'log_determinant',
+ 'log_diff_exp',
+ 'log_falling_factorial',
'log_inv_logit',
+ 'log_rising_factorial',
+ 'log_softmax',
'log_sum_exp',
+ 'logistic_ccdf_log',
'logistic_cdf',
+ 'logistic_cdf_log',
'logistic_log',
'logistic_rng',
'logit',
+ 'lognormal_ccdf_log',
'lognormal_cdf',
+ 'lognormal_cdf_log',
'lognormal_log',
'lognormal_rng',
+ 'machine_precision',
'max',
'mdivide_left_tri_low',
'mdivide_right_tri_low',
'mean',
'min',
+ 'modified_bessel_first_kind',
+ 'modified_bessel_second_kind',
'multi_normal_cholesky_log',
'multi_normal_log',
'multi_normal_prec_log',
'multi_normal_rng',
'multi_student_t_log',
'multi_student_t_rng',
- 'multinomial_cdf',
'multinomial_log',
'multinomial_rng',
'multiply_log',
'multiply_lower_tri_self_transpose',
+ 'neg_binomial_ccdf_log',
'neg_binomial_cdf',
+ 'neg_binomial_cdf_log',
'neg_binomial_log',
'neg_binomial_rng',
- 'negative_epsilon',
'negative_infinity',
+ 'normal_ccdf_log',
'normal_cdf',
+ 'normal_cdf_log',
'normal_log',
'normal_rng',
'not_a_number',
'ordered_logistic_log',
'ordered_logistic_rng',
'owens_t',
+ 'pareto_ccdf_log',
'pareto_cdf',
+ 'pareto_cdf_log',
'pareto_log',
'pareto_rng',
'pi',
+ 'poisson_ccdf_log',
'poisson_cdf',
+ 'poisson_cdf_log',
'poisson_log',
'poisson_log_log',
'poisson_rng',
'positive_infinity',
'pow',
'prod',
+ 'quad_form',
+ 'rank',
+ 'rayleigh_ccdf_log',
+ 'rayleigh_cdf',
+ 'rayleigh_cdf_log',
+ 'rayleigh_log',
+ 'rayleigh_rng',
'rep_array',
'rep_matrix',
'rep_row_vector',
'rep_vector',
+ 'rising_factorial',
'round',
'row',
'rows',
+ 'rows_dot_product',
+ 'rows_dot_self',
+ 'scaled_inv_chi_square_ccdf_log',
'scaled_inv_chi_square_cdf',
+ 'scaled_inv_chi_square_cdf_log',
'scaled_inv_chi_square_log',
'scaled_inv_chi_square_rng',
'sd',
+ 'segment',
'sin',
'singular_values',
'sinh',
'size',
+ 'skew_normal_ccdf_log',
'skew_normal_cdf',
+ 'skew_normal_cdf_log',
'skew_normal_log',
'skew_normal_rng',
'softmax',
+ 'sort_asc',
+ 'sort_desc',
'sqrt',
'sqrt2',
'square',
'step',
+ 'student_t_ccdf_log',
'student_t_cdf',
+ 'student_t_cdf_log',
'student_t_log',
'student_t_rng',
+ 'sub_col',
+ 'sub_row',
'sum',
+ 'tail',
'tan',
'tanh',
'tcrossprod',
'tgamma',
+ 'to_vector',
'trace',
+ 'trace_gen_quad_form',
+ 'trace_quad_form',
'trunc',
+ 'uniform_ccdf_log',
+ 'uniform_cdf',
+ 'uniform_cdf_log',
'uniform_log',
'uniform_rng',
'variance',
+ 'von_mises_log',
+ 'weibull_ccdf_log',
'weibull_cdf',
+ 'weibull_cdf_log',
'weibull_log',
'weibull_rng',
'wishart_log',
@@ -236,6 +327,7 @@ DISTRIBUTIONS = [ 'bernoulli',
'binomial_coefficient',
'binomial_logit',
'categorical',
+ 'categorical_logit',
'cauchy',
'chi_square',
'dirichlet',
@@ -243,13 +335,13 @@ DISTRIBUTIONS = [ 'bernoulli',
'exp_mod_normal',
'exponential',
'gamma',
+ 'gaussian_dlm_obs',
'gumbel',
'hypergeometric',
'inv_chi_square',
'inv_gamma',
'inv_wishart',
'lkj_corr',
- 'lkj_corr_cholesky',
'lkj_cov',
'logistic',
'lognormal',
@@ -265,10 +357,12 @@ DISTRIBUTIONS = [ 'bernoulli',
'pareto',
'poisson',
'poisson_log',
+ 'rayleigh',
'scaled_inv_chi_square',
'skew_normal',
'student_t',
'uniform',
+ 'von_mises',
'weibull',
'wishart']
@@ -357,4 +451,3 @@ RESERVED = [ 'alignas',
'wchar_t',
'xor',
'xor_eq']
-
diff --git a/pygments/lexers/agile.py b/pygments/lexers/agile.py
index 1f81365e..23404320 100644
--- a/pygments/lexers/agile.py
+++ b/pygments/lexers/agile.py
@@ -532,7 +532,7 @@ class RubyLexer(ExtendedRegexLexer):
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
- (r'([a-zA-Z_][a-zA-Z0-9]*)(:)(?!:)',
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(:)(?!:)',
bygroups(String.Symbol, Punctuation)), # Since Ruby 1.9
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
@@ -848,7 +848,7 @@ class PerlLexer(RegexLexer):
name = 'Perl'
aliases = ['perl', 'pl']
- filenames = ['*.pl', '*.pm']
+ filenames = ['*.pl', '*.pm', '*.t']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
@@ -1937,7 +1937,7 @@ class Perl6Lexer(ExtendedRegexLexer):
name = 'Perl6'
aliases = ['perl6', 'pl6']
filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
- '*.6pm', '*.p6m', '*.pm6']
+ '*.6pm', '*.p6m', '*.pm6', '*.t']
mimetypes = ['text/x-perl6', 'application/x-perl6']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index 3f67862c..c599deff 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -216,6 +216,7 @@ class LlvmLexer(RegexLexer):
(r'@' + identifier, Name.Variable.Global),#Name.Identifier.Global),
(r'%\d+', Name.Variable.Anonymous),#Name.Identifier.Anonymous),
(r'@\d+', Name.Variable.Global),#Name.Identifier.Anonymous),
+ (r'#\d+', Name.Variable.Global),#Name.Identifier.Global),
(r'!' + identifier, Name.Variable),
(r'!\d+', Name.Variable.Anonymous),
(r'c?' + string, String),
@@ -242,17 +243,24 @@ class LlvmLexer(RegexLexer):
r'|thread_local|zeroinitializer|undef|null|to|tail|target|triple'
r'|datalayout|volatile|nuw|nsw|nnan|ninf|nsz|arcp|fast|exact|inbounds'
r'|align|addrspace|section|alias|module|asm|sideeffect|gc|dbg'
+ r'|linker_private_weak'
+ r'|attributes|blockaddress|initialexec|localdynamic|localexec'
+ r'|prefix|unnamed_addr'
r'|ccc|fastcc|coldcc|x86_stdcallcc|x86_fastcallcc|arm_apcscc'
r'|arm_aapcscc|arm_aapcs_vfpcc|ptx_device|ptx_kernel'
+ r'|intel_ocl_bicc|msp430_intrcc|spir_func|spir_kernel'
+ r'|x86_64_sysvcc|x86_64_win64cc|x86_thiscallcc'
r'|cc|c'
r'|signext|zeroext|inreg|sret|nounwind|noreturn|noalias|nocapture'
r'|byval|nest|readnone|readonly'
-
r'|inlinehint|noinline|alwaysinline|optsize|ssp|sspreq|noredzone'
r'|noimplicitfloat|naked'
+ r'|builtin|cold|nobuiltin|noduplicate|nonlazybind|optnone'
+ r'|returns_twice|sanitize_address|sanitize_memory|sanitize_thread'
+ r'|sspstrong|uwtable|returned'
r'|type|opaque'
@@ -261,24 +269,30 @@ class LlvmLexer(RegexLexer):
r'|oeq|one|olt|ogt|ole'
r'|oge|ord|uno|ueq|une'
r'|x'
+ r'|acq_rel|acquire|alignstack|atomic|catch|cleanup|filter'
+ r'|inteldialect|max|min|monotonic|nand|personality|release'
+ r'|seq_cst|singlethread|umax|umin|unordered|xchg'
# instructions
r'|add|fadd|sub|fsub|mul|fmul|udiv|sdiv|fdiv|urem|srem|frem|shl'
r'|lshr|ashr|and|or|xor|icmp|fcmp'
r'|phi|call|trunc|zext|sext|fptrunc|fpext|uitofp|sitofp|fptoui'
- r'fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
+ r'|fptosi|inttoptr|ptrtoint|bitcast|select|va_arg|ret|br|switch'
r'|invoke|unwind|unreachable'
+ r'|indirectbr|landingpad|resume'
r'|malloc|alloca|free|load|store|getelementptr'
r'|extractelement|insertelement|shufflevector|getresult'
r'|extractvalue|insertvalue'
+
+ r'|atomicrmw|cmpxchg|fence'
r')\b', Keyword),
# Types
- (r'void|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
+ (r'void|half|float|double|x86_fp80|fp128|ppc_fp128|label|metadata',
Keyword.Type),
# Integer types
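
Note: a quick way to eyeball the new tokens (the token names are from this
diff; the IR snippet itself is hypothetical):

    from pygments.lexers.asm import LlvmLexer

    ir = 'define half @f(half %x) #0 { ret half %x }'
    for index, token, value in LlvmLexer().get_tokens_unprocessed(ir):
        # 'half' should now lex as Keyword.Type, '#0' as Name.Variable.Global
        print(index, token, value)
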
diff --git a/pygments/lexers/jvm.py b/pygments/lexers/jvm.py
index 8b5d5964..e07656a4 100644
--- a/pygments/lexers/jvm.py
+++ b/pygments/lexers/jvm.py
@@ -15,7 +15,6 @@ from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
-from pygments.util import get_choice_opt
from pygments import unistring as uni
@@ -937,114 +936,65 @@ class CeylonLexer(RegexLexer):
class KotlinLexer(RegexLexer):
"""
- For `Kotlin <http://confluence.jetbrains.net/display/Kotlin/>`_
+ For `Kotlin <http://kotlin.jetbrains.org/>`_
source code.
- Additional options accepted:
-
- `unicodelevel`
- Determines which Unicode characters this lexer allows for identifiers.
- The possible values are:
-
- * ``none`` -- only the ASCII letters and numbers are allowed. This
- is the fastest selection.
- * ``basic`` -- all Unicode characters from the specification except
- category ``Lo`` are allowed.
- * ``full`` -- all Unicode characters as specified in the C# specs
- are allowed. Note that this means a considerable slowdown since the
- ``Lo`` category has more than 40,000 characters in it!
-
- The default value is ``basic``.
-
*New in Pygments 1.5.*
"""
name = 'Kotlin'
aliases = ['kotlin']
filenames = ['*.kt']
- mimetypes = ['text/x-kotlin'] # inferred
+ mimetypes = ['text/x-kotlin']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
- # for the range of allowed unicode characters in identifiers,
- # see http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
-
- levels = {
- 'none': '@?[_a-zA-Z][a-zA-Z0-9_]*',
- 'basic': ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
- '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl +
- uni.Nd + uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*'),
- 'full': ('@?(?:_|[^' +
- uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
- + '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
- 'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
- }
+ kt_name = ('@?[_' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + ']' +
+ '[' + uni.Lu + uni.Ll + uni.Lt + uni.Lm + uni.Nl + uni.Nd +
+ uni.Pc + uni.Cf + uni.Mn + uni.Mc + ']*')
+ kt_id = '(' + kt_name + '|`' + kt_name + '`)'
- tokens = {}
- token_variants = True
-
- for levelname, cs_ident in levels.items():
- tokens[levelname] = {
- 'root': [
- # method names
- (r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
- r'(' + cs_ident + ')' # method name
- r'(\s*)(\()', # signature start
- bygroups(using(this), Name.Function, Text, Punctuation)),
- (r'^\s*\[.*?\]', Name.Attribute),
- (r'[^\S\n]+', Text),
- (r'\\\n', Text), # line continuation
- (r'//.*?\n', Comment.Single),
- (r'/[*](.|\n)*?[*]/', Comment.Multiline),
- (r'\n', Text),
- (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
- (r'[{}]', Punctuation),
- (r'@"(""|[^"])*"', String),
- (r'"(\\\\|\\"|[^"\n])*["\n]', String),
- (r"'\\.'|'[^\\]'", String.Char),
- (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
- r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
- (r'#[ \t]*(if|endif|else|elif|define|undef|'
- r'line|error|warning|region|endregion|pragma)\b.*?\n',
- Comment.Preproc),
- (r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
- Keyword)),
- (r'(abstract|as|break|catch|'
- r'fun|continue|default|delegate|'
- r'do|else|enum|extern|false|finally|'
- r'fixed|for|goto|if|implicit|in|interface|'
- r'internal|is|lock|null|'
- r'out|override|private|protected|public|readonly|'
- r'ref|return|sealed|sizeof|'
- r'when|this|throw|true|try|typeof|'
- r'unchecked|unsafe|virtual|void|while|'
- r'get|set|new|partial|yield|val|var)\b', Keyword),
- (r'(global)(::)', bygroups(Keyword, Punctuation)),
- (r'(bool|byte|char|decimal|double|dynamic|float|int|long|'
- r'short)\b\??', Keyword.Type),
- (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
- (r'(package|using)(\s+)', bygroups(Keyword, Text), 'package'),
- (cs_ident, Name),
- ],
- 'class': [
- (cs_ident, Name.Class, '#pop')
- ],
- 'package': [
- (r'(?=\()', Text, '#pop'), # using (resource)
- ('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
- ]
- }
-
- def __init__(self, **options):
- level = get_choice_opt(options, 'unicodelevel', self.tokens.keys(),
- 'basic')
- if level not in self._all_tokens:
- # compile the regexes now
- self._tokens = self.__class__.process_tokendef(level)
- else:
- self._tokens = self._all_tokens[level]
-
- RegexLexer.__init__(self, **options)
+ tokens = {
+ 'root': [
+ (r'^\s*\[.*?\]', Name.Attribute),
+ (r'[^\S\n]+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'//.*?\n', Comment.Single),
+ (r'/[*].*?[*]/', Comment.Multiline),
+ (r'\n', Text),
+ (r'::|!!|\?[:.]', Operator),
+ (r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
+ (r'[{}]', Punctuation),
+ (r'@"(""|[^"])*"', String),
+ (r'"(\\\\|\\"|[^"\n])*["\n]', String),
+ (r"'\\.'|'[^\\]'", String.Char),
+ (r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFL]?|"
+ r"0[xX][0-9a-fA-F]+[Ll]?", Number),
+ (r'(class)(\s+)(object)', bygroups(Keyword, Text, Keyword)),
+ (r'(class|trait|object)(\s+)', bygroups(Keyword, Text), 'class'),
+ (r'(package|import)(\s+)', bygroups(Keyword, Text), 'package'),
+ (r'(val|var)(\s+)', bygroups(Keyword, Text), 'property'),
+ (r'(fun)(\s+)', bygroups(Keyword, Text), 'function'),
+ (r'(abstract|annotation|as|break|by|catch|class|continue|do|else|'
+ r'enum|false|final|finally|for|fun|get|if|import|in|inner|'
+ r'internal|is|null|object|open|out|override|package|private|'
+ r'protected|public|reified|return|set|super|this|throw|trait|'
+ r'true|try|type|val|var|vararg|when|where|while|This)\b', Keyword),
+ (kt_id, Name),
+ ],
+ 'package': [
+ (r'\S+', Name.Namespace, '#pop')
+ ],
+ 'class': [
+ (kt_id, Name.Class, '#pop')
+ ],
+ 'property': [
+ (kt_id, Name.Property, '#pop')
+ ],
+ 'function': [
+ (kt_id, Name.Function, '#pop')
+ ],
+ }
class XtendLexer(RegexLexer):
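
Note: a small smoke test of the rewritten lexer; the unicodelevel option is
gone, so KotlinLexer() takes no such argument anymore. Hypothetical snippet:

    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    from pygments.lexers.jvm import KotlinLexer

    code = 'fun greet(name: String) { println("Hello, $name") }'
    print(highlight(code, KotlinLexer(), TerminalFormatter()))
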
diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py
index f0e49fef..1cdcfb43 100644
--- a/pygments/lexers/math.py
+++ b/pygments/lexers/math.py
@@ -167,8 +167,8 @@ class JuliaConsoleLexer(Lexer):
if line.startswith('julia>'):
insertions.append((len(curcode),
- [(0, Generic.Prompt, line[:3])]))
- curcode += line[3:]
+ [(0, Generic.Prompt, line[:6])]))
+ curcode += line[6:]
elif line.startswith(' '):
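
Note: the slice length now matches the prompt: len('julia>') is 6, so the
prompt text and the captured code no longer overlap. Sketch:

    line = 'julia> 1 + 1'
    prompt, code = line[:6], line[6:]
    assert prompt == 'julia>' and code == ' 1 + 1'
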
@@ -1308,9 +1308,9 @@ class JagsLexer(RegexLexer):
class StanLexer(RegexLexer):
"""Pygments Lexer for Stan models.
- The Stan modeling language is specified in the *Stan 1.3.0
+ The Stan modeling language is specified in the *Stan 2.0.1
Modeling Language Manual* `pdf
- <http://code.google.com/p/stan/downloads/detail?name=stan-reference-1.3.0.pdf>`_.
+ <https://github.com/stan-dev/stan/releases/download/v2.0.1/stan-reference-2.0.1.pdf>`__
*New in Pygments 1.6.*
"""
diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py
index 10598fb4..44d7404e 100644
--- a/pygments/lexers/other.py
+++ b/pygments/lexers/other.py
@@ -1217,15 +1217,16 @@ class ModelicaLexer(RegexLexer):
],
'statements': [
(r'"', String, 'string'),
+ (r'\'', Name, 'quoted_ident'),
(r'(\d+\.\d*|\.\d+|\d+|\d.)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\]{},.;]', Punctuation),
(r'(true|false|NULL|Real|Integer|Boolean)\b', Name.Builtin),
- (r"([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*')"
- r"(\.([a-zA-Z_][\w]*|'[a-zA-Z_\+\-\*\/\^][\w]*'))+", Name.Class),
- (r"('[\w\+\-\*\/\^]+'|\w+)", Name),
+ (r'([a-zA-Z_][\w\[\]]*|\'[a-zA-Z_\+\-\*\/\^][\w]*\')'
+          r'(\.([a-zA-Z_][\w\[\]]*|\'[a-zA-Z_\+\-\*\/\^][\w]*\'))+', Name.Class),
+ (r'(\'[\w\+\-\*\/\^]+\'|\w+)', Name),
],
'root': [
include('whitespace'),
@@ -1239,7 +1240,7 @@ class ModelicaLexer(RegexLexer):
'keywords': [
(r'(algorithm|annotation|break|connect|constant|constrainedby|'
r'discrete|each|else|elseif|elsewhen|encapsulated|enumeration|'
- r'end|equation|exit|expandable|extends|'
+ r'equation|exit|expandable|extends|'
r'external|false|final|flow|for|if|import|impure|in|initial\sequation|'
r'inner|input|loop|nondiscrete|outer|output|parameter|partial|'
r'protected|public|pure|redeclare|replaceable|stream|time|then|true|'
@@ -1258,9 +1259,13 @@ class ModelicaLexer(RegexLexer):
r'terminate)\b', Name.Builtin),
],
'classes': [
- (r'(block|class|connector|function|model|package|'
- r'record|type)(\s+)([A-Za-z_]+)',
- bygroups(Keyword, Text, Name.Class))
+ (r'(block|class|connector|end|function|model|package|'
+ r'record|type)(\s+)((?!if|when|while)[A-Za-z_]\w*|[\'][^\']+[\'])([;]?)',
+ bygroups(Keyword, Text, Name.Class, Text))
+ ],
+ 'quoted_ident': [
+ (r'\'', Name, '#pop'),
+ (r'[^\']+', Name), # all other characters
],
'string': [
(r'"', String, '#pop'),
@@ -1271,7 +1276,7 @@ class ModelicaLexer(RegexLexer):
(r'\\', String), # stray backslash
],
'html-content': [
- (r'<\s*/\s*html\s*>', Name.Tag, '#pop'),
+ (r'<\s*/\s*html\s*>"', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*html\s*>)', using(HtmlLexer)),
]
}
@@ -1381,9 +1386,9 @@ class RebolLexer(RegexLexer):
tokens = {
'root': [
- (r'REBOL', Generic.Strong, 'script'),
- (r'R', Comment),
(r'[^R]+', Comment),
+ (r'REBOL\s+\[', Generic.Strong, 'script'),
+ (r'R', Comment)
],
'script': [
(r'\s+', Text),
@@ -1400,8 +1405,8 @@ class RebolLexer(RegexLexer):
(r'%[^(\^{^")\s\[\]]+', Name.Decorator),
(r'[+-]?([a-zA-Z]{1,3})?\$\d+(\.\d+)?', Number.Float), # money
(r'[+-]?\d+\:\d+(\:\d+)?(\.\d+)?', String.Other), # time
- (r'\d+\-[0-9a-zA-Z]+\-\d+(\/\d+\:\d+(\:\d+)?'
- r'([\.\d+]?([+-]?\d+:\d+)?)?)?', String.Other), # date
+ (r'\d+[\-\/][0-9a-zA-Z]+[\-\/]\d+(\/\d+\:\d+((\:\d+)?'
+ r'([\.\d+]?([+-]?\d+:\d+)?)?)?)?', String.Other), # date
(r'\d+(\.\d+)+\.\d+', Keyword.Constant), # tuple
(r'\d+[xX]\d+', Keyword.Constant), # pair
(r'[+-]?\d+(\'\d+)?([\.,]\d*)?[eE][+-]?\d+', Number.Float),
@@ -1493,6 +1498,16 @@ class RebolLexer(RegexLexer):
(r'[^(\[\])]+', Comment),
],
}
+    def analyse_text(text):
+        """
+        Check if the code contains a REBOL header and is therefore probably not R code.
+        """
+        if re.match(r'^\s*REBOL\s*\[', text, re.IGNORECASE):
+            # The code starts with a REBOL header.
+            return 1.0
+        elif re.search(r'\s*REBOL\s*\[', text, re.IGNORECASE):
+            # The code contains a REBOL header with other text before it.
+            return 0.5
class ABAPLexer(RegexLexer):
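
Note: with analyse_text in place, guess_lexer should prefer REBOL over R for
input carrying a REBOL [...] header. A hypothetical check:

    from pygments.lexers import guess_lexer

    src = 'REBOL [ Title: "demo" ]\nprint "hi"'
    print(guess_lexer(src).name)   # expected: REBOL
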
@@ -1680,6 +1695,7 @@ class ABAPLexer(RegexLexer):
# because < and > are part of field symbols.
(r'[?*<>=\-+]', Operator),
(r"'(''|[^'])*'", String.Single),
+ (r"`([^`])*`", String.Single),
(r'[/;:()\[\],\.]', Punctuation)
],
}
@@ -1780,6 +1796,7 @@ class GherkinLexer(RegexLexer):
'examples_table_header': [
(r"\s+\|\s*$", Keyword, "#pop:2"),
include('comments'),
+ (r"\\\|", Name.Variable),
(r"\s*\|", Keyword),
(r"[^\|]", Name.Variable),
],
@@ -1822,6 +1839,7 @@ class GherkinLexer(RegexLexer):
'table_content': [
(r"\s+\|\s*$", Keyword, "#pop"),
include('comments'),
+ (r"\\\|", String),
(r"\s*\|", Keyword),
include('string'),
],
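
Note: the new \| rules keep an escaped pipe inside a table cell from being
read as a cell separator. A hypothetical feature file, run through the lexer:

    from pygments.lexers.other import GherkinLexer

    feature = (
        'Feature: escaping\n'
        '  Scenario Outline: pipes\n'
        '    Given <value>\n'
        '    Examples:\n'
        '      | value  |\n'
        '      | a \\| b |\n'
    )
    for token, text in GherkinLexer().get_tokens(feature):
        print(token, repr(text))
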
diff --git a/pygments/lexers/web.py b/pygments/lexers/web.py
index 142fef57..b0c5d4b7 100644
--- a/pygments/lexers/web.py
+++ b/pygments/lexers/web.py
@@ -833,7 +833,8 @@ class PhpLexer(RegexLexer):
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
- r'catch|throw|this|use|namespace|trait)\b', Keyword),
+ r'catch|throw|this|use|namespace|trait|yield|'
+ r'finally)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
@@ -843,6 +844,7 @@ class PhpLexer(RegexLexer):
+ (r'0b[01]+', Number.Binary),
(r'0[0-7]+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
@@ -2442,10 +2444,10 @@ class CoffeeScriptLexer(RegexLexer):
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
- r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
- r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
+ r'\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?(?!>)|!=?|=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
- (r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
+ (r'(?:\([^()]*\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|'
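
Note: loosening + to * means an empty parameter list now counts as an arrow
function too. Hypothetical check:

    from pygments.lexers.web import CoffeeScriptLexer

    for token, value in CoffeeScriptLexer().get_tokens('f = () -> 42'):
        print(token, repr(value))   # '() ->' should lex as Name.Function
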
diff --git a/pygments/styles/monokai.py b/pygments/styles/monokai.py
index 31dc83b2..7b41b3ec 100644
--- a/pygments/styles/monokai.py
+++ b/pygments/styles/monokai.py
@@ -93,14 +93,14 @@ class MonokaiStyle(Style):
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
- Generic.Deleted: "", # class: 'gd',
+ Generic.Deleted: "#f92672", # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "", # class: 'gh'
- Generic.Inserted: "", # class: 'gi'
+ Generic.Inserted: "#a6e22e", # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "", # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
- Generic.Subheading: "", # class: 'gu'
+ Generic.Subheading: "#75715e", # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
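
Note: Generic.Deleted, Generic.Inserted, and Generic.Subheading are exactly
what the diff lexer emits, so the new colors are easy to eyeball. A sketch,
assuming a checkout with this change:

    from pygments import highlight
    from pygments.formatters import Terminal256Formatter
    from pygments.lexers.text import DiffLexer

    patch = '--- a/f\n+++ b/f\n@@ -1 +1 @@\n-old\n+new\n'
    print(highlight(patch, DiffLexer(), Terminal256Formatter(style='monokai')))
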