Diffstat (limited to 'tests')
-rw-r--r--  tests/examplefiles/99_bottles_of_beer.chpl | 17
-rw-r--r--  tests/examplefiles/StdGeneric.icl | 92
-rw-r--r--  tests/examplefiles/capdl_example.cdl | 64
-rw-r--r--  tests/examplefiles/durexmania.aheui | 4
-rw-r--r--  tests/examplefiles/example.bat | 6
-rw-r--r--  tests/examplefiles/example.jsgf | 28
-rw-r--r--  tests/examplefiles/example.juttle | 110
-rw-r--r--  tests/examplefiles/example.lua | 24
-rw-r--r--  tests/examplefiles/example.praat | 51
-rw-r--r--  tests/examplefiles/example.whiley | 296
-rw-r--r--  tests/examplefiles/example.xtm | 1101
-rw-r--r--  tests/examplefiles/example.yaml | 9
-rw-r--r--  tests/examplefiles/example2.cpp | 20
-rw-r--r--  tests/examplefiles/fibonacci.tokigun.aheui | 4
-rw-r--r--  tests/examplefiles/flatline_example | 186
-rw-r--r--  tests/examplefiles/guidance.smv | 1124
-rw-r--r--  tests/examplefiles/hello-world.puzzlet.aheui | 10
-rw-r--r--  tests/examplefiles/plain.bst | 1097
-rw-r--r--  tests/examplefiles/postgresql_test.txt | 34
-rw-r--r--  tests/examplefiles/scope.cirru | 26
-rw-r--r--  tests/examplefiles/sparql.rq | 4
-rw-r--r--  tests/examplefiles/test.bib | 77
-rw-r--r--  tests/examplefiles/test.cr | 2871
-rw-r--r--  tests/examplefiles/test.erl | 12
-rw-r--r--  tests/examplefiles/test.escript | 4
-rw-r--r--  tests/examplefiles/test.hsail | 62
-rw-r--r--  tests/examplefiles/test.ncl | 20
-rw-r--r--  tests/examplefiles/test.php | 29
-rw-r--r--  tests/examplefiles/test.sil | 206
-rw-r--r--  tests/examplefiles/tsql_example.sql | 72
-rw-r--r--  tests/examplefiles/typescript_example (renamed from tests/examplefiles/example.ts) | 0
-rw-r--r--  tests/examplefiles/typoscript_example | 1930
-rw-r--r--  tests/examplefiles/varnish.vcl | 187
-rw-r--r--  tests/examplefiles/wdiff_example1.wdiff | 731
-rw-r--r--  tests/examplefiles/wdiff_example3.wdiff | 10
-rw-r--r--  tests/test_bibtex.py | 236
-rw-r--r--  tests/test_crystal.py | 308
-rw-r--r--  tests/test_html_formatter.py | 4
-rw-r--r--  tests/test_java.py | 38
-rw-r--r--  tests/test_lexers_other.py | 26
-rw-r--r--  tests/test_praat.py | 130
-rw-r--r--  tests/test_sql.py | 74
-rw-r--r--  tests/test_terminal_formatter.py | 53
-rw-r--r--  tests/test_token.py | 8
-rw-r--r--  tests/test_whiley.py | 30
45 files changed, 11392 insertions, 33 deletions
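
Note: the new files under tests/examplefiles/ above are input data for Pygments' example-file round-trip check (the actual harness, tests/test_examplefiles.py, is not part of this diff). The snippet below is only a minimal sketch of that kind of check, lexing one of the added files and asserting that the chosen lexer emits no Error tokens; the helper name check_example and the hard-coded path are illustrative, not code from this commit.

    import io
    from pygments.lexers import get_lexer_for_filename
    from pygments.token import Error

    def check_example(path, encoding='utf-8'):
        """Lex an example file and fail if the lexer produces Error tokens."""
        with io.open(path, encoding=encoding) as fp:
            text = fp.read()
        # pick a lexer from the file name (and contents, as a tie-breaker)
        lexer = get_lexer_for_filename(path, text)
        errors = [val for tok, val in lexer.get_tokens(text) if tok is Error]
        assert not errors, 'lexer produced Error tokens: %r' % errors[:5]

    check_example('tests/examplefiles/example.whiley')

The diff hunks for the added and modified files follow.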
diff --git a/tests/examplefiles/99_bottles_of_beer.chpl b/tests/examplefiles/99_bottles_of_beer.chpl
index 3629028d..cdc1e650 100644
--- a/tests/examplefiles/99_bottles_of_beer.chpl
+++ b/tests/examplefiles/99_bottles_of_beer.chpl
@@ -4,7 +4,7 @@
* by Brad Chamberlain and Steve Deitz
* 07/13/2006 in Knoxville airport while waiting for flight home from
* HPLS workshop
- * compiles and runs with chpl compiler version 1.7.0
+ * compiles and runs with chpl compiler version 1.12.0
* for more information, contact: chapel_info@cray.com
*
*
@@ -71,10 +71,13 @@ proc computeAction(bottleNum) {
// Modules...
module M1 {
var x = 10;
+
+ var y = 13.0;
}
module M2 {
- use M1;
+ use M1 except y;
+ use M1 only y;
proc main() {
writeln("M2 -> M1 -> x " + x);
}
@@ -148,10 +151,10 @@ class IntPair {
var ip = new IntPair(17,2);
write(ip);
-var targetDom: {1..10},
+var targetDom = {1..10},
target: [targetDom] int;
coforall i in targetDom with (ref target) {
- targetDom[i] = i ** 3;
+ target[i] = i ** 3;
}
var wideOpen = 0o777,
@@ -166,9 +169,11 @@ private module M3 {
}
private iter bar() {
-
+ for i in 1..10 {
+ yield i;
+ }
}
private var x: int;
-} \ No newline at end of file
+}
diff --git a/tests/examplefiles/StdGeneric.icl b/tests/examplefiles/StdGeneric.icl
new file mode 100644
index 00000000..2e6c3931
--- /dev/null
+++ b/tests/examplefiles/StdGeneric.icl
@@ -0,0 +1,92 @@
+implementation module StdGeneric
+
+import StdInt, StdMisc, StdClass, StdFunc
+
+generic bimap a b :: Bimap .a .b
+
+bimapId :: Bimap .a .a
+bimapId = { map_to = id, map_from = id }
+
+bimap{|c|} = { map_to = id, map_from = id }
+
+bimap{|PAIR|} bx by = { map_to= map_to, map_from=map_from }
+where
+ map_to (PAIR x y) = PAIR (bx.map_to x) (by.map_to y)
+ map_from (PAIR x y) = PAIR (bx.map_from x) (by.map_from y)
+bimap{|EITHER|} bl br = { map_to= map_to, map_from=map_from }
+where
+ map_to (LEFT x) = LEFT (bl.map_to x)
+ map_to (RIGHT x) = RIGHT (br.map_to x)
+ map_from (LEFT x) = LEFT (bl.map_from x)
+ map_from (RIGHT x) = RIGHT (br.map_from x)
+
+bimap{|(->)|} barg bres = { map_to = map_to, map_from = map_from }
+where
+ map_to f = comp3 bres.map_to f barg.map_from
+ map_from f = comp3 bres.map_from f barg.map_to
+
+bimap{|CONS|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (CONS x) = CONS (barg.map_to x)
+ map_from (CONS x) = CONS (barg.map_from x)
+
+bimap{|FIELD|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (FIELD x) = FIELD (barg.map_to x)
+ map_from (FIELD x) = FIELD (barg.map_from x)
+
+bimap{|OBJECT|} barg = { map_to= map_to, map_from=map_from }
+where
+ map_to (OBJECT x) = OBJECT (barg.map_to x)
+ map_from (OBJECT x) = OBJECT (barg.map_from x)
+
+bimap{|Bimap|} x y = {map_to = map_to, map_from = map_from}
+where
+ map_to {map_to, map_from} =
+ { map_to = comp3 y.map_to map_to x.map_from
+ , map_from = comp3 x.map_to map_from y.map_from
+ }
+ map_from {map_to, map_from} =
+ { map_to = comp3 y.map_from map_to x.map_to
+ , map_from = comp3 x.map_from map_from y.map_to
+ }
+
+comp3 :: !(.a -> .b) u:(.c -> .a) !(.d -> .c) -> u:(.d -> .b)
+comp3 f g h
+ | is_id f
+ | is_id h
+ = cast g
+ = cast (\x -> g (h x))
+ | is_id h
+ = cast (\x -> f (g x))
+ = \x -> f (g (h x))
+where
+ is_id :: !.(.a -> .b) -> Bool
+ is_id f = code inline
+ {
+ eq_desc e_StdFunc_did 0 0
+ pop_a 1
+ }
+
+ cast :: !u:a -> u:b
+ cast f = code inline
+ {
+ pop_a 0
+ }
+
+getConsPath :: !GenericConsDescriptor -> [ConsPos]
+getConsPath {gcd_index, gcd_type_def={gtd_num_conses}}
+ = doit gcd_index gtd_num_conses
+where
+ doit i n
+ | n == 0
+ = abort "getConsPath: zero conses\n"
+ | i >= n
+ = abort "getConsPath: cons index >= number of conses"
+ | n == 1
+ = []
+ | i < (n/2)
+ = [ ConsLeft : doit i (n/2) ]
+ | otherwise
+ = [ ConsRight : doit (i - (n/2)) (n - (n/2)) ]
+ \ No newline at end of file
diff --git a/tests/examplefiles/capdl_example.cdl b/tests/examplefiles/capdl_example.cdl
new file mode 100644
index 00000000..050e56a6
--- /dev/null
+++ b/tests/examplefiles/capdl_example.cdl
@@ -0,0 +1,64 @@
+#ifdef ARCH_ARM
+arch arm11
+#else
+arch ia32
+#endif
+
+objects {
+ my_ep = ep /* A synchronous endpoint */
+
+ /* Two thread control blocks */
+ tcb1 = tcb
+ tcb2 = tcb
+
+ /* Four frames of physical memory */
+ frame1 = frame (4k)
+ frame2 = frame (4k)
+ frame3 = frame (4k)
+ frame4 = frame (4k)
+
+ /* Two page tables */
+ pt1 = pt
+ pt2 = pt
+
+ /* Two page directories */
+ pd1 = pd
+ pd2 = pd
+
+ /* Two capability nodes */
+ cnode1 = cnode (2 bits)
+ cnode2 = cnode (3 bits)
+}
+caps {
+ cnode1 {
+ 0x1: frame1 (RW) /* read/write */
+ 0x2: my_ep (R) /* read-only */
+ }
+ cnode2 {
+ 0x1: my_ep (W) /* write-only */
+ }
+ tcb1 {
+ vspace: pd1
+ ipc_buffer_slot: frame1
+ cspace: cnode1
+ }
+ pd1 {
+ 0x10: pt1
+ }
+ pt1 {
+ 0x8: frame1 (RW)
+ 0x9: frame2 (R)
+ }
+ tcb2 {
+ vspace: pd2
+ ipc_buffer_slot: frame3
+ cspace: cnode2
+ }
+ pd2 {
+ 0x10: pt2
+ }
+ pt2 {
+ 0x10: frame3 (RW)
+ 0x12: frame4 (R)
+ }
+}
diff --git a/tests/examplefiles/durexmania.aheui b/tests/examplefiles/durexmania.aheui
new file mode 100644
index 00000000..89654c00
--- /dev/null
+++ b/tests/examplefiles/durexmania.aheui
@@ -0,0 +1,4 @@
+우주메이저☆듀렉스전도사♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♂♀♡먊
+삶은밥과야근밥샤주세양♡밥사밥사밥사밥사밥사땅땅땅빵☆따밦내발따밦다빵맣밥밥밥내놔밥줘밥밥밥밗땅땅땅박밝땅땅딻타밟타맣밦밣따박타맣밦밣따박타맣밦밣따박타맣박빵빵빵빵따따따따맣삶몲
+Original Source by @harunene // Run it on AheuiChem(http://yoo2001818.github.io/AheuiChem/)
+https://gist.github.com/item4/ca870a63b390da6cc6f1
diff --git a/tests/examplefiles/example.bat b/tests/examplefiles/example.bat
index bf27673c..2b45d2bc 100644
--- a/tests/examplefiles/example.bat
+++ b/tests/examplefiles/example.bat
@@ -99,6 +99,10 @@ goto fail
rem "comment comment"^
goto fail
rem comment comment^
+if "1==1" equ "1==1" goto comments4
+goto fail
+:comments4
+rem comment"comment^
set /a _passed+=1
GOTO :EOF
goto :fail
@@ -201,5 +205,7 @@ for /f "tokens=2 delims==" %%G in ( 'assoc %+;/p extension'),%'
) &>nul ver
if errorlevel 0 if not errorlevel 1 set /a _passed+=1
goto :eof
+FOR /F %%a IN ('%%c%%') DO %%a
+rem %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x% %x%
:/?
goto :fail
diff --git a/tests/examplefiles/example.jsgf b/tests/examplefiles/example.jsgf
new file mode 100644
index 00000000..cd79dbed
--- /dev/null
+++ b/tests/examplefiles/example.jsgf
@@ -0,0 +1,28 @@
+#JSGF V1.0 UTF-8 en;
+
+grammar org.pygments.example;
+
+// comment /*
+/*
+ multi-line
+ comment
+ /* without nesting
+ @example doesn't mean anything here.
+*/
+/**/
+
+/**
+ * A silly @example grammar.
+ *
+ * @author David Corbett
+ * @version 1
+* @see <org.pygments.example.en>
+ * @example hello world
+ @example hello ","
+ *
+ **@blah
+ **world
+ */
+public <sentence> = (/1/<en> | / 0.8f /<fr> |/0/<VOID>""""{}{})*<NULL>;
+<org.pygments.example.fr> = bonjour {hello} [,] "le monde" {world};
+<en> = (/56/hello|/3.14e3/"\"hello\"") {hello} {{ {\\//\} } world {world} !+ ;
diff --git a/tests/examplefiles/example.juttle b/tests/examplefiles/example.juttle
new file mode 100644
index 00000000..ae861996
--- /dev/null
+++ b/tests/examplefiles/example.juttle
@@ -0,0 +1,110 @@
+/* Block comment */
+/*
+ Multiline block
+ comment
+*/
+
+// inline comment
+function juttleFunction(arg) {
+ if (arg == null) {
+ return null;
+ }
+ else if (arg == 0) {
+ return 'zero';
+ }
+ else if (arg == 1) {
+ return "one";
+ }
+ else {
+ return 1.1;
+ }
+}
+
+reducer juttleReducer(field) {
+ var x = 0;
+ function update() {
+ x = *field;
+ }
+
+ function result() {
+ return x;
+ }
+}
+
+sub myemit(limit) {
+ emit -limit limit
+}
+
+input test: text -default 'input';
+const object = {
+ xyz: 123,
+ name: 'something'
+};
+
+const array = [
+ :2016-01-01:,
+ :2016-01-01T01:00:00:,
+ :2016-01-01T01:00:00.000:,
+ :2016-01-01T01:00:00.000Z:,
+ :2016-01-01T01:00:00.000-0800:,
+ :2016-01-01T01:00:00.000-08:00:,
+ :00:00:01:,
+ :00:00:00.001:,
+ :now:,
+ :beginning:,
+ :end:,
+ :forever:,
+ :yesterday:,
+ :today:,
+ :tomorrow:,
+ :1:,
+ :1.1:,
+ :1s:,
+ :1 second:,
+ :1 seconds:,
+ :100ms:,
+ :100 millisecond:,
+ :100 milliseconds:,
+ :1d:,
+ :1 day:,
+ :1 days:,
+ :.2h:,
+ :1.2h:,
+ :.2 hour:,
+ :1.2 hours:,
+ :.5d:,
+ :1.5d:,
+ :.5 day:,
+ :1.5 days:,
+ :5m:,
+ :5 minutes:,
+ :10w:,
+ :10 weeks:,
+ :10M:,
+ :10 months:,
+ :100y:,
+ :100 years:,
+ :1 year and 2 months and 2 days:
+];
+
+emit
+ | batch :10 minutes:
+ | filter x=true
+ | head 1
+ | join
+ | keep x
+ | pace -every :1 minute:
+ | pass
+ | put y=false
+ | remove z
+ | sequence
+ | skip 1
+ | sort field -desc
+ | split field
+ | tail 10
+ | unbatch
+ | uniq field
+;
+
+read adapter -last :day: 'search' AND field~/pattern/ OR field == 'string'
+ | write adapter
diff --git a/tests/examplefiles/example.lua b/tests/examplefiles/example.lua
index 0289e58c..8ecd6a13 100644
--- a/tests/examplefiles/example.lua
+++ b/tests/examplefiles/example.lua
@@ -247,4 +247,28 @@ function AucAdvanced.Debug.Assert(test, message)
return DebugLib.Assert(addonName, test, message)
end
+--[==[
+Here follow further tests of Lua syntax.
+]]==]
+---[[
+local t = {
+ [ [[
+x
+]==] \]]]=1|2; a={b={c={}}},
+ 1, 1., 1.2, .2, 1e3, 1.e3, 1.2e3, .2e3, 1.2e+3, 1.2E-3;
+ 0xA, 0Xa, 0xA., 0x.F, 0xA.F, 0xA.Fp1, 0xA.FP+1, 0Xa.fp-1;
+}
+function t.f()
+ goto eof
+ os.exit()
+ :: eof ::
+end
+
+function t . a --[==[x]==] .b --[==[y]==] --
+-- () end
+ . c : d (file)
+ return '.\a.\b.\f.\n.\r.\t.\v.\\.\".\'.\
+.\z
+ .\0.\00.\000.\0000.\xFa.\u{1}.\u{1234}'
+end
diff --git a/tests/examplefiles/example.praat b/tests/examplefiles/example.praat
index bf2d005f..85573919 100644
--- a/tests/examplefiles/example.praat
+++ b/tests/examplefiles/example.praat
@@ -2,17 +2,27 @@ form Highlighter test
sentence Blank
sentence My_sentence This should all be a string
text My_text This should also all be a string
- word My_word Only the first word is a string, the rest is invalid
+ word My_word Only the first word is a string, the rest is discarded
boolean Binary 1
boolean Text no
boolean Quoted "yes"
comment This should be a string
+ optionmenu Choice: 1
+ option Foo
+ option Bar
+ option 100
real left_Range -123.6
positive right_Range_max 3.3
integer Int 4
natural Nat 4
endform
+# Periods do not establish boundaries for keywords
+form.var = 10
+# Or operators
+not.an.operator$ = "Bad variable name"
+bad.or.not = 1
+
# External scripts
include /path/to/file
runScript: "/path/to/file"
@@ -51,12 +61,16 @@ endif
string$ = "Strings can be 'interpolated'"
string$ = "But don't interpolate everything!"
+string$(10)
+
+repeat
+ string$ = string$ - right$(string$)
+until !length(string$)
Text... 1 Right 0.2 Half many----hyphens
Text... 1 Right -0.4 Bottom aحبيبa
Text... 1 Right -0.6 Bottom 日本
Draw circle (mm)... 0.5 0.5 i
-x=1
rows = Object_'table'.nrow
value$ = Table_'table'$[25, "f0"]
@@ -83,17 +97,19 @@ var = if macintosh = 1 then 0 else 1 fi ; This is an inline comment
n = numberOfSelected("Sound")
for i from newStyle.local to n
name = selected$(extractWord$(selected$(), " "))
- sound'i' = selected("Sound", i)
+ sound'i' = selected("Sound", i+(a*b))
sound[i] = sound'i'
endfor
-for i from 1 to n
+i = 1
+while i < n
+ i++
# Different styles of object selection
select sound'i'
sound = selected()
sound$ = selected$("Sound")
select Sound 'sound$'
- selectObject(sound[i])
+ selectObject( sound[i])
selectObject: sound
# Pause commands
@@ -124,14 +140,16 @@ for i from 1 to n
# Multi-line command with modifier
pitch = noprogress To Pitch (ac): 0, 75, 15, "no",
...0.03, 0.45, 0.01, 0.35, 0.14, 600
+ # Formulas are strings
+ Formula: "if col = 1 then row * Object_'pitch'.dx + 'first' else self fi"
# do-style command with assignment
minimum = do("Get minimum...", 0, 0, "Hertz", "Parabolic")
# New-style multi-line command call with broken strings
table = Create Table with column names: "table", 0,
- ..."file subject speaker
- ...f0 f1 f2 f3 " +
+ ..."file subject speaker
+ ... f0 f1 f2 f" + string$(3) + " " +
..."duration response"
# Function call with trailing space
@@ -156,7 +174,7 @@ for i from 1 to n
demoWaitForInput ( )
demo Erase all
demo Text: 50, "centre", 50, "half", "Finished"
-endfor
+endwhile
switch$ = if switch == 1 then "a" else
... if switch == 2 then "b" else
@@ -207,6 +225,11 @@ assert a != b && c
assert a <> b || c
assert a < b | c
assert a > b
+
+assert (a)or (b)
+assert (a) or(b)
+assert (a)and(b)
+
assert "hello" = "he" + "llo"
assert "hello" == "hello world" - " world"
@@ -243,3 +266,15 @@ endproc
asserterror Unknown symbol:'newline$'« _
assert '_new_style.local'
+@proc: a, selected("string"), b
+# Comment
+
+for i to saveSelection.n
+ selectObject: saveSelection.id[i]
+ appendInfoLine: selected$()
+endfor
+
+@ok(if selected$("Sound") = "tone" then 1 else 0 fi,
+ ... "selected sound is tone")
+
+@ok_formula("selected$(""Sound"") = ""tone""", "selected sound is tone")
diff --git a/tests/examplefiles/example.whiley b/tests/examplefiles/example.whiley
new file mode 100644
index 00000000..74b39370
--- /dev/null
+++ b/tests/examplefiles/example.whiley
@@ -0,0 +1,296 @@
+/**
+ * Example Whiley program, taken from the Whiley benchmark suite.
+ * https://github.com/Whiley/WyBench/blob/master/src/101_interpreter/Main.whiley
+ */
+
+import whiley.lang.System
+import whiley.lang.Int
+import whiley.io.File
+import string from whiley.lang.ASCII
+import char from whiley.lang.ASCII
+
+// ====================================================
+// A simple calculator for expressions
+// ====================================================
+
+constant ADD is 0
+constant SUB is 1
+constant MUL is 2
+constant DIV is 3
+
+// binary operation
+type BOp is (int x) where ADD <= x && x <= DIV
+type BinOp is { BOp op, Expr lhs, Expr rhs }
+
+// variables
+type Var is { string id }
+
+// list access
+type ListAccess is {
+ Expr src,
+ Expr index
+}
+
+// expression tree
+type Expr is int | // constant
+ Var | // variable
+ BinOp | // binary operator
+ Expr[] | // array constructor
+ ListAccess // list access
+
+// values
+type Value is int | Value[]
+
+// stmts
+type Print is { Expr rhs }
+type Set is { string lhs, Expr rhs }
+type Stmt is Print | Set
+
+// ====================================================
+// Expression Evaluator
+// ====================================================
+
+type RuntimeError is { string msg }
+type Environment is [{string k, Value v}]
+
+// Evaluate an expression in a given environment reducing either to a
+// value, or a runtime error. The latter occurs if evaluation gets
+// "stuck" (e.g. expression is // not well-formed)
+function evaluate(Expr e, Environment env) -> Value | RuntimeError:
+ //
+ if e is int:
+ return e
+ else if e is Var:
+ return env[e.id]
+ else if e is BinOp:
+ Value|RuntimeError lhs = evaluate(e.lhs, env)
+ Value|RuntimeError rhs = evaluate(e.rhs, env)
+ // check if stuck
+ if !(lhs is int && rhs is int):
+ return {msg: "arithmetic attempted on non-numeric value"}
+ // switch statement would be good
+ if e.op == ADD:
+ return lhs + rhs
+ else if e.op == SUB:
+ return lhs - rhs
+ else if e.op == MUL:
+ return lhs * rhs
+ else if rhs != 0:
+ return lhs / rhs
+ return {msg: "divide-by-zero"}
+ else if e is Expr[]:
+ [Value] r = []
+ for i in e:
+ Value|RuntimeError v = evaluate(i, env)
+ if v is RuntimeError:
+ return v
+ else:
+ r = r ++ [v]
+ return r
+ else if e is ListAccess:
+ Value|RuntimeError src = evaluate(e.src, env)
+ Value|RuntimeError index = evaluate(e.index, env)
+ // santity checks
+ if src is [Value] && index is int && index >= 0 && index < |src|:
+ return src[index]
+ else:
+ return {msg: "invalid list access"}
+ else:
+ return 0 // dead-code
+
+// ====================================================
+// Expression Parser
+// ====================================================
+
+type State is { string input, int pos }
+type SyntaxError is { string msg, int start, int end }
+
+function SyntaxError(string msg, int start, int end) -> SyntaxError:
+ return { msg: msg, start: start, end: end }
+
+// Top-level parse method
+function parse(State st) -> (Stmt,State)|SyntaxError:
+ //
+ Var keyword, Var v
+ Expr e
+ int start = st.pos
+ //
+ keyword,st = parseIdentifier(st)
+ switch keyword.id:
+ case "print":
+ any r = parseAddSubExpr(st)
+ if !(r is SyntaxError):
+ e,st = r
+ return {rhs: e},st
+ else:
+ return r // error case
+ case "set":
+ st = parseWhiteSpace(st)
+ v,st = parseIdentifier(st)
+ any r = parseAddSubExpr(st)
+ if !(r is SyntaxError):
+ e,st = r
+ return {lhs: v.id, rhs: e},st
+ else:
+ return r // error case
+ default:
+ return SyntaxError("unknown statement",start,st.pos-1)
+
+function parseAddSubExpr(State st) -> (Expr, State)|SyntaxError:
+ //
+ Expr lhs, Expr rhs
+ // First, pass left-hand side
+ any r = parseMulDivExpr(st)
+ //
+ if r is SyntaxError:
+ return r
+ //
+ lhs,st = r
+ st = parseWhiteSpace(st)
+ // Second, see if there is a right-hand side
+ if st.pos < |st.input| && st.input[st.pos] == '+':
+ // add expression
+ st.pos = st.pos + 1
+ r = parseAddSubExpr(st)
+ if !(r is SyntaxError):
+ rhs,st = r
+ return {op: ADD, lhs: lhs, rhs: rhs},st
+ else:
+ return r
+ else if st.pos < |st.input| && st.input[st.pos] == '-':
+ // subtract expression
+ st.pos = st.pos + 1
+ r = parseAddSubExpr(st)
+ if !(r is SyntaxError):
+ rhs,st = r
+ return {op: SUB, lhs: lhs, rhs: rhs},st
+ else:
+ return r
+ // No right-hand side
+ return (lhs,st)
+
+function parseMulDivExpr(State st) -> (Expr, State)|SyntaxError:
+ // First, parse left-hand side
+ Expr lhs, Expr rhs
+ any r = parseTerm(st)
+ if r is SyntaxError:
+ return r
+ //
+ lhs,st = r
+ st = parseWhiteSpace(st)
+ // Second, see if there is a right-hand side
+ if st.pos < |st.input| && st.input[st.pos] == '*':
+ // add expression
+ st.pos = st.pos + 1
+ r = parseMulDivExpr(st)
+ if !(r is SyntaxError):
+ rhs,st = r
+ return {op: MUL, lhs: lhs, rhs: rhs}, st
+ else:
+ return r
+ else if st.pos < |st.input| && st.input[st.pos] == '/':
+ // subtract expression
+ st.pos = st.pos + 1
+ r = parseMulDivExpr(st)
+ if !(r is SyntaxError):
+ rhs,st = r
+ return {op: DIV, lhs: lhs, rhs: rhs}, st
+ else:
+ return r
+ // No right-hand side
+ return (lhs,st)
+
+function parseTerm(State st) -> (Expr, State)|SyntaxError:
+ //
+ st = parseWhiteSpace(st)
+ if st.pos < |st.input|:
+ if ASCII.isLetter(st.input[st.pos]):
+ return parseIdentifier(st)
+ else if ASCII.isDigit(st.input[st.pos]):
+ return parseNumber(st)
+ else if st.input[st.pos] == '[':
+ return parseList(st)
+ //
+ return SyntaxError("expecting number or variable",st.pos,st.pos)
+
+function parseIdentifier(State st) -> (Var, State):
+ //
+ string txt = ""
+ // inch forward until end of identifier reached
+ while st.pos < |st.input| && ASCII.isLetter(st.input[st.pos]):
+ txt = txt ++ [st.input[st.pos]]
+ st.pos = st.pos + 1
+ return ({id:txt}, st)
+
+function parseNumber(State st) -> (Expr, State)|SyntaxError:
+ // inch forward until end of identifier reached
+ int start = st.pos
+ while st.pos < |st.input| && ASCII.isDigit(st.input[st.pos]):
+ st.pos = st.pos + 1
+ //
+ int|null iv = Int.parse(st.input[start..st.pos])
+ if iv == null:
+ return SyntaxError("Error parsing number",start,st.pos)
+ else:
+ return iv, st
+
+function parseList(State st) -> (Expr, State)|SyntaxError:
+ //
+ st.pos = st.pos + 1 // skip '['
+ st = parseWhiteSpace(st)
+ [Expr] l = [] // initial list
+ bool firstTime = true
+ while st.pos < |st.input| && st.input[st.pos] != ']':
+ if !firstTime && st.input[st.pos] != ',':
+ return SyntaxError("expecting comma",st.pos,st.pos)
+ else if !firstTime:
+ st.pos = st.pos + 1 // skip ','
+ firstTime = false
+ any r = parseAddSubExpr(st)
+ if r is SyntaxError:
+ return r
+ else:
+ Expr e
+ e,st = r
+ // perform annoying error check
+ l = l ++ [e]
+ st = parseWhiteSpace(st)
+ st.pos = st.pos + 1
+ return l,st
+
+// Parse all whitespace upto end-of-file
+function parseWhiteSpace(State st) -> State:
+ while st.pos < |st.input| && ASCII.isWhiteSpace(st.input[st.pos]):
+ st.pos = st.pos + 1
+ return st
+
+// ====================================================
+// Main Method
+// ====================================================
+
+public method main(System.Console sys):
+ if(|sys.args| == 0):
+ sys.out.println("no parameter provided!")
+ else:
+ File.Reader file = File.Reader(sys.args[0])
+ string input = ASCII.fromBytes(file.readAll())
+
+ Environment env = Environment()
+ State st = {pos: 0, input: input}
+ while st.pos < |st.input|:
+ Stmt s
+ any r = parse(st)
+ if r is SyntaxError:
+ sys.out.println("syntax error: " ++ r.msg)
+ return
+ s,st = r
+ Value|RuntimeError v = evaluate(s.rhs,env)
+ if v is RuntimeError:
+ sys.out.println("runtime error: " ++ v.msg)
+ return
+ if s is Set:
+ env[s.lhs] = v
+ else:
+ sys.out.println(r)
+ st = parseWhiteSpace(st)
+
diff --git a/tests/examplefiles/example.xtm b/tests/examplefiles/example.xtm
new file mode 100644
index 00000000..927117da
--- /dev/null
+++ b/tests/examplefiles/example.xtm
@@ -0,0 +1,1101 @@
+;;; example.xtm -- Extempore code examples
+
+;; Author: Ben Swift, Andrew Sorensen
+;; Keywords: extempore
+
+;;; Commentary:
+
+
+
+;;; Code:
+
+;; bit twiddling
+
+(xtmtest '(bind-func test_bit_twiddle_1
+ (lambda ()
+ (bitwise-and 65535 255 15 1)))
+
+ (test_bit_twiddle_1) 1)
+
+(xtmtest '(bind-func test_bit_twiddle_2
+ (lambda ()
+ (bitwise-not -1)))
+
+ (test_bit_twiddle_2) 0)
+
+(xtmtest '(bind-func test_bit_twiddle_3
+ (lambda ()
+ (bitwise-not 0)))
+
+ (test_bit_twiddle_3) -1)
+
+(xtmtest '(bind-func test_bit_twiddle_4
+ (lambda ()
+ (bitwise-shift-right 65535 8)
+ (bitwise-shift-right 65535 4 4)))
+
+ (test_bit_twiddle_4) 255)
+
+(xtmtest '(bind-func test_bit_twiddle_5
+ (lambda ()
+ (bitwise-shift-left (bitwise-shift-right 65535 8) 4 4)))
+
+ (test_bit_twiddle_5) 65280)
+
+(xtmtest '(bind-func test_bit_twiddle_6
+ (lambda ()
+ (bitwise-and (bitwise-or (bitwise-eor 21844 65534) (bitwise-eor 43690 65534)) 1)))
+
+ (test_bit_twiddle_6) 0)
+
+;; integer literals default to 64 bit integers
+(xtmtest '(bind-func int-literal-test
+ (lambda (a)
+ (* a 5)))
+
+ (int-literal-test 6) 30)
+
+;; float literals default to doubles
+(xtmtest '(bind-func float-literal-test
+ (lambda (a)
+ (* a 5.0)))
+
+ (float-literal-test 6.0) 30.0)
+
+;; you are free to recompile an existing closure
+(xtmtest '(bind-func int-literal-test
+ (lambda (a)
+ (/ a 5)))
+
+ (int-literal-test 30))
+
+(xtmtest '(bind-func closure-test1
+ (let ((power 0))
+ (lambda (x)
+ (set! power (+ power 1)) ;; set! for closure mutation as per scheme
+ (* x power))))
+
+ (closure-test1 2))
+
+(xtmtest '(bind-func closure-returns-closure-test
+ (lambda ()
+ (lambda (x)
+ (* x 3))))
+
+ (closure-returns-closure-test))
+
+(xtmtest '(bind-func incrementer-test1
+ (lambda (i:i64)
+ (lambda (incr)
+ (set! i (+ i incr))
+ i)))
+
+ (incrementer-test1 0))
+
+(define myf (incrementer-test1 0))
+
+;; so we need to type f properly
+(xtmtest '(bind-func incrementer-test2
+ (lambda (f:[i64,i64]* x)
+ (f x)))
+ (incrementer-test2 myf 1) 1)
+
+;; and we can call my-in-maker-wrapper
+;; to appy myf
+(xtmtest-result (incrementer-test2 myf 1) 2)
+(xtmtest-result (incrementer-test2 myf 1) 3)
+(xtmtest-result (incrementer-test2 myf 1) 4)
+
+;; of course the wrapper is only required if you
+;; need interaction with the scheme world.
+;; otherwise you just call my-inc-maker directly
+
+;; this avoids the wrapper completely
+(xtmtest '(bind-func incrementer-test3
+ (let ((f (incrementer-test1 0)))
+ (lambda ()
+ (f 1))))
+
+ (incrementer-test3) 1)
+
+(xtmtest-result (incrementer-test3) 2)
+(xtmtest-result (incrementer-test3) 3)
+
+;; hopefully you're getting the idea.
+;; note that once we've compiled something
+;; we can then use it any of our new
+;; function definitions.
+
+;; do a little 16bit test
+(xtmtest '(bind-func bitsize-sixteen
+ (lambda (a:i16)
+ (dtoi16 (* (i16tod a) 5.0))))
+
+ (bitsize-sixteen 5) 25)
+
+;; while loop test
+
+(xtmtest '(bind-func test_while_loop_1
+ (lambda ()
+ (let ((count 0))
+ (while (< count 5)
+ (printf "count = %lld\n" count)
+ (set! count (+ count 1)))
+ count)))
+
+ (test_while_loop_1) 5)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Closures can be recursive
+;;
+
+(xtmtest '(bind-func recursive-closure-test
+ (lambda (a)
+ (if (< a 1)
+ (printf "done\n")
+ (begin (printf "a: %lld\n" a)
+ (recursive-closure-test (- a 1))))))
+
+ (recursive-closure-test 3))
+
+;; check TAIL OPTIMIZATION
+;; if there is no tail call optimiation
+;; in place then this should blow the
+;; stack and crash the test
+
+;; CANNOT RUN THIS TEST ON WINDOWS (i.e. no salloc)!
+(if (not (equal? (sys:platform) "Windows"))
+ (xtmtest '(bind-func tail_opt_test
+ (lambda (n:i64)
+ (let ((a:float* (salloc 8000)))
+ (if (= n 0)
+ (printf "tail opt test passed!\n")
+ (tail_opt_test (- n 1))))))
+
+ (tail_opt_test 200)))
+
+(println 'A 'segfault 'here 'incidates 'that 'tail-call-optimizations 'are 'not 'working!)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; some anon lambda tests
+;;
+
+(xtmtest '(bind-func infer_lambdas_test
+ (lambda ()
+ (let ((a 5)
+ (b (lambda (x) (* x x)))
+ (c (lambda (y) (* y y))))
+ (c (b a)))))
+
+ (infer_lambdas_test))
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; a simple tuple example
+;;
+;; tuple types are represented as <type,type,type>*
+;;
+
+;; make and return a simple tuple
+(xtmtest '(bind-func tuple-test1
+ (lambda ()
+ (let ((t:<i64,double,i32>* (alloc)))
+ t)))
+
+ (tuple-test1))
+
+;; logview shows [<i64,double,i32>*]*
+;; i.e. a closure that takes no arguments
+;; and returns the tuple <i64,double,i32>*
+
+
+;; here's another tuple example
+;; note that my-test-7's return type is inferred
+;; by the tuple-reference index
+;; (i.e. i64 being tuple index 0)
+(xtmtest '(bind-func tuple-test2
+ (lambda ()
+ (let ((a:<i64,double>* (alloc)) ; returns pointer to type <i64,double>
+ (b 37)
+ (c 6.4))
+ (tuple-set! a 0 b) ;; set i64 to 64
+ (tset! a 1 c) ;; set double to 6.4 - tset! is an alias for tuple-set!
+ (printf "tuple:1 %lld::%f\n" (tuple-ref a 0) (tref a 1))
+ ;; we can fill a tuple in a single call by using tfill!
+ (tfill! a 77 77.7)
+ (printf "tuple:2 %lld::%f\n" (tuple-ref a 0) (tuple-ref a 1))
+ (tuple-ref a 0))))
+
+ (tuple-test2) 77)
+
+;; return first element which is i64
+;; should be 64 as we return the
+;; first element of the tuple
+;; (println (my-test-7)) ; 77
+
+
+;; tbind binds variables to values
+;; based on tuple structure
+;; _ (underscore) means don't attempt
+;; to match against this position in
+;; the tuple (i.e. skip)
+(xtmtest '(bind-func tuple-bind-test
+ (lambda ()
+ (let ((t1:<i32,float,<i32,float>*,double>* (alloc))
+ (t2:<i32,float>* (alloc))
+ (a 0) (b:float 0.0) (c 0.0))
+ (tfill! t2 3 3.3)
+ (tfill! t1 1 2.0 t2 4.0)
+ (tbind t1 a b _ c)
+ c)))
+
+ (tuple-bind-test) 4.0)
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; some array code with *casting*
+;; this function returns void
+(xtmtest '(bind-func array-test1
+ (lambda ()
+ (let ((v1:|5,float|* (alloc))
+ (v2:|5,float|* (alloc))
+ (i 0)
+ (k 0))
+ (dotimes (i 5)
+ ;; random returns double so "truncate" to float
+ ;; which is what v expects
+ (array-set! v1 i (dtof (random))))
+ ;; we can use the afill! function to fill an array
+ (afill! v2 1.1 2.2 3.3 4.4 5.5)
+ (dotimes (k 5)
+ ;; unfortunately printf doesn't like floats
+ ;; so back to double for us :(
+ (printf "val: %lld::%f::%f\n" k
+ (ftod (array-ref v1 k))
+ (ftod (aref v2 k)))))))
+
+ (array-test1))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; some crazy array code with
+;; closures and arrays
+;; try to figure out what this all does
+;;
+;; this example uses the array type
+;; the pretty print for this type is
+;; |num,type| num elements of type
+;; |5,i64| is an array of 5 x i64
+;;
+;; An array is not a pointer type
+;; i.e. |5,i64| cannot be bitcast to i64*
+;;
+;; However an array can be a pointer
+;; i.e. |5,i64|* can be bitcast to i64*
+;; i.e. |5,i64|** to i64** etc..
+;;
+;; make-array returns a pointer to an array
+;; i.e. (make-array 5 i64) returns type |5,i64|*
+;;
+;; aref (array-ref) and aset! (array-set!)
+;; can operate with either pointers to arrays or
+;; standard pointers.
+;;
+;; in other words aref and aset! are happy
+;; to work with either i64* or |5,i64|*
+
+(bind-func array-test2
+ (lambda (v:|5,i64|*)
+ (let ((f (lambda (x)
+ (* (array-ref v 2) x))))
+ f)))
+
+(bind-func array-test3
+ (lambda (v:|5,[i64,i64]*|*)
+ (let ((ff (aref v 0))) ; aref alias for array-ref
+ (ff 5))))
+
+(xtmtest '(bind-func array-test4
+ (lambda ()
+ (let ((v:|5,[i64,i64]*|* (alloc)) ;; make an array of closures!
+ (vv:|5,i64|* (alloc)))
+ (array-set! vv 2 3)
+ (aset! v 0 (array-test2 vv)) ;; aset! alias for array-set!
+ (array-test3 v))))
+
+ ;; try to guess the answer before you call this!!
+ (array-test4))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; some conditionals
+
+(xtmtest '(bind-func cond-test1
+ (lambda (x:i64 y)
+ (if (> x y)
+ x
+ y)))
+
+ (cond-test1 12 13))
+
+;; returns boolean true
+(xtmtest '(bind-func cond-test2
+ (lambda (x:i64)
+ (cond ((= x 1) (printf "A\n"))
+ ((= x 2) (printf "B\n"))
+ ((= x 3) (printf "C\n"))
+ ((= x 4) (printf "D\n"))
+ (else (printf "E\n")))
+ #t))
+
+ (cond-test2 1))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; making a linear envelop generator
+;; for signal processing and alike
+
+(bind-func envelope-segments
+ (lambda (points:double* num-of-points:i64)
+ (let ((lines:[double,double]** (zone-alloc num-of-points))
+ (k 0))
+ (dotimes (k num-of-points)
+ (let* ((idx (* k 2))
+ (x1 (pointer-ref points (+ idx 0)))
+ (y1 (pointer-ref points (+ idx 1)))
+ (x2 (pointer-ref points (+ idx 2)))
+ (y2 (pointer-ref points (+ idx 3)))
+ (m (if (= 0.0 (- x2 x1)) 0.0 (/ (- y2 y1) (- x2 x1))))
+ (c (- y2 (* m x2)))
+ (l (lambda (time) (+ (* m time) c))))
+ (pointer-set! lines k l)))
+ lines)))
+
+(bind-func make-envelope
+ (lambda (points:double* num-of-points)
+ (let ((klines:[double,double]** (envelope-segments points num-of-points))
+ (line-length num-of-points))
+ (lambda (time)
+ (let ((res -1.0)
+ (k:i64 0))
+ (dotimes (k num-of-points)
+ (let ((line (pointer-ref klines k))
+ (time-point (pointer-ref points (* k 2))))
+ (if (or (= time time-point)
+ (< time-point time))
+ (set! res (line time)))))
+ res)))))
+
+;; make a convenience wrapper
+(xtmtest '(bind-func env-wrap
+ (let* ((points 3)
+ (data:double* (zone-alloc (* points 2))))
+ (pointer-set! data 0 0.0) ;; point data
+ (pset! data 1 0.0)
+ (pset! data 2 2.0)
+ (pset! data 3 1.0)
+ (pset! data 4 4.0)
+ (pset! data 5 0.0)
+ (let ((f (make-envelope data points)))
+ (lambda (time:double)
+ (f time)))))
+ (env-wrap 0.0) 0.0)
+
+(xtmtest-result (env-wrap 1.0) 0.5)
+(xtmtest-result (env-wrap 2.0) 1.0)
+(xtmtest-result (env-wrap 2.5) 0.75)
+(xtmtest-result (env-wrap 4.0) 0.0)
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; direct access to a closures environment
+;;
+;; it is possible to directly access a closures
+;; environment in order to read or modify data
+;; at runtime.
+;;
+;; You do this using a dot operator
+;; To access an environment slot you use
+;; closure.slot:type
+;; So for example
+;; (f.a:i32)
+;; would return the 32bit integer symbol 'a'
+;; from the closure 'f'
+;;
+;; To set an environment slot you just
+;; add a value of the correct type
+;; for example
+;; (f.a:i32 565)
+;; would set 'a' in 'f' to 565
+;;
+;; let's create a closure that capture's 'a'
+
+
+(xtmtest '(bind-func dot-access-test1
+ (let ((a:i32 6))
+ (lambda ()
+ (printf "a:%d\n" a)
+ a)))
+ (dot-access-test1))
+
+;; now let's create a new function
+;; that calls my-test14 twice
+;; once normally
+;; then we directly set the closures 'a' binding
+;; then call again
+;;
+(xtmtest '(bind-func dot-access-test2
+ (lambda (x:i32)
+ (dot-access-test1)
+ (dot-access-test1.a:i32 x)
+ (dot-access-test1)))
+
+ (dot-access-test2 9))
+
+;; of course this works just as well for
+;; non-global closures
+(xtmtest '(bind-func dot-access-test3
+ (lambda (a:i32)
+ (let ((f (lambda ()
+ (* 3 a))))
+ f)))
+ (dot-access-test3 1))
+
+(xtmtest '(bind-func dot-access-test4
+ (lambda ()
+ (let ((f (dot-access-test3 5)))
+ (f.a:i32 7)
+ (f))))
+
+ (dot-access-test4)
+ 21)
+
+;; and you can get and set closures also!
+(xtmtest '(bind-func dot-access-test5
+ (lambda ()
+ (let ((f (lambda (x:i64) x)))
+ (lambda (z)
+ (f z)))))
+
+ (dot-access-test5))
+
+(xtmtest '(bind-func dot-access-test6
+ (lambda ()
+ (let ((t1 (dot-access-test5))
+ (t2 (dot-access-test5)))
+ ;; identity of 5
+ (printf "%lld:%lld\n" (t1 5) (t2 5))
+ (t1.f:[i64,i64]* (lambda (x:i64) (* x x)))
+ ;; square of 5
+ (printf "%lld:%lld\n" (t1 5) (t2 5))
+ ;; cube of 5
+ (t2.f:[i64,i64]* (lambda (y:i64) (* y y y)))
+ (printf "%lld:%lld\n" (t1 5) (t2 5))
+ void)))
+
+ (dot-access-test6)) ;; 5:5 > 25:5 > 25:125
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; named types
+
+;; it can sometimes be helpful to allocate
+;; a predefined tuple type on the stack
+;; you can do this using allocate
+(bind-type vec3 <double,double,double>)
+
+;; String printing!
+(bind-func vec3_print:[void,vec3*]*
+ (lambda (x)
+ (printf "<%d,%d,%d>" (tref x 0) (tref x 1) (tref x 2))
+ void))
+
+(bind-poly print vec3_print)
+
+;; note that point is deallocated at the
+;; end of the function call. You can
+;; stack allocate (stack-alloc)
+;; any valid type (i64 for example)
+(xtmtest '(bind-func salloc-test
+ (lambda ()
+ (let ((point:vec3* (stack-alloc)))
+ (tset! point 0 0.0)
+ (tset! point 1 -1.0)
+ (tset! point 2 1.0)
+ 1)))
+
+ (salloc-test)) ;; 1
+
+;; all named types have 2 default constructors
+;; name (zone alloation) + name_h (heap allocation)
+;; and a default print poly
+(xtmtest '(bind-func data-constructor-test
+ (lambda ()
+ (let ((v1 (vec3 1.0 2.0 3.0))
+ (v2 (vec3_h 4.0 5.0 6.0)))
+ (println v1 v2)
+ ;; halloced vec3 needs freeing
+ (free v2)
+ void)))
+
+ (data-constructor-test))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; aref-ptr and tref-ptr
+;;
+
+;; aref-ptr and tref-ptr return a pointer to an element
+;; just as aref and tref return elements aref-ptr and
+;; tref-ptr return a pointer to those elements.
+
+;; This allows you to do things like create an array
+;; with an offset
+(xtmtest '(bind-func aref-ptr-test
+ (lambda ()
+ (let ((arr:|32,i64|* (alloc))
+ (arroff (aref-ptr arr 16))
+ (i 0)
+ (k 0))
+ ;; load arr
+ (dotimes (i 32) (aset! arr i i))
+ (dotimes (k 16)
+ (printf "index: %lld\tarr: %lld\tarroff: %lld\n"
+ k (aref arr k) (pref arroff k))))))
+
+ (aref-ptr-test))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; arrays
+;; Extempore lang supports arrays as for first class
+;; aggregate types (in other words as distinct from
+;; a pointer).
+;;
+;; an array is made up of a size and a type
+;; |32,i64| is an array of 32 elements of type i64
+;;
+
+(bind-type tuple-with-array <double,|32,|4,i32||,float>)
+
+(xtmtest '(bind-func array-test5
+ (lambda ()
+ (let ((tup:tuple-with-array* (stack-alloc))
+ (t2:|32,i64|* (stack-alloc)))
+ (aset! t2 0 9)
+ (tset! tup 2 5.5)
+ (aset! (aref-ptr (tref-ptr tup 1) 0) 0 0)
+ (aset! (aref-ptr (tref-ptr tup 1) 0) 1 1)
+ (aset! (aref-ptr (tref-ptr tup 1) 0) 2 2)
+ (printf "val: %lld %lld %f\n"
+ (aref (aref-ptr (tref-ptr tup 1) 0) 1)
+ (aref t2 0) (ftod (tref tup 2)))
+ (aref (aref-ptr (tref-ptr tup 1) 0) 1))))
+
+ (array-test5) 1) ;; val: 1 9 5.5
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Global Variables
+;;
+;; You can allocate global variables using bind-val
+;;
+
+(bind-val g_var_a i32 5)
+
+;; increment g_var_a by inc
+;; and return new value of g_var_a
+(xtmtest '(bind-func global_var_test1
+ (lambda (incr)
+ (set! g_var_a (+ g_var_a incr))
+ g_var_a))
+
+ (global_var_test1 3) 8) ;; 8
+
+;; you can bind any primitive type
+(bind-val g_var_b double 5.5)
+(bind-val g_var_c i1 0)
+
+(xtmtest '(bind-func global_var_test1b
+ (lambda ()
+ (* g_var_b (if g_var_c 1.0 4.0))))
+
+ (global_var_test1b) 22.0)
+
+;; global strings
+
+(bind-val g_cstring i8* "Jiblet.")
+
+(xtmtest '(bind-func test_g_cstring
+ (lambda ()
+ (let ((i 0))
+ (dotimes (i 7)
+ (printf "g_cstring[%lld] = %c\n" i (pref g_cstring i)))
+ (printf "\nSpells... %s\n" g_cstring))))
+
+ (test_g_cstring))
+
+(xtmtest '(bind-func test_g_cstring1
+ (lambda ()
+ (let ((test_cstring "Niblot.")
+ (i 0)
+ (total 0))
+ (dotimes (i 7)
+ (let ((c1 (pref g_cstring i))
+ (c2 (pref test_cstring i)))
+ (printf "checking %c against %c\n" c1 c2)
+ (if (= c1 c2)
+ (set! total (+ total 1)))))
+ total)))
+
+ (test_g_cstring1) 5)
+
+
+
+
+
+;; for tuples, arrays and vectors, bind-val only takes *two*
+;; arguments. The tuple/array/vector will be initialised to zero.
+
+(bind-val g_tuple1 <i64,i64>)
+(bind-val g_tuple2 <double,double>)
+
+(xtmtest '(bind-func test_g_tuple
+ (lambda ()
+ (tfill! g_tuple1 1 4)
+ (tfill! g_tuple2 4.0 1.0)
+ (and (= (tref g_tuple1 0) (dtoi64 (tref g_tuple2 1)))
+ (= (dtoi64 (tref g_tuple2 0)) (tref g_tuple1 1)))))
+
+ (test_g_tuple) 1)
+
+;; same thing with arrays
+
+(bind-val g_array1 |10,double|)
+(bind-val g_array2 |10,i64|)
+
+;; if we just loop over and print the values in each array
+
+(xtmtest '(bind-func test_g_array11
+ (lambda ()
+ (let ((i 0))
+ (dotimes (i 10)
+ (printf "garray_1[%lld] = %f garray_2[%lld] = %lld\n"
+ i (aref g_array1 i) i (aref g_array2 i))))))
+
+ (test_g_array11) 1)
+
+;; but if we loop over and set some values into the arrays
+
+(xtmtest '(bind-func test_g_array2
+ (lambda ()
+ (let ((i 0))
+ (dotimes (i 10)
+ (aset! g_array1 i (i64tod i))
+ (aset! g_array2 i i)
+ (printf "garray_1[%lld] = %f garray_2[%lld] = %lld\n"
+ i (aref g_array1 i) i (aref g_array2 i)))
+ (= (dtoi64 (aref g_array1 5))
+ (aref g_array2 5)))))
+
+ (test_g_array2) 1)
+
+;; just to test, let's try a large array
+
+(bind-val g_array3 |100000000,i64|)
+
+(xtmtest '(bind-func test_g_array3
+ (lambda ()
+ (let ((i 0))
+ (dotimes (i 100000000)
+ (aset! g_array3 i i))
+ (= (pref g_array3 87654321)
+ 87654321))))
+
+ (test_g_array3) 1)
+
+;; if you want to bind a global pointer, then the third 'value'
+;; argument is the size of the memory to allocate (in elements, not in bytes)
+
+(bind-val g_ptr0 double* 10)
+
+(xtmtest '(bind-func test_g_ptr0
+ (lambda ()
+ (let ((total 0.0)
+ (i 0))
+ (dotimes (i 10)
+ (pset! g_ptr0 i (i64tod i))
+ (set! total (+ total (pref g_ptr0 i))))
+ total)))
+
+ (test_g_ptr0) 45.0)
+
+(bind-val g_ptr1 |4,i32|* 2)
+(bind-val g_ptr2 <i64,double>* 4)
+
+(xtmtest '(bind-func test_g_ptr1
+ (lambda ()
+ (afill! g_ptr1 11 66 35 81)
+ (tset! g_ptr2 1 35.0)
+ (printf "%f :: %d\n" (tref g_ptr2 1) (aref g_ptr1 2))
+ (aref g_ptr1 3)))
+
+ (test_g_ptr1) 81) ;; should also print 35.000000 :: 35
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Callbacks
+
+(xtmtest '(bind-func callback-test
+ (lambda (time:i64 count:i64)
+ (printf "time: %lld:%lld\n" time count)
+ (callback (+ time 1000) callback-test (+ time 22050) (+ count 1))))
+
+ (callback-test (now) 0))
+
+;; compiling this will stop the callbacks
+;;
+;; of course we need to keep the type
+;; signature the same [void,i64,i64]*
+;;
+(xtmtest '(bind-func callback-test
+ (lambda (time:i64 count:i64)
+ #t))
+
+ (callback-test))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; some memzone tests
+
+(xtmtest '(bind-func memzone-test1
+ (lambda ()
+ (let ((b:|5,double|* (zalloc)))
+ (aset! b 0
+ (memzone 1024
+ (let ((a:|10,double|* (zalloc)))
+ (aset! a 0 3.5)
+ (aref a 0))))
+ (let ((c:|9,i32|* (zalloc)))
+ (aset! c 0 99)
+ (aref b 0)))))
+
+ (memzone-test1) 3.5)
+
+(xtmtest '(bind-func memzone-test2
+ (lambda ()
+ (memzone 1024
+ (let ((k:|15,double|* (zalloc))
+ (f (lambda (fa:|15,double|*)
+ (memzone 1024
+ (let ((a:|10,double|* (zalloc))
+ (i 0))
+ (dotimes (i 10)
+ (aset! a i (* (aref fa i) (random))))
+ a)))))
+ (f k)))))
+
+ (memzone-test2))
+
+(xtmtest '(bind-func memzone-test3
+ (lambda ()
+ (let ((v (memzone-test2))
+ (i 0))
+ (dotimes (i 10) (printf "%lld:%f\n" i (aref v i))))))
+
+ (memzone-test3)) ;; should print all 0.0's
+
+(xtmtest '(bind-func memzone-test4
+ (lambda ()
+ (memzone 1024 (* 44100 10)
+ (let ((a:|5,double|* (alloc)))
+ (aset! a 0 5.5)
+ (aref a 0)))))
+
+ (memzone-test4) 5.50000)
+
+;;
+;; Large allocation of memory on BUILD (i.e. when the closure is created)
+;; requires an optional argument (i.e. an amount of memory to allocate
+;; specifically for closure creation)
+;;
+;; This memory is automatically free'd whenever you recompile the closure
+;; (it will be destroyed and replaced by a new allocation of the
+;; same amount or whatever new amount you have allocated for closure
+;; compilation)
+;;
+(xtmtest '(bind-func closure-zalloc-test 1000000
+ (let ((k:|100000,double|* (zalloc)))
+ (lambda ()
+ (aset! k 0 1.0)
+ (aref k 0))))
+
+ (closure-zalloc-test 1000000))
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Ad-Hoc Polymorphism
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; extempore supports ad-hoc polymorphism
+;; at some stage in the future this will
+;; be implicit - but for the moment
+;; it is explicitly defined using bind-poly
+
+;; ad-hoc polymorphism allows you to provide
+;; different specialisations depending on
+;; type. In other words, a single 'name'
+;; can be bound to multiple function
+;; implementations each with a uniqute
+;; type.
+
+
+;; poly variables can be for functions of
+;; mixed argument lengths
+;;
+;; so for example:
+(bind-func poly-test4
+ (lambda (a:i8*)
+ (printf "%s\n" a)))
+
+(bind-func poly-test5
+ (lambda (a:i8* b:i8*)
+ (printf "%s %s\n" a b)))
+
+(bind-func poly-test6
+ (lambda (a:i8* b:i8* c:i8*)
+ (printf "%s %s %s\n" a b c)))
+
+;; bind these three functions to poly 'print'
+(bind-poly testprint poly-test4)
+(bind-poly testprint poly-test5)
+(bind-poly testprint poly-test6)
+
+(xtmtest '(bind-func poly-test7
+ (lambda ()
+ (testprint "extempore's")
+ (testprint "extempore's" "polymorphism")
+ (testprint "extempore's" "polymorphism" "rocks")))
+
+ (poly-test7))
+
+;; polys can Also specialize
+;; on the return type
+(bind-func poly-test8
+ (lambda (a:double)
+ (* a a)))
+
+(bind-func poly-test9
+ (lambda (a:double)
+ (dtoi64 (* a a))))
+
+(bind-poly sqrd poly-test8)
+(bind-poly sqrd poly-test9)
+
+;; specialize on [i64,double]*
+;;
+(xtmtest '(bind-func poly-test10:[i64,double]*
+ (lambda (a)
+ (+ 1 (sqrd a))))
+ (poly-test10 5.0))
+
+;; specialize on [double,doube]*
+(xtmtest '(bind-func poly-test11:[double,double]*
+ (lambda (a)
+ (+ 1.0 (sqrd a))))
+
+ (poly-test11 5.0))
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; a little test for zone cleanup
+;;
+(bind-func MyLittleCleanupTest
+ (lambda ()
+ (let ((tmp2:i8* (alloc 8)))
+ (cleanup (println "Clean up before leaving zone!"))
+ tmp2)))
+
+(xtmtest '(bind-func cleanup-test
+ (lambda ()
+ (letz ((tmp:i8* (alloc 8))
+ (t2 (MyLittleCleanupTest)))
+ (begin
+ (println "In Zone ...")
+ 1))
+ (println "Out of zone ...")
+ void))
+
+ (cleanup-test))
+
+;;;;;;;;;;;;;;;;;;
+;; vector types
+
+;; (bind-func vector-test1
+;; (lambda ()
+;; (let ((v1:/4,float/* (alloc))
+;; (v2:/4,float/* (alloc))
+;; (v3:/4,float/* (alloc)))
+;; (vfill! v1 4.0 3.0 2.0 1.0)
+;; (vfill! v2 1.0 2.0 3.0 4.0)
+;; (vfill! v3 5.0 5.0 5.0 5.0)
+;; (let ((v4 (* v1 v2))
+;; (v5 (> v3 v4))) ;; unforunately vector conditionals don't work!
+;; (printf "mul:%f:%f:%f:%f\n" (ftod (vref v4 0)) (ftod (vref v4 1)) (ftod (vref v4 2)) (ftod (vref v4 3)))
+;; (printf "cmp:%d:%d:%d:%d\n" (i1toi32 (vref v5 0)) (i1toi32 (vref v5 1)) (i1toi32 (vref v5 2)) (i1toi32 (vref v5 3)))
+;; void))))
+
+;; (test-xtfunc (vector-test1))
+
+(bind-func vector-test2
+ (lambda ()
+ (let ((v1:/4,float/* (alloc))
+ (v2:/4,float/* (alloc)))
+ (vfill! v1 1.0 2.0 4.0 8.0)
+ (vfill! v2 2.0 2.5 2.25 2.125)
+ (* v1 v2))))
+
+(xtmtest '(bind-func vector-test3
+ (lambda ()
+ (let ((a (vector-test2)))
+ (printf "%f:%f:%f:%f\n"
+ (ftod (vref a 0))
+ (ftod (vref a 1))
+ (ftod (vref a 2))
+ (ftod (vref a 3)))
+ void)))
+
+ (vector-test3))
+
+;; vectorised sine func
+(bind-func vsinf4
+ (let ((p:/4,float/* (alloc))
+ (b:/4,float/* (alloc))
+ (c:/4,float/* (alloc))
+ (f1:/4,float/* (alloc))
+ (f2:/4,float/* (alloc))
+ (i:i32 0)
+ (p_ 0.225)
+ (b_ (dtof (/ 4.0 3.1415)))
+ (c_ (dtof (/ -4.0 (* 3.1415 3.1415)))))
+ (dotimes (i 4) (vset! p i p_) (vset! b i b_) (vset! c i c_))
+ (lambda (x:/4,float/)
+ ;; no SIMD for abs yet!
+ (dotimes (i 4) (vset! f1 i (fabs (vref x i))))
+ (let ((y (+ (* b x) (* c x f1))))
+ ;; no SIMD for abs yet!
+ (dotimes (i 4) (vset! f2 i (fabs (vref y i))))
+ (+ (* p (- (* y f2) y)) y)))))
+
+(bind-func vcosf4
+ (let ((p:/4,float/* (alloc))
+ (b:/4,float/* (alloc))
+ (c:/4,float/* (alloc))
+ (d:/4,float/* (alloc))
+ (f1:/4,float/* (alloc))
+ (f2:/4,float/* (alloc))
+ (i:i32 0)
+ (p_ 0.225)
+ (d_ (dtof (/ 3.1415 2.0)))
+ (b_ (dtof (/ 4.0 3.1415)))
+ (c_ (dtof (/ -4.0 (* 3.1415 3.1415)))))
+ (dotimes (i 4)
+ (vset! p i p_) (vset! b i b_) (vset! c i c_) (vset! d i d_))
+ (lambda (x:/4,float/)
+ ;; offset x for cos
+ (set! x (+ x d))
+ ;; no SIMD for abs yet!
+ (dotimes (i 4) (vset! f1 i (fabs (vref x i))))
+ (let ((y (+ (* b x) (* c x f1))))
+ ;; no SIMD for abs yet!
+ (dotimes (i 4) (vset! f2 i (fabs (vref y i))))
+ (+ (* p (- (* y f2) y)) y)))))
+
+
+(xtmtest '(bind-func vector-test4
+ (lambda ()
+ (let ((a:/4,float/* (alloc)))
+ (vfill! a 0.1 0.2 0.3 0.4)
+ (let ((b (vsinf4 (pref a 0)))
+ (c (vcosf4 (pref a 0))))
+ (printf "precision inaccuracy is expected:\n")
+ (printf " sinf:\t%f,%f,%f,%f\n"
+ (ftod (sin 0.1:f))
+ (ftod (sin 0.2:f))
+ (ftod (sin 0.3:f))
+ (ftod (sin 0.4:f)))
+ (printf "vsinf:\t%f,%f,%f,%f\n"
+ (ftod (vref b 0))
+ (ftod (vref b 1))
+ (ftod (vref b 2))
+ (ftod (vref b 3)))
+ (printf " cosf:\t%f,%f,%f,%f\n"
+ (ftod (cos 0.1:f))
+ (ftod (cos 0.2:f))
+ (ftod (cos 0.3:f))
+ (ftod (cos 0.4:f)))
+ (printf "vcosf:\t%f,%f,%f,%f\n"
+ (ftod (vref c 0))
+ (ftod (vref c 1))
+ (ftod (vref c 2))
+ (ftod (vref c 3)))
+ void))))
+
+ (vector-test4))
+
+;; test the call-as-xtlang macro
+
+;; make sure it'll handle multiple body forms
+(xtmtest-result (call-as-xtlang (println 1) (println 2) 5)
+ 5)
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; test globalvar as closure
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(bind-func testinc
+ (lambda (incr:i64)
+ (lambda (x:i64)
+ (+ x incr))))
+
+(bind-val GlobalInc [i64,i64]* (testinc 2))
+
+(xtmtest '(bind-func ginc
+ (lambda ()
+ (GlobalInc 5)))
+ (ginc) 7)
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; syntax highlighting tests ;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; these don't return any values, they're visual tests---do they look
+;; right?
+
+(bind-func hl_test1a:[i32,double,|4,i32|**]* 4000
+ "docstring"
+ (lambda (a b)
+ (printf "done\n")))
+
+(bind-func hl_test1b:[i32]*
+ (lambda ()
+ (let ((i:i32 6))
+ (printf "done\n"))))
+
+(bind-val hl_test2 <i32,i32>)
+(bind-val hl_test3 |4,i8|)
+(bind-val hl_test4 double* 10)
+(bind-val hl_test5 i8* "teststr")
+
+(bind-type hl_test_type <i64>)
+
+(println '(bind-lib testlib testfn [i32,i32]*))
+
+;; (and 4 5)
+;; (bind-val hl_test4 double* 10)
+;; (bind-type hl_test_type <i64> "docstring")
+;; (bind-lib testlib testfn [i32,i32]*)
diff --git a/tests/examplefiles/example.yaml b/tests/examplefiles/example.yaml
index 9c0ed9d0..17544c02 100644
--- a/tests/examplefiles/example.yaml
+++ b/tests/examplefiles/example.yaml
@@ -1,3 +1,12 @@
+#
+# Regression tests
+#
+
+%TAG ! tag:example.com:foo/
+---
+test: !foo/bar {a: 'asdf'}
+test2: fred
+...
#
# Examples from the Preview section of the YAML specification
diff --git a/tests/examplefiles/example2.cpp b/tests/examplefiles/example2.cpp
new file mode 100644
index 00000000..ccd99383
--- /dev/null
+++ b/tests/examplefiles/example2.cpp
@@ -0,0 +1,20 @@
+/*
+ * A Test file for the different string literals.
+ */
+
+#include <iostream>
+
+int main() {
+ char *_str = "a normal string";
+ wchar_t *L_str = L"a wide string";
+ char *u8_str = u8"utf-8 string";
+ char16_t *u_str = u"utf-16 string";
+ char32_t *U_str = U"utf-32 string";
+ char *R_str = R""""(raw string with
+"""
+as a delimiter)"""";
+
+ std::cout << R_str << std::endl;
+
+ return 0;
+}
diff --git a/tests/examplefiles/fibonacci.tokigun.aheui b/tests/examplefiles/fibonacci.tokigun.aheui
new file mode 100644
index 00000000..afa2ca05
--- /dev/null
+++ b/tests/examplefiles/fibonacci.tokigun.aheui
@@ -0,0 +1,4 @@
+바싹반박나싼순
+뿌멓떠벌번멍뻐
+쌀삭쌀살다순옭
+어어선썬설썩옭
diff --git a/tests/examplefiles/flatline_example b/tests/examplefiles/flatline_example
new file mode 100644
index 00000000..5ea73408
--- /dev/null
+++ b/tests/examplefiles/flatline_example
@@ -0,0 +1,186 @@
+(field "another field" 2)
+(f "000001" -2)
+
+(missing? "a field" 23)
+
+(random-value "age")
+(weighted-random-value "000001")
+
+(if (missing? "00000") (random-value "000000") (f "000000"))
+
+(ensure-value "000000")
+(ensure-weighted-value "000000")
+
+(normalize "000001")
+(normalize "length" 8 23)
+
+(z-score "a numeric field")
+(z-score 23)
+
+(field-prop string "00023" name)
+(field-prop numeric "00023" summary missing_count)
+
+(category-count "species" "Iris-versicolor")
+(category-count "species" (f "000004"))
+(bin-count "age" (f "bin-selector"))
+(bin-center "000003" 3)
+(bin-center (field "field-selector") 4)
+
+(let (v (f "age"))
+ (cond (< v 2) "baby"
+ (< v 10) "child"
+ (< v 20) "teenager"
+ "adult"))
+
+(segment-label "000000" "baby" 2 "child" 10 "teenager" 20 "adult")
+(segment-label 0 "1st fourth" "2nd fourth" "3rd fourth" "4th fourth")
+
+(let (max (maximum 0)
+ min (minimum 0)
+ step (/ (- max min) 4))
+ (segment-label 0 "1st fourth" (+ min step)
+ "2nd fourth" (+ min step step)
+ "3rd fourth" (+ min step step step)
+ "4th fourth"))
+
+(contains-items? "000000" "blue" "green" "darkblue")
+
+(<= (percentile "age" 0.5) (f "age") (percentile "age" 0.95))
+
+(within-percentiles? "age" 0.5 0.95)
+
+(percentile-label "000023" "1st" "2nd" "3rd" "4th")
+
+(cond (within-percentiles? "000023" 0 0.25) "1st"
+ (within-percentiles? "000023" 0.25 0.5) "2nd"
+ (within-percentiles? "000023" 0.5 0.75) "3rd"
+ "4th")
+
+(str 1 "hello " (field "a"))
+(str "value_" (+ 3 4) "/" (name "000001"))
+
+(length "abc")
+(length "")
+
+(levenshtein (f 0) "a random string")
+(if (< (levenshtein (f 0) "bluething") 5) "bluething" (f 0))
+
+(occurrences "howdy woman, howdy" "howdy")
+(occurrences "howdy woman" "Man" true)
+(occurrences "howdy man" "Man" true)
+(occurrences "hola, Holas" "hola" true "es")
+
+(md5 "a text")
+(sha1 "a text")
+(sha256 "")
+
+(matches? (field "name") ".*\\sHal\\s.*")
+(matches? (field "name") "(?i).*\\shal\\s.*")
+
+(if (matches? (f "result") (re-quote (f "target"))) "GOOD" "MISS")
+(matches? (f "name") (str "^" (re-quote (f "salutation")) "\\s *$"))
+
+(replace "Almost Pig Latin" "\\b(\\w)(\\w+)\\b" "$2$1ay")
+(replace-first "swap first two words" "(\\w+)(\\s+)(\\w+)" "$3$2$1")
+
+(language "this is an English phrase")
+
+(< (field 0) (field 1))
+(<= (field 0 -1) (field 0) (field 0 1))
+(> (field "date") "07-14-1969")
+(>= 23 (f "000004" -2))
+
+(= "Dante" (field "Author"))
+(= 1300 (field "Year"))
+(= (field "Year" -2) (field "Year" -1) (field "Year"))
+(!= (field "00033" -1) (field "00033" 1))
+
+(and (= 3 (field 1)) (= "meh" (f "a")) (< (f "pregnancies") 5))
+(not true)
+
+(linear-regression 1 1 2 2 3 3 4 4)
+(linear-regression 2.0 3.1 2.3 3.3 24.3 45.2)
+
+(epoch-fields (f "milliseconds"))
+(epoch-year (* 1000 (f "seconds")))
+
+(/ (f "a-datetime-string") 1000)
+(/ (epoch (f "a-datetime-string")) 1000)
+
+(epoch-fields (epoch "1969-14-07T06:00:12"))
+(epoch-hour (epoch "11~22~30" "hh~mm~ss"))
+
+(let (x (+ (window "a" -10 10))
+ a (/ (* x 3) 4.34)
+ y (if (< a 10) "Good" "Bad"))
+ (list x (str (f 10) "-" y) a y))
+
+(list (let (z (f 0)) (* 2 (* z z) (log z)))
+ (let (pi 3.141592653589793 r (f "radius")) (* 4 pi r r)))
+
+(if (< (field "age") 18) "non-adult" "adult")
+
+(if (= "oh" (field "000000")) "OH")
+
+(if (> (field "000001") (mean "000001"))
+ "above average"
+ (if (< (field "000001") (mean "000001"))
+ "below average"
+ "mediocre"))
+
+(cond (> (f "000001") (mean "000001")) "above average"
+ (= (f "000001") (mean "000001")) "below average"
+ "mediocre")
+
+(cond (or (= "a" (f 0)) (= "a+" (f 0))) 1
+ (or (= "b" (f 0)) (= "b+" (f 0))) 0
+ (or (= "c" (f 0)) (= "c+" (f 0))) -1)
+
+(cond (< (f "age") 2) "baby"
+ (and (<= 2 (f "age") 10) (= "F" (f "sex"))) "girl"
+ (and (<= 2 (f "age") 10) (= "M" (f "sex"))) "boy"
+ (< 10 (f "age") 20) "teenager"
+ "adult")
+
+(list (field "age")
+ (field "weight" -1)
+ (population "age"))
+
+(list 1.23
+ (if (< (field "age") 10) "child" "adult")
+ (field 3))
+
+(head (cons x lst))
+(tail (cons x lst))
+
+(count (list (f 1) (f 2)))
+(mode (list a b b c b a c c c))
+(max (list -1 2 -2 0.38))
+(min (list -1.3 2 1))
+(avg (list -1 -2 1 2 0.8 -0.8))
+
+(in 3 (1 2 3 2))
+(in "abc" (1 2 3))
+(in (f "size") ("X" "XXL"))
+
+(< _ 3)
+(+ (f "000001" _) 3)
+(< -18 _ (f 3))
+
+(map (* 2 _) (list (f 0 -1) (f 0) (f 0 1)))
+
+(all-but "id" "000023")
+(fields "000003" 3 "a field" "another" "0002a3b-3")
+
+(all-with-defaults "species" "Iris-versicolor"
+ "petal-width" 2.8
+ "000002" 0)
+
+(all-with-numeric-default "median")
+(all-with-numeric-default 0)
+
+(window "000001" -1 2)
+(filter (< _ 99.9) (map (+ 32 (* 1.8 _)) (window "Temp" -2 0)))
+
+(let (now (f "epoch"))
+ (avg (cond-window "temperature" (< (- (f "epoch") now) 240))))
diff --git a/tests/examplefiles/guidance.smv b/tests/examplefiles/guidance.smv
new file mode 100644
index 00000000..671d1e1c
--- /dev/null
+++ b/tests/examplefiles/guidance.smv
@@ -0,0 +1,1124 @@
+--
+-- Shuttle Digital Autopilot
+-- by Sergey Berezin (berez@cs.cmu.edu)
+--
+MODULE cont_3eo_mode_select(start,smode5,vel,q_bar,apogee_alt_LT_alt_ref,
+ h_dot_LT_hdot_reg2,alpha_n_GRT_alpha_reg2,
+ delta_r_GRT_del_r_usp,v_horiz_dnrng_LT_0,
+ high_rate_sep,meco_confirmed)
+
+VAR cont_3EO_start: boolean;
+ RTLS_abort_declared: boolean;
+ region_selected : boolean;
+ m_mode: {mm102, mm103, mm601};
+ r: {reg-1, reg0, reg1, reg2, reg3, reg102};
+ step : {1,2,3,4,5,6,7,8,9,10, exit, undef};
+
+ASSIGN
+ init(cont_3EO_start) := FALSE;
+ init(m_mode) := {mm102, mm103};
+ init(region_selected) := FALSE;
+ init(RTLS_abort_declared) := FALSE;
+ init(r) := reg-1;
+ init(step) := undef;
+
+ next(step) :=
+ case
+ step = 1 & m_mode = mm102 : exit;
+ step = 1 : 2;
+ step = 2 & smode5 : 5;
+ step = 2 & vel = GRT_vi_3eo_max: exit;
+ step = 2 : 3;
+ step = 3 & vel = LEQ_vi_3eo_min : 6;
+ step = 3 : 4;
+ step = 4 & apogee_alt_LT_alt_ref: exit;
+ step = 4 : 6;
+ step = 5 : 6;
+ step = 6 & r = reg0 : exit;
+ step = 6 : 7;
+ step = 7 : 8;
+ step = 8 & q_bar = GRT_qbar_reg3 & !high_rate_sep : 10;
+ step = 8 : 9;
+ step = 9 : 10;
+ step = 10: exit;
+ next(start): 1;
+ step = exit : undef;
+ TRUE: step;
+ esac;
+
+ next(cont_3EO_start) :=
+ case
+ step = 1 & m_mode = mm102 : TRUE;
+ step = 10 & meco_confirmed : TRUE;
+ TRUE : cont_3EO_start;
+ esac;
+
+ next(r) :=
+ case
+ step = 1 & m_mode = mm102 : reg102;
+ step = 2 & !smode5 & vel = GRT_vi_3eo_max: reg0;
+ step = 4 & apogee_alt_LT_alt_ref: reg0;
+ step = 5 & v_horiz_dnrng_LT_0 & delta_r_GRT_del_r_usp : reg0;
+ step = 8 & q_bar = GRT_qbar_reg3 & !high_rate_sep : reg3;
+ step = 9: case
+ (h_dot_LT_hdot_reg2 & alpha_n_GRT_alpha_reg2 &
+ q_bar = GRT_qbar_reg1) | high_rate_sep : reg2;
+ TRUE : reg1;
+ esac;
+ next(step) = 1 : reg-1;
+ TRUE: r;
+ esac;
+
+ next(RTLS_abort_declared) :=
+ case
+ step = 10 & meco_confirmed & m_mode = mm103 : TRUE;
+ TRUE: RTLS_abort_declared;
+ esac;
+
+ next(m_mode) :=
+ case
+ step = 10 & meco_confirmed & m_mode = mm103 : mm601;
+ TRUE: m_mode;
+ esac;
+
+ next(region_selected) :=
+ case
+ next(step) = 1 : FALSE;
+ next(step) = exit : TRUE;
+ TRUE : region_selected;
+ esac;
+
+MODULE cont_3eo_guide(start,cont_3EO_start, mode_select_completed, et_sep_cmd,
+ h_dot_LT_0, q_bar_a_GRT_qbar_max_sep, m_mode, r0,
+ cont_minus_z_compl, t_nav-t_et_sep_GRT_dt_min_z_102,
+ ABS_q_orb_GRT_q_minus_z_max, ABS_r_orb_GRT_r_minus_z_max,
+ excess_OMS_propellant, q_bar_a_LT_qbar_oms_dump,
+ entry_mnvr_couter_LE_0, rcs_all_jet_inhibit,
+ alt_GRT_alt_min_102_dump, t_nav-t_gmtlo_LT_t_dmp_last,
+ pre_sep, cond_18, q_orb_LT_0, ABS_alf_err_LT_alf_sep_err,
+ cond_20b, cond_21, ABS_beta_n_GRT_beta_max, cond_24, cond_26,
+ cond_27, cond_29, mm602_OK)
+VAR
+ step: {1,a1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,
+ b20, c20, d20, 21,22,23,24,25,26,27,28,29,exit, undef};
+ call_RTLS_abort_task : boolean;
+ first3: boolean; -- indicates if it is the first pass
+ first8: boolean;
+ first27: boolean;
+ s_unconv : boolean;
+ mode_2_indicator : boolean;
+ et_sep_man_initiate : boolean;
+ emerg_sep : boolean;
+ cont_3eo_pr_delay : {minus_z_reg1, minus_z_reg2,
+ minus_z_reg3, minus_z_reg4, minus_z_reg102, 0, 5};
+ etsep_y_drift : {undef, minus_z_reg1, minus_z_reg2,
+ minus_z_reg3, minus_z_reg4, minus_z_reg102, 0};
+ fwd_rcs_dump_enable : boolean;
+ fcs_accept_icnct : boolean;
+ oms_rcs_i_c_inh_ena_cmd : boolean;
+ orbiter_dump_ena : boolean;
+ frz_3eo : boolean;
+ high_rate_sep: boolean;
+ entry_gains : boolean;
+ cont_sep_cplt : boolean;
+ pch_cmd_reg4 : boolean;
+ alpha_ok : boolean;
+ r : {reg-1, reg0, reg1, reg2, reg3, reg4, reg102};
+ early_sep : boolean;
+--------------------------------------------
+----- Additional Variables -----------------
+--------------------------------------------
+ rtls_lo_f_d_delay : {undef, 0};
+ wcb2 : {undef, reg1_0, reg2_neg4, wcb2_3eo, reg4_0,
+ reg102_undef, post_sep_0};
+ q_gcb_i : {undef, quat_reg1, quat_reg2, quat_reg3, quat_reg4,
+ quat_reg102_undef, quat_entry_M50_to_cmdbody};
+ oms_nz_lim : {undef, oms_nz_lim_3eo, oms_nz_lim_iload, oms_nz_lim_std};
+ contingency_nz_lim : {undef, contingency_nz_lim_3eo,
+ contingency_nz_lim_iload, contingency_nz_lim_std};
+
+
+
+ASSIGN
+ init(entry_gains) := FALSE;
+ init(frz_3eo) := FALSE;
+ init(cont_3eo_pr_delay) := 5;
+ init(etsep_y_drift) := undef;
+ init(r) := reg-1;
+ init(step) := undef;
+ init(call_RTLS_abort_task) := FALSE;
+ init(first3) := TRUE;
+ init(first8) := TRUE;
+ init(first27) := TRUE;
+ init(cont_sep_cplt) := FALSE;
+ init(et_sep_man_initiate) := FALSE;
+ init(alpha_ok) := FALSE;
+ init(pch_cmd_reg4) := FALSE;
+
+-- Assumed initializations:
+
+ init(rtls_lo_f_d_delay) := undef;
+ init(wcb2) := undef;
+ init(q_gcb_i) := undef;
+ init(oms_nz_lim) := undef;
+ init(contingency_nz_lim) := undef;
+ init(oms_rcs_i_c_inh_ena_cmd) := FALSE;
+ init(orbiter_dump_ena) := FALSE;
+-- init(early_sep) := FALSE;
+
+-------------
+
+ next(step) := nextstep;
+
+ next(r) :=
+ case
+ step = a1 & (cont_3EO_start | mode_select_completed) : r0;
+ step = 21 & cond_21 : reg4;
+ step = 23 & ABS_beta_n_GRT_beta_max & !high_rate_sep : reg1;
+ TRUE : r;
+ esac;
+
+ next(first3) :=
+ case
+ step = 3 & cont_3EO_start : FALSE;
+ TRUE : first3;
+ esac;
+
+ next(first8) :=
+ case
+ step = 8 & excess_OMS_propellant & cont_3EO_start : FALSE;
+ TRUE : first8;
+ esac;
+
+ next(first27) :=
+ case
+ step = 27 : FALSE;
+ TRUE: first27;
+ esac;
+
+ next(s_unconv) :=
+ case
+ step = 3 : FALSE;
+ TRUE : s_unconv;
+ esac;
+
+ next(call_RTLS_abort_task) :=
+ case
+ step = 3 : TRUE;
+ TRUE : call_RTLS_abort_task;
+ esac;
+
+ next(mode_2_indicator) :=
+ case
+ step = 4 : TRUE;
+ TRUE : mode_2_indicator;
+ esac;
+
+ next(et_sep_man_initiate) :=
+ case
+ step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 : TRUE;
+ step = 14 & pre_sep : TRUE;
+ step = 19 & q_orb_LT_0 : TRUE;
+ step = d20 : TRUE;
+ step = 26 & cond_26 : TRUE;
+ step = 29 & cond_29 : TRUE;
+ TRUE : et_sep_man_initiate;
+ esac;
+
+ next(emerg_sep) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102: TRUE;
+ TRUE : emerg_sep;
+ esac;
+
+ next(cont_3eo_pr_delay) :=
+ case
+ next(step) = 1 : 5;
+ step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 :
+ minus_z_reg3;
+ step = 7 & !cont_minus_z_compl & r = reg102 &
+ t_nav-t_et_sep_GRT_dt_min_z_102 &
+ (ABS_q_orb_GRT_q_minus_z_max | ABS_r_orb_GRT_r_minus_z_max) : 0;
+ step = 14 & pre_sep : minus_z_reg102;
+ step = 19 & q_orb_LT_0 : minus_z_reg4;
+ step = d20 : minus_z_reg3;
+ step = 26 & cond_26 : minus_z_reg2;
+ step = 27 & first27 : minus_z_reg1;
+ TRUE : cont_3eo_pr_delay;
+ esac;
+
+ next(etsep_y_drift) :=
+ case
+ step = 5 & h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep & m_mode != mm102 :
+ minus_z_reg3;
+ step = 7 & !cont_minus_z_compl & r = reg102 &
+ t_nav-t_et_sep_GRT_dt_min_z_102 &
+ (ABS_q_orb_GRT_q_minus_z_max | ABS_r_orb_GRT_r_minus_z_max) : 0;
+ step = 14 & pre_sep : minus_z_reg102;
+ step = 19 & q_orb_LT_0 : minus_z_reg4;
+ step = d20 : minus_z_reg3;
+ step = 26 & cond_26 : minus_z_reg2;
+ step = 27 & first27 : minus_z_reg1;
+ TRUE : etsep_y_drift;
+ esac;
+
+ next(fwd_rcs_dump_enable) :=
+ case
+ step = 8 & excess_OMS_propellant & first8 : FALSE;
+ TRUE : fwd_rcs_dump_enable;
+ esac;
+
+ next(fcs_accept_icnct) :=
+ case
+ step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE;
+ TRUE : fcs_accept_icnct;
+ esac;
+
+ next(oms_rcs_i_c_inh_ena_cmd) :=
+ case
+-- next(step) = 1 & oms_rcs_i_c_inh_ena_cmd : {0,1};
+ next(step) = 1 & oms_rcs_i_c_inh_ena_cmd : FALSE; -- Assumed initialization
+ step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE;
+ TRUE : oms_rcs_i_c_inh_ena_cmd;
+ esac;
+
+ next(orbiter_dump_ena) :=
+ case
+ next(start) = TRUE : FALSE; -- Assumed initialization
+ step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : TRUE;
+ step = 13 & alt_GRT_alt_min_102_dump & t_nav-t_gmtlo_LT_t_dmp_last : TRUE;
+ TRUE : orbiter_dump_ena;
+ esac;
+
+ next(frz_3eo) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : FALSE;
+ step = 28 & !et_sep_man_initiate : TRUE;
+ TRUE : frz_3eo;
+ esac;
+
+ next(high_rate_sep) :=
+ case
+ step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : FALSE;
+ step = 25 : TRUE;
+ TRUE : high_rate_sep;
+ esac;
+
+ next(entry_gains) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 10 & entry_mnvr_couter_LE_0 & !rcs_all_jet_inhibit : TRUE;
+ TRUE : entry_gains;
+ esac;
+
+ next(cont_sep_cplt) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 12 & mm602_OK : TRUE;
+ TRUE : cont_sep_cplt;
+ esac;
+
+ next(pch_cmd_reg4) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 18 & !pch_cmd_reg4 & cond_18 : TRUE;
+ TRUE : pch_cmd_reg4;
+ esac;
+
+ next(alpha_ok) :=
+ case
+ next(step) = 1 : FALSE;
+ step = 20 & ABS_alf_err_LT_alf_sep_err : TRUE;
+ TRUE : alpha_ok;
+ esac;
+
+ next(early_sep) :=
+ case
+ step = 27 & first27 :
+ case
+ cond_27 : TRUE;
+ TRUE : FALSE;
+ esac;
+ TRUE : early_sep;
+ esac;
+
+--------------------------------------------
+----- Additional Variables -----------------
+--------------------------------------------
+
+ next(rtls_lo_f_d_delay) :=
+ case
+ next(start) = TRUE : undef; -- Assumed initialization
+ step = 8 & first8 & excess_OMS_propellant : 0;
+ TRUE : rtls_lo_f_d_delay;
+ esac;
+
+ next(wcb2) :=
+ case
+ next(start) = TRUE : undef; -- Assumed initialization
+ step = 10 & entry_mnvr_couter_LE_0 : post_sep_0;
+ step = 12 : case
+ r = reg4 : reg4_0;
+ TRUE : wcb2_3eo;
+ esac;
+ step = 14 & pre_sep : reg102_undef;
+ step = 15 : case
+ r = reg4 : reg4_0;
+ TRUE : wcb2_3eo;
+ esac;
+ step = 25 : reg2_neg4;
+ TRUE : wcb2;
+ esac;
+
+ next(q_gcb_i) :=
+ case
+ next(start) = TRUE : undef; -- Assumed initialization
+ step = 11 : quat_entry_M50_to_cmdbody;
+ step = 14 & pre_sep : quat_reg102_undef;
+ step = 16 : case
+ r = reg4 : quat_reg4;
+ TRUE : quat_reg3;
+ esac;
+ step = 22 : quat_reg2;
+
+-- Without this step the value "quat_reg2" would remain in "reg1":
+-- step = 23 & ABS_beta_n_GRT_beta_max & !high_rate_sep : undef;
+
+ TRUE : q_gcb_i;
+ esac;
+
+ next(oms_nz_lim) :=
+ case
+ next(start) = TRUE : undef; -- Assumed initialization
+ step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 : oms_nz_lim_3eo;
+ step = 12 & mm602_OK : oms_nz_lim_std;
+ TRUE : oms_nz_lim;
+ esac;
+
+ next(contingency_nz_lim) :=
+ case
+ next(start) = TRUE : undef; -- Assumed initialization
+ step = 9 & q_bar_a_LT_qbar_oms_dump & r != reg102 :
+ contingency_nz_lim_3eo;
+ step = 12 & mm602_OK : contingency_nz_lim_std;
+ TRUE : contingency_nz_lim;
+ esac;
+
+DEFINE
+ finished := step = exit;
+ idle := step = undef;
+
+ start_cont_3eo_mode_select :=
+ case
+ step = 1 & !cont_3EO_start : TRUE;
+ TRUE : FALSE;
+ esac;
+
+ nextstep :=
+ case
+ step = 1 : a1;
+ step = a1 : case
+ (cont_3EO_start | mode_select_completed) : 2;
+ TRUE : step;
+ esac;
+ step = 2 : case
+ !cont_3EO_start : exit;
+ first3 : 3;
+ TRUE: 4;
+ esac;
+ step = 3 : 4;
+ step = 4 : case
+ et_sep_cmd : 7;
+ TRUE : 5;
+ esac;
+ step = 5 : case
+ h_dot_LT_0 & q_bar_a_GRT_qbar_max_sep &
+ m_mode != mm102 : exit;
+ TRUE : 6;
+ esac;
+ step = 6 :
+ case
+ r = reg102 : 13;
+ r in {reg3, reg4} : 15;
+ r = reg2 : 22;
+ r = reg1 : 27;
+ TRUE : exit;
+ esac;
+ step = 7 : case
+ cont_minus_z_compl : 8;
+ TRUE : exit;
+ esac;
+ step = 8 : case
+ excess_OMS_propellant & first8 : 9;
+ TRUE : 10;
+ esac;
+ step = 9 : exit;
+ step = 10 : case
+ !entry_mnvr_couter_LE_0 | rcs_all_jet_inhibit : exit;
+ TRUE : 11;
+ esac;
+ step = 11 : 12;
+ step = 12 : exit;
+ step = 13 : 14;
+ step = 14 : exit;
+ step = 15 : 16;
+ step = 16 : 17;
+ step = 17 : case
+ r = reg4 : 18;
+ TRUE : 20;
+ esac;
+ step = 18 : case
+ pch_cmd_reg4 | cond_18 : 19;
+ TRUE : exit;
+ esac;
+ step = 19 : exit;
+ step = 20 : case
+ ABS_alf_err_LT_alf_sep_err : b20;
+ TRUE : c20;
+ esac;
+ step = b20 : case
+ cond_20b : d20;
+ TRUE : exit;
+ esac;
+ step = c20 : case
+ alpha_ok : d20;
+ TRUE : 21;
+ esac;
+ step = d20 : exit;
+ TRUE : nextstep21;
+ esac;
+
+ nextstep21 :=
+ case
+ step = 21 : case
+ cond_21 : 15;
+ TRUE : exit;
+ esac;
+ step = 22 : 23;
+ step = 23 : case
+ ABS_beta_n_GRT_beta_max & !high_rate_sep : 27;
+ TRUE : 24;
+ esac;
+ step = 24 : case
+ cond_24 | high_rate_sep : 25;
+ TRUE : exit;
+ esac;
+ step = 25 : 26;
+ step = 26 : exit;
+ step = 27 : 28;
+ step = 28 : case
+ !et_sep_man_initiate : 29;
+ TRUE : exit;
+ esac;
+ step = 29 : exit;
+ start : 1;
+ step = exit : undef;
+ TRUE : step;
+ esac;
+
+ post_sep_mode := step in {7,8,9,10,11,12};
+
+------------------------------------------------------------------
+------------------------------------------------------------------
+
+MODULE main
+VAR
+ smode5: boolean;
+ vel : {GRT_vi_3eo_max, GRT_vi_3eo_min, LEQ_vi_3eo_min};
+ q_bar: {GRT_qbar_reg3, GRT_qbar_reg1, LEQ_qbar_reg1};
+ q_bar_a_GRT_qbar_max_sep : boolean;
+ q_bar_a_LT_qbar_oms_dump : boolean;
+ apogee_alt_LT_alt_ref : boolean;
+ h_dot_LT_hdot_reg2 : boolean;
+ h_dot_LT_0 : boolean;
+ alpha_n_GRT_alpha_reg2 : boolean;
+ delta_r_GRT_del_r_usp : boolean;
+ v_horiz_dnrng_LT_0: boolean;
+ meco_confirmed: boolean;
+ et_sep_cmd : boolean;
+ cont_minus_z_compl : boolean;
+ t_nav-t_et_sep_GRT_dt_min_z_102 : boolean;
+ ABS_q_orb_GRT_q_minus_z_max : boolean;
+ ABS_r_orb_GRT_r_minus_z_max : boolean;
+ excess_OMS_propellant : boolean;
+ entry_mnvr_couter_LE_0 : boolean;
+ rcs_all_jet_inhibit : boolean;
+ alt_GRT_alt_min_102_dump : boolean;
+ t_nav-t_gmtlo_LT_t_dmp_last : boolean;
+ pre_sep : boolean;
+ cond_18 : boolean;
+ q_orb_LT_0 : boolean;
+ ABS_alf_err_LT_alf_sep_err : boolean;
+ cond_20b : boolean;
+ cond_21 : boolean;
+ ABS_beta_n_GRT_beta_max : boolean;
+ cond_24 : boolean;
+ cond_26 : boolean;
+ cond_27 : boolean;
+ cond_29 : boolean;
+ mm602_OK : boolean;
+ start_guide : boolean;
+ mated_coast_mnvr : boolean;
+
+ cs: cont_3eo_mode_select(cg.start_cont_3eo_mode_select,
+ smode5,vel,q_bar,apogee_alt_LT_alt_ref,
+ h_dot_LT_hdot_reg2,alpha_n_GRT_alpha_reg2,
+ delta_r_GRT_del_r_usp,v_horiz_dnrng_LT_0,
+ cg.high_rate_sep,meco_confirmed);
+
+ cg: cont_3eo_guide(start_guide,
+ cs.cont_3EO_start, cs.region_selected, et_sep_cmd,
+ h_dot_LT_0, q_bar_a_GRT_qbar_max_sep, cs.m_mode, cs.r,
+ cont_minus_z_compl, t_nav-t_et_sep_GRT_dt_min_z_102,
+ ABS_q_orb_GRT_q_minus_z_max, ABS_r_orb_GRT_r_minus_z_max,
+ excess_OMS_propellant, q_bar_a_LT_qbar_oms_dump,
+ entry_mnvr_couter_LE_0, rcs_all_jet_inhibit,
+ alt_GRT_alt_min_102_dump, t_nav-t_gmtlo_LT_t_dmp_last,
+ pre_sep, cond_18, q_orb_LT_0, ABS_alf_err_LT_alf_sep_err,
+ cond_20b, cond_21, ABS_beta_n_GRT_beta_max, cond_24, cond_26,
+ cond_27, cond_29, mm602_OK);
+
+ASSIGN
+ init(start_guide) := FALSE;
+ init(mated_coast_mnvr) := FALSE;
+
+ next(entry_mnvr_couter_LE_0) :=
+ case
+ !entry_mnvr_couter_LE_0 : {FALSE, TRUE};
+ TRUE : TRUE;
+ esac;
+
+---------------------------------------------------------------------
+---------------------------------------------------------------------
+ next(start_guide) :=
+ case
+ start_guide : FALSE;
+ !cg.idle : FALSE;
+ TRUE : {FALSE, TRUE};
+ esac;
+
+ next(smode5) :=
+ case
+ fixed_values : smode5;
+ cg.idle : { FALSE, TRUE };
+ TRUE : smode5;
+ esac;
+
+ next(vel) :=
+ case
+ fixed_values : vel;
+ cg.idle : {GRT_vi_3eo_max, GRT_vi_3eo_min, LEQ_vi_3eo_min};
+ TRUE : vel;
+ esac;
+
+ next(q_bar) :=
+ case
+ fixed_values : q_bar;
+ cg.idle : {GRT_qbar_reg3, GRT_qbar_reg1, LEQ_qbar_reg1};
+ TRUE : q_bar;
+ esac;
+
+ next(q_bar_a_GRT_qbar_max_sep) :=
+ case
+ fixed_values : q_bar_a_GRT_qbar_max_sep;
+ cg.idle : { FALSE, TRUE };
+ TRUE : q_bar_a_GRT_qbar_max_sep;
+ esac;
+
+ next(apogee_alt_LT_alt_ref) :=
+ case
+ fixed_values : apogee_alt_LT_alt_ref;
+ cg.idle : { FALSE, TRUE };
+ TRUE : apogee_alt_LT_alt_ref;
+ esac;
+
+ next(h_dot_LT_hdot_reg2) :=
+ case
+ fixed_values : h_dot_LT_hdot_reg2;
+ cg.idle : { FALSE, TRUE };
+ TRUE : h_dot_LT_hdot_reg2;
+ esac;
+
+ next(h_dot_LT_0) :=
+ case
+ fixed_values : h_dot_LT_0;
+ cg.idle : { FALSE, TRUE };
+ TRUE : h_dot_LT_0;
+ esac;
+
+ next(alpha_n_GRT_alpha_reg2) :=
+ case
+ fixed_values : alpha_n_GRT_alpha_reg2;
+ cg.idle : { FALSE, TRUE };
+ TRUE : alpha_n_GRT_alpha_reg2;
+ esac;
+
+ next(delta_r_GRT_del_r_usp) :=
+ case
+ fixed_values : delta_r_GRT_del_r_usp;
+ cg.idle : { FALSE, TRUE };
+ TRUE : delta_r_GRT_del_r_usp;
+ esac;
+
+ next(v_horiz_dnrng_LT_0) :=
+ case
+ fixed_values : v_horiz_dnrng_LT_0;
+ cg.idle : { FALSE, TRUE };
+ TRUE : v_horiz_dnrng_LT_0;
+ esac;
+
+ next(meco_confirmed) :=
+ case
+ fixed_values : meco_confirmed;
+ meco_confirmed : TRUE;
+ cg.idle : { FALSE, TRUE };
+ TRUE : meco_confirmed;
+ esac;
+
+ next(et_sep_cmd) :=
+ case
+ fixed_values : et_sep_cmd;
+ et_sep_cmd : TRUE;
+ cg.idle : { FALSE, TRUE };
+ TRUE : et_sep_cmd;
+ esac;
+
+ next(cont_minus_z_compl) :=
+ case
+ fixed_values : cont_minus_z_compl;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cont_minus_z_compl;
+ esac;
+
+ next(t_nav-t_et_sep_GRT_dt_min_z_102) :=
+ case
+ fixed_values : t_nav-t_et_sep_GRT_dt_min_z_102;
+ cg.idle : { FALSE, TRUE };
+ TRUE : t_nav-t_et_sep_GRT_dt_min_z_102;
+ esac;
+
+ next(ABS_q_orb_GRT_q_minus_z_max) :=
+ case
+ fixed_values : ABS_q_orb_GRT_q_minus_z_max;
+ cg.idle : { FALSE, TRUE };
+ TRUE : ABS_q_orb_GRT_q_minus_z_max;
+ esac;
+
+ next(ABS_r_orb_GRT_r_minus_z_max) :=
+ case
+ fixed_values : ABS_r_orb_GRT_r_minus_z_max;
+ cg.idle : { FALSE, TRUE };
+ TRUE : ABS_r_orb_GRT_r_minus_z_max;
+ esac;
+
+ next(excess_OMS_propellant) :=
+ case
+ fixed_values : excess_OMS_propellant;
+ cg.idle & excess_OMS_propellant : { FALSE, TRUE };
+ TRUE : excess_OMS_propellant;
+ esac;
+
+ next(q_bar_a_LT_qbar_oms_dump) :=
+ case
+ fixed_values : q_bar_a_LT_qbar_oms_dump;
+ cg.idle : { FALSE, TRUE };
+ TRUE : q_bar_a_LT_qbar_oms_dump;
+ esac;
+
+ next(rcs_all_jet_inhibit) :=
+ case
+ fixed_values : rcs_all_jet_inhibit;
+ cg.idle : { FALSE, TRUE };
+ TRUE : rcs_all_jet_inhibit;
+ esac;
+
+ next(alt_GRT_alt_min_102_dump) :=
+ case
+ fixed_values : alt_GRT_alt_min_102_dump;
+ cg.idle : { FALSE, TRUE };
+ TRUE : alt_GRT_alt_min_102_dump;
+ esac;
+
+ next(t_nav-t_gmtlo_LT_t_dmp_last) :=
+ case
+ fixed_values : t_nav-t_gmtlo_LT_t_dmp_last;
+ cg.idle : { FALSE, TRUE };
+ TRUE : t_nav-t_gmtlo_LT_t_dmp_last;
+ esac;
+
+ next(pre_sep) :=
+ case
+ fixed_values : pre_sep;
+ cg.idle : { FALSE, TRUE };
+ TRUE : pre_sep;
+ esac;
+
+ next(cond_18) :=
+ case
+ fixed_values : cond_18;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_18;
+ esac;
+
+ next(q_orb_LT_0) :=
+ case
+ fixed_values : q_orb_LT_0;
+ cg.idle : { FALSE, TRUE };
+ TRUE : q_orb_LT_0;
+ esac;
+
+ next(ABS_alf_err_LT_alf_sep_err) :=
+ case
+ fixed_values : ABS_alf_err_LT_alf_sep_err;
+ cg.idle : { FALSE, TRUE };
+ TRUE : ABS_alf_err_LT_alf_sep_err;
+ esac;
+
+ next(cond_20b) :=
+ case
+ fixed_values : cond_20b;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_20b;
+ esac;
+
+ next(cond_21) :=
+ case
+ fixed_values : cond_21;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_21;
+ esac;
+
+ next(ABS_beta_n_GRT_beta_max) :=
+ case
+ fixed_values : ABS_beta_n_GRT_beta_max;
+ cg.idle : { FALSE, TRUE };
+ TRUE : ABS_beta_n_GRT_beta_max;
+ esac;
+
+ next(cond_24) :=
+ case
+ fixed_values : cond_24;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_24;
+ esac;
+
+ next(cond_26) :=
+ case
+ fixed_values : cond_26;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_26;
+ esac;
+
+ next(cond_27) :=
+ case
+ fixed_values : cond_27;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_27;
+ esac;
+
+ next(cond_29) :=
+ case
+ fixed_values : cond_29;
+ cg.idle : { FALSE, TRUE };
+ TRUE : cond_29;
+ esac;
+
+ next(mm602_OK) :=
+ case
+ fixed_values : mm602_OK;
+ cg.idle : { FALSE, TRUE };
+ TRUE : mm602_OK;
+ esac;
+
+ next(mated_coast_mnvr) :=
+ case
+ next(cg.step) = 1 : FALSE;
+ cg.step = 6 & cg.r in {reg1, reg2, reg3, reg4, reg102} : TRUE;
+ TRUE : mated_coast_mnvr;
+ esac;
+
+---------------------------------------------------------------------
+---------------------------------------------------------------------
+DEFINE
+ fixed_values := FALSE;
+
+ output_ok :=
+ case
+ cg.q_gcb_i = undef | cg.wcb2 = undef |
+ cg.cont_3eo_pr_delay = 5 |
+ cg.etsep_y_drift = undef :
+ case
+ !mated_coast_mnvr: 1;
+ TRUE : undef;
+ esac;
+ !mated_coast_mnvr: toint(cg.q_gcb_i = quat_entry_M50_to_cmdbody &
+ cg.wcb2 = post_sep_0);
+-- reg1 never happens?
+-- cg.r = reg1 : (cg.q_gcb_i = quat_reg1 & cg.wcb2 = reg1_0 &
+-- cg.cont_3eo_pr_delay = minus_z_reg1 &
+-- cg.etsep_y_drift = minus_z_reg1) | cg.emerg_sep;
+ cg.r = reg2 : toint((cg.q_gcb_i = quat_reg2 & cg.wcb2 = reg2_neg4 &
+ cg.cont_3eo_pr_delay = minus_z_reg2 &
+ cg.etsep_y_drift = minus_z_reg2) | cg.emerg_sep);
+
+ cg.r = reg3 : toint((cg.q_gcb_i = quat_reg3 & cg.wcb2 = wcb2_3eo &
+ cg.cont_3eo_pr_delay = minus_z_reg3 &
+ cg.etsep_y_drift = minus_z_reg3) | cg.emerg_sep);
+ cg.r = reg4 : toint((cg.q_gcb_i = quat_reg4 & cg.wcb2 = reg4_0 &
+ cg.cont_3eo_pr_delay = minus_z_reg4 &
+ cg.etsep_y_drift = minus_z_reg4) | cg.emerg_sep);
+ cg.r = reg102 : toint((cg.q_gcb_i = quat_reg102_undef &
+ cg.wcb2 = reg102_undef &
+ cg.cont_3eo_pr_delay = minus_z_reg102 &
+ cg.etsep_y_drift = minus_z_reg102) | cg.emerg_sep);
+ TRUE : 0;
+ esac;
+
+---------------------------------------------------------------------
+-------- Specifications ---------------------------------------------
+---------------------------------------------------------------------
+
+-- Contingency Guide terminates
+
+SPEC AG(!cg.idle -> AF(cg.finished))
+
+-- Contingency guide can be executed infinitely often
+
+SPEC AG( (cg.idle | cg.finished) ->
+ EF(!(cg.idle | cg.finished) & EF(cg.finished)))
+
+-- Contingency mode select task works fine
+
+SPEC AG(cs.cont_3EO_start & cs.region_selected ->
+ ((cs.m_mode = mm102 | meco_confirmed) &
+ cs.r != reg-1 & cs.r != reg0))
+
+-- Bad (initial) value never happens again once region is computed
+-- unless we restart the task
+
+--SPEC AG(cs.r != reg-1 -> !E[!cg.start_cont_3eo_mode_select U
+-- cs.r = reg-1 & !cg.start_cont_3eo_mode_select])
+
+-- Comment out each of the regions and see if this is still true
+-- (Check, if ALL of the regions can happen)
+
+--SPEC AG(cs.r in {reg-1
+-- ,reg0
+-- ,reg1
+-- ,reg2
+-- ,reg3
+-- ,reg102
+-- })
+
+-- Comment out each of the regions and see if this is still true
+-- (Check, if ALL of the regions can happen)
+
+--SPEC AG(cg.r in {reg-1
+-- ,reg0
+-- ,reg1
+-- ,reg2
+-- ,reg3
+-- ,reg4
+-- ,reg102
+-- })
+
+-- Mode_select starts at the next step after its "start" bit is set:
+
+--SPEC AG(!cg.start_cont_3eo_mode_select ->
+-- AX(cg.start_cont_3eo_mode_select & cs.step in {exit, undef} ->
+-- AX(cs.step = 1 & !cs.region_selected)))
+
+-- During major mode 103, the inertial velocity is monitored.
+-- Below an I-loaded velocity, a MECO would constitute a contingency
+-- abort. (Must NOT be in SMODE=5 (??))
+
+SPEC AG(cg.start_cont_3eo_mode_select & cs.m_mode = mm103 &
+ vel = LEQ_vi_3eo_min & meco_confirmed & !smode5 ->
+ A[!cs.region_selected U cs.region_selected & cs.cont_3EO_start])
+
+-- Above a certain inertial velocity (in mode 103), the 3E/O field
+-- is blanked, indicating that a MECO at this point would not require
+-- an OPS 6 contingency abort.
+
+SPEC AG(cs.region_selected ->
+ (cs.m_mode = mm103 & vel = GRT_vi_3eo_max -> !cs.cont_3EO_start))
+
+-- Between the two velocities, an apogee altitude - velocity curve is
+-- constructed based on the current inertial velocity. If the apogee
+-- altitude is above this curve, a contingency abort capability is
+-- still required and a 3E/O region index will be calculated.
+-- Otherwise, the 3E/O field is blanked out and no further contingency
+-- abort calculations will be performed. (Must NOT be in SMODE=5 (??))
+
+SPEC AG(cg.start_cont_3eo_mode_select & cs.m_mode = mm103 &
+ vel = GRT_vi_3eo_min & meco_confirmed & !smode5 ->
+ A[!cs.region_selected U cs.region_selected &
+ apogee_alt_LT_alt_ref = !cs.cont_3EO_start])
+
+-- For an RTLS trajectory (SMODE=5), a check is made on the downrange
+-- velocity to see if the vehicle is heading away from the landing site.
+-- If this is the case, a 3E/O region index is calculated. If the vehicle
+-- is heading back to the landing site, and the current range to the MECO
+-- R-V line is greater than an I-loaded value, a 3E/O region index is
+-- calculated. Otherwise, an intact abort is possible and the 3E/O field
+-- is blanked.
+
+SPEC AG(cg.start_cont_3eo_mode_select & smode5 & meco_confirmed &
+ (!v_horiz_dnrng_LT_0 | !delta_r_GRT_del_r_usp) ->
+ A[!cs.region_selected U cs.region_selected & cs.cont_3EO_start])
+
+-- If this task is called prior to SRB separation [mm102], the 3E/O region
+-- index is set to 102 and the 3E/O contingency flag is set.
+
+SPEC AG(cs.m_mode = mm102 & cg.start_cont_3eo_mode_select ->
+ AX (A [ !cs.region_selected U cs.region_selected &
+ cs.r = reg102 & cs.cont_3EO_start]))
+
+-- After SRB separation, on every pass that the 3E/O region index is
+-- calculated, a check is made to see if MECO confirmed has occurred. If
+-- so, a check is made to see if the major mode is 103. If so, an RTLS is
+-- automatically invoked to transition to major mode 601.
+
+SPEC AG(!cs.region_selected & cs.m_mode = mm103 & meco_confirmed ->
+ A[!cs.region_selected U cs.region_selected & cs.r != reg0 ->
+ cs.m_mode = mm601 & cs.RTLS_abort_declared])
+
+-- Once the 3E/O contingency flag has been set, this task is no longer
+-- executed.
+
+SPEC AG(cs.cont_3EO_start -> AG(!cg.start_cont_3eo_mode_select))
+
+-- If MECO confirmed occurs in MM103 and an OPS 6 contingency abort
+-- procedure is still required, contingency 3E/O guidance sets the
+-- CONT_3EO_START flag ON. Contingency 3E/O guidance then switches
+-- from its display support function into an actual auto guidance
+-- steering process. [...] Contingency 3E/O guidance sets the RTLS abort
+-- declared flag and the MSC performs the transition from major mode
+-- 103 to 601.
+
+SPEC AG(!cg.idle & !cg.finished & !cs.region_selected & cs.m_mode = mm103 ->
+ A[ !cg.finished U cg.finished & cs.region_selected &
+ (cs.cont_3EO_start -> cs.m_mode = mm601 & cs.RTLS_abort_declared) ])
+
+-- If MECO confirmed occurs in major mode 601 and a contingency abort
+-- procedure is still required, contingency 3E/O guidance sets the
+-- CONT_3EO_START flag ON. [...] Contingency 3E/O guidance then commands
+-- 3E/O auto maneuvers in major mode 601. [What are these maneuvers??]
+
+SPEC AG(cg.finished & cs.m_mode = mm601 & !et_sep_cmd &
+ meco_confirmed & cs.cont_3EO_start ->
+ cg.q_gcb_i in {quat_reg1, quat_reg2, quat_reg3, quat_reg4, undef}
+ | cg.emerg_sep)
+
+-- If MECO confirmed occurs in first stage (MM102) [...], contingency
+-- 3E/O guidance will command a fast ET separation during SRB tailoff in
+-- major mode 102. CONT 3E/O GUID will then command maneuver post-sep in
+-- MM601 (???). [ I'm not sure what indicates fast ET sep.: emerg_sep or
+-- early_sep, or what? ]
+
+SPEC AG(cg.finished & cs.m_mode = mm102 & meco_confirmed & pre_sep ->
+ cg.emerg_sep | et_sep_cmd
+ | cg.et_sep_man_initiate
+ | cg.early_sep
+ )
+
+---------------------------------------------
+-- Invariants from Murphi code --------------
+---------------------------------------------
+
+--SPEC AG(cg.finished -> (output_ok != 0 | (output_ok = undef &
+-- (cg.emerg_sep | !cg.cont_sep_cplt))))
+
+--SPEC AG(!cg.finished & !cg.idle -> !mated_coast_mnvr | !et_sep_cmd)
+
+-- Stronger version !!!
+
+SPEC AG(cg.finished -> output_ok != 0)
+
+-- Contingency Guidance shall command an ET separation
+-- [under certain conditions :-].
+
+SPEC AG(cs.cont_3EO_start & cg.finished &
+ (cg.r = reg1 -> cond_29) &
+ (cg.r = reg2 -> cond_24 & cond_26) &
+ (cg.r = reg3 -> cg.alpha_ok &
+ (ABS_alf_err_LT_alf_sep_err -> cond_20b)) &
+ (cg.r = reg4 -> cond_18 & q_orb_LT_0) &
+ (cg.r = reg102 -> pre_sep) ->
+ et_sep_cmd | cg.et_sep_man_initiate
+ | cg.early_sep
+ | cg.emerg_sep
+ )
+
+-- Contingency Guidance shall command at most one interconnected OMS dump.
+
+SPEC AG(cg.finished & cg.oms_rcs_i_c_inh_ena_cmd ->
+ AG(!cg.oms_rcs_i_c_inh_ena_cmd -> AG(!cg.oms_rcs_i_c_inh_ena_cmd)))
+
+-- Contingency Guidance shall command a transition to glide RTLS
+-- (flight mode 602)
+
+SPEC AG(cg.finished & cs.m_mode = mm601 ->
+ --cg.cont_sep_cplt | cg.emerg_sep |
+ cg.call_RTLS_abort_task)
+
+-- Paper, p. 28, unstated assumption 2: at step 6 the region is
+-- among 102, 1-4.
+
+SPEC AG(cg.step = 6 -> cg.r in {reg102, reg1, reg2, reg3, reg4})
+
+-- The transition to mode 602 shall not occur until the entry maneuver
+-- has been calculated
+
+SPEC !E[cg.q_gcb_i = undef U cg.cont_sep_cplt & cg.q_gcb_i = undef]
+
+-- The entry maneuver calculations shall not commence until the OMS/RCS
+-- interconnect, if any, is complete (??? What does it exactly mean???)
+-- !!!
+--SPEC AG(cg.oms_rcs_i_c_inh_ena_cmd ->
+-- !E[cg.oms_rcs_i_c_inh_ena_cmd U
+-- cg.q_gcb_i != undef & cg.oms_rcs_i_c_inh_ena_cmd])
+
+SPEC AG(cg.oms_rcs_i_c_inh_ena_cmd ->
+ !E[rcs_all_jet_inhibit U
+ cg.q_gcb_i != undef & rcs_all_jet_inhibit])
+
+-- The OMS dump shall not be considered until the -Z translation is complete.
+
+SPEC !E[!cont_minus_z_compl & cg.r != reg102 U cg.orbiter_dump_ena]
+
+-- Completion of -Z translation shall not be checked until ET separation
+-- has been commanded
+
+SPEC !E[!et_sep_cmd U cg.step = 7]
+
+-- ET separation shall be commanded if and only if an abort maneuver
+-- region is assigned [and again there are *certain conditions*].
+
+SPEC AG(cg.finished & cs.cont_3EO_start &
+ (cg.r = reg1 -> cond_29) &
+ (cg.r = reg2 -> cond_24 & cond_26) &
+ (cg.r = reg3 -> cg.alpha_ok &
+ (ABS_alf_err_LT_alf_sep_err -> cond_20b)) &
+ (cg.r = reg4 -> cond_18 & q_orb_LT_0) &
+ (cg.r = reg102 -> pre_sep) ->
+ (cg.et_sep_man_initiate | et_sep_cmd
+ <-> cg.r in {reg1, reg2, reg3, reg4, reg102}))
+
+-- The assigned region can not change arbitrarily.
+
+-- Regions 1 and 2 may interchange, but will not switch to any other region:
+
+SPEC AG(cg.finished & cs.cont_3EO_start & cg.r in {reg1,reg2} ->
+ AG(cg.finished -> cg.r in {reg1,reg2}))
+
+-- Regions 3 and 4 may interchange, but will not switch to any other region:
+
+SPEC AG(cg.finished & cs.cont_3EO_start & cg.r in {reg3,reg4} ->
+ AG(cg.finished -> cg.r in {reg3,reg4}))
+
+-- Region 102 never changes:
+
+SPEC AG(cg.finished & cg.r = reg102 -> AG(cg.finished -> cg.r = reg102))
diff --git a/tests/examplefiles/hello-world.puzzlet.aheui b/tests/examplefiles/hello-world.puzzlet.aheui
new file mode 100644
index 00000000..e7ef3a62
--- /dev/null
+++ b/tests/examplefiles/hello-world.puzzlet.aheui
@@ -0,0 +1,10 @@
+밤밣따빠밣밟따뿌
+빠맣파빨받밤뚜뭏
+돋밬탕빠맣붏두붇
+볻뫃박발뚷투뭏붖
+뫃도뫃희멓뭏뭏붘
+뫃봌토범더벌뿌뚜
+뽑뽀멓멓더벓뻐뚠
+뽀덩벐멓뻐덕더벅
+
+https://github.com/aheui/snippets/blob/master/hello-world/hello-world.puzzlet.aheui
diff --git a/tests/examplefiles/plain.bst b/tests/examplefiles/plain.bst
new file mode 100644
index 00000000..7adf4bb0
--- /dev/null
+++ b/tests/examplefiles/plain.bst
@@ -0,0 +1,1097 @@
+% BibTeX standard bibliography style `plain'
+ % Version 0.99b (8-Dec-10 release) for BibTeX versions 0.99a or later.
+ % Copyright (C) 1984, 1985, 1988, 2010 Howard Trickey and Oren Patashnik.
+ % Unlimited copying and redistribution of this file are permitted as long as
+ % it is unmodified. Modifications (and redistribution of modified versions)
+ % are also permitted, but only if the resulting file is renamed to something
+ % besides btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst.
+ % This restriction helps ensure that all standard styles are identical.
+ % The file btxbst.doc has the documentation for this style.
+
+ENTRY
+ { address
+ author
+ booktitle
+ chapter
+ edition
+ editor
+ howpublished
+ institution
+ journal
+ key
+ month
+ note
+ number
+ organization
+ pages
+ publisher
+ school
+ series
+ title
+ type
+ volume
+ year
+ }
+ {}
+ { label }
+
+INTEGERS { output.state before.all mid.sentence after.sentence after.block }
+
+FUNCTION {init.state.consts}
+{ #0 'before.all :=
+ #1 'mid.sentence :=
+ #2 'after.sentence :=
+ #3 'after.block :=
+}
+
+STRINGS { s t }
+
+FUNCTION {output.nonnull}
+{ 's :=
+ output.state mid.sentence =
+ { ", " * write$ }
+ { output.state after.block =
+ { add.period$ write$
+ newline$
+ "\newblock " write$
+ }
+ { output.state before.all =
+ 'write$
+ { add.period$ " " * write$ }
+ if$
+ }
+ if$
+ mid.sentence 'output.state :=
+ }
+ if$
+ s
+}
+
+FUNCTION {output}
+{ duplicate$ empty$
+ 'pop$
+ 'output.nonnull
+ if$
+}
+
+FUNCTION {output.check}
+{ 't :=
+ duplicate$ empty$
+ { pop$ "empty " t * " in " * cite$ * warning$ }
+ 'output.nonnull
+ if$
+}
+
+FUNCTION {output.bibitem}
+{ newline$
+ "\bibitem{" write$
+ cite$ write$
+ "}" write$
+ newline$
+ ""
+ before.all 'output.state :=
+}
+
+FUNCTION {fin.entry}
+{ add.period$
+ write$
+ newline$
+}
+
+FUNCTION {new.block}
+{ output.state before.all =
+ 'skip$
+ { after.block 'output.state := }
+ if$
+}
+
+FUNCTION {new.sentence}
+{ output.state after.block =
+ 'skip$
+ { output.state before.all =
+ 'skip$
+ { after.sentence 'output.state := }
+ if$
+ }
+ if$
+}
+
+FUNCTION {not}
+{ { #0 }
+ { #1 }
+ if$
+}
+
+FUNCTION {and}
+{ 'skip$
+ { pop$ #0 }
+ if$
+}
+
+FUNCTION {or}
+{ { pop$ #1 }
+ 'skip$
+ if$
+}
+
+FUNCTION {new.block.checka}
+{ empty$
+ 'skip$
+ 'new.block
+ if$
+}
+
+FUNCTION {new.block.checkb}
+{ empty$
+ swap$ empty$
+ and
+ 'skip$
+ 'new.block
+ if$
+}
+
+FUNCTION {new.sentence.checka}
+{ empty$
+ 'skip$
+ 'new.sentence
+ if$
+}
+
+FUNCTION {new.sentence.checkb}
+{ empty$
+ swap$ empty$
+ and
+ 'skip$
+ 'new.sentence
+ if$
+}
+
+FUNCTION {field.or.null}
+{ duplicate$ empty$
+ { pop$ "" }
+ 'skip$
+ if$
+}
+
+FUNCTION {emphasize}
+{ duplicate$ empty$
+ { pop$ "" }
+ { "{\em " swap$ * "}" * }
+ if$
+}
+
+INTEGERS { nameptr namesleft numnames }
+
+FUNCTION {format.names}
+{ 's :=
+ #1 'nameptr :=
+ s num.names$ 'numnames :=
+ numnames 'namesleft :=
+ { namesleft #0 > }
+ { s nameptr "{ff~}{vv~}{ll}{, jj}" format.name$ 't :=
+ nameptr #1 >
+ { namesleft #1 >
+ { ", " * t * }
+ { numnames #2 >
+ { "," * }
+ 'skip$
+ if$
+ t "others" =
+ { " et~al." * }
+ { " and " * t * }
+ if$
+ }
+ if$
+ }
+ 't
+ if$
+ nameptr #1 + 'nameptr :=
+ namesleft #1 - 'namesleft :=
+ }
+ while$
+}
+
+FUNCTION {format.authors}
+{ author empty$
+ { "" }
+ { author format.names }
+ if$
+}
+
+FUNCTION {format.editors}
+{ editor empty$
+ { "" }
+ { editor format.names
+ editor num.names$ #1 >
+ { ", editors" * }
+ { ", editor" * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.title}
+{ title empty$
+ { "" }
+ { title "t" change.case$ }
+ if$
+}
+
+FUNCTION {n.dashify}
+{ 't :=
+ ""
+ { t empty$ not }
+ { t #1 #1 substring$ "-" =
+ { t #1 #2 substring$ "--" = not
+ { "--" *
+ t #2 global.max$ substring$ 't :=
+ }
+ { { t #1 #1 substring$ "-" = }
+ { "-" *
+ t #2 global.max$ substring$ 't :=
+ }
+ while$
+ }
+ if$
+ }
+ { t #1 #1 substring$ *
+ t #2 global.max$ substring$ 't :=
+ }
+ if$
+ }
+ while$
+}
+
+FUNCTION {format.date}
+{ year empty$
+ { month empty$
+ { "" }
+ { "there's a month but no year in " cite$ * warning$
+ month
+ }
+ if$
+ }
+ { month empty$
+ 'year
+ { month " " * year * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.btitle}
+{ title emphasize
+}
+
+FUNCTION {tie.or.space.connect}
+{ duplicate$ text.length$ #3 <
+ { "~" }
+ { " " }
+ if$
+ swap$ * *
+}
+
+FUNCTION {either.or.check}
+{ empty$
+ 'pop$
+ { "can't use both " swap$ * " fields in " * cite$ * warning$ }
+ if$
+}
+
+FUNCTION {format.bvolume}
+{ volume empty$
+ { "" }
+ { "volume" volume tie.or.space.connect
+ series empty$
+ 'skip$
+ { " of " * series emphasize * }
+ if$
+ "volume and number" number either.or.check
+ }
+ if$
+}
+
+FUNCTION {format.number.series}
+{ volume empty$
+ { number empty$
+ { series field.or.null }
+ { output.state mid.sentence =
+ { "number" }
+ { "Number" }
+ if$
+ number tie.or.space.connect
+ series empty$
+ { "there's a number but no series in " cite$ * warning$ }
+ { " in " * series * }
+ if$
+ }
+ if$
+ }
+ { "" }
+ if$
+}
+
+FUNCTION {format.edition}
+{ edition empty$
+ { "" }
+ { output.state mid.sentence =
+ { edition "l" change.case$ " edition" * }
+ { edition "t" change.case$ " edition" * }
+ if$
+ }
+ if$
+}
+
+INTEGERS { multiresult }
+
+FUNCTION {multi.page.check}
+{ 't :=
+ #0 'multiresult :=
+ { multiresult not
+ t empty$ not
+ and
+ }
+ { t #1 #1 substring$
+ duplicate$ "-" =
+ swap$ duplicate$ "," =
+ swap$ "+" =
+ or or
+ { #1 'multiresult := }
+ { t #2 global.max$ substring$ 't := }
+ if$
+ }
+ while$
+ multiresult
+}
+
+FUNCTION {format.pages}
+{ pages empty$
+ { "" }
+ { pages multi.page.check
+ { "pages" pages n.dashify tie.or.space.connect }
+ { "page" pages tie.or.space.connect }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.vol.num.pages}
+{ volume field.or.null
+ number empty$
+ 'skip$
+ { "(" number * ")" * *
+ volume empty$
+ { "there's a number but no volume in " cite$ * warning$ }
+ 'skip$
+ if$
+ }
+ if$
+ pages empty$
+ 'skip$
+ { duplicate$ empty$
+ { pop$ format.pages }
+ { ":" * pages n.dashify * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.chapter.pages}
+{ chapter empty$
+ 'format.pages
+ { type empty$
+ { "chapter" }
+ { type "l" change.case$ }
+ if$
+ chapter tie.or.space.connect
+ pages empty$
+ 'skip$
+ { ", " * format.pages * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.in.ed.booktitle}
+{ booktitle empty$
+ { "" }
+ { editor empty$
+ { "In " booktitle emphasize * }
+ { "In " format.editors * ", " * booktitle emphasize * }
+ if$
+ }
+ if$
+}
+
+FUNCTION {empty.misc.check}
+{ author empty$ title empty$ howpublished empty$
+ month empty$ year empty$ note empty$
+ and and and and and
+ key empty$ not and
+ { "all relevant fields are empty in " cite$ * warning$ }
+ 'skip$
+ if$
+}
+
+FUNCTION {format.thesis.type}
+{ type empty$
+ 'skip$
+ { pop$
+ type "t" change.case$
+ }
+ if$
+}
+
+FUNCTION {format.tr.number}
+{ type empty$
+ { "Technical Report" }
+ 'type
+ if$
+ number empty$
+ { "t" change.case$ }
+ { number tie.or.space.connect }
+ if$
+}
+
+FUNCTION {format.article.crossref}
+{ key empty$
+ { journal empty$
+ { "need key or journal for " cite$ * " to crossref " * crossref *
+ warning$
+ ""
+ }
+ { "In {\em " journal * "\/}" * }
+ if$
+ }
+ { "In " key * }
+ if$
+ " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.crossref.editor}
+{ editor #1 "{vv~}{ll}" format.name$
+ editor num.names$ duplicate$
+ #2 >
+ { pop$ " et~al." * }
+ { #2 <
+ 'skip$
+ { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" =
+ { " et~al." * }
+ { " and " * editor #2 "{vv~}{ll}" format.name$ * }
+ if$
+ }
+ if$
+ }
+ if$
+}
+
+FUNCTION {format.book.crossref}
+{ volume empty$
+ { "empty volume in " cite$ * "'s crossref of " * crossref * warning$
+ "In "
+ }
+ { "Volume" volume tie.or.space.connect
+ " of " *
+ }
+ if$
+ editor empty$
+ editor field.or.null author field.or.null =
+ or
+ { key empty$
+ { series empty$
+ { "need editor, key, or series for " cite$ * " to crossref " *
+ crossref * warning$
+ "" *
+ }
+ { "{\em " * series * "\/}" * }
+ if$
+ }
+ { key * }
+ if$
+ }
+ { format.crossref.editor * }
+ if$
+ " \cite{" * crossref * "}" *
+}
+
+FUNCTION {format.incoll.inproc.crossref}
+{ editor empty$
+ editor field.or.null author field.or.null =
+ or
+ { key empty$
+ { booktitle empty$
+ { "need editor, key, or booktitle for " cite$ * " to crossref " *
+ crossref * warning$
+ ""
+ }
+ { "In {\em " booktitle * "\/}" * }
+ if$
+ }
+ { "In " key * }
+ if$
+ }
+ { "In " format.crossref.editor * }
+ if$
+ " \cite{" * crossref * "}" *
+}
+
+FUNCTION {article}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ crossref missing$
+ { journal emphasize "journal" output.check
+ format.vol.num.pages output
+ format.date "year" output.check
+ }
+ { format.article.crossref output.nonnull
+ format.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {book}
+{ output.bibitem
+ author empty$
+ { format.editors "author and editor" output.check }
+ { format.authors output.nonnull
+ crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+ if$
+ }
+ if$
+ new.block
+ format.btitle "title" output.check
+ crossref missing$
+ { format.bvolume output
+ new.block
+ format.number.series output
+ new.sentence
+ publisher "publisher" output.check
+ address output
+ }
+ { new.block
+ format.book.crossref output.nonnull
+ }
+ if$
+ format.edition output
+ format.date "year" output.check
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {booklet}
+{ output.bibitem
+ format.authors output
+ new.block
+ format.title "title" output.check
+ howpublished address new.block.checkb
+ howpublished output
+ address output
+ format.date output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {inbook}
+{ output.bibitem
+ author empty$
+ { format.editors "author and editor" output.check }
+ { format.authors output.nonnull
+ crossref missing$
+ { "author and editor" editor either.or.check }
+ 'skip$
+ if$
+ }
+ if$
+ new.block
+ format.btitle "title" output.check
+ crossref missing$
+ { format.bvolume output
+ format.chapter.pages "chapter and pages" output.check
+ new.block
+ format.number.series output
+ new.sentence
+ publisher "publisher" output.check
+ address output
+ }
+ { format.chapter.pages "chapter and pages" output.check
+ new.block
+ format.book.crossref output.nonnull
+ }
+ if$
+ format.edition output
+ format.date "year" output.check
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {incollection}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ crossref missing$
+ { format.in.ed.booktitle "booktitle" output.check
+ format.bvolume output
+ format.number.series output
+ format.chapter.pages output
+ new.sentence
+ publisher "publisher" output.check
+ address output
+ format.edition output
+ format.date "year" output.check
+ }
+ { format.incoll.inproc.crossref output.nonnull
+ format.chapter.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {inproceedings}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ crossref missing$
+ { format.in.ed.booktitle "booktitle" output.check
+ format.bvolume output
+ format.number.series output
+ format.pages output
+ address empty$
+ { organization publisher new.sentence.checkb
+ organization output
+ publisher output
+ format.date "year" output.check
+ }
+ { address output.nonnull
+ format.date "year" output.check
+ new.sentence
+ organization output
+ publisher output
+ }
+ if$
+ }
+ { format.incoll.inproc.crossref output.nonnull
+ format.pages output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {conference} { inproceedings }
+
+FUNCTION {manual}
+{ output.bibitem
+ author empty$
+ { organization empty$
+ 'skip$
+ { organization output.nonnull
+ address output
+ }
+ if$
+ }
+ { format.authors output.nonnull }
+ if$
+ new.block
+ format.btitle "title" output.check
+ author empty$
+ { organization empty$
+ { address new.block.checka
+ address output
+ }
+ 'skip$
+ if$
+ }
+ { organization address new.block.checkb
+ organization output
+ address output
+ }
+ if$
+ format.edition output
+ format.date output
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {mastersthesis}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ "Master's thesis" format.thesis.type output.nonnull
+ school "school" output.check
+ address output
+ format.date "year" output.check
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {misc}
+{ output.bibitem
+ format.authors output
+ title howpublished new.block.checkb
+ format.title output
+ howpublished new.block.checka
+ howpublished output
+ format.date output
+ new.block
+ note output
+ fin.entry
+ empty.misc.check
+}
+
+FUNCTION {phdthesis}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.btitle "title" output.check
+ new.block
+ "PhD thesis" format.thesis.type output.nonnull
+ school "school" output.check
+ address output
+ format.date "year" output.check
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {proceedings}
+{ output.bibitem
+ editor empty$
+ { organization output }
+ { format.editors output.nonnull }
+ if$
+ new.block
+ format.btitle "title" output.check
+ format.bvolume output
+ format.number.series output
+ address empty$
+ { editor empty$
+ { publisher new.sentence.checka }
+ { organization publisher new.sentence.checkb
+ organization output
+ }
+ if$
+ publisher output
+ format.date "year" output.check
+ }
+ { address output.nonnull
+ format.date "year" output.check
+ new.sentence
+ editor empty$
+ 'skip$
+ { organization output }
+ if$
+ publisher output
+ }
+ if$
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {techreport}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ format.tr.number output.nonnull
+ institution "institution" output.check
+ address output
+ format.date "year" output.check
+ new.block
+ note output
+ fin.entry
+}
+
+FUNCTION {unpublished}
+{ output.bibitem
+ format.authors "author" output.check
+ new.block
+ format.title "title" output.check
+ new.block
+ note "note" output.check
+ format.date output
+ fin.entry
+}
+
+FUNCTION {default.type} { misc }
+
+MACRO {jan} {"January"}
+
+MACRO {feb} {"February"}
+
+MACRO {mar} {"March"}
+
+MACRO {apr} {"April"}
+
+MACRO {may} {"May"}
+
+MACRO {jun} {"June"}
+
+MACRO {jul} {"July"}
+
+MACRO {aug} {"August"}
+
+MACRO {sep} {"September"}
+
+MACRO {oct} {"October"}
+
+MACRO {nov} {"November"}
+
+MACRO {dec} {"December"}
+
+MACRO {acmcs} {"ACM Computing Surveys"}
+
+MACRO {acta} {"Acta Informatica"}
+
+MACRO {cacm} {"Communications of the ACM"}
+
+MACRO {ibmjrd} {"IBM Journal of Research and Development"}
+
+MACRO {ibmsj} {"IBM Systems Journal"}
+
+MACRO {ieeese} {"IEEE Transactions on Software Engineering"}
+
+MACRO {ieeetc} {"IEEE Transactions on Computers"}
+
+MACRO {ieeetcad}
+ {"IEEE Transactions on Computer-Aided Design of Integrated Circuits"}
+
+MACRO {ipl} {"Information Processing Letters"}
+
+MACRO {jacm} {"Journal of the ACM"}
+
+MACRO {jcss} {"Journal of Computer and System Sciences"}
+
+MACRO {scp} {"Science of Computer Programming"}
+
+MACRO {sicomp} {"SIAM Journal on Computing"}
+
+MACRO {tocs} {"ACM Transactions on Computer Systems"}
+
+MACRO {tods} {"ACM Transactions on Database Systems"}
+
+MACRO {tog} {"ACM Transactions on Graphics"}
+
+MACRO {toms} {"ACM Transactions on Mathematical Software"}
+
+MACRO {toois} {"ACM Transactions on Office Information Systems"}
+
+MACRO {toplas} {"ACM Transactions on Programming Languages and Systems"}
+
+MACRO {tcs} {"Theoretical Computer Science"}
+
+READ
+
+FUNCTION {sortify}
+{ purify$
+ "l" change.case$
+}
+
+INTEGERS { len }
+
+FUNCTION {chop.word}
+{ 's :=
+ 'len :=
+ s #1 len substring$ =
+ { s len #1 + global.max$ substring$ }
+ 's
+ if$
+}
+
+FUNCTION {sort.format.names}
+{ 's :=
+ #1 'nameptr :=
+ ""
+ s num.names$ 'numnames :=
+ numnames 'namesleft :=
+ { namesleft #0 > }
+ { nameptr #1 >
+ { " " * }
+ 'skip$
+ if$
+ s nameptr "{vv{ } }{ll{ }}{ ff{ }}{ jj{ }}" format.name$ 't :=
+ nameptr numnames = t "others" = and
+ { "et al" * }
+ { t sortify * }
+ if$
+ nameptr #1 + 'nameptr :=
+ namesleft #1 - 'namesleft :=
+ }
+ while$
+}
+
+FUNCTION {sort.format.title}
+{ 't :=
+ "A " #2
+ "An " #3
+ "The " #4 t chop.word
+ chop.word
+ chop.word
+ sortify
+ #1 global.max$ substring$
+}
+
+FUNCTION {author.sort}
+{ author empty$
+ { key empty$
+ { "to sort, need author or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {author.editor.sort}
+{ author empty$
+ { editor empty$
+ { key empty$
+ { "to sort, need author, editor, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { editor sort.format.names }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {author.organization.sort}
+{ author empty$
+ { organization empty$
+ { key empty$
+ { "to sort, need author, organization, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { "The " #4 organization chop.word sortify }
+ if$
+ }
+ { author sort.format.names }
+ if$
+}
+
+FUNCTION {editor.organization.sort}
+{ editor empty$
+ { organization empty$
+ { key empty$
+ { "to sort, need editor, organization, or key in " cite$ * warning$
+ ""
+ }
+ { key sortify }
+ if$
+ }
+ { "The " #4 organization chop.word sortify }
+ if$
+ }
+ { editor sort.format.names }
+ if$
+}
+
+FUNCTION {presort}
+{ type$ "book" =
+ type$ "inbook" =
+ or
+ 'author.editor.sort
+ { type$ "proceedings" =
+ 'editor.organization.sort
+ { type$ "manual" =
+ 'author.organization.sort
+ 'author.sort
+ if$
+ }
+ if$
+ }
+ if$
+ " "
+ *
+ year field.or.null sortify
+ *
+ " "
+ *
+ title field.or.null
+ sort.format.title
+ *
+ #1 entry.max$ substring$
+ 'sort.key$ :=
+}
+
+ITERATE {presort}
+
+SORT
+
+STRINGS { longest.label }
+
+INTEGERS { number.label longest.label.width }
+
+FUNCTION {initialize.longest.label}
+{ "" 'longest.label :=
+ #1 'number.label :=
+ #0 'longest.label.width :=
+}
+
+FUNCTION {longest.label.pass}
+{ number.label int.to.str$ 'label :=
+ number.label #1 + 'number.label :=
+ label width$ longest.label.width >
+ { label 'longest.label :=
+ label width$ 'longest.label.width :=
+ }
+ 'skip$
+ if$
+}
+
+EXECUTE {initialize.longest.label}
+
+ITERATE {longest.label.pass}
+
+FUNCTION {begin.bib}
+{ preamble$ empty$
+ 'skip$
+ { preamble$ write$ newline$ }
+ if$
+ "\begin{thebibliography}{" longest.label * "}" * write$ newline$
+}
+
+EXECUTE {begin.bib}
+
+EXECUTE {init.state.consts}
+
+ITERATE {call.type$}
+
+FUNCTION {end.bib}
+{ newline$
+ "\end{thebibliography}" write$ newline$
+}
+
+EXECUTE {end.bib}
diff --git a/tests/examplefiles/postgresql_test.txt b/tests/examplefiles/postgresql_test.txt
index 190d184f..28db5ee3 100644
--- a/tests/examplefiles/postgresql_test.txt
+++ b/tests/examplefiles/postgresql_test.txt
@@ -45,3 +45,37 @@ $$;
SELECT U&'\0441\043B\043E\043D'
FROM U&"\0441\043B\043E\043D";
+-- Escapes
+SELECT E'1\n2\n3';
+
+-- DO example from postgresql documentation
+/*
+ * PostgreSQL is Copyright © 1996-2016 by the PostgreSQL Global Development Group.
+ *
+ * Postgres95 is Copyright © 1994-5 by the Regents of the University of California.
+ *
+ * Permission to use, copy, modify, and distribute this software and its
+ * documentation for any purpose, without fee, and without a written agreement
+ * is hereby granted, provided that the above copyright notice and this paragraph
+ * and the following two paragraphs appear in all copies.
+ *
+ * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+ * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
+ * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,
+ * EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS-IS" BASIS,
+ * AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE,
+ * SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ */
+DO $$DECLARE r record;
+BEGIN
+ FOR r IN SELECT table_schema, table_name FROM information_schema.tables
+ WHERE table_type = 'VIEW' AND table_schema = 'public'
+ LOOP
+ EXECUTE 'GRANT ALL ON ' || quote_ident(r.table_schema) || '.' || quote_ident(r.table_name) || ' TO webuser';
+ END LOOP;
+END$$;
diff --git a/tests/examplefiles/scope.cirru b/tests/examplefiles/scope.cirru
index d3bd8f16..c3d1a2c6 100644
--- a/tests/examplefiles/scope.cirru
+++ b/tests/examplefiles/scope.cirru
@@ -209,3 +209,29 @@ for (a x i) (.log console x i)
set a 0
while (< a 10) (+= a 1) (.log console a)
+
+-- WebAssembly variable names
+
+-- ":(c) 2015 Andreas Rossberg"
+
+module
+ export :even $even
+ export "odd" $odd
+
+ func $even (param $n i32) (result i32)
+ if (i32.eq (get_local $n) (i32.const 0))
+ i32.const 1
+ call $odd (i32.sub (get_local $n) (i32.const 1))
+
+ func $odd (param $n i32) (result i32)
+ store_global $scratch (get_local $n)
+ if (i32.eq (get_local $n) (i32.const 0)
+ i32.const 0
+ call $even (i32.sub (get_local $n) (i32.const 1))
+
+ global $scratch i32
+
+assert_eq (invoke :even (i32.const 13)) (i32.const 0)
+assert_eq (invoke :even (i32.const 20)) (i32.const 1)
+assert_eq (invoke :odd (i32.const 13)) (i32.const 1)
+assert_eq (invoke :odd (i32.const 20)) (i32.const 0)
diff --git a/tests/examplefiles/sparql.rq b/tests/examplefiles/sparql.rq
index 70b594e1..d979d203 100644
--- a/tests/examplefiles/sparql.rq
+++ b/tests/examplefiles/sparql.rq
@@ -29,8 +29,8 @@ SELECT ?person (COUNT(?nick) AS ?nickCount) {
ex:float5 .0e1 ;
ex:float6 5e11 ;
ex:float7 1. ;
- ex:À "" ;
- ex:豈 "" ;
+ ex:aUnicodeÀExample "somestring" ;
+ ex:catName "Kitty", "Kitty_" ; # object list
ex:escape "\n\u00c0\U00010000";
ex:catAge ?catage ;
dcterms:description "Someone with a cat called \"cat\"."@en . # language tag
diff --git a/tests/examplefiles/test.bib b/tests/examplefiles/test.bib
new file mode 100644
index 00000000..87e558d8
--- /dev/null
+++ b/tests/examplefiles/test.bib
@@ -0,0 +1,77 @@
+This is an example BibTeX file.
+This text is a comment.
+
+@preamble{"%%% example BibTeX file"}
+
+@Preamble{"\newcommand{\noopsort}[1]{} "
+ "\newcommand{\noopsort}[1]{} "}
+
+@String{SCI = "Science"}
+
+@STRING{JFernandez = "Fernandez, Julio M."}
+@StRiNg{HGaub = "Gaub, Hermann E."}
+@string{MGautel = "Gautel, Mathias"}
+@String{FOesterhelt = "Oesterhelt, Filipp"}
+@String{MRief = "Rief, Matthias"}
+
+@Article{rief97b,
+ author = MRief #" and "# MGautel #" and "# FOesterhelt
+ #" and "# JFernandez #" and "# HGaub,
+ title = "Reversible Unfolding of Individual Titin
+ Immunoglobulin Domains by {AFM}",
+ journal = SCI,
+ volume = 276,
+ number = 5315,
+ pages = "1109--1112",
+ year = 1997,
+ doi = "10.1126/science.276.5315.1109",
+ URL = "http://www.sciencemag.org/cgi/content/abstract/276/5315/1109",
+ eprint = "http://www.sciencemag.org/cgi/reprint/276/5315/1109.pdf",
+}
+
+
+Parens can be used instead of braces:
+
+@ARTICLE(ruckenstein-diffusion,
+ author = "Liu, Hongquin and Ruckenstein, Eli",
+ language = "english",
+ title = "Predicting the Diffusion Coefficient in Supercritical Fluids",
+ journal = "Ind. Eng. Chem. Res.",
+ volume = "36",
+ year = "1997",
+ pages = "888-895"
+)
+
+@book{
+ viktorov-methods,
+ author = "Викторов, Михаил Маркович",
+ publisher = "Л.: <<Химия>>",
+ title = "Методы вычисления физико-химических величин и прикладные расчёты",
+ language = "russian",
+ year = "1977",
+ isbn = "000-0000000000",
+}
+
+@comment{jackson-commented-out,
+ author = "Jackson, P\'eter",
+ publisher = "Some Publisher",
+ language = "english",
+ title = "Some Title",
+ series = "Some series",
+ booktitle = "Commented Out",
+ number = "3",
+ edition = "Second",
+ year = "1933",
+ pages = "44--59"
+}
+
+@booklet{test-booklet,
+ author = "de Last, Jr., First Middle",
+ language = "english",
+ title = "Just a booklet",
+ year = 2006,
+ month = jan,
+ address = "Moscow",
+ howpublished = "Published by Foo"
+}
+
diff --git a/tests/examplefiles/test.cr b/tests/examplefiles/test.cr
new file mode 100644
index 00000000..028ff6f3
--- /dev/null
+++ b/tests/examplefiles/test.cr
@@ -0,0 +1,2871 @@
+# Examples taken from http://crystal-lang.org/docs/
+# Copyright 2012-2016 Manas Technology Solutions.
+
+
+require "http/server"
+
+server = HTTP::Server.new(8080) do |context|
+ context.response.content_type = "text/plain"
+ context.response.print "Hello world! The time is #{Time.now}"
+end
+
+puts "Listening on http://0.0.0.0:8080"
+server.listen
+
+
+module HTTP
+ class RequestHandler
+ end
+end
+
+alias NumericValue = Float32 | Float64 | Int32 | Int64
+
+enum Time::DayOfWeek
+end
+
+
+$global_greeting = "Hello world"
+
+class Greeting
+ @@default_greeting = "Hello world"
+
+ def initialize(@custom_greeting = nil)
+ end
+
+ def print_greeting
+ greeting = @custom_greeting || @@default_greeting
+ puts greeting
+ end
+end
+
+
+LUCKY_NUMBERS = [3, 7, 11]
+DOCUMENTATION_URL = "http://crystal-lang.org/docs"
+
+
+module Scorecard
+ class Parser
+ def parse(score_text)
+ begin
+ score_text.scan(SCORE_PATTERN) do |match|
+ handle_match(match)
+ end
+ rescue err : ParseError
+ # handle error ...
+ end
+ end
+ end
+end
+
+
+module Money
+ CURRENCIES = {
+ "EUR" => 1.0,
+ "ARS" => 10.55,
+ "USD" => 1.12,
+ "JPY" => 134.15,
+ }
+
+ class Amount
+ getter :currency, :value
+
+ def initialize(@currency, @value)
+ end
+ end
+
+ class CurrencyConversion
+ def initialize(@amount, @target_currency)
+ end
+
+ def amount
+ # implement conversion ...
+ end
+ end
+end
+
+
+i = 0
+while i < 10
+ proc = ->(x : Int32) do
+ spawn do
+ puts(x)
+ end
+ end
+ proc.call(i)
+ i += 1
+end
+
+Fiber.yield
+
+
+# A buffered channel of capacity 2
+channel = Channel(Int32).new(2)
+
+spawn do
+ channel.send(1)
+ channel.send(2)
+ channel.send(3)
+end
+
+3.times do |i|
+ puts channel.receive
+end
+
+
+class MyDictionary(K, V)
+end
+
+
+MyBox.new(1) #:: MyBox(Int32)
+MyBox.new("hello") #:: MyBox(String)
+
+
+module Moo(T)
+ def t
+ T
+ end
+end
+
+class Foo(U)
+ include Moo(U)
+
+ def initialize(@value : U)
+ end
+end
+
+foo = Foo.new(1)
+foo.t # Int32
+
+
+class Parent(T)
+end
+
+class Int32Child < Parent(Int32)
+end
+
+class GenericChild(T) < Parent(T)
+end
+
+
+class Person
+end
+
+
+a = 1
+ptr = pointerof(a)
+ptr[100_000] = 2 # undefined behaviour, probably a segmentation fault
+
+
+alias Int32OrString = Int32 | String
+
+
+alias Int32OrNil = Int32?
+
+
+alias Int32OrNil_ = Int32 | ::Nil
+
+
+alias Int32Ptr = Int32*
+
+
+alias Int32Ptr_ = Pointer(Int32)
+
+
+alias Int32_8 = Int32[8]
+
+
+alias Int32_8_ = StaticArray(Int32, 8)
+
+
+alias Int32StringTuple = {Int32, String}
+
+
+alias Int32StringTuple_ = Tuple(Int32, String)
+
+
+alias Int32ToString = Int32 -> String
+
+
+alias Int32ToString_ = Proc(Int32, String)
+
+
+alias ProcThatReturnsInt32 = -> Int32
+
+
+alias Int32AndCharToString = Int32, Char -> String
+
+
+alias ComplexProc = (Int32 -> Int32) -> String
+
+
+def foo(x : Int32)
+ "instance"
+end
+
+def foo(x : Int32.class)
+ "class"
+end
+
+foo 1 # "instance"
+foo Int32 # "class"
+
+
+class Parent
+end
+
+class Child1 < Parent
+end
+
+class Child2 < Parent
+end
+
+ary = [] of Parent.class
+ary << Child1
+ary << Child2
+
+
+# Same as not specifying a restriction, not very useful
+def foo(x : _)
+end
+
+# A bit more useful: any Proc that takes two arguments and returns an Int32:
+def foo(x : _, _ -> Int32)
+end
+
+
+#alias SameAsInt32 = typeof(2)
+#alias Int32OrString_ = typeof(1, "a")
+
+
+class Person
+ def initialize(name)
+ @name = name
+ @age = 0
+ end
+
+ def name
+ @name
+ end
+
+ def age
+ @age
+ end
+end
+
+
+john = Person.new "John"
+peter = Person.new "Peter"
+
+john.name #=> "John"
+john.age #=> 0
+
+peter.name #=> "Peter"
+
+
+class Person
+ def self.new(name)
+ instance = Person.allocate
+ instance.initialize(name)
+ instance
+ end
+ end
+
+
+if a.is_a?(String)
+ # here a is a String
+end
+
+if b.is_a?(Number)
+ # here b is a Number
+end
+
+
+a = some_condition ? 1 : "hello"
+# a : Int32 | String
+
+if a.is_a?(Number)
+ # a : Int32
+else
+ # a : String
+end
+
+
+if a.is_a?(String) && b.is_a?(Number)
+ # here a is a String and b is a Number
+end
+
+
+a.+(b)
+
+
+struct Vector2
+ getter x, y
+
+ def initialize(@x, @y)
+ end
+
+ def +(other)
+ Vector2.new(x + other.x, y + other.y)
+ end
+end
+
+v1 = Vector2.new(1, 2)
+v2 = Vector2.new(3, 4)
+v1 + v2 #=> Vector2(@x=4, @y=6)
+
+
+
+
+struct Vector2
+ def -
+ Vector2.new(-x, -y)
+ end
+end
+
+v1 = Vector2.new(1, 2)
+-v1 #=> Vector2(@x=-1, @y=-2)
+
+
+
+
+
+class MyArray
+ def [](index)
+ # ...
+ end
+
+ def [](index1, index2, index3)
+ # ...
+ end
+
+ def []=(index, value)
+ # ...
+ end
+end
+
+array = MyArray.new
+
+array[1] # invokes the first method
+array[1, 2, 3] # invokes the second method
+array[1] = 2 # invokes the third method
+
+array.[](1) # invokes the first method
+array.[](1, 2, 3) # invokes the second method
+array.[]=(1, 2) # invokes the third method
+
+
+raise "OH NO!"
+raise Exception.new("Some error")
+
+
+class MyException < Exception
+end
+
+
+begin
+ raise MyException.new("OH NO!")
+rescue ex : MyException
+ puts "Rescued MyException: #{ex.message}"
+end
+
+
+begin
+ # ...
+rescue ex : MyException | MyOtherException
+ # only MyException or MyOtherException
+rescue
+ # any other kind of exception
+ensure
+ puts "Cleanup..."
+end
+
+
+def some_method
+ something_dangerous
+rescue
+ # execute if an exception is raised
+end
+
+
+array = [1, 2, 3]
+array[4] # raises IndexError
+array[4]? # returns nil because the index is out of bounds
+
+
+def some_proc(&block : Int32 -> Int32)
+ block
+end
+
+x = 0
+proc = ->(i : Int32) { x += i }
+proc = some_proc(&proc)
+proc.call(1) #=> 1
+proc.call(10) #=> 11
+x #=> 11
+
+
+def add(x, y)
+ x + y
+end
+
+adder = ->add(Int32, Int32)
+adder.call(1, 2) #=> 3
+
+
+module Curses
+ class Window
+ end
+end
+
+Curses::Window.new
+
+
+module ItemsSize
+ def size
+ items.size
+ end
+end
+
+class Items
+ include ItemsSize
+
+ def items
+ [1, 2, 3]
+ end
+end
+
+items = Items.new
+items.size #=> 3
+
+
+module Base64
+ extend self
+
+ def encode64(string)
+ # ...
+ end
+
+ def decode64(string)
+ # ...
+ end
+end
+
+Base64.encode64 "hello" #=> "aGVsbG8="
+
+
+if some_condition
+ a = 1
+else
+ a = "hello"
+end
+
+a_as_int = a as Int32
+a_as_int.abs # works, compiler knows that a_as_int is Int32
+
+
+ptr = Pointer(Int32).malloc(1)
+ptr as Int8* #:: Pointer(Int8)
+
+
+array = [1, 2, 3]
+
+# object_id returns the address of an object in memory,
+# so we create a pointer with that address
+ptr = Pointer(Void).new(array.object_id)
+
+# Now we cast that pointer to the same type, and
+# we should get the same value
+array2 = ptr as Array(Int32)
+array2.same?(array) #=> true
+
+
+a = 1
+b = a as Int32 | Float64
+b #:: Int32 | Float64
+
+
+ary = [1, 2, 3]
+
+# We want to create an array 1, 2, 3 of Int32 | Float64
+ary2 = ary.map { |x| x as Int32 | Float64 }
+
+ary2 #:: Array(Int32 | Float64)
+ary2 << 1.5 # OK
+
+
+class Person
+ def initialize(@name)
+ end
+
+ def name
+ @name
+ end
+end
+
+a = [] of Person
+x = a.map { |f| f.name } # Error: can't infer block return type
+
+
+a = [] of Person
+x = a.map { |f| f.name as String } # OK
+
+
+Person.new "John"
+
+a = [] of Person
+x = a.map { |f| f.name } # OK
+
+
+loop do
+ do_something
+ break if some_condition
+end
+
+
+class Point
+ def initialize(@x, @y)
+ end
+end
+
+Point.new 1, 2
+
+# 4 bytes for the type id + 2 x Int32 (2 x 4 = 8) = 12
+instance_sizeof(Point) #=> 12
+
+
+a = 1
+while a < 5
+ a += 1
+ if a == 3
+ next
+ end
+ puts a
+end
+# The above prints the numbers 2, 4 and 5
+
+
+lib C
+ # In C: double cos(double x)
+ fun cos(value : Float64) : Float64
+
+ fun getch : Int32
+
+ fun srand(seed : UInt32)
+
+ fun exit(status : Int32) : NoReturn
+
+ fun printf(format : UInt8*, ...) : Int32
+end
+
+C.cos(1.5) #=> 0.0707372
+C.srand(1_u32)
+
+a = 1
+b = 2
+C.printf "%d + %d = %d\n", a, b, a + b
+
+
+lib LibSDL
+ fun init = SDL_Init(flags : UInt32) : Int32
+end
+
+lib LLVMIntrinsics
+ fun ceil_f32 = "llvm.ceil.f32"(value : Float32) : Float32
+end
+
+lib MyLib
+ fun my_fun(some_size : LibC::SizeT)
+end
+
+@[Link("pcre")]
+lib LibPCRE
+end
+
+
+lib C
+ ifdef x86_64
+ alias SizeT = UInt64
+ else
+ alias SizeT = UInt32
+ end
+
+ fun memcmp(p1 : Void*, p2 : Void*, size : C::SizeT) : Int32
+end
+
+
+lib X
+ enum SomeEnum
+ Ten = 10
+ Twenty = 10 * 2
+ ThirtyTwo = 1 << 5
+ end
+end
+
+
+lib X
+ enum SomeEnum
+ A = 1_u32
+ end
+end
+
+
+X::SomeEnum::Zero #=> 0_i8
+X::SomeEnum::Two #=> 2_i8
+
+
+lib X
+ fun callback(f : Int32 -> Int32)
+end
+
+
+f = ->(x : Int32) { x + 1 }
+X.callback(f)
+
+
+X.callback ->(x) { x + 1 }
+
+
+X.callback nil
+
+
+lib LibFoo
+ fun store_callback(callback : ->)
+ fun execute_callback
+end
+
+LibFoo.store_callback ->{ raise "OH NO!" }
+LibFoo.execute_callback
+
+
+lib LibFoo
+ fun store_callback(callback : ->)
+
+ @[Raises]
+ fun execute_callback
+end
+
+
+@[Link("pcre")]
+lib PCRE
+ INFO_CAPTURECOUNT = 2
+end
+
+PCRE::INFO_CAPTURECOUNT #=> 2
+
+
+lib U
+ # In C:
+ #
+ # union IntOrFloat {
+ # int some_int;
+ # double some_float;
+ # };
+ union IntOrFloat
+ some_int : Int32
+ some_float : Float64
+ end
+end
+
+
+value = U::IntOrFloat.new
+
+
+value = uninitialized U::IntOrFloat
+value.some_int #=> some garbage value
+
+
+value = U::IntOrFloat.new
+value.some_int = 1
+value.some_int #=> 1
+value.some_float #=> 4.94066e-324
+
+
+def change_it(value)
+ value.some_int = 1
+end
+
+value = U::IntOrFloat.new
+change_it value
+value.some_int #=> 0
+
+
+lib C
+ # In C:
+ #
+ # struct TimeZone {
+ # int minutes_west;
+ # int dst_time;
+ # };
+ struct TimeZone
+ minutes_west : Int32
+ dst_time : Int32
+ end
+end
+
+
+lib C
+ # This is a forward declaration
+ struct Node
+ end
+
+ struct Node
+ node : Node*
+ end
+end
+
+
+tz = C::TimeZone.new
+
+
+tz = uninitialized C::TimeZone
+tz.minutes_west #=> some garbage value
+
+
+tz = C::TimeZone.new
+tz.minutes_west = 1
+tz.minutes_west #=> 1
+
+
+tz = C::TimeZone.new minutes_west: 1, dst_time: 2
+tz.minutes_west #=> 1
+tz.dst_time #=> 2
+
+
+def change_it(tz)
+ tz.minutes_west = 1
+end
+
+tz = C::TimeZone.new
+change_it tz
+tz.minutes_west #=> 0
+
+
+lib C
+ $errno : Int32
+end
+
+
+C.errno #=> some value
+C.errno = 0
+C.errno #=> 0
+
+
+lib C
+ @[ThreadLocal]
+ $errno : Int32
+end
+
+
+lib C
+ fun waitpid(pid : Int32, status_ptr : Int32*, options : Int32) : Int32
+end
+
+
+status_ptr = uninitialized Int32
+
+C.waitpid(pid, pointerof(status_ptr), options)
+
+
+C.waitpid(pid, out status_ptr, options)
+
+
+lib X
+ type CInt = Int32
+end
+
+
+ifdef x86_64
+ # some specific code for 64 bits platforms
+else
+ # some specific code for non-64 bits platforms
+end
+
+
+ifdef linux && x86_64
+ # some specific code for linux 64 bits
+end
+
+
+lib C
+ ifdef linux
+ struct SomeStruct
+ some_field : Int32
+ end
+ else
+ struct SomeStruct
+ some_field : Int64
+ end
+ end
+end
+
+
+# Assigns to a local variable
+local = 1
+
+# Assigns to a global variable
+$global = 4
+
+class Testing
+ # Assigns to an instance variable
+ @instance = 2
+
+ # Assigns to a class variable
+ @@class = 3
+end
+
+
+local += 1 # same as: local = local + 1
+
+# The above is valid with these operators:
+# +, -, *, /, %, |, &, ^, **, <<, >>
+
+local ||= 1 # same as: local || (local = 1)
+local &&= 1 # same as: local && (local = 1)
+
+
+# A setter
+person.name=("John")
+
+# The above can be written as:
+person.name = "John"
+
+# An indexed assignment
+objects.[]=(2, 3)
+
+# The above can be written as:
+objects[2] = 3
+
+# Not assignment-related, but also syntax sugar:
+objects.[](2, 3)
+
+# The above can be written as:
+objects[2, 3]
+
+
+person.age += 1 # same as: person.age = person.age + 1
+
+person.name ||= "John" # same as: person.name || (person.name = "John")
+person.name &&= "John" # same as: person.name && (person.name = "John")
+
+objects[1] += 2 # same as: objects[1] = objects[1] + 2
+
+objects[1] ||= 2 # same as: objects[1]? || (objects[1] = 2)
+objects[1] &&= 2 # same as: objects[1]? && (objects[1] = 2)
+
+
+alias PInt32 = Pointer(Int32)
+
+ptr = PInt32.malloc(1) # : Pointer(Int32)
+
+
+alias RecArray = Array(Int32) | Array(RecArray)
+
+ary = [] of RecArray
+ary.push [1, 2, 3]
+ary.push ary
+ary #=> [[1, 2, 3], [...]]
+
+
+module Json
+ alias Type = Nil |
+ Bool |
+ Int64 |
+ Float64 |
+ String |
+ Array(Type) |
+ Hash(String, Type)
+end
+
+
+a = 1
+if a > 0
+ a = 10
+end
+a #=> 10
+
+b = 1
+if b > 2
+ b = 10
+else
+ b = 20
+end
+b #=> 20
+
+
+if some_condition
+ do_something
+elsif some_other_condition
+ do_something_else
+else
+ do_that
+end
+
+
+a = 1
+if some_condition
+ a = "hello"
+else
+ a = true
+end
+# a : String | Bool
+
+b = 1
+if some_condition
+ b = "hello"
+end
+# b : Int32 | String
+
+if some_condition
+ c = 1
+else
+ c = "hello"
+end
+# c : Int32 | String
+
+if some_condition
+ d = 1
+end
+# d : Int32 | Nil
+
+
+a = 1
+if some_condition
+ a = "hello"
+ # a : String
+ a.size
+end
+# a : String | Int32
+
+
+if some_condition
+ e = 1
+else
+ e = "hello"
+ # e : String
+ return
+end
+# e : Int32
+
+
+enum Color : UInt8
+ Red # 0
+ Green # 1
+ Blue = 5 # overwritten to 5
+ Yellow # 6 (5 + 1)
+
+ def red?
+ self == Color::Red
+ end
+end
+
+Color::Red.value #:: UInt8
+
+
+@[Flags]
+enum IOMode
+ Read # 1
+ Write # 2
+ Async # 4
+end
+
+
+IOMode::None.value #=> 0
+IOMode::All.value #=> 7
+
+
+puts(Color::Red) # prints "Red"
+puts(IOMode::Write | IOMode::Async) # prints "Write, Async"
+
+
+puts Color.new(1) #=> prints "Green"
+
+
+puts Color.new(10) #=> prints "10"
+
+
+Color::Red.red? #=> true
+Color::Blue.red? #=> false
+
+
+def paint(color : Color)
+ case color
+ when Color::Red
+ # ...
+ else
+ # Unusual, but still can happen
+ raise "unknown color: #{color}"
+ end
+end
+
+paint Color::Red
+
+
+def paint(color : Symbol)
+ case color
+ when :red
+ # ...
+ else
+ raise "unknown color: #{color}"
+ end
+end
+
+paint :red
+
+
+name = "Crystal"
+age = 1
+
+
+flower = "Tulip"
+# At this point 'flower' is a String
+
+flower = 1
+# At this point 'flower' is an Int32
+
+
+class Foo
+ def finalize
+ # Invoked when Foo is garbage-collected
+ puts "Bye bye from #{self}!"
+ end
+end
+
+# Prints "Bye bye ...!" for ever
+loop do
+ Foo.new
+end
+
+
+# Defines a method in the program
+def add(x, y)
+ x + y
+end
+
+# Invokes the add method in the program
+add(1, 2) #=> 3
+
+
+def even?(num)
+ if num % 2 == 0
+ return true
+ end
+
+ return false
+end
+
+
+def add(x, y)
+ x + y
+end
+
+class Foo
+ def bar
+ # invokes the program's add method
+ add(1, 2)
+
+ # invokes Foo's baz method
+ baz(1, 2)
+ end
+
+ def baz(x, y)
+ x * y
+ end
+end
+
+
+def baz(x, y)
+ x + y
+end
+
+class Foo
+ def bar
+ baz(4, 2) #=> 2
+ ::baz(4, 2) #=> 6
+ end
+
+ def baz(x, y)
+ x - y
+ end
+end
+
+
+x = 1
+
+def add(y)
+ x + y # error: undefined local variable or method 'x'
+end
+
+add(2)
+
+
+add 1, 2 # same as add(1, 2)
+
+
+class Counter
+ @@instances = 0
+
+ def initialize
+ @@instances += 1
+ end
+
+ def self.instances
+ @@instances
+ end
+end
+
+Counter.instances #=> 0
+Counter.new
+Counter.new
+Counter.new
+Counter.instances #=> 3
+
+
+class Counter
+ def self.increment
+ @@instances += 1
+ end
+end
+
+Counter.increment # Error: undefined method '+' for Nil
+
+
+class Parent
+ @@counter = 0
+end
+
+class Child < Parent
+ def self.counter
+ @@counter
+ end
+end
+
+Child.counter #=> nil
+
+
+unless some_condition
+ then_expression
+else
+ else_expression
+end
+
+# Can also be written as a suffix
+close_door unless door_closed?
+
+
+a = 1
+b = typeof(a) #=> Int32
+
+
+typeof(1, "a", 'a') #=> (Int32 | String | Char)
+
+
+hash = {} of Int32 => String
+another_hash = typeof(hash).new #:: Hash(Int32, String)
+
+
+class Array
+ def self.elem_type(typ)
+ if typ.is_a?(Array)
+ elem_type(typ.first)
+ else
+ typ
+ end
+ end
+end
+
+nest = [1, ["b", [:c, ['d']]]]
+flat = Array(typeof(Array.elem_type(nest))).new
+typeof(nest) #=> Array(Int32 | Array(String | Array(Symbol | Array(Char))))
+typeof(flat) #=> Array(String | Int32 | Symbol | Char)
+
+
+a = 2 if some_condition
+
+
+x = 0
+proc = ->{ x += 1; x }
+proc.call #=> 1
+proc.call #=> 2
+x #=> 2
+
+
+def counter
+ x = 0
+ ->{ x += 1; x }
+end
+
+proc = counter
+proc.call #=> 1
+proc.call #=> 2
+
+
+def foo
+ yield
+end
+
+x = 1
+foo do
+ x = "hello"
+end
+x # : Int32 | String
+
+
+x = 1
+foo do
+ x = "hello"
+end
+x # : Int32 | String
+
+x = 'a'
+x # : Char
+
+
+def capture(&block)
+ block
+end
+
+x = 1
+capture { x = "hello" }
+
+x = 'a'
+x # : Int32 | String | Char
+
+
+def capture(&block)
+ block
+end
+
+x = 1
+->{ x = "hello" }
+
+x = 'a'
+x # : Int32 | String | Char
+
+
+abstract class Animal
+ # Makes this animal talk
+ abstract def talk
+end
+
+class Dog < Animal
+ def talk
+ "Woof!"
+ end
+end
+
+class Cat < Animal
+ def talk
+ "Miau"
+ end
+end
+
+class Person
+ getter pet
+
+ def initialize(@name, @pet)
+ end
+end
+
+john = Person.new "John", Dog.new
+peter = Person.new "Peter", Cat.new
+
+
+john.pet.talk #=> "Woof!"
+
+
+a = 1 > 2 ? 3 : 4
+
+# The above is the same as:
+a = if 1 > 2
+ 3
+ else
+ 4
+ end
+
+
+def some_method : String
+ "hello"
+end
+
+
+PI = 3.14
+
+module Earth
+ RADIUS = 6_371_000
+end
+
+PI #=> 3.14
+Earth::RADIUS #=> 6_371_000
+
+
+TEN = begin
+ a = 0
+ while a < 10
+ a += 1
+ end
+ a
+end
+
+TEN #=> 10
+
+
+class Person
+ getter name
+
+ def initialize(@name)
+ @age = 0
+ end
+end
+
+john = Person.new "John"
+john.name #=> "John"
+john.name.size #=> 4
+
+
+one = Person.new 1
+one.name #=> 1
+one.name + 2 #=> 3
+
+
+john = Person.new "John"
+one = Person.new 1
+
+
+john = Person.new "John"
+one = Person.new 1
+
+# Error: undefined method 'size' for Int32
+john.name.size
+
+# Error: no overload matches 'String#+' with types Int32
+john.name + 3
+
+
+john = Person.new "John"
+john.name.size
+one = Person.new 1
+
+
+class Person
+ getter name
+
+ def initialize(@name)
+ @age = 0
+ end
+
+ def address
+ @address
+ end
+
+ def address=(@address)
+ end
+end
+
+john = Person.new "John"
+john.address = "Argentina"
+
+
+# Error: undefined method 'size' for Nil
+john.address.size
+
+
+class Person
+ @age = 0
+
+ def initialize(@name)
+ end
+end
+
+
+class Person
+ @age : Int32
+
+ def initialize(@name)
+ @age = 0
+ end
+end
+
+
+a = if 2 > 1
+ 3
+ else
+ 4
+ end
+a #=> 3
+
+
+if 1 > 2
+else
+ 3
+end
+
+
+def twice(&block)
+ yield
+ yield
+end
+
+
+twice() do
+ puts "Hello!"
+end
+
+twice do
+ puts "Hello!"
+end
+
+twice { puts "Hello!" }
+
+
+def twice
+ yield 1
+ yield 2
+end
+
+twice do |i|
+ puts "Got #{i}"
+end
+
+
+twice { |i| puts "Got #{i}" }
+
+
+def many
+ yield 1, 2, 3
+end
+
+many do |x, y, z|
+ puts x + y + z
+end
+
+# Output: 6
+
+
+def many
+ yield 1, 2, 3
+end
+
+many do |x, y|
+ puts x + y
+end
+
+# Output: 3
+
+
+def twice
+ yield
+ yield
+end
+
+twice do |i|
+ puts i.inspect
+end
+
+
+def some
+ yield 1, 'a'
+ yield true, "hello"
+ yield 2
+end
+
+some do |first, second|
+ # first is Int32 | Bool
+ # second is Char | String | Nil
+end
+
+
+method do |argument|
+ argument.some_method
+end
+
+
+method(&.some_method)
+
+
+method &.some_method(arg1, arg2)
+
+
+method &.+(2)
+method &.[index]
+
+
+def twice
+ v1 = yield 1
+ puts v1
+
+ v2 = yield 2
+ puts v2
+end
+
+twice do |i|
+ i + 1
+end
+
+
+ary = [1, 2, 3]
+ary.map { |x| x + 1 } #=> [2, 3, 4]
+ary.select { |x| x % 2 == 1 } #=> [1, 3]
+
+
+def transform(value)
+ yield value
+end
+
+transform(1) { |x| x + 1 } #=> 2
+
+
+def thrice
+ puts "Before 1"
+ yield 1
+ puts "Before 2"
+ yield 2
+ puts "Before 3"
+ yield 3
+ puts "After 3"
+end
+
+thrice do |i|
+ if i == 2
+ break
+ end
+end
+
+
+def twice
+ yield 1
+ yield 2
+end
+
+twice { |i| i + 1 } #=> 3
+twice { |i| break "hello" } #=> "hello"
+
+
+value = twice do |i|
+ if i == 1
+ break "hello"
+ end
+ i + 1
+end
+value #:: Int32 | String
+
+
+values = twice { break 1, 2 }
+values #=> {1, 2}
+
+
+value = twice { break }
+value #=> nil
+
+
+def twice
+ yield 1
+ yield 2
+end
+
+twice do |i|
+ if i == 1
+ puts "Skipping 1"
+ next
+ end
+
+ puts "Got #{i}"
+end
+
+
+
+def twice
+ v1 = yield 1
+ puts v1
+
+ v2 = yield 2
+ puts v2
+end
+
+twice do |i|
+ if i == 1
+ next 10
+ end
+
+ i + 1
+end
+
+# Output
+# 10
+# 3
+
+
+class Foo
+ def one
+ 1
+ end
+
+ def yield_with_self
+ with self yield
+ end
+
+ def yield_normally
+ yield
+ end
+end
+
+def one
+ "one"
+end
+
+Foo.new.yield_with_self { one } # => 1
+Foo.new.yield_normally { one } # => "one"
+
+
+def twice
+ yield 1
+ yield 2
+end
+
+twice do |i|
+ puts "Got: #{i}"
+end
+
+
+i = 1
+puts "Got: #{i}"
+i = 2
+puts "Got: #{i}"
+
+
+3.times do |i|
+ puts i
+end
+
+
+struct Int
+ def times
+ i = 0
+ while i < self
+ yield i
+ i += 1
+ end
+ end
+end
+
+
+i = 0
+while i < 3
+ puts i
+ i += 1
+end
+
+
+class Person
+ def initialize(@name)
+ end
+
+ def greet
+ puts "Hi, I'm #{@name}"
+ end
+end
+
+class Employee < Person
+end
+
+employee = Employee.new "John"
+employee.greet # "Hi, I'm John"
+
+
+class Person
+ def initialize(@name)
+ end
+end
+
+class Employee < Person
+ def initialize(@name, @company_name)
+ end
+end
+
+Employee.new "John", "Acme" # OK
+Employee.new "Peter" # Error: wrong number of arguments
+ # for 'Employee:Class#new' (1 for 2)
+
+
+class Person
+ def greet(msg)
+ puts "Hi, #{msg}"
+ end
+end
+
+class Employee < Person
+ def greet(msg)
+ puts "Hello, #{msg}"
+ end
+end
+
+p = Person.new
+p.greet "everyone" # "Hi, everyone"
+
+e = Employee.new
+e.greet "everyone" # "Hello, everyone"
+
+
+class Person
+ def greet(msg)
+ puts "Hi, #{msg}"
+ end
+end
+
+class Employee < Person
+ def greet(msg : Int32)
+ puts "Hi, this is a number: #{msg}"
+ end
+end
+
+e = Employee.new
+e.greet "everyone" # "Hi, everyone"
+
+e.greet 1 # "Hi, this is a number: 1"
+
+
+class Person
+ def greet(msg)
+ puts "Hello, "#{msg}"
+ end
+end
+
+class Employee < Person
+ def greet(msg)
+ super # Same as: super(msg)
+ super("another message")
+ end
+end
+
+
+def int_to_int(&block : Int32 -> Int32)
+ block
+end
+
+proc = int_to_int { |x| x + 1 }
+proc.call(1) #=> 2
+
+
+class Model
+ def on_save(&block)
+ @on_save_callback = block
+ end
+
+ def save
+ if callback = @on_save_callback
+ callback.call
+ end
+ end
+end
+
+model = Model.new
+model.on_save { puts "Saved!" }
+model.save # prints "Saved!"
+
+
+def some_proc(&block : Int32 ->)
+ block
+end
+
+proc = some_proc { |x| x + 1 }
+proc.call(1) # void
+
+
+def some_proc(&block : Int32 -> _)
+ block
+end
+
+proc = some_proc { |x| x + 1 }
+proc.call(1) # 2
+
+proc = some_proc { |x| x.to_s }
+proc.call(1) # "1"
+
+
+macro update_x
+ x = 1
+end
+
+x = 0
+update_x
+x #=> 1
+
+
+macro dont_update_x
+ %x = 1
+ puts %x
+end
+
+x = 0
+dont_update_x # outputs 1
+x #=> 0
+
+
+macro fresh_vars_sample(*names)
+ # First declare vars
+ {% for name, index in names %}
+ print "Declaring: ", "%name{index}", '\n'
+ %name{index} = {{index}}
+ {% end %}
+
+ # Then print them
+ {% for name, index in names %}
+ print "%name{index}: ", %name{index}, '\n'
+ {% end %}
+end
+
+fresh_vars_sample a, b, c
+
+# Sample output:
+# Declaring: __temp_255
+# Declaring: __temp_256
+# Declaring: __temp_257
+# __temp_255: 0
+# __temp_256: 1
+# __temp_257: 2
+
+
+class Object
+ macro def instance_vars_names : Array(String)
+ {{ @type.instance_vars.map &.name.stringify }}
+ end
+end
+
+class Person
+ def initialize(@name, @age)
+ end
+end
+
+person = Person.new "John", 30
+person.instance_vars_names #=> ["name", "age"]
+
+
+class Object
+ macro def has_instance_var?(name) : Bool
+ # We cannot access name inside the macro expansion here,
+ # instead we need to use the macro language to construct an array
+ # and do the inclusion check at runtime.
+ {{ @type.instance_vars.map &.name.stringify }}.includes? name
+ end
+end
+
+person = Person.new "John", 30
+person.has_instance_var?("name") #=> true
+person.has_instance_var?("birthday") #=> false
+
+
+class Parent
+ macro inherited
+ def {{@type.name.downcase.id}}
+ 1
+ end
+ end
+end
+
+class Child < Parent
+end
+
+Child.new.child #=> 1
+
+
+macro method_missing(name, args, block)
+ print "Got ", {{name.id.stringify}}, " with ", {{args.size}}, " arguments", '\n'
+end
+
+foo # Prints: Got foo with 0 arguments
+bar 'a', 'b' # Prints: Got bar with 2 arguments
+
+
+sizeof(Int32) #=> 4
+sizeof(Int64) #=> 8
+
+
+# On a 64 bits machine
+sizeof(Pointer(Int32)) #=> 8
+sizeof(String) #=> 8
+
+
+a = 1
+sizeof(typeof(a)) #=> 4
+
+
+class Foo
+ macro emphasize(value)
+ "***#{ {{value}} }***"
+ end
+
+ def yield_with_self
+ with self yield
+ end
+end
+
+Foo.new.yield_with_self { emphasize(10) } #=> "***10***"
+
+
+# This generates:
+#
+# def :foo
+# 1
+# end
+define_method :foo, 1
+
+
+macro define_method(name, content)
+ def {{name.id}}
+ {{content}}
+ end
+end
+
+# This correctly generates:
+#
+# def foo
+# 1
+# end
+define_method :foo, 1
+
+
+macro define_method(name, content)
+ def {{name}}
+ {% if content == 1 %}
+ "one"
+ {% else %}
+ {{content}}
+ {% end %}
+ end
+end
+
+define_method foo, 1
+define_method bar, 2
+
+foo #=> one
+bar #=> 2
+
+
+{% if env("TEST") %}
+ puts "We are in test mode"
+{% end %}
+
+
+macro define_dummy_methods(names)
+ {% for name, index in names %}
+ def {{name.id}}
+ {{index}}
+ end
+ {% end %}
+end
+
+define_dummy_methods [foo, bar, baz]
+
+foo #=> 0
+bar #=> 1
+baz #=> 2
+
+
+macro define_dummy_methods(hash)
+ {% for key, value in hash %}
+ def {{key.id}}
+ {{value}}
+ end
+ {% end %}
+end
+define_dummy_methods({foo: 10, bar: 20})
+foo #=> 10
+bar #=> 20
+
+
+{% for name, index in ["foo", "bar", "baz"] %}
+ def {{name.id}}
+ {{index}}
+ end
+{% end %}
+
+foo #=> 0
+bar #=> 1
+baz #=> 2
+
+
+macro define_dummy_methods(*names)
+ {% for name, index in names %}
+ def {{name.id}}
+ {{index}}
+ end
+ {% end %}
+end
+
+define_dummy_methods foo, bar, baz
+
+foo #=> 0
+bar #=> 1
+baz #=> 2
+
+
+macro println(*values)
+ print {{*values}}, '\n'
+end
+
+println 1, 2, 3 # outputs 123\n
+
+
+VALUES = [1, 2, 3]
+
+{% for value in VALUES %}
+ puts {{value}}
+{% end %}
+
+
+until some_condition
+ do_this
+end
+
+# The above is the same as:
+while !some_condition
+ do_this
+end
+
+
+a = some_condition ? nil : 3
+# a is Int32 or Nil
+
+if a
+ # Since the only way to get here is if a is truthy,
+ # a can't be nil. So here a is Int32.
+ a.abs
+end
+
+
+if a = some_expression
+ # here a is not nil
+end
+
+
+if a && b
+ # here both a and b are guaranteed not to be Nil
+end
+
+
+if @a
+ # here @a can be nil
+end
+
+
+# First option: assign it to a variable
+if a = @a
+ # here a can't be nil
+end
+
+# Second option: use `Object#try` found in the standard library
+@a.try do |a|
+ # here a can't be nil
+end
+
+
+if method # first call to a method that can return Int32 or Nil
+ # here we know that the first call did not return Nil
+ method # second call can still return Int32 or Nil
+end
+
+
+class Person
+ def become_older(by = 1)
+ @age += by
+ end
+end
+
+john = Person.new "John"
+john.age #=> 0
+
+john.become_older
+john.age #=> 1
+
+john.become_older 2
+john.age #=> 3
+
+
+john.become_older by: 5
+
+
+def some_method(x, y = 1, z = 2, w = 3)
+ # do something...
+end
+
+some_method 10 # x = 10, y = 1, z = 2, w = 3
+some_method 10, z: 10 # x = 10, y = 1, z = 10, w = 3
+some_method 10, w: 1, y: 2, z: 3 # x = 10, y = 2, z = 3, w = 1
+
+
+case exp
+when value1, value2
+ do_something
+when value3
+ do_something_else
+else
+ do_another_thing
+end
+
+
+case var
+when String
+ # var : String
+ do_something
+when Int32
+ # var : Int32
+ do_something_else
+else
+ # here var is neither a String nor an Int32
+ do_another_thing
+end
+
+
+case num
+when .even?
+ do_something
+when .odd?
+ do_something_else
+end
+
+
+case
+when cond1, cond2
+ do_something
+when cond3
+ do_something_else
+end
+
+
+a = 1
+a.responds_to?(:abs) #=> true
+a.responds_to?(:size) #=> false
+
+
+foo_or_bar = /foo|bar/
+heeello = /h(e+)llo/
+integer = /\d+/
+
+
+r = /foo/imx
+
+
+slash = /\//
+
+
+r = %r(regex with slash: /)
+
+
+"hello world"
+
+
+"\"" # double quote
+"\\" # backslash
+"\e" # escape
+"\f" # form feed
+"\n" # newline
+"\r" # carriage return
+"\t" # tab
+"\v" # vertical tab
+
+
+"\101" # == "A"
+"\123" # == "S"
+"\12" # == "\n"
+"\1" # string with one character with code point 1
+
+
+"\u0041" # == "A"
+
+
+"\u{41}" # == "A"
+"\u{1F52E}" # == "🔮"
+
+
+"hello
+ world" # same as "hello\n world"
+
+
+"hello " \
+"world, " \
+"no newlines" # same as "hello world, no newlines"
+
+
+"hello \
+ world, \
+ no newlines" # same as "hello world, no newlines"
+
+
+# Supports double quotes and nested parenthesis
+%(hello ("world")) # same as "hello (\"world\")"
+
+# Supports double quotes and nested brackets
+%[hello ["world"]] # same as "hello [\"world\"]"
+
+# Supports double quotes and nested curlies
+%{hello {"world"}} # same as "hello {\"world\"}"
+
+# Supports double quotes and nested angles
+%<hello <"world">> # same as "hello <\"world\">"
+
+
+<<-XML
+<parent>
+ <child />
+</parent>
+XML
+
+
+# Same as "Hello\n world"
+<<-STRING
+ Hello
+ world
+ STRING
+
+# Same as " Hello\n world"
+<<-STRING
+ Hello
+ world
+ STRING
+
+
+a = 1
+b = 2
+"sum = #{a + b}" # "sum = 3"
+
+
+1.0 # Float64
+1.0_f32 # Float32
+1_f32 # Float32
+
+1e10 # Float64
+1.5e10 # Float64
+1.5e-7 # Float64
+
++1.3 # Float64
+-0.5 # Float64
+
+
+1_000_000.111_111 # better than 1000000.111111
+
+
+'a'
+'z'
+'0'
+'_'
+'あ'
+
+
+'\'' # single quote
+'\\' # backslash
+'\e' # escape
+'\f' # form feed
+'\n' # newline
+'\r' # carriage return
+'\t' # tab
+'\v' # vertical tab
+
+
+'\101' # == 'A'
+'\123' # == 'S'
+'\12' # == '\n'
+'\1' # code point 1
+
+
+'\u0041' # == 'A'
+
+
+'\u{41}' # == 'A'
+'\u{1F52E}' # == '🔮'
+
+
+{1 => 2, 3 => 4} # Hash(Int32, Int32)
+{1 => 2, 'a' => 3} # Hash(Int32 | Char, Int32)
+
+
+{} of Int32 => Int32 # same as Hash(Int32, Int32).new
+
+
+{key1: 'a', key2: 'b'} # Hash(Symbol, Char)
+
+
+{"key1": 'a', "key2": 'b'} # Hash(String, Char)
+
+
+MyType{"foo": "bar"}
+
+
+tmp = MyType.new
+tmp["foo"] = "bar"
+tmp
+
+
+tmp = MyType(typeof("foo"), typeof("bar")).new
+tmp["foo"] = "bar"
+tmp
+
+
+MyType(String, String) {"foo": "bar"}
+
+
+:hello
+:good_bye
+
+# With spaces and symbols
+:"symbol with spaces"
+
+# Ending with question and exclamation marks
+:question?
+:exclamation!
+
+# For the operators
+:+
+:-
+:*
+:/
+:==
+:<
+:<=
+:>
+:>=
+:!
+:!=
+:=~
+:!~
+:&
+:|
+:^
+:~
+:**
+:>>
+:<<
+:%
+:[]
+:[]?
+:[]=
+:<=>
+:===
+
+
+x..y # an inclusive range, in mathematics: [x, y]
+x...y # an exclusive range, in mathematics: [x, y)
+
+
+# A proc without arguments
+->{ 1 } # Proc(Int32)
+
+# A proc with one argument
+->(x : Int32) { x.to_s } # Proc(Int32, String)
+
+# A proc with two arguments:
+->(x : Int32, y : Int32) { x + y } # Proc(Int32, Int32, Int32)
+
+
+Proc(Int32, String).new { |x| x.to_s } # Proc(Int32, String)
+
+
+proc = ->(x : Int32, y : Int32) { x + y }
+proc.call(1, 2) #=> 3
+
+
+def one
+ 1
+end
+
+proc = ->one
+proc.call #=> 1
+
+
+def plus_one(x)
+ x + 1
+end
+
+proc = ->plus_one(Int32)
+proc.call(41) #=> 42
+
+
+str = "hello"
+proc = ->str.count(Char)
+proc.call('e') #=> 1
+proc.call('l') #=> 2
+
+
+tuple = {1, "hello", 'x'} # Tuple(Int32, String, Char)
+tuple[0] #=> 1 (Int32)
+tuple[1] #=> "hello" (String)
+tuple[2] #=> 'x' (Char)
+
+
+[1, 2, 3] # Array(Int32)
+[1, "hello", 'x'] # Array(Int32 | String | Char)
+
+
+[] of Int32 # same as Array(Int32).new
+
+
+%w(one two three) # ["one", "two", "three"]
+
+
+%i(one two three) # [:one, :two, :three]
+
+
+MyType{1, 2, 3}
+
+
+tmp = MyType.new
+tmp << 1
+tmp << 2
+tmp << 3
+tmp
+
+
+tmp = MyType(typeof(1, 2, 3)).new
+tmp << 1
+tmp << 2
+tmp << 3
+tmp
+
+
+MyType(Int32 | String) {1, 2, "foo"}
+
+
+nil
+
+
+1 # Int32
+
+1_i8 # Int8
+1_i16 # Int16
+1_i32 # Int32
+1_i64 # Int64
+
+1_u8 # UInt8
+1_u16 # UInt16
+1_u32 # UInt32
+1_u64 # UInt64
+
++10 # Int32
+-20 # Int32
+
+2147483648 # Int64
+9223372036854775808 # UInt64
+
+
+1_000_000 # better than 1000000
+
+
+0b1101 # == 13
+
+
+0o123 # == 83
+
+
+0xFE012D # == 16646445
+0xfe012d # == 16646445
+
+
+true # A Bool that is true
+false # A Bool that is false
+
+
+a = 1
+
+ptr = pointerof(a)
+ptr.value = 2
+
+a #=> 2
+
+
+class Point
+ def initialize(@x, @y)
+ end
+
+ def x
+ @x
+ end
+
+ def x_ptr
+ pointerof(@x)
+ end
+end
+
+point = Point.new 1, 2
+
+ptr = point.x_ptr
+ptr.value = 10
+
+point.x #=> 10
+
+
+def add(x : Number, y : Number)
+ x + y
+end
+
+# Ok
+add 1, 2 # Ok
+
+# Error: no overload matches 'add' with types Bool, Bool
+add true, false
+
+
+def add(x, y)
+ x + y
+end
+
+add true, false
+
+
+# A class that has a + method but isn't a Number
+class Six
+ def +(other)
+ 6 + other
+ end
+end
+
+# add method without type restrictions
+def add(x, y)
+ x + y
+end
+
+# OK
+add Six.new, 10
+
+# add method with type restrictions
+def restricted_add(x : Number, y : Number)
+ x + y
+end
+
+# Error: no overload matches 'restricted_add' with types Six, Int32
+restricted_add Six.new, 10
+
+
+class Person
+ def ==(other : self)
+ other.name == name
+ end
+
+ def ==(other)
+ false
+ end
+end
+
+john = Person.new "John"
+another_john = Person.new "John"
+peter = Person.new "Peter"
+
+john == another_john #=> true
+john == peter #=> false (names differ)
+john == 1 #=> false (because 1 is not a Person)
+
+
+class Person
+ def self.compare(p1 : self, p2 : self)
+ p1.name == p2.name
+ end
+end
+
+john = Person.new "John"
+peter = Person.new "Peter"
+
+Person.compare(john, peter) # OK
+
+
+def foo(x : Int32)
+end
+
+foo 1 # OK
+foo "hello" # Error
+
+
+def foo(x : Int32.class)
+end
+
+foo Int32 # OK
+foo String # Error
+
+
+def foo(x : Int32.class)
+ puts "Got Int32"
+end
+
+def foo(x : String.class)
+ puts "Got String"
+end
+
+foo Int32 # prints "Got Int32"
+foo String # prints "Got String"
+
+
+def foo(*args : Int32)
+end
+
+def foo(*args : String)
+end
+
+foo 1, 2, 3 # OK, invokes first overload
+foo "a", "b", "c" # OK, invokes second overload
+foo 1, 2, "hello" # Error
+foo() # Error
+
+
+def foo
+ # This is the empty-tuple case
+end
+
+
+def foo(x : T)
+ T
+end
+
+foo(1) #=> Int32
+foo("hello") #=> String
+
+
+def foo(x : Array(T))
+ T
+end
+
+foo([1, 2]) #=> Int32
+foo([1, "a"]) #=> (Int32 | String)
+
+
+def foo(x : T.class)
+ Array(T)
+end
+
+foo(Int32) #=> Array(Int32)
+foo(String) #=> Array(String)
+
+
+class Person
+ # Increases age by one
+ def become_older
+ @age += 1
+ end
+
+ # Increases age by the given number of years
+ def become_older(years : Int32)
+ @age += years
+ end
+
+ # Increases age by the given number of years, as a String
+ def become_older(years : String)
+ @age += years.to_i
+ end
+
+ # Yields the current age of this person and increases
+ # its age by the value returned by the block
+ def become_older
+ @age += yield @age
+ end
+end
+
+person = Person.new "John"
+
+person.become_older
+person.age #=> 1
+
+person.become_older 5
+person.age #=> 6
+
+person.become_older "12"
+person.age #=> 18
+
+person.become_older do |current_age|
+ current_age < 20 ? 10 : 30
+end
+person.age #=> 28
+
+
+a = 1
+a.is_a?(Int32) #=> true
+a.is_a?(String) #=> false
+a.is_a?(Number) #=> true
+a.is_a?(Int32 | String) #=> true
+
+
+# One for each thread
+@[ThreadLocal]
+$values = [] of Int32
+
+
+@[AlwaysInline]
+def foo
+ 1
+end
+
+
+@[NoInline]
+def foo
+ 1
+end
+
+
+lib LibFoo
+ @[CallConvention("X86_StdCall")]
+ fun foo : Int32
+end
+
+
+def sum(*elements)
+ total = 0
+ elements.each do |value|
+ total += value
+ end
+ total
+end
+
+# elements is Tuple(Int32, Int32, Int32, Float64)
+sum 1, 2, 3, 4.5
+
+
+if a.responds_to?(:abs)
+ # here a's type will be reduced to those responding to the 'abs' method
+end
+
+
+a = some_condition ? 1 : "hello"
+# a : Int32 | String
+
+if a.responds_to?(:abs)
+ # here a will be Int32, since Int32#abs exists but String#abs doesn't
+else
+ # here a will be String
+end
+
+
+if (a = @a).responds_to?(:abs)
+ # here a is guaranteed to respond to `abs`
+end
+
+
+def capture(&block)
+ block
+end
+
+def invoke(&block)
+ block.call
+end
+
+proc = capture { puts "Hello" }
+invoke(&proc) # prints "Hello"
+
+
+
+
+def capture(&block)
+ block
+end
+
+def twice
+ yield
+ yield
+end
+
+proc = capture { puts "Hello" }
+twice &proc
+
+
+twice &->{ puts "Hello" }
+
+
+def say_hello
+ puts "Hello"
+end
+
+twice &->say_hello
+
+
+def foo
+ yield 1
+end
+
+def wrap_foo
+ puts "Before foo"
+ foo do |x|
+ yield x
+ end
+ puts "After foo"
+end
+
+wrap_foo do |i|
+ puts i
+end
+
+
+def foo
+ yield 1
+end
+
+def wrap_foo(&block : Int32 -> _)
+ puts "Before foo"
+ foo(&block)
+ puts "After foo"
+end
+
+wrap_foo do |i|
+ puts i
+end
+
+
+foo_forward do |i|
+ break # error
+end
+
+
+a = 2
+while (a += 1) < 20
+ if a == 10
+ # goes to 'puts a'
+ break
+ end
+end
+puts a #=> 10
+
+
+class Person
+ private def say(message)
+ puts message
+ end
+
+ def say_hello
+ say "hello" # OK, no receiver
+ self.say "hello" # Error, self is a receiver
+
+ other = Person.new "Other"
+ other.say "hello" # Error, other is a receiver
+ end
+end
+
+
+class Employee < Person
+ def say_bye
+ say "bye" # OK
+ end
+end
+
+
+module Namespace
+ class Foo
+ protected def foo
+ puts "Hello"
+ end
+ end
+
+ class Bar
+ def bar
+ # Works, because Foo and Bar are under Namespace
+ Foo.new.foo
+ end
+ end
+end
+
+Namespace::Bar.new.bar
+
+
+class Person
+ protected def self.say(message)
+ puts message
+ end
+
+ def say_hello
+ Person.say "hello"
+ end
+end
+
+
+buffer = uninitialized UInt8[256]
diff --git a/tests/examplefiles/test.erl b/tests/examplefiles/test.erl
index 5b983e75..d4ab4825 100644
--- a/tests/examplefiles/test.erl
+++ b/tests/examplefiles/test.erl
@@ -152,6 +152,18 @@ a_binary() ->
a_list_comprehension() ->
[X*2 || X <- [1,2,3]].
+a_map() ->
+ M0 = #{ a => 1, b => 2 },
+ M1 = M0#{ b := 200 }.
+
+escape_sequences() ->
+ [ "\b\d\e\f\n\r\s\t\v\'\"\\"
+ , "\1\12\123" % octal
+ , "\x01" % short hex
+ , "\x{fff}" % long hex
+ , "\^a\^A" % control characters
+ ].
+
map(Fun, [H|T]) ->
[Fun(H) | map(Fun, T)];
diff --git a/tests/examplefiles/test.escript b/tests/examplefiles/test.escript
new file mode 100644
index 00000000..3fafb803
--- /dev/null
+++ b/tests/examplefiles/test.escript
@@ -0,0 +1,4 @@
+#!/usr/bin/env escript
+
+main(_Args) ->
+ ok.
diff --git a/tests/examplefiles/test.hsail b/tests/examplefiles/test.hsail
new file mode 100644
index 00000000..f9c25091
--- /dev/null
+++ b/tests/examplefiles/test.hsail
@@ -0,0 +1,62 @@
+module &__llvm_hsail_module:1:0:$full:$large:$near;
+
+prog kernel &mmul2d(
+ kernarg_u64 %__arg_p0,
+ kernarg_u64 %__arg_p1,
+ kernarg_u64 %__arg_p2,
+ kernarg_u64 %__arg_p3)
+{
+ pragma "AMD RTI", "ARGSTART:mmul2d";
+ pragma "AMD RTI", "version:3:1:104";
+ pragma "AMD RTI", "device:generic";
+ pragma "AMD RTI", "uniqueid:1025";
+ pragma "AMD RTI", "function:1:0";
+ pragma "AMD RTI", "memory:64bitABI";
+ pragma "AMD RTI", "privateid:1";
+ pragma "AMD RTI", "ARGEND:mmul2d";
+ // BB#0: // %top
+ mov_f64 $d1, 0.0E+0;
+ gridsize_u32 $s0, 0;
+ workitemabsid_u32 $s1, 1;
+ workitemabsid_u32 $s2, 0;
+ cvt_u64_u32 $d0, $s2;
+ cvt_u64_u32 $d3, $s1;
+ cvt_u64_u32 $d4, $s0;
+ ld_kernarg_align(8)_width(all)_u64 $d2, [%__arg_p2];
+ ld_kernarg_align(8)_width(all)_u64 $d6, [%__arg_p1];
+ ld_kernarg_align(8)_width(all)_u64 $d5, [%__arg_p3];
+ ld_kernarg_align(8)_width(all)_u64 $d7, [%__arg_p0];
+ cmp_lt_b1_s64 $c0, $d5, 1;
+ cbr_b1 $c0, @BB0_3;
+ // BB#1: // %L.preheader
+ mul_u64 $d1, $d5, $d3;
+ shl_u64 $d1, $d1, 3;
+ shl_u64 $d8, $d0, 3;
+ add_u64 $d8, $d7, $d8;
+ add_u64 $d6, $d6, $d1;
+ shl_u64 $d7, $d4, 3;
+ mov_f64 $d1, 0D0000000000000000;
+
+@BB0_2:
+ // %L
+ add_u64 $d9, $d8, $d7;
+ ld_global_f64 $d8, [$d8];
+ ld_global_f64 $d10, [$d6];
+ mul_f64 $d8, $d8, $d10;
+ add_f64 $d1, $d1, $d8;
+ add_u64 $d6, $d6, 8;
+ add_u64 $d5, $d5, 18446744073709551615;
+ cmp_ne_b1_s64 $c0, $d5, 0;
+ mov_b64 $d8, $d9;
+ cbr_b1 $c0, @BB0_2;
+
+@BB0_3:
+ // %L.7
+ mul_u64 $d3, $d3, $d4;
+ add_u64 $d0, $d3, $d0;
+ shl_u64 $d0, $d0, 3;
+ add_u64 $d0, $d2, $d0;
+ st_global_f64 $d1, [$d0];
+ ret;
+};
+
diff --git a/tests/examplefiles/test.ncl b/tests/examplefiles/test.ncl
new file mode 100644
index 00000000..f20f8159
--- /dev/null
+++ b/tests/examplefiles/test.ncl
@@ -0,0 +1,20 @@
+load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_code.ncl"
+load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/gsn_csm.ncl"
+load "$NCARG_ROOT/lib/ncarg/nclscripts/csm/contributed.ncl"
+begin
+ int_num = 1
+ float_num = 0.1
+ str = "A void map"
+ array = (/1, 2, 3, 4, 5/)
+
+
+ wks = gsn_open_wks("X11", "test_for_pygments")
+
+ res = True
+ res@mpMinLonF = 90.
+ res@mpMaxLonF = 180.
+ res@mpMinLatF = 0.
+ res@mpMaxLatF = 90.
+
+ plot = gsn_csm_map_ce(wks, res)
+end \ No newline at end of file
diff --git a/tests/examplefiles/test.php b/tests/examplefiles/test.php
index 2ce4023e..e8efdc6a 100644
--- a/tests/examplefiles/test.php
+++ b/tests/examplefiles/test.php
@@ -505,11 +505,40 @@ function &byref() {
return $x;
}
+// Test highlighting of magic methods and variables
+class MagicClass {
+ public $magic_str;
+ public $ordinary_str;
+
+ public function __construct($some_var) {
+ $this->magic_str = __FILE__;
+ $this->ordinary_str = $some_var;
+ }
+
+ public function __toString() {
+ return $this->magic_str;
+ }
+
+ public function nonMagic() {
+ return $this->ordinary_str;
+ }
+}
+
+$magic = new MagicClass(__DIR__);
+__toString();
+$magic->nonMagic();
+$magic->__toString();
+
echo <<<EOF
Test the heredocs...
EOF;
+echo <<<"some_delimiter"
+more heredoc testing
+continues on this line
+some_delimiter;
+
?>
diff --git a/tests/examplefiles/test.sil b/tests/examplefiles/test.sil
new file mode 100644
index 00000000..3bcee835
--- /dev/null
+++ b/tests/examplefiles/test.sil
@@ -0,0 +1,206 @@
+domain Option__Node {
+ unique function Option__Node__Some(): Option__Node
+ unique function Option__Node__None(): Option__Node
+
+ function variantOfOptionNode(self: Ref): Option__Node
+
+ function isOptionNode(self: Ref): Bool
+
+ axiom ax_variantOfOptionNodeChoices {
+ forall x: Ref :: { variantOfOptionNode(x) }
+ (variantOfOptionNode(x) == Option__Node__Some() || variantOfOptionNode(x) == Option__Node__None())
+ }
+
+ axiom ax_isCounterState {
+ forall x: Ref :: { variantOfOptionNode(x) }
+ isOptionNode(x) == (variantOfOptionNode(x) == Option__Node__Some() ||
+ variantOfOptionNode(x) == Option__Node__None())
+ }
+}
+
+predicate validOption(this: Ref) {
+ isOptionNode(this) &&
+ variantOfOptionNode(this) == Option__Node__Some() ==> (
+ acc(this.Option__Node__Some__1, write) &&
+ acc(validNode(this.Option__Node__Some__1))
+ )
+}
+
+field Option__Node__Some__1: Ref
+
+field Node__v: Int
+field Node__next: Ref
+
+predicate validNode(this: Ref) {
+ acc(this.Node__v) &&
+ acc(this.Node__next) &&
+ acc(validOption(this.Node__next))
+}
+
+
+function length(this: Ref): Int
+ requires acc(validNode(this), write)
+ ensures result >= 1
+{
+ (unfolding acc(validNode(this), write) in
+ unfolding acc(validOption(this.Node__next)) in
+ (variantOfOptionNode(this.Node__next) == Option__Node__None()) ?
+ 1 : 1 + length(this.Node__next.Option__Node__Some__1)
+ )
+}
+
+function itemAt(this: Ref, i: Int): Int
+ requires acc(validNode(this), write)
+ requires 0 <= i && i < length(this)
+{
+ unfolding acc(validNode(this), write) in unfolding acc(validOption(this.Node__next)) in (
+ (i == 0) ?
+ this.Node__v:
+ (variantOfOptionNode(this.Node__next) == Option__Node__Some()) ?
+ itemAt(this.Node__next.Option__Node__Some__1, i-1) : this.Node__v
+ )
+}
+
+function sum(this$1: Ref): Int
+ requires acc(validNode(this$1), write)
+{
+ (unfolding acc(validNode(this$1), write) in unfolding acc(validOption(this$1.Node__next)) in
+ (variantOfOptionNode(this$1.Node__next) == Option__Node__None()) ? this$1.Node__v : this$1.Node__v + sum(this$1.Node__next.Option__Node__Some__1))
+}
+
+method append(this: Ref, val: Int)
+ requires acc(validNode(this), write)
+ ensures acc(validNode(this), write) /* POST1 */
+ ensures length(this) == (old(length(this)) + 1) /* POST2 */
+ ensures (forall i: Int :: (0 <= i && i < old(length(this))) ==> (itemAt(this, i) == old(itemAt(this, i)))) /* POST3 */
+ ensures itemAt(this, length(this) - 1) == val /* POST4 */
+ ensures true ==> true
+{
+ var tmp_node: Ref
+ var tmp_option: Ref
+
+ unfold acc(validNode(this), write)
+ unfold acc(validOption(this.Node__next), write)
+
+ if (variantOfOptionNode(this.Node__next) == Option__Node__None()) {
+ tmp_node := new(Node__next, Node__v)
+ tmp_node.Node__next := null
+ tmp_node.Node__v := val
+
+ assume variantOfOptionNode(tmp_node.Node__next) == Option__Node__None()
+ fold acc(validOption(tmp_node.Node__next))
+ fold acc(validNode(tmp_node), write)
+
+ tmp_option := new(Option__Node__Some__1)
+ tmp_option.Option__Node__Some__1 := tmp_node
+ assume variantOfOptionNode(tmp_option) == Option__Node__Some()
+ fold acc(validOption(tmp_option))
+
+ this.Node__next := tmp_option
+
+
+ unfold validOption(tmp_option)
+ assert length(tmp_node) == 1 /* TODO: Required by Silicon, POST2 fails otherwise */
+ assert itemAt(tmp_node, 0) == val /* TODO: Required by Silicon, POST4 fails otherwise */
+ fold validOption(tmp_option)
+ } else {
+ append(this.Node__next.Option__Node__Some__1, val)
+ fold acc(validOption(this.Node__next), write)
+ }
+
+ fold acc(validNode(this), write)
+}
+
+method prepend(tail: Ref, val: Int) returns (res: Ref)
+ requires acc(validNode(tail))
+ ensures acc(validNode(res))
+ //ensures acc(validNode(tail))
+ ensures length(res) == old(length(tail)) + 1
+
+ ensures (forall i: Int :: (1 <= i && i < length(res)) ==> (itemAt(res, i) == old(itemAt(tail, i-1)))) /* POST3 */
+ ensures itemAt(res, 0) == val
+{
+ var tmp_option: Ref
+
+ res := new(Node__v, Node__next)
+ res.Node__v := val
+
+ tmp_option := new(Option__Node__Some__1)
+ tmp_option.Option__Node__Some__1 := tail
+ assume variantOfOptionNode(tmp_option) == Option__Node__Some()
+
+ res.Node__next := tmp_option
+
+ assert acc(validNode(tail))
+ fold acc(validOption(res.Node__next))
+ fold acc(validNode(res))
+}
+
+method length_iter(list: Ref) returns (len: Int)
+ requires acc(validNode(list), write)
+ ensures old(length(list)) == len
+ // TODO we have to preserve this property
+ // ensures acc(validNode(list))
+{
+ var curr: Ref := list
+ var tmp: Ref := list
+
+ len := 1
+
+ unfold acc(validNode(curr))
+ unfold acc(validOption(curr.Node__next))
+ while(variantOfOptionNode(curr.Node__next) == Option__Node__Some())
+ invariant acc(curr.Node__v)
+ invariant acc(curr.Node__next)
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__Some() ==> (
+ acc(curr.Node__next.Option__Node__Some__1, write) &&
+ acc(validNode(curr.Node__next.Option__Node__Some__1))
+ ))
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__Some() ==> len + length(curr.Node__next.Option__Node__Some__1) == old(length(list)))
+ invariant (variantOfOptionNode(curr.Node__next) == Option__Node__None() ==> len == old(length(list)))
+ {
+ assert acc(validNode(curr.Node__next.Option__Node__Some__1))
+ len := len + 1
+ tmp := curr
+ curr := curr.Node__next.Option__Node__Some__1
+ unfold acc(validNode(curr))
+ unfold acc(validOption(curr.Node__next))
+ }
+}
+
+method t1()
+{
+ var l: Ref
+
+ l := new(Node__v, Node__next)
+ l.Node__next := null
+ l.Node__v := 1
+ assume variantOfOptionNode(l.Node__next) == Option__Node__None()
+
+ fold validOption(l.Node__next)
+ fold validNode(l)
+
+ assert length(l) == 1
+ assert itemAt(l, 0) == 1
+
+ append(l, 7)
+ assert itemAt(l, 1) == 7
+ assert itemAt(l, 0) == 1
+ assert length(l) == 2
+
+ l := prepend(l, 10)
+ assert itemAt(l, 2) == 7
+ assert itemAt(l, 1) == 1
+ assert itemAt(l, 0) == 10
+ assert length(l) == 3
+
+ //assert sum(l) == 18
+}
+
+method t2(l: Ref) returns (res: Ref)
+ requires acc(validNode(l), write)
+ ensures acc(validNode(res), write)
+ ensures length(res) > old(length(l))
+{
+ res := prepend(l, 10)
+}
diff --git a/tests/examplefiles/tsql_example.sql b/tests/examplefiles/tsql_example.sql
new file mode 100644
index 00000000..cbd76091
--- /dev/null
+++ b/tests/examplefiles/tsql_example.sql
@@ -0,0 +1,72 @@
+-- Example Transact-SQL file.
+
+-- Single line comment
+/* A comment
+ * spanning two lines. */
+ /* An indented comment
+ * spanning multiple
+ * lines. */
+/* A /* nested */ comment. */
+
+select
+ left(emp.firstname, 1) + '.' + [emp.surname] as "Name",
+ dep.name as [Department]
+into
+ #temp_employee
+from
+ employee as emp
+ inner join department as dep on
+ dep.ident_code = emp.department_id
+where
+ emp.date_of_birth >= '1990-01-01';
+go
+
+declare @TextToFind nvarchar(100) = N'some
+text across
+multiple lines';
+
+set @TextToFind varchar(32) = 'hello' + ' world';
+set @TextToFind += '!';
+
+declare @Count int = 17 * (3 - 5);
+
+delete from
+ [server].[database].[schema].[table]
+where
+ [Text] = @TextToFind and author Not LIKE '%some%';
+
+goto overthere;
+overthere:
+
+select
+ 123 as "int 1",
+ +123 as "int 2",
+ -123 as "int 3",
+ 0x20 as "hex int",
+ 123.45 as "float 1",
+ -1.23e45 as "float 2"
+ +1.23E+45 as "float 3",
+ -1.23e-45 as "float 4",
+ 1. as "float 5",
+ .1 as "float 6",
+ 1.e2 as "float 7",
+ .1e2 as "float 8";
+
+Select @@Error, $PARTITion.RangePF1(10);
+
+select top 3 Ähnliches from Müll;
+
+-- Example transaction
+BEGIN TRAN
+
+BEGIN TRY
+ INSERT INTO #temp_employee(Name, Department) VALUES ('L. Miller', 'Sales')
+ iNsErT inTO #temp_employee(Name, Department) VaLuEs ('M. Webster', 'Helpdesk')
+ COMMIT TRAN
+END TRY
+BEGIN CATCH
+ print 'cannot perform transaction; rolling back';
+ ROLLBACK TRAN
+END CATCH
+
+-- Comment at end without newline. \ No newline at end of file
diff --git a/tests/examplefiles/example.ts b/tests/examplefiles/typescript_example
index 760e2543..760e2543 100644
--- a/tests/examplefiles/example.ts
+++ b/tests/examplefiles/typescript_example
diff --git a/tests/examplefiles/typoscript_example b/tests/examplefiles/typoscript_example
new file mode 100644
index 00000000..e2fccf5d
--- /dev/null
+++ b/tests/examplefiles/typoscript_example
@@ -0,0 +1,1930 @@
+# ***************************************************************************
+# Notice: "styles." (and "temp.") objects are UNSET after template parsing!
+# Use "lib." for persisting storage of objects.
+# ***************************************************************************
+
+<INCLUDE_TYPOSCRIPT: source="FILE: EXT:www_tue_nl/Configuration/TypoScript/Setup/Root.ts">
+
+page.80 = RECORDS
+page.80 {
+ source = 1
+ tables = tt_address
+ conf.tt_address = COA
+ conf.tt_address {
+ 20 = TEXT
+ 20.field = email
+ 20.typolink.parameter.field = email
+ }
+}
+
+ /*
+page.200 = PHP_SCRIPT_EXT
+page.200 {
+ 1 = TMENU
+ 1.wrap = <div style="width:200px; border: 1px solid;">|</div>
+ 1.expAll = 1
+ 1.submenuObjSuffixes = a |*| |*| b
+ 1.NO.allWrap = <b>|</b><br/>
+
+ 2 = TMENU
+ 2.NO.allWrap = <div style="background:red;">|</div>
+
+ 2a = TMENU
+ 2a.NO.allWrap = <div style="background:yellow;">|</div>
+*
+ 2b = TMENU
+ 2b.NO.allWrap = <div style="background:green;">|</div>
+}
+*/
+
+ # Add the CSS and JS files
+page {
+ includeCSS { # comment at the end of a line
+ file99 = fileadmin/your-fancybox.css
+ }
+ includeJSFooter {
+ fancybox = fileadmin/your-fancybox.js
+ }
+}
+
+ # Change the default rendering of images to match lightbox requirements
+tt_content.image.20.1.imageLinkWrap {
+ JSwindow = 0
+ test = MyExtension\Path\To\Class
+
+ directImageLink = 1
+ linkParams.ATagParams {
+ dataWrap = class= "lightbox" rel="fancybox{field:uid}"
+ }
+}
+
+tt_content.image.20.1.imageLinkWrap >
+tt_content.image.20.1.imageLinkWrap = 1
+tt_content.image.20.1.imageLinkWrap {
+ enable = 1
+ typolink {
+ # directly link to the recent image
+ parameter.cObject = IMG_RESOURCE
+ parameter.cObject.file.import.data = TSFE:lastImageInfo|origFile
+ parameter.cObject.file.maxW = {$styles.content.imgtext.maxW}
+ parameter.override.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ title.field = imagecaption // title
+ title.split.token.char = 10
+ title.if.isTrue.field = imagecaption // header
+ title.split.token.char = 10
+ title.split.returnKey.data = register : IMAGE_NUM_CURRENT
+ parameter.cObject = IMG_RESOURCE
+ parameter.cObject.file.import.data = TSFE:lastImageInfo|origFile
+ ATagParams = target="_blank"
+ }
+}
+
+10 = IMAGE
+10 {
+ # point to the image
+ file = fileadmin/demo/lorem_ipsum/images/a4.jpg
+ # make it rather small
+ file.width = 80
+ # add a link to tx_cms_showpic.php that shows the original image
+ imageLinkWrap = 1
+ imageLinkWrap {
+ enable = 1
+ # JSwindow = 1
+ }
+}
+
+# Clear out any constants in this reserved room!
+styles.content >
+
+# get content
+styles.content.get = CONTENT
+styles.content.get {
+ table = tt_content
+ select.orderBy = sorting
+ select.where = colPos=0
+ select.languageField = sys_language_uid
+}
+
+# get content, left
+styles.content.getLeft < styles.content.get
+styles.content.getLeft.select.where = colPos=1
+
+# get content, right
+styles.content.getRight < styles.content.get
+styles.content.getRight.select.where = colPos=2
+
+# get content, margin
+styles.content.getBorder < styles.content.get
+styles.content.getBorder.select.where = colPos=3
+
+# get news
+styles.content.getNews < styles.content.get
+styles.content.getNews.select.pidInList = {$styles.content.getNews.newsPid}
+
+# Edit page object:
+styles.content.editPanelPage = COA
+styles.content.editPanelPage {
+ 10 = EDITPANEL
+ 10 {
+ allow = toolbar,move,hide
+ label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.page
+ label.wrap = |&nbsp;<b>%s</b>
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+# *********************************************************************
+# "lib." objects are preserved from unsetting after template parsing
+# *********************************************************************
+
+# Creates persistent ParseFunc setup for non-HTML content. This is recommended to use (as a reference!)
+lib.parseFunc {
+ makelinks = 1
+ makelinks.http.keep = {$styles.content.links.keep}
+ makelinks.http.extTarget = {$styles.content.links.extTarget}
+ makelinks.mailto.keep = path
+ tags {
+ link = TEXT
+ link {
+ current = 1
+ typolink.parameter.data = parameters : allParams
+ typolink.extTarget = {$styles.content.links.extTarget}
+ typolink.target = {$styles.content.links.target}
+ parseFunc.constants =1
+ }
+ }
+ allowTags = {$styles.content.links.allowTags}
+ denyTags = *
+ sword = <span class="csc-sword">|</span>
+ constants = 1
+
+ nonTypoTagStdWrap.HTMLparser = 1
+ nonTypoTagStdWrap.HTMLparser {
+ keepNonMatchedTags = 1
+ htmlSpecialChars = 2
+ }
+}
+
+# good old parsefunc in "styles.content.parseFunc" is created for backwards compatibility. Don't use it, just ignore.
+styles.content.parseFunc < lib.parseFunc
+
+# Creates persistent ParseFunc setup for RTE content (which is mainly HTML) based on the "ts_css" transformation.
+lib.parseFunc_RTE < lib.parseFunc
+lib.parseFunc_RTE {
+ // makelinks >
+ # Processing <table> and <blockquote> blocks separately
+ externalBlocks = table, blockquote, dd, dl, ol, ul, div
+ externalBlocks {
+ # The blockquote content is passed into parseFunc again...
+ blockquote.stripNL=1
+ blockquote.callRecursive=1
+ blockquote.callRecursive.tagStdWrap.HTMLparser = 1
+ blockquote.callRecursive.tagStdWrap.HTMLparser.tags.blockquote.overrideAttribs = style="margin-bottom:0;margin-top:0;"
+
+ ol.stripNL=1
+ ol.stdWrap.parseFunc = < lib.parseFunc
+
+ ul.stripNL=1
+ ul.stdWrap.parseFunc = < lib.parseFunc
+
+ table.stripNL=1
+ table.stdWrap.HTMLparser = 1
+ table.stdWrap.HTMLparser.tags.table.fixAttrib.class {
+ default = contenttable
+ always = 1
+ list = contenttable
+ }
+ table.stdWrap.HTMLparser.keepNonMatchedTags = 1
+ table.HTMLtableCells=1
+ table.HTMLtableCells {
+ default.callRecursive=1
+ addChr10BetweenParagraphs=1
+ }
+ div.stripNL = 1
+ div.callRecursive = 1
+
+ # Definition list processing
+ dl < .div
+ dd < .div
+ }
+ nonTypoTagStdWrap.encapsLines {
+ encapsTagList = p,pre,h1,h2,h3,h4,h5,h6,hr,dt
+ remapTag.DIV = P
+ nonWrappedTag = P
+ innerStdWrap_all.ifBlank = &nbsp;
+ addAttributes.P.class = bodytext
+ addAttributes.P.class.setOnly=blank
+ }
+ nonTypoTagStdWrap.HTMLparser = 1
+ nonTypoTagStdWrap.HTMLparser {
+ keepNonMatchedTags = 1
+ htmlSpecialChars = 2
+ }
+}
+
+
+# Content header:
+lib.stdheader = COA
+lib.stdheader {
+
+ # Create align style-attribute for <Hx> tags
+ 2 = LOAD_REGISTER
+ 2.headerStyle.field = header_position
+ 2.headerStyle.required = 1
+ 2.headerStyle.noTrimWrap = | style="text-align:|;"|
+
+ # Create class="csc-firstHeader" attribute for <Hx> tags
+ 3 = LOAD_REGISTER
+ 3.headerClass = csc-firstHeader
+ 3.headerClass.if.value=1
+ 3.headerClass.if.equals.data = cObj:parentRecordNumber
+ 3.headerClass.noTrimWrap = | class="|"|
+
+ # Date format:
+ 5 = TEXT
+ 5.field = date
+ 5.if.isTrue.field = date
+ 5.strftime = %x
+ 5.wrap = <p class="csc-header-date">|</p>
+ 5.prefixComment = 2 | Header date:
+
+ # This CASE cObject renders the header content:
+ # currentValue is set to the header data, possibly wrapped in link-tags.
+ 10 = CASE
+ 10.setCurrent {
+ field = header
+ htmlSpecialChars = 1
+ typolink.parameter.field = header_link
+ }
+ 10.key.field = header_layout
+ 10.key.ifEmpty = {$content.defaultHeaderType}
+ 10.key.ifEmpty.override.data = register: defaultHeaderType
+
+ 10.1 = TEXT
+ 10.1.current = 1
+ 10.1.dataWrap = <h1{register:headerStyle}{register:headerClass}>|</h1>
+
+ 10.2 < .10.1
+ 10.2.dataWrap = <h2{register:headerStyle}{register:headerClass}>|</h2>
+
+ 10.3 < .10.1
+ 10.3.dataWrap = <h3{register:headerStyle}{register:headerClass}>|</h3>
+
+ 10.4 < .10.1
+ 10.4.dataWrap = <h4{register:headerStyle}{register:headerClass}>|</h4>
+
+ 10.5 < .10.1
+ 10.5.dataWrap = <h5{register:headerStyle}{register:headerClass}>|</h5>
+
+ # Pops the used registers off the stack:
+ 98 = RESTORE_REGISTER
+ 99 = RESTORE_REGISTER
+
+ # Post-processing:
+ stdWrap.fieldRequired = header
+ stdWrap.if {
+ equals.field = header_layout
+ value = 100
+ negate = 1
+ }
+
+ stdWrap.editIcons = tt_content : header, [header_layout | header_position], [header_link|date]
+ stdWrap.editIcons.beforeLastTag = 1
+ stdWrap.editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.header
+
+ stdWrap.dataWrap = <div class="csc-header csc-header-n{cObj:parentRecordNumber}">|</div>
+ stdWrap.prefixComment = 2 | Header:
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+#******************************************************
+# Including library for processing of some elements:
+#******************************************************
+includeLibs.tx_cssstyledcontent_pi1 = EXT:css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+
+
+#**********************************
+# tt_content is started
+#**********************************
+tt_content >
+tt_content = CASE
+tt_content.key.field = CType
+tt_content.stdWrap {
+ innerWrap.cObject = CASE
+ innerWrap.cObject {
+ key.field = section_frame
+
+ default = COA
+ default {
+ 10 = TEXT
+ 10 {
+ value = <div id="c{field:uid}"
+ override.cObject = TEXT
+ override.cObject {
+ value = <div
+ if.value = div
+ if.equals.field = CType
+ }
+ insertData = 1
+ }
+
+ 15 = TEXT
+ 15 {
+ value = csc-default
+ noTrimWrap = | class="|" |
+ required = 1
+ }
+
+ 20 = COA
+ 20 {
+ 10 = COA
+ 10 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceBefore}
+ wrap = |+
+ if.isTrue = {$content.spaceBefore}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceBefore
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-top:|px;
+ required = 1
+ ifEmpty.value =
+ }
+ }
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceAfter}
+ wrap = |+
+ if.isTrue = {$content.spaceAfter}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceAfter
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-bottom:|px;
+ required = 1
+ ifEmpty.value =
+ }
+ }
+
+ stdWrap.noTrimWrap = | style="|" |
+ stdWrap.required = 1
+ }
+ 30 = TEXT
+ 30.value = >|</div>
+ }
+
+ 1 =< tt_content.stdWrap.innerWrap.cObject.default
+ 1.15.value = csc-frame csc-frame-invisible
+
+ 5 =< tt_content.stdWrap.innerWrap.cObject.default
+ 5.15.value = csc-frame csc-frame-rulerBefore
+
+ 6 =< tt_content.stdWrap.innerWrap.cObject.default
+ 6.15.value = csc-frame csc-frame-rulerAfter
+
+ 10 =< tt_content.stdWrap.innerWrap.cObject.default
+ 10.15.value = csc-frame csc-frame-indent
+
+ 11 =< tt_content.stdWrap.innerWrap.cObject.default
+ 11.15.value = csc-frame csc-frame-indent3366
+
+ 12 =< tt_content.stdWrap.innerWrap.cObject.default
+ 12.15.value = csc-frame csc-frame-indent6633
+
+ 20 =< tt_content.stdWrap.innerWrap.cObject.default
+ 20.15.value = csc-frame csc-frame-frame1
+
+ 21 =< tt_content.stdWrap.innerWrap.cObject.default
+ 21.15.value = csc-frame csc-frame-frame2
+
+ 66 = COA
+ 66 {
+ 10 = TEXT
+ 10 {
+ value = <a id="c{field:uid}"></a>
+ insertData = 1
+ }
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10 {
+ value = {$content.spaceBefore}
+ wrap = |+
+ if.isTrue = {$content.spaceBefore}
+ }
+
+ 20 = TEXT
+ 20 {
+ field = spaceBefore
+ }
+
+ stdWrap {
+ prioriCalc = intval
+ wrap = margin-top:|px;
+ required = 1
+ ifEmpty.value =
+ wrap2 = <div style="|"></div>
+ }
+ }
+
+ 30 = TEXT
+ 30 {
+ value = |
+ }
+
+ 40 < .20
+ 40 {
+ 10 {
+ value = {$content.spaceAfter}
+ if.isTrue = {$content.spaceAfter}
+ }
+ 20.field = spaceAfter
+ stdWrap.wrap = margin-bottom:|px;
+ }
+ }
+
+ }
+
+ innerWrap2 = | <p class="csc-linkToTop"><a href="#">{LLL:EXT:css_styled_content/pi1/locallang.xml:label.toTop}</a></p>
+ innerWrap2.insertData = 1
+ innerWrap2.fieldRequired = linkToTop
+
+ prepend = TEXT
+ prepend.dataWrap = <a id="c{field:_LOCALIZED_UID}"></a>
+ prepend.if.isTrue.field = _LOCALIZED_UID
+
+ editPanel = 1
+ editPanel {
+ allow = move,new,edit,hide,delete
+ line = 5
+ label = %s
+ onlyCurrentPid = 1
+ previewBorder = 4
+ edit.displayRecord = 1
+ }
+
+ prefixComment = 1 | CONTENT ELEMENT, uid:{field:uid}/{field:CType}
+}
+
+
+
+# *****************
+# CType: header
+# *****************
+# See Object path "lib.stdheader"
+tt_content.header = COA
+tt_content.header {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = subheader
+ required = 1
+
+ dataWrap = <p class="csc-subheader csc-subheader-{field:layout}">|</p>
+ htmlSpecialChars = 1
+
+ editIcons = tt_content:subheader,layout
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.subheader
+
+ prefixComment = 2 | Subheader:
+ }
+}
+
+
+
+# *****************
+# CType: text
+# *****************
+tt_content.text = COA
+tt_content.text {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = bodytext
+ required = 1
+
+ parseFunc = < lib.parseFunc_RTE
+
+ editIcons = tt_content:bodytext, rte_enabled
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.bodytext
+
+ prefixComment = 2 | Text:
+ }
+}
+
+
+
+# *****************
+# CType: image
+# *****************
+# (also used for rendering 'textpic' type):
+tt_content.image = COA
+tt_content.image.10 = < lib.stdheader
+tt_content.image.20 = USER
+tt_content.image.20 {
+ userFunc = tx_cssstyledcontent_pi1->render_textpic
+
+ # Image source
+ imgList.field = image
+ imgPath = uploads/pics/
+
+ # Single image rendering
+ imgObjNum = 1
+ 1 {
+ file.import.current = 1
+ file.width.field = imagewidth
+ imageLinkWrap = 1
+ imageLinkWrap {
+ bodyTag = <body style="margin:0; background:#fff;">
+ wrap = <a href="javascript:close();"> | </a>
+ width = {$styles.content.imgtext.linkWrap.width}
+ height = {$styles.content.imgtext.linkWrap.height}
+ effects = {$styles.content.imgtext.linkWrap.effects}
+
+ JSwindow = 1
+ JSwindow.newWindow = {$styles.content.imgtext.linkWrap.newWindow}
+ JSwindow.if.isFalse = {$styles.content.imgtext.linkWrap.lightboxEnabled}
+
+ directImageLink = {$styles.content.imgtext.linkWrap.lightboxEnabled}
+
+ enable.field = image_zoom
+ enable.ifEmpty.typolink.parameter.field = image_link
+ enable.ifEmpty.typolink.parameter.listNum.splitChar = 10
+ enable.ifEmpty.typolink.parameter.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ enable.ifEmpty.typolink.returnLast = url
+
+ typolink.parameter.field = image_link
+ typolink.parameter.listNum.splitChar = 10
+ typolink.parameter.listNum.stdWrap.data = register : IMAGE_NUM_CURRENT
+ typolink.target = {$styles.content.links.target}
+ typolink.extTarget = {$styles.content.links.extTarget}
+
+ linkParams.ATagParams.dataWrap = class="{$styles.content.imgtext.linkWrap.lightboxCssClass}" rel="{$styles.content.imgtext.linkWrap.lightboxRelAttribute}"
+ }
+
+ altText = TEXT
+ altText {
+ field = altText
+ stripHtml = 1
+ split.token.char = 10
+ split.token.if.isTrue = {$styles.content.imgtext.imageTextSplit}
+ split.returnKey.data = register : IMAGE_NUM_CURRENT
+ }
+
+ titleText < .altText
+ titleText.field = titleText
+
+ longdescURL < .altText
+ longdescURL.field = longdescURL
+
+ emptyTitleHandling = {$styles.content.imgtext.emptyTitleHandling}
+ titleInLink = {$styles.content.imgtext.titleInLink}
+ titleInLinkAndImg = {$styles.content.imgtext.titleInLinkAndImg}
+ }
+
+ textPos.field = imageorient
+ maxW = {$styles.content.imgtext.maxW}
+ maxW.override.data = register:maxImageWidth
+ maxWInText = {$styles.content.imgtext.maxWInText}
+ maxWInText.override.data = register:maxImageWidthInText
+
+ equalH.field = imageheight
+
+ image_compression.field = image_compression
+ image_effects.field = image_effects
+
+ noRows.field = image_noRows
+
+ cols.field = imagecols
+ border.field = imageborder
+
+ caption {
+ 1 = TEXT
+ 1 {
+ field = imagecaption
+ required = 1
+ parseFunc =< lib.parseFunc
+ br = 1
+ split.token.char = 10
+ split.token.if.isPositive = {$styles.content.imgtext.imageTextSplit} + {$styles.content.imgtext.captionSplit}
+ split.returnKey.data = register : IMAGE_NUM_CURRENT
+ }
+ }
+ # captionSplit is deprecated, use imageTextSplit instead
+ captionSplit = {$styles.content.imgtext.captionSplit}
+ captionAlign.field = imagecaption_position
+ # caption/alttext/title/longdescURL splitting
+ imageTextSplit = {$styles.content.imgtext.imageTextSplit}
+
+ borderCol = {$styles.content.imgtext.borderColor}
+ borderThick = {$styles.content.imgtext.borderThick}
+ borderClass = {$styles.content.imgtext.borderClass}
+ colSpace = {$styles.content.imgtext.colSpace}
+ rowSpace = {$styles.content.imgtext.rowSpace}
+ textMargin = {$styles.content.imgtext.textMargin}
+
+ borderSpace = {$styles.content.imgtext.borderSpace}
+ separateRows = {$styles.content.imgtext.separateRows}
+ addClasses =
+ addClassesImage =
+ addClassesImage.ifEmpty = csc-textpic-firstcol csc-textpic-lastcol
+ addClassesImage.override = csc-textpic-firstcol |*| |*| csc-textpic-lastcol
+ addClassesImage.override.if {
+ isGreaterThan.field = imagecols
+ value = 1
+ }
+
+ #
+ imageStdWrap.dataWrap = <div class="csc-textpic-imagewrap" style="width:{register:totalwidth}px;"> | </div>
+ imageStdWrapNoWidth.wrap = <div class="csc-textpic-imagewrap"> | </div>
+
+ # if noRows is set, wrap around each column:
+ imageColumnStdWrap.dataWrap = <div class="csc-textpic-imagecolumn" style="width:{register:columnwidth}px;"> | </div>
+
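+ # The markers ###IMAGES###, ###TEXT### and ###CLASSES### in the layouts below are substituted by the rendering function.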
+ layout = CASE
+ layout {
+ key.field = imageorient
+ # above-center
+ default = TEXT
+ default.value = <div class="csc-textpic csc-textpic-center csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # above-right
+ 1 = TEXT
+ 1.value = <div class="csc-textpic csc-textpic-right csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # above-left
+ 2 = TEXT
+ 2.value = <div class="csc-textpic csc-textpic-left csc-textpic-above###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-center
+ 8 = TEXT
+ 8.value = <div class="csc-textpic csc-textpic-center csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-right
+ 9 = TEXT
+ 9.value = <div class="csc-textpic csc-textpic-right csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # below-left
+ 10 = TEXT
+ 10.value = <div class="csc-textpic csc-textpic-left csc-textpic-below###CLASSES###">###TEXT######IMAGES###</div><div class="csc-textpic-clear"><!-- --></div>
+ # intext-right
+ 17 = TEXT
+ 17.value = <div class="csc-textpic csc-textpic-intext-right###CLASSES###">###IMAGES######TEXT###</div>
+ 17.override = <div class="csc-textpic csc-textpic-intext-right###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ 17.override.if.isTrue = {$styles.content.imgtext.addIntextClearer}
+ # intext-left
+ 18 = TEXT
+ 18.value = <div class="csc-textpic csc-textpic-intext-left###CLASSES###">###IMAGES######TEXT###</div>
+ 18.override = <div class="csc-textpic csc-textpic-intext-left###CLASSES###">###IMAGES######TEXT###</div><div class="csc-textpic-clear"><!-- --></div>
+ 18.override.if.isTrue = {$styles.content.imgtext.addIntextClearer}
+ # intext-right-nowrap
+ 25 = TEXT
+ 25.value = <div class="csc-textpic csc-textpic-intext-right-nowrap###CLASSES###">###IMAGES###<div style="margin-right:{register:rowWidthPlusTextMargin}px;">###TEXT###</div></div><div class="csc-textpic-clear"><!-- --></div>
+ 25.insertData = 1
+ # intext-left-nowrap
+ 26 = TEXT
+ 26.value = <div class="csc-textpic csc-textpic-intext-left-nowrap###CLASSES###">###IMAGES###<div style="margin-left:{register:rowWidthPlusTextMargin}px;">###TEXT###</div></div><div class="csc-textpic-clear"><!-- --></div>
+ 26.insertData = 1
+ }
+
+ rendering {
+ dl {
+ # Choose another rendering for special edge cases
+ fallbackRendering = COA
+ fallbackRendering {
+ # Just one image without a caption => don't need the dl-overhead, use the "simple" rendering
+ 10 = TEXT
+ 10 {
+ if {
+ isFalse.field = imagecaption
+ value = 1
+ equals.data = register:imageCount
+ }
+ value = simple
+ }
+
+ # Multiple images and one global caption => "ul"
+ 20 = TEXT
+ 20 {
+ if {
+ value = 1
+ isGreaterThan.data = register:imageCount
+ isTrue.if.isTrue.data = register:renderGlobalCaption
+ isTrue.field = imagecaption
+ }
+ value = ul
+ }
+
+ # Multiple images and no caption at all => "ul"
+ 30 = TEXT
+ 30 {
+ if {
+ value = 1
+ isGreaterThan.data = register:imageCount
+ isFalse.field = imagecaption
+ }
+ value = ul
+ }
+ }
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"> | </div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"> | </div>
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = <dl class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </dl>
+ imgTagStdWrap.wrap = <dt> | </dt>
+ editIconsStdWrap.wrap = <dd> | </dd>
+ caption {
+ required = 1
+ wrap = <dd class="csc-textpic-caption"> | </dd>
+ }
+ }
+ ul {
+ # Just one image without a caption => don't need the ul-overhead, use the "simple" rendering
+ fallbackRendering < tt_content.image.20.rendering.dl.fallbackRendering.10
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"><ul> | </ul></div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"><ul> | </ul></div>
+ noRowsStdWrap.wrap = <ul> | </ul>
+ oneImageStdWrap.dataWrap = <li class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </li>
+ imgTagStdWrap.wrap =
+ editIconsStdWrap.wrap = <div> | </div>
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ }
+ div {
+ # Just one image without a caption => don't need the div-overhead, use the "simple" rendering
+ fallbackRendering < tt_content.image.20.rendering.dl.fallbackRendering.10
+ imageRowStdWrap.dataWrap = <div class="csc-textpic-imagerow" style="width:{register:rowwidth}px;"> | </div>
+ imageLastRowStdWrap.dataWrap = <div class="csc-textpic-imagerow csc-textpic-imagerow-last" style="width:{register:rowwidth}px;"> | </div>
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = <div class="csc-textpic-image###CLASSES###" style="width:{register:imagespace}px;"> | </div>
+ imgTagStdWrap.wrap = <div> | </div>
+ editIconsStdWrap.wrap = <div> | </div>
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ }
+ simple {
+ imageRowStdWrap.dataWrap = |
+ imageLastRowStdWrap.dataWrap = |
+ noRowsStdWrap.wrap =
+ oneImageStdWrap.dataWrap = |
+ imgTagStdWrap.wrap = |
+ editIconsStdWrap.wrap = |
+ caption.wrap = <div class="csc-textpic-caption"> | </div>
+ imageStdWrap.dataWrap = <div class="csc-textpic-imagewrap csc-textpic-single-image" style="width:{register:totalwidth}px;"> | </div>
+ imageStdWrapNoWidth.wrap = <div class="csc-textpic-imagewrap csc-textpic-single-image"> | </div>
+ }
+ }
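+ # renderMethod selects which of the rendering definitions above (dl, ul, div or simple) is used.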
+ renderMethod = dl
+
+ editIcons = tt_content : image [imageorient|imagewidth|imageheight], [imagecols|image_noRows|imageborder],[image_link|image_zoom],[image_compression|image_effects|image_frames],imagecaption[imagecaption_position]
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.images
+
+ caption.editIcons = tt_content : imagecaption[imagecaption_position]
+ caption.editIcons.beforeLastTag=1
+ caption.editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.caption
+
+ stdWrap.prefixComment = 2 | Image block:
+}
+
+# *****************
+# CType: textpic
+# *****************
+tt_content.textpic = COA
+tt_content.textpic {
+ 10 = COA
+ 10.if.value = 25
+ 10.if.isLessThan.field = imageorient
+ 10.10 = < lib.stdheader
+
+ 20 = < tt_content.image.20
+ 20 {
+ text.10 = COA
+ text.10 {
+ if.value = 24
+ if.isGreaterThan.field = imageorient
+ 10 = < lib.stdheader
+ 10.stdWrap.dataWrap = <div class="csc-textpicHeader csc-textpicHeader-{field:imageorient}">|</div>
+ }
+ text.20 = < tt_content.text.20
+ text.wrap = <div class="csc-textpic-text"> | </div>
+ }
+}
+
+
+
+# *****************
+# CType: bullets
+# *****************
+tt_content.bullets = COA
+tt_content.bullets {
+ 10 = < lib.stdheader
+
+ 20 = TEXT
+ 20 {
+ field = bodytext
+ trim = 1
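+ # Split bodytext on newlines (char 10); the optionSplit value "|*|1|| 2|*|" alternates the odd/even item wraps below.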
+ split{
+ token.char = 10
+ cObjNum = |*|1|| 2|*|
+ 1.current = 1
+ 1.parseFunc =< lib.parseFunc
+ 1.wrap = <li class="odd">|</li>
+
+ 2.current = 1
+ 2.parseFunc =< lib.parseFunc
+ 2.wrap = <li class="even">|</li>
+ }
+ dataWrap = <ul class="csc-bulletlist csc-bulletlist-{field:layout}">|</ul>
+ editIcons = tt_content: bodytext, [layout]
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.php:eIcon.bullets
+
+ prefixComment = 2 | Bullet list:
+ }
+}
+
+
+
+# *****************
+# CType: table
+# *****************
+# Rendered by a PHP function specifically written to handle CE tables. See css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+tt_content.table = COA
+tt_content.table {
+ 10 = < lib.stdheader
+
+ 20 = USER
+ 20.userFunc = tx_cssstyledcontent_pi1->render_table
+ 20.field = bodytext
+
+ 20.color {
+ default =
+ 1 = #EDEBF1
+ 2 = #F5FFAA
+ }
+ 20.tableParams_0 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_1 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_2 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_3 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.innerStdWrap.wrap = |
+ 20.innerStdWrap.parseFunc = < lib.parseFunc
+
+ 20.stdWrap {
+ editIcons = tt_content: cols, bodytext, [layout], [table_bgColor|table_border|table_cellspacing|table_cellpadding]
+ editIcons.beforeLastTag = 1
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.table
+
+ prefixComment = 2 | Table:
+ }
+}
+
+
+# *****************
+# CType: uploads
+# *****************
+# Rendered by a PHP function specifically written to handle CE filelists. See css_styled_content/pi1/class.tx_cssstyledcontent_pi1.php
+tt_content.uploads = COA
+tt_content.uploads {
+ 10 = < lib.stdheader
+
+ 20 = USER
+ 20.userFunc = tx_cssstyledcontent_pi1->render_uploads
+ 20.field = media
+ 20.filePath.field = select_key
+
+ 20 {
+ # Rendering for each file (e.g. rows of the table) as a cObject
+ itemRendering = COA
+ itemRendering {
+ wrap = <tr class="tr-odd tr-first">|</tr> |*| <tr class="tr-even">|</tr> || <tr class="tr-odd">|</tr> |*|
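+ # optionSplit: the first row is wrapped as "tr-odd tr-first", following rows alternate "tr-even" / "tr-odd".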
+
+ 10 = TEXT
+ 10.data = register:linkedIcon
+ 10.wrap = <td class="csc-uploads-icon">|</td>
+ 10.if.isPositive.field = layout
+
+ 20 = COA
+ 20.wrap = <td class="csc-uploads-fileName">|</td>
+ 20.1 = TEXT
+ 20.1 {
+ data = register:linkedLabel
+ wrap = <p>|</p>
+ }
+ 20.2 = TEXT
+ 20.2 {
+ data = register:description
+ wrap = <p class="csc-uploads-description">|</p>
+ required = 1
+ htmlSpecialChars = 1
+ }
+
+ 30 = TEXT
+ 30.if.isTrue.field = filelink_size
+ 30.data = register:fileSize
+ 30.wrap = <td class="csc-uploads-fileSize">|</td>
+ 30.bytes = 1
+ 30.bytes.labels = {$styles.content.uploads.filesizeBytesLabels}
+ }
+ useSpacesInLinkText = 0
+ stripFileExtensionFromLinkText = 0
+ }
+
+ 20.color {
+ default =
+ 1 = #EDEBF1
+ 2 = #F5FFAA
+ }
+ 20.tableParams_0 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_1 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_2 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+ 20.tableParams_3 {
+ border =
+ cellpadding =
+ cellspacing =
+ }
+
+ 20.linkProc {
+ target = _blank
+ jumpurl = {$styles.content.uploads.jumpurl}
+ jumpurl.secure = {$styles.content.uploads.jumpurl_secure}
+ jumpurl.secure.mimeTypes = {$styles.content.uploads.jumpurl_secure_mimeTypes}
+ removePrependedNumbers = 1
+
+ iconCObject = IMAGE
+ iconCObject.file.import.data = register : ICON_REL_PATH
+ iconCObject.file.width = 150
+ }
+
+ 20.filesize {
+ bytes = 1
+ bytes.labels = {$styles.content.uploads.filesizeBytesLabels}
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: media, layout [table_bgColor|table_border|table_cellspacing|table_cellpadding], filelink_size, imagecaption
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.filelist
+
+ prefixComment = 2 | File list:
+ }
+}
+
+
+# ******************
+# CType: multimedia
+# ******************
+tt_content.multimedia = COA
+tt_content.multimedia {
+ 10 = < lib.stdheader
+
+ 20 = MULTIMEDIA
+ 20.file.field = multimedia
+ 20.file.wrap = uploads/media/
+ 20.file.listNum = 0
+ 20.params.field = bodytext
+
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | Multimedia element:
+ }
+}
+
+# *****************
+# CType: swfobject
+# *****************
+tt_content.swfobject = COA
+tt_content.swfobject {
+ 10 = < lib.stdheader
+
+ 20 = SWFOBJECT
+ 20 {
+ file =
+ width =
+ height =
+
+ flexParams.field = pi_flexform
+
+ alternativeContent.field = bodytext
+
+ layout = ###SWFOBJECT###
+
+ video {
+ player = {$styles.content.media.videoPlayer}
+
+ defaultWidth = {$styles.content.media.defaultVideoWidth}
+ defaultHeight = {$styles.content.media.defaultVideoHeight}
+
+ default {
+ params.quality = high
+ params.menu = false
+ params.allowScriptAccess = sameDomain
+ params.allowFullScreen = true
+ }
+ mapping {
+
+ }
+ }
+
+ audio {
+ player = {$styles.content.media.audioPlayer}
+
+ defaultWidth = {$styles.content.media.defaultAudioWidth}
+ defaultHeight = {$styles.content.media.defaultAudioHeight}
+
+ default {
+ params.quality = high
+ params.allowScriptAccess = sameDomain
+ params.menu = false
+ }
+ mapping {
+ flashvars.file = soundFile
+ }
+ }
+
+ }
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, imagewidth, imageheight, pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | SWFobject element:
+ }
+}
+
+# *****************
+# CType: qtobject
+# *****************
+tt_content.qtobject = COA
+tt_content.qtobject {
+ 10 = < lib.stdheader
+
+ 20 = QTOBJECT
+ 20 {
+ file =
+ width =
+ height =
+
+ flexParams.field = pi_flexform
+
+ alternativeContent.field = bodytext
+
+ layout = ###QTOBJECT###
+
+ video {
+ player = {$styles.content.media.videoPlayer}
+
+ defaultWidth = {$styles.content.media.defaultVideoWidth}
+ defaultHeight = {$styles.content.media.defaultVideoHeight}
+
+ default {
+ params.quality = high
+ params.menu = false
+ params.allowScriptAccess = sameDomain
+ params.allowFullScreen = true
+ }
+ mapping {
+
+ }
+ }
+
+ audio {
+ player = {$styles.content.media.audioPlayer}
+
+ defaultWidth = {$styles.content.media.defaultAudioWidth}
+ defaultHeight = {$styles.content.media.defaultAudioHeight}
+
+ default {
+ params.quality = high
+ params.allowScriptAccess = sameDomain
+ params.menu = false
+ }
+ mapping {
+ flashvars.file = soundFile
+ }
+ }
+ }
+ 20.stdWrap {
+ editIcons = tt_content: multimedia, imagewidth, imageheight, pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | QTobject element:
+ }
+}
+
+# *****************
+# CType: media
+# *****************
+tt_content.media = COA
+tt_content.media {
+ 10 = < lib.stdheader
+
+ 20 = MEDIA
+ 20 {
+
+ flexParams.field = pi_flexform
+ alternativeContent < tt_content.text.20
+ alternativeContent.field = bodytext
+
+ type = video
+ renderType = auto
+ allowEmptyUrl = 0
+ forcePlayer = 1
+
+ fileExtHandler {
+ default = MEDIA
+ avi = MEDIA
+ asf = MEDIA
+ class = MEDIA
+ wmv = MEDIA
+ mp3 = SWF
+ mp4 = SWF
+ m4v = SWF
+ swa = SWF
+ flv = SWF
+ swf = SWF
+ mov = QT
+ m4v = QT
+ m4a = QT
+ }
+
+ mimeConf.swfobject < tt_content.swfobject.20
+ mimeConf.qtobject < tt_content.qtobject.20
+
+ }
+ 20.stdWrap {
+ editIcons = tt_content: pi_flexform, bodytext
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.multimedia
+
+ prefixComment = 2 | Media element:
+ }
+}
+
+# ******************
+# CType: mailform
+# ******************
+tt_content.mailform = COA
+tt_content.mailform.10 = < lib.stdheader
+tt_content.mailform.20 = FORM
+tt_content.mailform.20 {
+ accessibility = 1
+ noWrapAttr=1
+ formName = mailform
+ dontMd5FieldNames = 1
+ layout = <div class="csc-mailform-field">###LABEL### ###FIELD###</div>
+ labelWrap.wrap = |
+ commentWrap.wrap = |
+ radioWrap.wrap = |<br />
+ radioWrap.accessibilityWrap = <fieldset###RADIO_FIELD_ID###><legend>###RADIO_GROUP_LABEL###</legend>|</fieldset>
+ REQ = 1
+ REQ.labelWrap.wrap = |
+ COMMENT.layout = <div class="csc-mailform-label">###LABEL###</div>
+ RADIO.layout = <div class="csc-mailform-field">###LABEL### <span class="csc-mailform-radio">###FIELD###</span></div>
+ LABEL.layout = <div class="csc-mailform-field">###LABEL### <span class="csc-mailform-label">###FIELD###</span></div>
+ target = {$styles.content.mailform.target}
+ goodMess = {$styles.content.mailform.goodMess}
+ badMess = {$styles.content.mailform.badMess}
+ redirect.field = pages
+ redirect.listNum = 0
+ recipient.field = subheader
+ data.field = bodytext
+ locationData = 1
+ hiddenFields.stdWrap.wrap = <div style="display:none;">|</div>
+
+ params.radio = class="csc-mailform-radio"
+ params.check = class="csc-mailform-check"
+ params.submit = class="csc-mailform-submit"
+
+ stdWrap.wrap = <fieldset class="csc-mailform"> | </fieldset>
+ stdWrap {
+ editIcons = tt_content: bodytext, pages, subheader
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.form
+
+ prefixComment = 2 | Mail form inserted:
+ }
+}
+
+
+# ******************
+# CType: search
+# ******************
+tt_content.search = COA
+tt_content.search.10 = < lib.stdheader
+# Result:
+tt_content.search.20 = SEARCHRESULT
+tt_content.search.20 {
+ allowedCols = pages.title-subtitle-keywords-description : tt_content.header-bodytext-imagecaption : tt_address.name-title-address-email-company-city-country : tt_links.title-note-note2-url : tt_board.subject-message-author-email : tt_calender.title-note : tt_products.title-note-itemnumber
+ languageField.tt_content = sys_language_uid
+ renderObj = COA
+ renderObj {
+
+ 10 = TEXT
+ 10.field = pages_title
+ 10.htmlSpecialChars = 1
+ 10.typolink {
+ parameter.field = uid
+ target = {$styles.content.searchresult.resultTarget}
+ additionalParams.data = register:SWORD_PARAMS
+ additionalParams.required = 1
+ additionalParams.wrap = &no_cache=1
+ }
+ 10.htmlSpecialChars = 1
+ 10.wrap = <h3 class="csc-searchResultHeader">|</h3>
+
+ 20 = COA
+ 20 {
+ 10 = TEXT
+ 10.field = tt_content_bodytext
+ 10.stripHtml = 1
+ 10.htmlSpecialChars = 1
+ }
+ 20.stdWrap.crop = 200 | ...
+ 20.stdWrap.wrap = <p class="csc-searchResult">|</p>
+ }
+
+ layout = COA
+ layout {
+ wrap = <table border="0" cellspacing="0" cellpadding="2" class="csc-searchResultInfo"><tr> | </tr></table> ###RESULT###
+
+ 10 = TEXT
+ 10.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.resultRange
+ 10.wrap = <td class="csc-searchResultRange"><p>|</p></td>
+
+ 20 = TEXT
+ 20.value = ###PREV###&nbsp;&nbsp;&nbsp;###NEXT###
+ 20.wrap = <td class="csc-searchResultPrevNext"><p>|</p></td>
+ }
+
+ noResultObj = COA
+ noResultObj {
+ 10 = TEXT
+ 10.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.emptySearch
+ 10.wrap = <h3 class="csc-noSearchResultMsg">|</h3>
+ }
+
+ next = TEXT
+ next.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchResultNext
+
+ prev = TEXT
+ prev.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchResultPrev
+
+ target = {$styles.content.searchresult.target}
+ range = 20
+
+ stdWrap.prefixComment = 2 | Search result:
+}
+
+# Form:
+tt_content.search.30 < tt_content.mailform.20
+tt_content.search.30 {
+ goodMess = {$styles.content.searchform.goodMess}
+ redirect >
+ recipient >
+ data >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchWord
+ 10.type = sword=input
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchIn
+ 20.type = scols=select
+ 20.valueArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.headersKeywords
+ 10.value = pages.title-subtitle-keywords-description:tt_content.header
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.pageContent
+ 20.value = tt_content.header-bodytext-imagecaption
+ }
+ 30.type = stype=hidden
+ 30.value = L0
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:search.searchButton
+ }
+ type.field = pages
+ type.listNum = 0
+ locationData = HTTP_POST_VARS
+ no_cache = 1
+
+ stdWrap.wrap = <table border="0" cellspacing="1" cellpadding="1" class="csc-searchform"> | </table>
+ stdWrap {
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.search
+
+ prefixComment = 2 | Search form inserted:
+ }
+}
+
+
+# ******************
+# CType: login
+# ******************
+tt_content.login < tt_content.mailform
+tt_content.login.10 = < lib.stdheader
+tt_content.login.20 {
+ goodMess = {$styles.content.loginform.goodMess}
+ redirect >
+ recipient >
+ data >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.username
+ 10.type = *user=input
+ 20.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.password
+ 20.type = *pass=password
+ 30.type = logintype=hidden
+ 30.value = login
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.login
+ }
+ type.field = pages
+ type.listNum = 0
+ target = {$styles.content.loginform.target}
+ locationData = 0
+ hiddenFields.pid = TEXT
+ hiddenFields.pid {
+ value = {$styles.content.loginform.pid}
+ override.field = pages
+ override.listNum = 1
+ }
+
+ stdWrap.wrap = <div class="csc-loginform"> | </div>
+ stdWrap {
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.login
+
+ prefixComment = 2 | Login/Logout form:
+ }
+}
+[loginUser = *]
+tt_content.login.20 {
+ dataArray >
+ dataArray {
+ 10.label.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.username
+ 10.label.wrap = |&nbsp;<!--###USERNAME###-->
+ 30.type = logintype=hidden
+ 30.value = logout
+ 40.type = submit=submit
+ 40.value.data = LLL:EXT:css_styled_content/pi1/locallang.xml:login.logout
+ }
+}
+[global]
+
+
+# ******************
+# CType: splash
+# ******************
+# Deprecated element.
+# Still here for backward compatibility with plugins using the "text box" type.
+tt_content.splash = CASE
+tt_content.splash.key.field = splash_layout
+tt_content.splash.stdWrap {
+ prefixComment = 2 | Textbox inserted (Deprecated)
+}
+tt_content.splash.default = COA
+tt_content.splash.default {
+ 20 = CTABLE
+ 20 {
+ c.1 = < tt_content.text
+ lm.1 = IMAGE
+ lm.1.file {
+ import = uploads/pics/
+ import.field = image
+ import.listNum = 0
+ maxW.field = imagewidth
+ maxW.ifEmpty = 200
+ }
+ cMargins = 30,0,0,0
+ }
+}
+tt_content.splash.1 < tt_content.splash.default
+tt_content.splash.1.20.lm.1.file >
+tt_content.splash.1.20.lm.1.file = GIFBUILDER
+tt_content.splash.1.20.lm.1.file {
+ XY = [10.w]+10,[10.h]+10
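+ # [10.w] and [10.h] refer to the width/height of the IMAGE object registered as 10 below.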
+ backColor = {$content.splash.bgCol}
+ backColor.override.data = register:pageColor
+ format = jpg
+ 5 = BOX
+ 5.dimensions = 3,3,[10.w],[10.h]
+ 5.color = #333333
+ 7 = EFFECT
+ 7.value = blur=99|blur=99|blur=99|blur=99|blur=99|blur=99|blur=99
+ 10 = IMAGE
+ 10.file {
+ import = uploads/pics/
+ import.field = image
+ import.listNum = 0
+ maxW.field = imagewidth
+ maxW.ifEmpty = 200
+ }
+}
+// The image frames are not available unless TypoScript code from styles.content.imgFrames.x is provided manually:
+tt_content.splash.2 < tt_content.splash.default
+#tt_content.splash.2.20.lm.1.file.m < styles.content.imgFrames.1
+tt_content.splash.3 < tt_content.splash.default
+#tt_content.splash.3.20.lm.1.file.m < styles.content.imgFrames.2
+
+// From plugin.postit1, if included:
+tt_content.splash.20 = < plugin.postit1
+
+
+
+# ****************
+# CType: menu
+# ****************
+tt_content.menu = COA
+tt_content.menu {
+ 10 = < lib.stdheader
+
+ 20 = CASE
+ 20 {
+ key.field = menu_type
+
+ # "Menu of these pages"
+ default = HMENU
+ default {
+ special = list
+ special.value.field = pages
+ wrap = <ul class="csc-menu csc-menu-def">|</ul>
+ 1 = TMENU
+ 1 {
+ target = {$PAGE_TARGET}
+ NO {
+ stdWrap.htmlSpecialChars = 1
+ wrapItemAndSub = <li>|</li>
+ ATagTitle.field = description // title
+ }
+ noBlur = 1
+ }
+ }
+
+ # "Menu of subpages to these pages"
+ 1 < .default
+ 1 {
+ special = directory
+ wrap = <ul class="csc-menu csc-menu-1">|</ul>
+ }
+
+ # "Sitemap - liststyle"
+ 2 = HMENU
+ 2 {
+ wrap = <div class="csc-sitemap">|</div>
+ 1 = TMENU
+ 1 {
+ target = {$PAGE_TARGET}
+ noBlur = 1
+ expAll = 1
+ wrap = <ul>|</ul>
+ NO {
+ stdWrap.htmlSpecialChars = 1
+ wrapItemAndSub = <li>|</li>
+ ATagTitle.field = description // title
+ }
+ }
+ 2 < .1
+ 3 < .1
+ 4 < .1
+ 5 < .1
+ 6 < .1
+ 7 < .1
+ }
+
+ # "Section index (pagecontent w/Index checked - liststyle)"
+ 3 < styles.content.get
+ 3 {
+ wrap = <ul class="csc-menu csc-menu-3">|</ul>
+ select.andWhere = sectionIndex!=0
+ select.pidInList.override.field = pages
+ renderObj = TEXT
+ renderObj {
+ fieldRequired = header
+ trim = 1
+ field = header
+ htmlSpecialChars = 1
+ noBlur = 1
+ wrap = <li class="csc-section">|</li>
+ typolink.parameter.field = pid
+ typolink.section.field = uid
+ }
+ }
+
+ # "Menu of subpages to these pages (with abstract)"
+ 4 < .1
+ 4 {
+ wrap = <dl class="csc-menu csc-menu-4">|</dl>
+ 1.NO {
+ wrapItemAndSub >
+ linkWrap = <dt>|</dt>
+ after {
+ data = field : abstract // field : description // field : subtitle
+ required = 1
+ htmlSpecialChars = 1
+ wrap = <dd>|</dd>
+ }
+ ATagTitle.field = description // title
+ }
+ }
+
+ # "Recently updated pages"
+ 5 < .default
+ 5 {
+ wrap = <ul class="csc-menu csc-menu-5">|</ul>
+ special = updated
+ special {
+ maxAge = 3600*24*7
+ excludeNoSearchPages = 1
+ }
+ }
+
+ # "Related pages (based on keywords)"
+ 6 < .default
+ 6 {
+ wrap = <ul class="csc-menu csc-menu-6">|</ul>
+ special = keywords
+ special {
+ excludeNoSearchPages = 1
+ }
+ }
+
+ # "Menu of subpages to these pages + sections - liststyle"
+ 7 < .1
+ 7 {
+ wrap = <ul class="csc-menu csc-menu-7">|</ul>
+ 1.expAll = 1
+ 2 < .1
+ 2 {
+ sectionIndex = 1
+ sectionIndex.type = header
+ wrap = <ul>|</ul>
+ NO.wrapItemAndSub = <li class="csc-section">|</li>
+ }
+ }
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: menu_type, pages
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.menuSitemap
+
+ prefixComment = 2 | Menu/Sitemap element:
+ }
+}
+
+
+
+# ****************
+# CType: shortcut
+# ****************
+# Should be a complete copy from the old static template "content (default)"
+tt_content.shortcut = COA
+tt_content.shortcut {
+ 20 = CASE
+ 20.key.field = layout
+ 20.0= RECORDS
+ 20.0 {
+ source.field = records
+ tables = {$content.shortcut.tables}
+ # THESE are OLD plugins. Modern plugins register themselves automatically!
+ conf.tt_content = < tt_content
+ conf.tt_address = < tt_address
+ conf.tt_links = < tt_links
+ conf.tt_guest = < tt_guest
+ conf.tt_board = < tt_board
+ conf.tt_calender = < tt_calender
+ conf.tt_rating < tt_rating
+ conf.tt_products = < tt_products
+ conf.tt_news = < tt_news
+ conf.tt_poll = < plugin.tt_poll
+ }
+ 20.1= RECORDS
+ 20.1 {
+ source.field = records
+ tables = {$content.shortcut.tables}
+ conf.tt_poll = < plugin.tt_poll
+ conf.tt_poll.code = RESULT,SUBMITTEDVOTE
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: records
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.recordList
+
+ prefixComment = 2 | Inclusion of other records (by reference):
+ }
+}
+
+
+# ****************
+# CType: list
+# ****************
+# Should be a complete copy from the old static template "content (default)" (except "lib.stdheader")
+tt_content.list = COA
+tt_content.list {
+ 10 = < lib.stdheader
+
+ 20 = CASE
+ 20.key.field = list_type
+ 20 {
+ # LIST element references (NOT copy of objects!)
+ # THESE are OLD plugins. Modern plugins register themselves automatically!
+ 3 = CASE
+ 3.key.field = layout
+ 3.0 = < plugin.tt_guest
+
+ 4 = CASE
+ 4.key.field = layout
+ 4.0 = < plugin.tt_board_list
+ 4.1 = < plugin.tt_board_tree
+
+ 2 = CASE
+ 2.key.field = layout
+ 2.0 = < plugin.tt_board_tree
+
+ 5 = CASE
+ 5.key.field = layout
+ 5.0 = < plugin.tt_products
+
+ 7 = CASE
+ 7.key.field = layout
+ 7.0 = < plugin.tt_calender
+
+ 8 = CASE
+ 8.key.field = layout
+ 8.0 = < plugin.tt_rating
+
+ 9 = CASE
+ 9.key.field = layout
+ 9.0 = < plugin.tt_news
+
+ 11 = CASE
+ 11.key.field = layout
+ 11.0 = < plugin.tipafriend
+
+ 20 = CASE
+ 20.key.field = layout
+ 20.0 = < plugin.feadmin.fe_users
+
+ 21 = CASE
+ 21.key.field = layout
+ 21.0 = < plugin.feadmin.dmailsubscription
+ }
+
+ 20.stdWrap {
+ editIcons = tt_content: list_type, layout, select_key, pages [recursive]
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.plugin
+
+ prefixComment = 2 | Plugin inserted:
+ }
+}
+
+
+# ****************
+# CType: script
+# ****************
+# OBSOLETE! Please make extensions instead. The "script" content element was meant for these custom purposes in the past. Today extensions will do the job better.
+tt_content.script = TEXT
+tt_content.script {
+ value =
+
+ prefixComment = 2 | Script element (Deprecated)
+}
+
+
+# ****************
+# CType: div
+# ****************
+tt_content.div = TEXT
+tt_content.div {
+ value = <hr />
+ wrap = <div class="divider">|</div>
+ prefixComment = 2 | Div element
+}
+
+
+# ****************
+# CType: html
+# ****************
+# This truly IS a content object, launched from inside the PHP class of course.
+# Should be a complete copy from the old static template "content (default)"
+tt_content.html = TEXT
+tt_content.html {
+ field = bodytext
+
+ editIcons = tt_content: pages
+ editIcons.iconTitle.data = LLL:EXT:css_styled_content/pi1/locallang.xml:eIcon.html
+
+ prefixComment = 2 | Raw HTML content:
+}
+
+
+# ****************
+# Default error msg:
+# ****************
+tt_content.default = TEXT
+tt_content.default {
+ field = CType
+ wrap = <p style="background-color: yellow;"><b>ERROR:</b> Content Element type "|" has no rendering definition!</p>
+
+ prefixComment = 2 | Unknown element message:
+}
+
+# *********************************************************************
+# ACCESSIBILITY MODE
+# *********************************************************************
+
+
+
+
+
+
+
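+# Default CSS for this extension, assigned with the multi-line "( ... )" value syntax; {$...} constants are substituted when the template is parsed.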
+plugin.tx_cssstyledcontent._CSS_DEFAULT_STYLE (
+ /* Captions */
+ DIV.csc-textpic-caption-c .csc-textpic-caption { text-align: center; }
+ DIV.csc-textpic-caption-r .csc-textpic-caption { text-align: right; }
+ DIV.csc-textpic-caption-l .csc-textpic-caption { text-align: left; }
+
+ /* Needed for noRows setting */
+ DIV.csc-textpic DIV.csc-textpic-imagecolumn { float: left; display: inline; }
+
+ /* Border just around the image */
+ {$styles.content.imgtext.borderSelector} {
+ border: {$styles.content.imgtext.borderThick}px solid {$styles.content.imgtext.borderColor};
+ padding: {$styles.content.imgtext.borderSpace}px {$styles.content.imgtext.borderSpace}px;
+ }
+
+ DIV.csc-textpic-imagewrap { padding: 0; }
+
+ DIV.csc-textpic IMG { border: none; }
+
+ /* DIV: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DIV.csc-textpic-image { float: left; }
+
+ /* UL: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap UL { list-style: none; margin: 0; padding: 0; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap UL LI { float: left; margin: 0; padding: 0; }
+
+ /* DL: This will place the images side by side */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image { float: left; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DT { float: none; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DD { float: none; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap DL.csc-textpic-image DD IMG { border: none; } /* FE-Editing Icons */
+ DL.csc-textpic-image { margin: 0; }
+ DL.csc-textpic-image DT { margin: 0; display: inline; }
+ DL.csc-textpic-image DD { margin: 0; }
+
+ /* Clearer */
+ DIV.csc-textpic-clear { clear: both; }
+
+ /* Margins around images: */
+
+ /* Pictures on left, add margin on right */
+ DIV.csc-textpic-left DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-right: {$styles.content.imgtext.colSpace}px;
+ }
+
+ /* Pictures on right, add margin on left */
+ DIV.csc-textpic-right DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap .csc-textpic-image,
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-left: {$styles.content.imgtext.colSpace}px;
+ }
+
+ /* Pictures centered, add margin on left */
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap .csc-textpic-image {
+ display: inline; /* IE fix for double-margin bug */
+ margin-left: {$styles.content.imgtext.colSpace}px;
+ }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image .csc-textpic-caption { margin: 0; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image IMG { margin: 0; vertical-align:bottom; }
+
+ /* Space below each image (also in-between rows) */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-image { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+ DIV.csc-textpic-equalheight DIV.csc-textpic-imagerow { margin-bottom: {$styles.content.imgtext.rowSpace}px; display: block; }
+ DIV.csc-textpic DIV.csc-textpic-imagerow { clear: both; }
+ DIV.csc-textpic DIV.csc-textpic-single-image IMG { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+
+ /* IE7 hack for margin between image rows */
+ *+html DIV.csc-textpic DIV.csc-textpic-imagerow .csc-textpic-image { margin-bottom: 0; }
+ *+html DIV.csc-textpic DIV.csc-textpic-imagerow { margin-bottom: {$styles.content.imgtext.rowSpace}px; }
+
+ /* No margins around the whole image-block */
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-firstcol { margin-left: 0px !important; }
+ DIV.csc-textpic DIV.csc-textpic-imagewrap .csc-textpic-lastcol { margin-right: 0px !important; }
+
+ /* Add margin from image-block to text (in case of "Text w/ images") */
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap,
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap {
+ margin-right: {$styles.content.imgtext.textMargin}px !important;
+ }
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap,
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap {
+ margin-left: {$styles.content.imgtext.textMargin}px !important;
+ }
+
+ /* Positioning of images: */
+
+ /* Above */
+ DIV.csc-textpic-above DIV.csc-textpic-text { clear: both; }
+
+ /* Center (above or below) */
+ DIV.csc-textpic-center { text-align: center; /* IE-hack */ }
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap { margin: 0 auto; }
+ DIV.csc-textpic-center DIV.csc-textpic-imagewrap .csc-textpic-image { text-align: left; /* Remove IE-hack */ }
+ DIV.csc-textpic-center DIV.csc-textpic-text { text-align: left; /* Remove IE-hack */ }
+
+ /* Right (above or below) */
+ DIV.csc-textpic-right DIV.csc-textpic-imagewrap { float: right; }
+ DIV.csc-textpic-right DIV.csc-textpic-text { clear: right; }
+
+ /* Left (above or below) */
+ DIV.csc-textpic-left DIV.csc-textpic-imagewrap { float: left; }
+ DIV.csc-textpic-left DIV.csc-textpic-text { clear: left; }
+
+ /* Left (in text) */
+ DIV.csc-textpic-intext-left DIV.csc-textpic-imagewrap { float: left; }
+
+ /* Right (in text) */
+ DIV.csc-textpic-intext-right DIV.csc-textpic-imagewrap { float: right; }
+
+ /* Right (in text, no wrap around) */
+ DIV.csc-textpic-intext-right-nowrap DIV.csc-textpic-imagewrap { float: right; clear: both; }
+ /* Hide from IE5-mac. Only IE-win sees this. \*/
+ * html DIV.csc-textpic-intext-right-nowrap .csc-textpic-text { height: 1%; }
+ /* End hide from IE5/mac */
+
+ /* Left (in text, no wrap around) */
+ DIV.csc-textpic-intext-left-nowrap DIV.csc-textpic-imagewrap { float: left; clear: both; }
+ /* Hide from IE5-mac. Only IE-win sees this. \*/
+ * html DIV.csc-textpic-intext-left-nowrap .csc-textpic-text,
+ * html .csc-textpic-intext-left ol,
+ * html .csc-textpic-intext-left ul { height: 1%; }
+ /* End hide from IE5/mac */
+
+ DIV.csc-textpic DIV.csc-textpic-imagerow-last { margin-bottom: 0; }
+
+ /* Browser fixes: */
+
+ /* Fix for unordered and ordered list with image "In text, left" */
+ .csc-textpic-intext-left ol, .csc-textpic-intext-left ul {padding-left: 40px; overflow: auto; }
+)
+
+# TYPO3 SVN ID: $Id$
+
diff --git a/tests/examplefiles/varnish.vcl b/tests/examplefiles/varnish.vcl
new file mode 100644
index 00000000..6258c313
--- /dev/null
+++ b/tests/examplefiles/varnish.vcl
@@ -0,0 +1,187 @@
+# This is the VCL configuration Varnish will automatically append to your VCL
+# file during compilation/loading. See the vcl(7) man page for details on syntax
+# and semantics.
+# New users are recommended to use the example.vcl file as a starting point.
+
+vcl 4.0;
+
+backend foo { .host = "192.168.1.1"; }
+
+probe blatti { .url = "foo"; }
+probe fooy {
+ .url = "beh";
+
+}
+
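+# Access control list: plain addresses, a /24 subnet and a negated (!) entry.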
+acl foo {
+ "192.168.1.1";
+ "192.168.0.0"/24;
+ ! "192.168.0.1";
+}
+
+include "foo.vcl";
+
+import std;
+
+sub vcl_init {
+ new b = director.foo();
+}
+
+sub vcl_recv {
+ ban(req.url ~ "foo");
+ rollback();
+}
+sub vcl_recv {
+ if (req.method == "PRI") {
+ /* We do not support SPDY or HTTP/2.0 */
+ return (synth(405));
+ }
+ if (req.method != "GET" &&
+ req.method != "HEAD" &&
+ req.method != "PUT" &&
+ req.method != "POST" &&
+ req.method != "TRACE" &&
+ req.method != "OPTIONS" &&
+ req.method != "DELETE") {
+ /* Non-RFC2616 or CONNECT which is weird. */
+ return (pipe);
+ }
+
+ if (req.method != "GET" && req.method != "HEAD") {
+ /* We only deal with GET and HEAD by default */
+ return (pass);
+ }
+ if (req.http.Authorization || req.http.Cookie) {
+ /* Not cacheable by default */
+ return (pass);
+ }
+ return (hash);
+}
+
+sub vcl_pipe {
+ # By default Connection: close is set on all piped requests, to stop
+ # connection reuse from sending future requests directly to the
+ # (potentially) wrong backend. If you do want this to happen, you can undo
+ # it here.
+ # unset bereq.http.connection;
+ return (pipe);
+}
+
+sub vcl_pass {
+ return (fetch);
+}
+
+sub vcl_hash {
+ hash_data(req.url);
+ if (req.http.host) {
+ hash_data(req.http.host);
+ } else {
+ hash_data(server.ip);
+ }
+ return (lookup);
+}
+
+sub vcl_purge {
+ return (synth(200, "Purged"));
+}
+
+sub vcl_hit {
+ if (obj.ttl >= 0s) {
+ // A pure unadulterated hit, deliver it
+ return (deliver);
+ }
+ if (obj.ttl + obj.grace > 0s) {
+ // Object is in grace, deliver it
+ // Automatically triggers a background fetch
+ return (deliver);
+ }
+ // fetch & deliver once we get the result
+ return (miss);
+}
+
+sub vcl_miss {
+ return (fetch);
+}
+
+sub vcl_deliver {
+ set resp.http.x-storage = storage.s0.free;
+ return (deliver);
+}
+
+/*
+ * We can come here "invisibly" with the following errors: 413, 417 & 503
+ */
+sub vcl_synth {
+ set resp.http.Content-Type = "text/html; charset=utf-8";
+ set resp.http.Retry-After = "5";
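+ # {"..."} is the VCL long-string syntax; the response body is built by concatenating strings and variables.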
+ synthetic( {"<!DOCTYPE html>
+<html>
+ <head>
+ <title>"} + resp.status + " " + resp.reason + {"</title>
+ </head>
+ <body>
+ <h1>Error "} + resp.status + " " + resp.reason + {"</h1>
+ <p>"} + resp.reason + {"</p>
+ <h3>Guru Meditation:</h3>
+ <p>XID: "} + req.xid + {"</p>
+ <hr>
+ <p>Varnish cache server</p>
+ </body>
+</html>
+"} );
+ return (deliver);
+}
+
+#######################################################################
+# Backend Fetch
+
+sub vcl_backend_fetch {
+ return (fetch);
+}
+
+sub vcl_backend_response {
+ if (beresp.ttl <= 0s ||
+ beresp.http.Set-Cookie ||
+ beresp.http.Surrogate-control ~ "no-store" ||
+ (!beresp.http.Surrogate-Control &&
+ beresp.http.Cache-Control ~ "no-cache|no-store|private") ||
+ beresp.http.Vary == "*") {
+ /*
+ * Mark as "Hit-For-Pass" for the next 2 minutes
+ */
+ set beresp.ttl = 120s;
+ set beresp.uncacheable = true;
+ }
+ return (deliver);
+}
+
+sub vcl_backend_error {
+ set beresp.http.Content-Type = "text/html; charset=utf-8";
+ set beresp.http.Retry-After = "5";
+ synthetic( {"<!DOCTYPE html>
+<html>
+ <head>
+ <title>"} + beresp.status + " " + beresp.reason + {"</title>
+ </head>
+ <body>
+ <h1>Error "} + beresp.status + " " + beresp.reason + {"</h1>
+ <p>"} + beresp.reason + {"</p>
+ <h3>Guru Meditation:</h3>
+ <p>XID: "} + bereq.xid + {"</p>
+ <hr>
+ <p>Varnish cache server</p>
+ </body>
+</html>
+"} );
+ return (deliver);
+}
+
+#######################################################################
+# Housekeeping
+
+sub vcl_init {
+}
+
+sub vcl_fini {
+ return (ok);
+}
diff --git a/tests/examplefiles/wdiff_example1.wdiff b/tests/examplefiles/wdiff_example1.wdiff
new file mode 100644
index 00000000..ca760812
--- /dev/null
+++ b/tests/examplefiles/wdiff_example1.wdiff
@@ -0,0 +1,731 @@
+.. -*- mode: rst -*-
+
+{+.. highlight:: python+}
+
+====================
+Write your own lexer
+====================
+
+If a lexer for your favorite language is missing in the Pygments package, you
+can easily write your own and extend Pygments.
+
+All you need can be found inside the :mod:`pygments.lexer` module. As you can
+read in the :doc:`API documentation <api>`, a lexer is a class that is
+initialized with some keyword arguments (the lexer options) and that provides a
+:meth:`.get_tokens_unprocessed()` method which is given a string or unicode
+object with the data to [-parse.-] {+lex.+}
+
+The :meth:`.get_tokens_unprocessed()` method must return an iterator or iterable
+containing tuples in the form ``(index, token, value)``. Normally you don't
+need to do this since there are [-numerous-] base lexers {+that do most of the work and that+}
+you can subclass.
+
+
+RegexLexer
+==========
+
+[-A very powerful (but quite easy to use)-]
+
+{+The+} lexer {+base class used by almost all of Pygments' lexers+} is the
+:class:`RegexLexer`. This
+[-lexer base-] class allows you to define lexing rules in terms of
+*regular expressions* for different *states*.
+
+States are groups of regular expressions that are matched against the input
+string at the *current position*. If one of these expressions matches, a
+corresponding action is performed [-(normally-] {+(such as+} yielding a token with a specific
+[-type),-]
+{+type, or changing state),+} the current position is set to where the last match
+ended and the matching process continues with the first regex of the current
+state.
+
+Lexer states are kept [-in-] {+on+} a [-state-] stack: each time a new state is entered, the new
+state is pushed onto the stack. The most basic lexers (like the `DiffLexer`)
+just need one state.
+
+Each state is defined as a list of tuples in the form (`regex`, `action`,
+`new_state`) where the last item is optional. In the most basic form, `action`
+is a token type (like `Name.Builtin`). That means: When `regex` matches, emit a
+token with the match text and type `tokentype` and push `new_state` on the state
+stack. If the new state is ``'#pop'``, the topmost state is popped from the
+stack instead. [-(To-] {+To+} pop more than one state, use ``'#pop:2'`` and so [-on.)-] {+on.+}
+``'#push'`` is a synonym for pushing the current state on the stack.
+
+The following example shows the `DiffLexer` from the builtin lexers. Note that
+it contains some additional attributes `name`, `aliases` and `filenames` which
+aren't required for a lexer. They are used by the builtin lexer lookup
+functions.
+
+[-.. sourcecode:: python-] {+::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class DiffLexer(RegexLexer):
+ name = 'Diff'
+ aliases = ['diff']
+ filenames = ['*.diff']
+
+ tokens = {
+ 'root': [
+ (r' .*\n', Text),
+ (r'\+.*\n', Generic.Inserted),
+ (r'-.*\n', Generic.Deleted),
+ (r'@.*\n', Generic.Subheading),
+ (r'Index.*\n', Generic.Heading),
+ (r'=.*\n', Generic.Heading),
+ (r'.*\n', Text),
+ ]
+ }
+
+As you can see this lexer only uses one state. When the lexer starts scanning
+the text, it first checks if the current character is a space. If this is true
+it scans everything until newline and returns the [-parsed-] data as {+a+} `Text` [-token.-] {+token (which
+is the "no special highlighting" token).+}
+
+If this rule doesn't match, it checks if the current char is a plus sign. And
+so on.
+
+If no rule matches at the current position, the current char is emitted as an
+`Error` token that indicates a [-parsing-] {+lexing+} error, and the position is increased by
+[-1.-]
+{+one.+}
+
+
+Adding and testing a new lexer
+==============================
+
+To make [-pygments-] {+Pygments+} aware of your new lexer, you have to perform the following
+steps:
+
+First, change to the current directory containing the [-pygments-] {+Pygments+} source code:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ cd .../pygments-main
+
+{+Select a matching module under ``pygments/lexers``, or create a new module for
+your lexer class.+}
+
+Next, make sure the lexer is known from outside of the module. All modules in
+the ``pygments.lexers`` specify ``__all__``. For example, [-``other.py`` sets:
+
+.. sourcecode:: python-] {+``esoteric.py`` sets::+}
+
+ __all__ = ['BrainfuckLexer', 'BefungeLexer', ...]
+
+Simply add the name of your lexer class to this list.
+
+Finally the lexer can be made [-publically-] {+publicly+} known by rebuilding the lexer mapping:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ make mapfiles
+
+To test the new lexer, store an example file with the proper extension in
+``tests/examplefiles``. For example, to test your ``DiffLexer``, add a
+``tests/examplefiles/example.diff`` containing a sample diff output.
+
+Now you can use pygmentize to render your example to HTML:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ ./pygmentize -O full -f html -o /tmp/example.html tests/examplefiles/example.diff
+
+Note that this [-explicitely-] {+explicitly+} calls the ``pygmentize`` in the current directory
+by preceding it with ``./``. This ensures your modifications are used.
+Otherwise a possibly already installed, unmodified version without your new
+lexer would have been called from the system search path (``$PATH``).
+
+To view the result, open ``/tmp/example.html`` in your browser.
+
+Once the example renders as expected, you should run the complete test suite:
+
+.. [-sourcecode::-] {+code-block::+} console
+
+ $ make test
+
+{+It also tests that your lexer fulfills the lexer API and certain invariants,
+such as that the concatenation of all token text is the same as the input text.+}
+
+
+Regex Flags
+===========
+
+You can either define regex flags {+locally+} in the regex (``r'(?x)foo bar'``) or
+{+globally+} by adding a `flags` attribute to your lexer class. If no attribute is
+defined, it defaults to `re.MULTILINE`. For more [-informations-] {+information+} about regular
+expression flags see the {+page about+} `regular expressions`_ [-help page-] in the [-python-] {+Python+}
+documentation.
+
+.. _regular expressions: [-http://docs.python.org/lib/re-syntax.html-] {+http://docs.python.org/library/re.html#regular-expression-syntax+}
+
+
+Scanning multiple tokens at once
+================================
+
+{+So far, the `action` element in the rule tuple of regex, action and state has
+been a single token type. Now we look at the first of several other possible
+values.+}
+
+Here is a more complex lexer that highlights INI files. INI files consist of
+sections, comments and [-key-] {+``key+} = [-value pairs:
+
+.. sourcecode:: python-] {+value`` pairs::+}
+
+ from pygments.lexer import RegexLexer, bygroups
+ from pygments.token import *
+
+ class IniLexer(RegexLexer):
+ name = 'INI'
+ aliases = ['ini', 'cfg']
+ filenames = ['*.ini', '*.cfg']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r';.*?$', Comment),
+ (r'\[.*?\]$', Keyword),
+ (r'(.*?)(\s*)(=)(\s*)(.*?)$',
+ bygroups(Name.Attribute, Text, Operator, Text, String))
+ ]
+ }
+
+The lexer first looks for whitespace, comments and section names. [-And later-] {+Later+} it
+looks for a line that looks like a key, value pair, separated by an ``'='``
+sign, and optional whitespace.
+
+The `bygroups` helper [-makes sure that-] {+yields+} each {+capturing+} group [-is yielded-] {+in the regex+} with a different
+token type. First the `Name.Attribute` token, then a `Text` token for the
+optional whitespace, after that a `Operator` token for the equals sign. Then a
+`Text` token for the whitespace again. The rest of the line is returned as
+`String`.
+
+Note that for this to work, every part of the match must be inside a capturing
+group (a ``(...)``), and there must not be any nested capturing groups. If you
+nevertheless need a group, use a non-capturing group defined using this syntax:
+[-``r'(?:some|words|here)'``-]
+{+``(?:some|words|here)``+} (note the ``?:`` after the beginning parenthesis).
+
+If you find yourself needing a capturing group inside the regex which shouldn't
+be part of the output but is used in the regular expressions for backreferencing
+(eg: ``r'(<(foo|bar)>)(.*?)(</\2>)'``), you can pass `None` to the bygroups
+function and [-it will skip-] that group will be skipped in the output.
+
+
+Changing states
+===============
+
+Many lexers need multiple states to work as expected. For example, some
+languages allow multiline comments to be nested. Since this is a recursive
+pattern it's impossible to lex just using regular expressions.
+
+Here is [-the solution:
+
+.. sourcecode:: python-] {+a lexer that recognizes C++ style comments (multi-line with ``/* */``
+and single-line with ``//`` until end of line)::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import *
+
+ class [-ExampleLexer(RegexLexer):-] {+CppCommentLexer(RegexLexer):+}
+ name = 'Example Lexer with states'
+
+ tokens = {
+ 'root': [
+ (r'[^/]+', Text),
+ (r'/\*', Comment.Multiline, 'comment'),
+ (r'//.*?$', Comment.Singleline),
+ (r'/', Text)
+ ],
+ 'comment': [
+ (r'[^*/]', Comment.Multiline),
+ (r'/\*', Comment.Multiline, '#push'),
+ (r'\*/', Comment.Multiline, '#pop'),
+ (r'[*/]', Comment.Multiline)
+ ]
+ }
+
+This lexer starts lexing in the ``'root'`` state. It tries to match as much as
+possible until it finds a slash (``'/'``). If the next character after the slash
+is [-a star-] {+an asterisk+} (``'*'``) the `RegexLexer` sends those two characters to the
+output stream marked as `Comment.Multiline` and continues [-parsing-] {+lexing+} with the rules
+defined in the ``'comment'`` state.
+
+If there wasn't [-a star-] {+an asterisk+} after the slash, the `RegexLexer` checks if it's a
+[-singleline-]
+{+Singleline+} comment [-(eg:-] {+(i.e.+} followed by a second slash). If this also wasn't the
+case it must be a single [-slash-] {+slash, which is not a comment starter+} (the separate
+regex for a single slash must also be given, else the slash would be marked as
+an error token).
+
+Inside the ``'comment'`` state, we do the same thing again. Scan until the
+lexer finds a star or slash. If it's the opening of a multiline comment, push
+the ``'comment'`` state on the stack and continue scanning, again in the
+``'comment'`` state. Else, check if it's the end of the multiline comment. If
+yes, pop one state from the stack.
+
+Note: If you pop from an empty stack you'll get an `IndexError`. (There is an
+easy way to prevent this from happening: don't ``'#pop'`` in the root state).
+
+If the `RegexLexer` encounters a newline that is flagged as an error token, the
+stack is emptied and the lexer continues scanning in the ``'root'`` state. This
+[-helps-]
+{+can help+} producing error-tolerant highlighting for erroneous input, e.g. when a
+single-line string is not closed.
+
+
+Advanced state tricks
+=====================
+
+There are a few more things you can do with states:
+
+- You can push multiple states onto the stack if you give a tuple instead of a
+ simple string as the third item in a rule tuple. For example, if you want to
+ match a comment containing a directive, something [-like::-] {+like:
+
+ .. code-block:: text+}
+
+ /* <processing directive> rest of comment */
+
+ you can use this [-rule:
+
+ .. sourcecode:: python-] {+rule::+}
+
+ tokens = {
+ 'root': [
+ (r'/\* <', Comment, ('comment', 'directive')),
+ ...
+ ],
+ 'directive': [
+ (r'[^>]*', Comment.Directive),
+ (r'>', Comment, '#pop'),
+ ],
+ 'comment': [
+ (r'[^*]+', Comment),
+ (r'\*/', Comment, '#pop'),
+ (r'\*', Comment),
+ ]
+ }
+
+ When this encounters the above sample, first ``'comment'`` and ``'directive'``
+ are pushed onto the stack, then the lexer continues in the directive state
+ until it finds the closing ``>``, then it continues in the comment state until
+ the closing ``*/``. Then, both states are popped from the stack again and
+ lexing continues in the root state.
+
+ .. versionadded:: 0.9
+ The tuple can contain the special ``'#push'`` and ``'#pop'`` (but not
+ ``'#pop:n'``) directives.
+
+
+- You can include the rules of a state in the definition of another. This is
+ done by using `include` from [-`pygments.lexer`:
+
+ .. sourcecode:: python-] {+`pygments.lexer`::+}
+
+ from pygments.lexer import RegexLexer, bygroups, include
+ from pygments.token import *
+
+ class ExampleLexer(RegexLexer):
+ tokens = {
+ 'comments': [
+ (r'/\*.*?\*/', Comment),
+ (r'//.*?\n', Comment),
+ ],
+ 'root': [
+ include('comments'),
+ (r'(function )(\w+)( {)',
+ bygroups(Keyword, Name, Keyword), 'function'),
+ (r'.', Text),
+ ],
+ 'function': [
+ (r'[^}/]+', Text),
+ include('comments'),
+ (r'/', Text),
+ [-(r'}',-]
+ {+(r'\}',+} Keyword, '#pop'),
+ ]
+ }
+
+  This is a hypothetical lexer for a language that consists of functions and
+ comments. Because comments can occur at toplevel and in functions, we need
+ rules for comments in both states. As you can see, the `include` helper saves
+ repeating rules that occur more than once (in this example, the state
+  ``'comments'`` will never be entered by the lexer, as it's only there to be
+ included in ``'root'`` and ``'function'``).
+
+- Sometimes, you may want to "combine" a state from existing ones. This is
+ possible with the [-`combine`-] {+`combined`+} helper from `pygments.lexer`.
+
+ If you, instead of a new state, write ``combined('state1', 'state2')`` as the
+ third item of a rule tuple, a new anonymous state will be formed from state1
+ and state2 and if the rule matches, the lexer will enter this state.
+
+ This is not used very often, but can be helpful in some cases, such as the
+ `PythonLexer`'s string literal processing.
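+
+  A minimal sketch (with hypothetical state names, only loosely modelled on
+  that idea)::
+
+      from pygments.lexer import RegexLexer, combined
+      from pygments.token import String, Text
+
+      class StringLexer(RegexLexer):
+          tokens = {
+              'root': [
+                  # on a double quote, enter an anonymous state made from
+                  # the 'escapes' and 'dqs' states combined
+                  (r'"', String, combined('escapes', 'dqs')),
+                  (r'[^"]+', Text),
+              ],
+              'escapes': [
+                  (r'\\.', String.Escape),
+              ],
+              'dqs': [
+                  (r'"', String, '#pop'),
+                  (r'[^\\"]+', String),
+              ],
+          }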
+
+- If you want your lexer to start lexing in a different state you can modify the
+ stack by [-overloading-] {+overriding+} the `get_tokens_unprocessed()` [-method:
+
+ .. sourcecode:: python-] {+method::+}
+
+ from pygments.lexer import RegexLexer
+
+ class [-MyLexer(RegexLexer):-] {+ExampleLexer(RegexLexer):+}
+ tokens = {...}
+
+ def get_tokens_unprocessed(self, [-text):
+ stack = ['root', 'otherstate']-] {+text, stack=('root', 'otherstate')):+}
+             for item in RegexLexer.get_tokens_unprocessed(self, text, stack):
+ yield item
+
+ Some lexers like the `PhpLexer` use this to make the leading ``<?php``
+ preprocessor comments optional. Note that you can crash the lexer easily by
+ putting values into the stack that don't exist in the token map. Also
+ removing ``'root'`` from the stack can result in strange errors!
+
+- [-An-] {+In some lexers, a state should be popped if anything is encountered that isn't
+ matched by a rule in the state. You could use an+} empty regex at the end of [-a-]
+ {+the+} state list, [-combined with ``'#pop'``, can
+ act as-] {+but Pygments provides+} a [-return point-] {+more obvious way of spelling that:
+ ``default('#pop')`` is equivalent to ``('', Text, '#pop')``.
+
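+  For instance, a hypothetical state for interpolated expressions could fall
+  back to the surrounding state like this::
+
+      from pygments.lexer import RegexLexer, default
+      from pygments.token import Name, String
+
+      class InterpolationLexer(RegexLexer):
+          tokens = {
+              'root': [
+                  (r'\$\{', String.Interpol, 'interp'),
+                  (r'[^$]+', String),
+                  (r'\$', String),
+              ],
+              'interp': [
+                  (r'[a-zA-Z_]\w*', Name.Variable),
+                  (r'\}', String.Interpol, '#pop'),
+                  # anything else: pop back without consuming input
+                  default('#pop'),
+              ],
+          }
+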
+ .. versionadded:: 2.0
+
+
+Subclassing lexers derived+} from {+RegexLexer
+==========================================
+
+.. versionadded:: 1.6
+
+Sometimes multiple languages are very similar, but should still be lexed by
+different lexer classes.
+
+When subclassing+} a {+lexer derived from RegexLexer, the ``tokens`` dictionaries
+defined in the parent and child class are merged. For example::
+
+ from pygments.lexer import RegexLexer, inherit
+ from pygments.token import *
+
+ class BaseLexer(RegexLexer):
+ tokens = {
+ 'root': [
+ ('[a-z]+', Name),
+ (r'/\*', Comment, 'comment'),
+ ('"', String, 'string'),
+ ('\s+', Text),
+ ],
+ 'string': [
+ ('[^"]+', String),
+ ('"', String, '#pop'),
+ ],
+ 'comment': [
+ ...
+ ],
+ }
+
+ class DerivedLexer(BaseLexer):
+ tokens = {
+ 'root': [
+ ('[0-9]+', Number),
+ inherit,
+ ],
+ 'string': [
+ (r'[^"\\]+', String),
+ (r'\\.', String.Escape),
+ ('"', String, '#pop'),
+ ],
+ }
+
+The `BaseLexer` defines two states, lexing names and strings. The
+`DerivedLexer` defines its own tokens dictionary, which extends the definitions
+of the base lexer:
+
+* The "root"+} state {+has an additional rule and then the special object `inherit`,
+ which tells Pygments to insert the token definitions of the parent class at+}
+ that [-doesn't have a clear end marker.-] {+point.
+
+* The "string" state is replaced entirely, since there is not `inherit` rule.
+
+* The "comment" state is inherited entirely.+}
+
+
+Using multiple lexers
+=====================
+
+Using multiple lexers for the same input can be tricky. One of the easiest
+combination techniques is shown here: You can replace the [-token type-] {+action+} entry in a rule
+tuple [-(the second item)-] with a lexer class. The matched text will then be lexed with that lexer,
+and the resulting tokens will be yielded.
+
+For example, look at this stripped-down HTML [-lexer:
+
+.. sourcecode:: python-] {+lexer::+}
+
+    import re
+
+    from pygments.lexer import RegexLexer, bygroups, using
+ from pygments.token import *
+ from [-pygments.lexers.web-] {+pygments.lexers.javascript+} import JavascriptLexer
+
+ class HtmlLexer(RegexLexer):
+ name = 'HTML'
+ aliases = ['html']
+ filenames = ['*.html', '*.htm']
+
+ flags = re.IGNORECASE | re.DOTALL
+ tokens = {
+ 'root': [
+ ('[^<&]+', Text),
+ ('&.*?;', Name.Entity),
+ (r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
+ (r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
+ (r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
+ ],
+ 'script-content': [
+ (r'(.+?)(<\s*/\s*script\s*>)',
+ bygroups(using(JavascriptLexer), Name.Tag),
+ '#pop'),
+ ]
+ }
+
+Here the content of a ``<script>`` tag is passed to a newly created instance of
+a `JavascriptLexer` and not processed by the `HtmlLexer`. This is done using
+the `using` helper that takes the other lexer class as its parameter.
+
+Note the combination of `bygroups` and `using`. This makes sure that the
+content up to the ``</script>`` end tag is processed by the `JavascriptLexer`,
+while the end tag is yielded as a normal token with the `Name.Tag` type.
+
+[-As an additional goodie, if the lexer class is replaced by `this` (imported from
+`pygments.lexer`), the "other" lexer will be the current one (because you cannot
+refer to the current class within the code that runs at class definition time).-]
+
+Also note the ``(r'<\s*script\s*', Name.Tag, ('script-content', 'tag'))`` rule.
+Here, two states are pushed onto the state stack, ``'script-content'`` and
+``'tag'``. That means that first ``'tag'`` is processed, which will [-parse-] {+lex+}
+attributes and the closing ``>``, then the ``'tag'`` state is popped and the
+next state on top of the stack will be ``'script-content'``.
+
+{+Since you cannot refer to the class currently being defined, use `this`
+(imported from `pygments.lexer`) to refer to the current lexer class, i.e.
+``using(this)``. This construct may seem unnecessary, but this is often the
+most obvious way of lexing arbitrary syntax between fixed delimiters without
+introducing deeply nested states.+}
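+
+As a rough sketch (a made-up rule, not taken from a real lexer), a lexer can
+hand the text between backticks back to itself like this::
+
+    from pygments.lexer import RegexLexer, bygroups, using, this
+    from pygments.token import Punctuation, Text
+
+    class SelfNestingLexer(RegexLexer):
+        tokens = {
+            'root': [
+                # lex whatever is between the backticks with this very lexer
+                (r'(`)([^`]*)(`)',
+                 bygroups(Punctuation, using(this), Punctuation)),
+                (r'[^`]+', Text),
+            ]
+        }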
+
+The `using()` helper has a special keyword argument, `state`, which works as
+follows: if given, the lexer to use initially is not in the ``"root"`` state,
+but in the state given by this argument. This [-*only* works-] {+does not work+} with [-a `RegexLexer`.-] {+advanced
+`RegexLexer` subclasses such as `ExtendedRegexLexer` (see below).+}
+
+Any other keyword arguments passed to `using()` are added to the keyword
+arguments used to create the lexer.
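+
+A small sketch (both lexer classes and the ``'inline'`` state here are made up
+for illustration)::
+
+    from pygments.lexer import RegexLexer, using
+    from pygments.token import Text
+
+    class OtherLexer(RegexLexer):
+        tokens = {
+            'root': [(r'.+\n', Text)],
+            'inline': [(r'.+\n?', Text)],
+        }
+
+    class WrapperLexer(RegexLexer):
+        tokens = {
+            'root': [
+                # start OtherLexer in its 'inline' state; stripnl=False is
+                # forwarded to the OtherLexer constructor
+                (r'\[\[[^\]]*\]\]',
+                 using(OtherLexer, state='inline', stripnl=False)),
+                (r'[^\[]+', Text),
+                (r'\[', Text),
+            ]
+        }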
+
+
+Delegating Lexer
+================
+
+Another approach for nested lexers is the `DelegatingLexer` which is for example
+used for the template engine lexers. It takes two lexers as arguments on
+initialisation: a `root_lexer` and a `language_lexer`.
+
+The input is processed as follows: First, the whole text is lexed with the
+`language_lexer`. All tokens yielded with [-a-] {+the special+} type of ``Other`` are
+then concatenated and given to the `root_lexer`. The language tokens of the
+`language_lexer` are then inserted into the `root_lexer`'s token stream at the
+appropriate positions.
+
+[-.. sourcecode:: python-] {+::+}
+
+ from pygments.lexer import DelegatingLexer
+ from pygments.lexers.web import HtmlLexer, PhpLexer
+
+ class HtmlPhpLexer(DelegatingLexer):
+ def __init__(self, **options):
+ super(HtmlPhpLexer, self).__init__(HtmlLexer, PhpLexer, **options)
+
+This procedure ensures that e.g. HTML with template tags in it is highlighted
+correctly even if the template tags are put into HTML tags or attributes.
+
+If you want to change the needle token ``Other`` to something else, you can give
+the lexer another token type as the third [-parameter:
+
+.. sourcecode:: python-] {+parameter::+}
+
+ DelegatingLexer.__init__(MyLexer, OtherLexer, Text, **options)
+
+
+Callbacks
+=========
+
+Sometimes the grammar of a language is so complex that a lexer would be unable
+to [-parse-] {+process+} it just by using regular expressions and stacks.
+
+For this, the `RegexLexer` allows callbacks to be given in rule tuples, instead
+of token types (`bygroups` and `using` are nothing else but preimplemented
+callbacks). The callback must be a function taking two arguments:
+
+* the lexer itself
+* the match object for the last matched rule
+
+The callback must then return an iterable of (or simply yield) ``(index,
+tokentype, value)`` tuples, which are then just passed through by
+`get_tokens_unprocessed()`. The ``index`` here is the position of the token in
+the input string, ``tokentype`` is the normal token type (like `Name.Builtin`),
+and ``value`` the associated part of the input string.
+
+You can see an example [-here:
+
+.. sourcecode:: python-] {+here::+}
+
+ from pygments.lexer import RegexLexer
+ from pygments.token import Generic
+
+ class HypotheticLexer(RegexLexer):
+
+ def headline_callback(lexer, match):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+If the regex for the `headline_callback` matches, the function is called with
+the match object. Note that after the callback is done, processing continues
+normally, that is, after the end of the previous match. The callback cannot
+influence the position.
+
+There are not really any simple examples for lexer callbacks, but you can see
+them in action e.g. in the [-`compiled.py`_ source code-] {+`SMLLexer` class+} in [-the `CLexer` and
+`JavaLexer` classes.-] {+`ml.py`_.+}
+
+.. [-_compiled.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/compiled.py-] {+_ml.py: http://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ml.py+}
+
+
+The ExtendedRegexLexer class
+============================
+
+The `RegexLexer`, even with callbacks, unfortunately isn't powerful enough for
+the funky syntax rules of [-some-] languages [-that will go unnamed,-] such as Ruby.
+
+But fear not; even then you don't have to abandon the regular expression
+[-approach. For-]
+{+approach:+} Pygments has a subclass of `RegexLexer`, the `ExtendedRegexLexer`.
+All features known from RegexLexers are available here too, and the tokens are
+specified in exactly the same way, *except* for one detail:
+
+The `get_tokens_unprocessed()` method holds its internal state data not as local
+variables, but in an instance of the `pygments.lexer.LexerContext` class, and
+that instance is passed to callbacks as a third argument. This means that you
+can modify the lexer state in callbacks.
+
+The `LexerContext` class has the following members:
+
+* `text` -- the input text
+* `pos` -- the current starting position that is used for matching regexes
+* `stack` -- a list containing the state stack
+* `end` -- the maximum position to which regexes are matched, this defaults to
+ the length of `text`
+
+Additionally, the `get_tokens_unprocessed()` method can be given a
+`LexerContext` instead of a string and will then process this context instead of
+creating a new one for the string argument.
+
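+A rough sketch (``MyExLexer`` and ``text`` are placeholders for an
+`ExtendedRegexLexer` subclass and its input, defined elsewhere) might resume
+lexing from a prepared context::
+
+    from pygments.lexer import LexerContext
+
+    # MyExLexer and text are placeholders defined elsewhere
+    lexer = MyExLexer()
+    ctx = LexerContext(text, 0)       # start matching at position 0
+    ctx.stack = ['root', 'string']    # pretend we are already inside a string
+    for index, tokentype, value in lexer.get_tokens_unprocessed(context=ctx):
+        print(index, tokentype, value)
+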
+Note that because you can set the current position to anything in the callback,
+it won't automatically be set by the caller after the callback is finished.
+For example, this is how the hypothetical lexer above would be written with the
+[-`ExtendedRegexLexer`:
+
+.. sourcecode:: python-]
+{+`ExtendedRegexLexer`::+}
+
+ from pygments.lexer import ExtendedRegexLexer
+ from pygments.token import Generic
+
+ class ExHypotheticLexer(ExtendedRegexLexer):
+
+ def headline_callback(lexer, match, ctx):
+ equal_signs = match.group(1)
+ text = match.group(2)
+ yield match.start(), Generic.Headline, equal_signs + text + equal_signs
+ ctx.pos = match.end()
+
+ tokens = {
+ 'root': [
+ (r'(=+)(.*?)(\1)', headline_callback)
+ ]
+ }
+
+This might sound confusing (and it can really be). But it is needed, and for an
+example look at the Ruby lexer in [-`agile.py`_.-] {+`ruby.py`_.+}
+
+.. [-_agile.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/agile.py
+
+
+Filtering-] {+_ruby.py: https://bitbucket.org/birkenfeld/pygments-main/src/tip/pygments/lexers/ruby.py
+
+
+Handling Lists of Keywords
+==========================
+
+For a relatively short list (hundreds) you can construct an optimized regular
+expression directly using ``words()`` (longer lists, see next section). This
+function handles a few things for you automatically, including escaping
+metacharacters and working around Python's first-match (rather than
+longest-match) behaviour in alternations. Feel free to put the lists themselves
+in ``pygments/lexers/_$lang_builtins.py`` (see examples there), ideally
+generated by code.
+
+An example of using ``words()`` is something like::
+
+    from pygments.lexer import RegexLexer, words
+    from pygments.token import Name
+
+ class MyLexer(RegexLexer):
+
+ tokens = {
+ 'root': [
+ (words(('else', 'elseif'), suffix=r'\b'), Name.Builtin),
+ (r'\w+', Name),
+ ],
+ }
+
+As you can see, you can add ``prefix`` and ``suffix`` parts to the constructed
+regex.
+
+
+Modifying+} Token Streams
+=======================
+
+Some languages ship a lot of builtin functions (for example PHP). The total
+number of those functions differs from system to system because not everybody
+has every extension installed. In the case of PHP there are over 3000 builtin
+functions. That's an [-incredible-] {+incredibly+} huge amount of functions, much more than you
+[-can-]
+{+want to+} put into a regular expression.
+
+But because only `Name` tokens can be function names, [-it's-] {+this is+} solvable by
+overriding the ``get_tokens_unprocessed()`` method. The following lexer
+subclasses the `PythonLexer` so that it highlights some additional names as
+pseudo [-keywords:
+
+.. sourcecode:: python-] {+keywords::+}
+
+ from [-pygments.lexers.agile-] {+pygments.lexers.python+} import PythonLexer
+ from pygments.token import Name, Keyword
+
+ class MyPythonLexer(PythonLexer):
+ EXTRA_KEYWORDS = [-['foo',-] {+set(('foo',+} 'bar', 'foobar', 'barfoo', 'spam', [-'eggs']-] {+'eggs'))+}
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in PythonLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.EXTRA_KEYWORDS:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
+The `PhpLexer` and `LuaLexer` use this method to resolve builtin functions.
+
+[-.. note:: Do not confuse this with the :doc:`filter <filters>` system.-]
diff --git a/tests/examplefiles/wdiff_example3.wdiff b/tests/examplefiles/wdiff_example3.wdiff
new file mode 100644
index 00000000..0bbd6d65
--- /dev/null
+++ b/tests/examplefiles/wdiff_example3.wdiff
@@ -0,0 +1,10 @@
+This example is unbalanced open-close.
+We can't treat these easily.
+
+{+ added? -]
+[- deleted? +}
+
+suddenly closed -]
+suddenly closed +}
+
+{+ added? [- deleted?
diff --git a/tests/test_bibtex.py b/tests/test_bibtex.py
new file mode 100644
index 00000000..3b07d899
--- /dev/null
+++ b/tests/test_bibtex.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+"""
+ BibTeX Test
+ ~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import textwrap
+import unittest
+
+from pygments.lexers import BibTeXLexer, BSTLexer
+from pygments.token import Token
+
+
+class BibTeXTest(unittest.TestCase):
+ def setUp(self):
+ self.lexer = BibTeXLexer()
+
+ def testPreamble(self):
+ data = u'@PREAMBLE{"% some LaTeX code here"}'
+ tokens = [
+ (Token.Name.Class, u'@PREAMBLE'),
+ (Token.Punctuation, u'{'),
+ (Token.String, u'"'),
+ (Token.String, u'% some LaTeX code here'),
+ (Token.String, u'"'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(data)), tokens)
+
+ def testString(self):
+ data = u'@STRING(SCI = "Science")'
+ tokens = [
+ (Token.Name.Class, u'@STRING'),
+ (Token.Punctuation, u'('),
+ (Token.Name.Attribute, u'SCI'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'='),
+ (Token.Text, u' '),
+ (Token.String, u'"'),
+ (Token.String, u'Science'),
+ (Token.String, u'"'),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(data)), tokens)
+
+ def testEntry(self):
+ data = u"""
+ This is a comment.
+
+ @ARTICLE{ruckenstein-diffusion,
+ author = "Liu, Hongquin" # and # "Ruckenstein, Eli",
+ year = 1997,
+ month = JAN,
+ pages = "888-895"
+ }
+ """
+
+ tokens = [
+ (Token.Comment, u'This is a comment.'),
+ (Token.Text, u'\n\n'),
+ (Token.Name.Class, u'@ARTICLE'),
+ (Token.Punctuation, u'{'),
+ (Token.Name.Label, u'ruckenstein-diffusion'),
+ (Token.Punctuation, u','),
+ (Token.Text, u'\n '),
+ (Token.Name.Attribute, u'author'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'='),
+ (Token.Text, u' '),
+ (Token.String, u'"'),
+ (Token.String, u'Liu, Hongquin'),
+ (Token.String, u'"'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'#'),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u'and'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'#'),
+ (Token.Text, u' '),
+ (Token.String, u'"'),
+ (Token.String, u'Ruckenstein, Eli'),
+ (Token.String, u'"'),
+ (Token.Punctuation, u','),
+ (Token.Text, u'\n '),
+ (Token.Name.Attribute, u'year'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'='),
+ (Token.Text, u' '),
+ (Token.Number, u'1997'),
+ (Token.Punctuation, u','),
+ (Token.Text, u'\n '),
+ (Token.Name.Attribute, u'month'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'='),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u'JAN'),
+ (Token.Punctuation, u','),
+ (Token.Text, u'\n '),
+ (Token.Name.Attribute, u'pages'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'='),
+ (Token.Text, u' '),
+ (Token.String, u'"'),
+ (Token.String, u'888-895'),
+ (Token.String, u'"'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens)
+
+ def testComment(self):
+ data = '@COMMENT{test}'
+ tokens = [
+ (Token.Comment, u'@COMMENT'),
+ (Token.Comment, u'{test}'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(data)), tokens)
+
+ def testMissingBody(self):
+ data = '@ARTICLE xxx'
+ tokens = [
+ (Token.Name.Class, u'@ARTICLE'),
+ (Token.Text, u' '),
+ (Token.Error, u'x'),
+ (Token.Error, u'x'),
+ (Token.Error, u'x'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(data)), tokens)
+
+ def testMismatchedBrace(self):
+ data = '@PREAMBLE(""}'
+ tokens = [
+ (Token.Name.Class, u'@PREAMBLE'),
+ (Token.Punctuation, u'('),
+ (Token.String, u'"'),
+ (Token.String, u'"'),
+ (Token.Error, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(data)), tokens)
+
+
+class BSTTest(unittest.TestCase):
+ def setUp(self):
+ self.lexer = BSTLexer()
+
+ def testBasicBST(self):
+ data = """
+ % BibTeX standard bibliography style `plain'
+
+ INTEGERS { output.state before.all }
+
+ FUNCTION {sort.format.title}
+ { 't :=
+ "A " #2
+ "An " #3
+ "The " #4 t chop.word
+ chop.word
+ chop.word
+ sortify
+ #1 global.max$ substring$
+ }
+
+ ITERATE {call.type$}
+ """
+ tokens = [
+ (Token.Comment.SingleLine, "% BibTeX standard bibliography style `plain'"),
+ (Token.Text, u'\n\n'),
+ (Token.Keyword, u'INTEGERS'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'{'),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u'output.state'),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u'before.all'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n\n'),
+ (Token.Keyword, u'FUNCTION'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'{'),
+ (Token.Name.Variable, u'sort.format.title'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'{'),
+ (Token.Text, u' '),
+ (Token.Name.Function, u"'t"),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u':='),
+ (Token.Text, u'\n'),
+ (Token.Literal.String, u'"A "'),
+ (Token.Text, u' '),
+ (Token.Literal.Number, u'#2'),
+ (Token.Text, u'\n '),
+ (Token.Literal.String, u'"An "'),
+ (Token.Text, u' '),
+ (Token.Literal.Number, u'#3'),
+ (Token.Text, u'\n '),
+ (Token.Literal.String, u'"The "'),
+ (Token.Text, u' '),
+ (Token.Literal.Number, u'#4'),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u't'),
+ (Token.Text, u' '),
+ (Token.Name.Variable, u'chop.word'),
+ (Token.Text, u'\n '),
+ (Token.Name.Variable, u'chop.word'),
+ (Token.Text, u'\n'),
+ (Token.Name.Variable, u'chop.word'),
+ (Token.Text, u'\n'),
+ (Token.Name.Variable, u'sortify'),
+ (Token.Text, u'\n'),
+ (Token.Literal.Number, u'#1'),
+ (Token.Text, u' '),
+ (Token.Name.Builtin, u'global.max$'),
+ (Token.Text, u' '),
+ (Token.Name.Builtin, u'substring$'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n\n'),
+ (Token.Keyword, u'ITERATE'),
+ (Token.Text, u' '),
+ (Token.Punctuation, u'{'),
+ (Token.Name.Builtin, u'call.type$'),
+ (Token.Punctuation, u'}'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(list(self.lexer.get_tokens(textwrap.dedent(data))), tokens)
diff --git a/tests/test_crystal.py b/tests/test_crystal.py
new file mode 100644
index 00000000..9a1588f2
--- /dev/null
+++ b/tests/test_crystal.py
@@ -0,0 +1,308 @@
+# -*- coding: utf-8 -*-
+"""
+ Basic CrystalLexer Test
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import unicode_literals
+import unittest
+
+from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Error
+from pygments.lexers import CrystalLexer
+
+
+class CrystalTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = CrystalLexer()
+ self.maxDiff = None
+
+ def testRangeSyntax1(self):
+ fragment = '1...3\n'
+ tokens = [
+ (Number.Integer, '1'),
+ (Operator, '...'),
+ (Number.Integer, '3'),
+ (Text, '\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testRangeSyntax2(self):
+ fragment = '1 .. 3\n'
+ tokens = [
+ (Number.Integer, '1'),
+ (Text, ' '),
+ (Operator, '..'),
+ (Text, ' '),
+ (Number.Integer, '3'),
+ (Text, '\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testInterpolationNestedCurly(self):
+ fragment = (
+ '"A#{ (3..5).group_by { |x| x/2}.map '
+ 'do |k,v| "#{k}" end.join }" + "Z"\n')
+ tokens = [
+ (String.Double, '"'),
+ (String.Double, 'A'),
+ (String.Interpol, '#{'),
+ (Text, ' '),
+ (Punctuation, '('),
+ (Number.Integer, '3'),
+ (Operator, '..'),
+ (Number.Integer, '5'),
+ (Punctuation, ')'),
+ (Operator, '.'),
+ (Name, 'group_by'),
+ (Text, ' '),
+ (String.Interpol, '{'),
+ (Text, ' '),
+ (Operator, '|'),
+ (Name, 'x'),
+ (Operator, '|'),
+ (Text, ' '),
+ (Name, 'x'),
+ (Operator, '/'),
+ (Number.Integer, '2'),
+ (String.Interpol, '}'),
+ (Operator, '.'),
+ (Name, 'map'),
+ (Text, ' '),
+ (Keyword, 'do'),
+ (Text, ' '),
+ (Operator, '|'),
+ (Name, 'k'),
+ (Punctuation, ','),
+ (Name, 'v'),
+ (Operator, '|'),
+ (Text, ' '),
+ (String.Double, '"'),
+ (String.Interpol, '#{'),
+ (Name, 'k'),
+ (String.Interpol, '}'),
+ (String.Double, '"'),
+ (Text, ' '),
+ (Keyword, 'end'),
+ (Operator, '.'),
+ (Name, 'join'),
+ (Text, ' '),
+ (String.Interpol, '}'),
+ (String.Double, '"'),
+ (Text, ' '),
+ (Operator, '+'),
+ (Text, ' '),
+ (String.Double, '"'),
+ (String.Double, 'Z'),
+ (String.Double, '"'),
+ (Text, '\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testOperatorMethods(self):
+ fragment = '([] of Int32).[]?(5)\n'
+ tokens = [
+ (Punctuation, '('),
+ (Operator, '['),
+ (Operator, ']'),
+ (Text, ' '),
+ (Keyword, 'of'),
+ (Text, ' '),
+ (Name.Builtin, 'Int32'),
+ (Punctuation, ')'),
+ (Operator, '.'),
+ (Name.Operator, '[]?'),
+ (Punctuation, '('),
+ (Number.Integer, '5'),
+ (Punctuation, ')'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testArrayAccess(self):
+ fragment = '[5][5]?\n'
+ tokens = [
+ (Operator, '['),
+ (Number.Integer, '5'),
+ (Operator, ']'),
+ (Operator, '['),
+ (Number.Integer, '5'),
+ (Operator, ']?'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testNumbers(self):
+ for kind, testset in [
+ (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'),
+ (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'),
+ (Number.Bin, '0b1001_0110 0b0u8'),
+ (Number.Oct, '0o17 0o7_i32'),
+ (Number.Hex, '0xdeadBEEF'),
+ ]:
+ for fragment in testset.split():
+ self.assertEqual([(kind, fragment), (Text, '\n')],
+ list(self.lexer.get_tokens(fragment + '\n')))
+
+ for fragment in '01 0b2 0x129g2 0o12358'.split():
+ self.assertEqual(next(self.lexer.get_tokens(fragment + '\n'))[0],
+ Error)
+
+ def testChars(self):
+ for fragment in ["'a'", "'я'", "'\\u{1234}'", "'\n'"]:
+ self.assertEqual([(String.Char, fragment), (Text, '\n')],
+ list(self.lexer.get_tokens(fragment + '\n')))
+ self.assertEqual(next(self.lexer.get_tokens("'abc'"))[0], Error)
+
+ def testMacro(self):
+ fragment = (
+ 'def<=>(other : self) : Int\n'
+ '{%for field in %w(first_name middle_name last_name)%}\n'
+ 'cmp={{field.id}}<=>other.{{field.id}}\n'
+ 'return cmp if cmp!=0\n'
+ '{%end%}\n'
+ '0\n'
+ 'end\n')
+ tokens = [
+ (Keyword, 'def'),
+ (Name.Function, '<=>'),
+ (Punctuation, '('),
+ (Name, 'other'),
+ (Text, ' '),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Keyword.Pseudo, 'self'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name.Builtin, 'Int'),
+ (Text, '\n'),
+ (String.Interpol, '{%'),
+ (Keyword, 'for'),
+ (Text, ' '),
+ (Name, 'field'),
+ (Text, ' '),
+ (Keyword, 'in'),
+ (Text, ' '),
+ (String.Other, '%w('),
+ (String.Other, 'first_name middle_name last_name'),
+ (String.Other, ')'),
+ (String.Interpol, '%}'),
+ (Text, '\n'),
+ (Name, 'cmp'),
+ (Operator, '='),
+ (String.Interpol, '{{'),
+ (Name, 'field'),
+ (Operator, '.'),
+ (Name, 'id'),
+ (String.Interpol, '}}'),
+ (Operator, '<=>'),
+ (Name, 'other'),
+ (Operator, '.'),
+ (String.Interpol, '{{'),
+ (Name, 'field'),
+ (Operator, '.'),
+ (Name, 'id'),
+ (String.Interpol, '}}'),
+ (Text, '\n'),
+ (Keyword, 'return'),
+ (Text, ' '),
+ (Name, 'cmp'),
+ (Text, ' '),
+ (Keyword, 'if'),
+ (Text, ' '),
+ (Name, 'cmp'),
+ (Operator, '!='),
+ (Number.Integer, '0'),
+ (Text, '\n'),
+ (String.Interpol, '{%'),
+ (Keyword, 'end'),
+ (String.Interpol, '%}'),
+ (Text, '\n'),
+ (Number.Integer, '0'),
+ (Text, '\n'),
+ (Keyword, 'end'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testLib(self):
+ fragment = (
+ '@[Link("some")]\nlib LibSome\n'
+ '@[CallConvention("X86_StdCall")]\nfun foo="some.foo"(thing : Void*) : LibC::Int\n'
+ 'end\n')
+ tokens = [
+ (Operator, '@['),
+ (Name.Decorator, 'Link'),
+ (Punctuation, '('),
+ (String.Double, '"'),
+ (String.Double, 'some'),
+ (String.Double, '"'),
+ (Punctuation, ')'),
+ (Operator, ']'),
+ (Text, '\n'),
+ (Keyword, 'lib'),
+ (Text, ' '),
+ (Name.Namespace, 'LibSome'),
+ (Text, '\n'),
+ (Operator, '@['),
+ (Name.Decorator, 'CallConvention'),
+ (Punctuation, '('),
+ (String.Double, '"'),
+ (String.Double, 'X86_StdCall'),
+ (String.Double, '"'),
+ (Punctuation, ')'),
+ (Operator, ']'),
+ (Text, '\n'),
+ (Keyword, 'fun'),
+ (Text, ' '),
+ (Name.Function, 'foo'),
+ (Operator, '='),
+ (String.Double, '"'),
+ (String.Double, 'some.foo'),
+ (String.Double, '"'),
+ (Punctuation, '('),
+ (Name, 'thing'),
+ (Text, ' '),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name.Builtin, 'Void'),
+ (Operator, '*'),
+ (Punctuation, ')'),
+ (Text, ' '),
+ (Punctuation, ':'),
+ (Text, ' '),
+ (Name, 'LibC'),
+ (Operator, '::'),
+ (Name.Builtin, 'Int'),
+ (Text, '\n'),
+ (Keyword, 'end'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testEscapedBracestring(self):
+ fragment = 'str.gsub(%r{\\\\\\\\}, "/")\n'
+ tokens = [
+ (Name, 'str'),
+ (Operator, '.'),
+ (Name, 'gsub'),
+ (Punctuation, '('),
+ (String.Regex, '%r{'),
+ (String.Regex, '\\\\'),
+ (String.Regex, '\\\\'),
+ (String.Regex, '}'),
+ (Punctuation, ','),
+ (Text, ' '),
+ (String.Double, '"'),
+ (String.Double, '/'),
+ (String.Double, '"'),
+ (Punctuation, ')'),
+ (Text, '\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 567de51f..596d9fbc 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -116,7 +116,7 @@ class HtmlFormatterTest(unittest.TestCase):
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
- self.assertTrue(re.search("<pre><a name=\"foo-1\">", html))
+ self.assertTrue(re.search("<pre><span></span><a name=\"foo-1\">", html))
def test_lineanchors_with_startnum(self):
optdict = dict(lineanchors="foo", linenostart=5)
@@ -124,7 +124,7 @@ class HtmlFormatterTest(unittest.TestCase):
fmt = HtmlFormatter(**optdict)
fmt.format(tokensource, outfile)
html = outfile.getvalue()
- self.assertTrue(re.search("<pre><a name=\"foo-5\">", html))
+ self.assertTrue(re.search("<pre><span></span><a name=\"foo-5\">", html))
def test_valid_output(self):
# test all available wrappers
diff --git a/tests/test_java.py b/tests/test_java.py
index 33a64e99..f4096647 100644
--- a/tests/test_java.py
+++ b/tests/test_java.py
@@ -9,7 +9,7 @@
import unittest
-from pygments.token import Text, Name, Operator, Keyword
+from pygments.token import Text, Name, Operator, Keyword, Number
from pygments.lexers import JavaLexer
@@ -40,3 +40,39 @@ class JavaTest(unittest.TestCase):
]
self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+ def testNumericLiterals(self):
+ fragment = '0 5L 9__542_72l 0xbEEf 0X9_A 0_35 01 0b0___101_0'
+ fragment += ' 0. .7_17F 3e-1_3d 1f 6_01.9e+3 0x.1Fp3 0XEP8D\n'
+ tokens = [
+ (Number.Integer, '0'),
+ (Text, ' '),
+ (Number.Integer, '5L'),
+ (Text, ' '),
+ (Number.Integer, '9__542_72l'),
+ (Text, ' '),
+ (Number.Hex, '0xbEEf'),
+ (Text, ' '),
+ (Number.Hex, '0X9_A'),
+ (Text, ' '),
+ (Number.Oct, '0_35'),
+ (Text, ' '),
+ (Number.Oct, '01'),
+ (Text, ' '),
+ (Number.Bin, '0b0___101_0'),
+ (Text, ' '),
+ (Number.Float, '0.'),
+ (Text, ' '),
+ (Number.Float, '.7_17F'),
+ (Text, ' '),
+ (Number.Float, '3e-1_3d'),
+ (Text, ' '),
+ (Number.Float, '1f'),
+ (Text, ' '),
+ (Number.Float, '6_01.9e+3'),
+ (Text, ' '),
+ (Number.Float, '0x.1Fp3'),
+ (Text, ' '),
+ (Number.Float, '0XEP8D'),
+ (Text, '\n')
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_lexers_other.py b/tests/test_lexers_other.py
index bb667c05..90d05ef8 100644
--- a/tests/test_lexers_other.py
+++ b/tests/test_lexers_other.py
@@ -13,6 +13,7 @@ import unittest
from pygments.lexers import guess_lexer
from pygments.lexers.scripting import EasytrieveLexer, JclLexer, RexxLexer
+
def _exampleFilePath(filename):
return os.path.join(os.path.dirname(__file__), 'examplefiles', filename)
@@ -28,8 +29,8 @@ class AnalyseTextTest(unittest.TestCase):
text = fp.read().decode('utf-8')
probability = lexer.analyse_text(text)
self.assertTrue(probability > 0,
- '%s must recognize %r' % (
- lexer.name, exampleFilePath))
+ '%s must recognize %r' % (
+ lexer.name, exampleFilePath))
guessedLexer = guess_lexer(text)
self.assertEqual(guessedLexer.name, lexer.name)
@@ -45,25 +46,24 @@ class AnalyseTextTest(unittest.TestCase):
class EasyTrieveLexerTest(unittest.TestCase):
def testCanGuessFromText(self):
- self.assertLess(0, EasytrieveLexer.analyse_text('MACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text('\nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(' \nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(' \n MACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text('*\nMACRO'))
- self.assertLess(0, EasytrieveLexer.analyse_text(
+ self.assertTrue(EasytrieveLexer.analyse_text('MACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text('\nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(' \nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(' \n MACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text('*\nMACRO'))
+ self.assertTrue(EasytrieveLexer.analyse_text(
'*\n *\n\n \n*\n MACRO'))
class RexxLexerTest(unittest.TestCase):
def testCanGuessFromText(self):
- self.assertAlmostEqual(0.01,
- RexxLexer.analyse_text('/* */'))
+ self.assertAlmostEqual(0.01, RexxLexer.analyse_text('/* */'))
self.assertAlmostEqual(1.0,
- RexxLexer.analyse_text('''/* Rexx */
+ RexxLexer.analyse_text('''/* Rexx */
say "hello world"'''))
val = RexxLexer.analyse_text('/* */\n'
- 'hello:pRoceduRe\n'
- ' say "hello world"')
+ 'hello:pRoceduRe\n'
+ ' say "hello world"')
self.assertTrue(val > 0.5, val)
val = RexxLexer.analyse_text('''/* */
if 1 > 0 then do
diff --git a/tests/test_praat.py b/tests/test_praat.py
new file mode 100644
index 00000000..471d5e2c
--- /dev/null
+++ b/tests/test_praat.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+"""
+ Praat lexer tests
+ ~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.token import Token
+from pygments.lexers import PraatLexer
+
+class PraatTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = PraatLexer()
+ self.maxDiff = None
+
+ def testNumericAssignment(self):
+ fragment = u'var = -15e4\n'
+ tokens = [
+ (Token.Text, u'var'),
+ (Token.Text, u' '),
+ (Token.Operator, u'='),
+ (Token.Text, u' '),
+ (Token.Operator, u'-'),
+ (Token.Literal.Number, u'15e4'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testStringAssignment(self):
+ fragment = u'var$ = "foo"\n'
+ tokens = [
+ (Token.Text, u'var$'),
+ (Token.Text, u' '),
+ (Token.Operator, u'='),
+ (Token.Text, u' '),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'foo'),
+ (Token.Literal.String, u'"'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testStringEscapedQuotes(self):
+ fragment = u'"it said ""foo"""\n'
+ tokens = [
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'it said '),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'foo'),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'"'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testFunctionCall(self):
+ fragment = u'selected("Sound", i+(a*b))\n'
+ tokens = [
+ (Token.Name.Function, u'selected'),
+ (Token.Punctuation, u'('),
+ (Token.Literal.String, u'"'),
+ (Token.Literal.String, u'Sound'),
+ (Token.Literal.String, u'"'),
+ (Token.Punctuation, u','),
+ (Token.Text, u' '),
+ (Token.Text, u'i'),
+ (Token.Operator, u'+'),
+ (Token.Text, u'('),
+ (Token.Text, u'a'),
+ (Token.Operator, u'*'),
+ (Token.Text, u'b'),
+ (Token.Text, u')'),
+ (Token.Punctuation, u')'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+ def testBrokenUnquotedString(self):
+ fragment = u'printline string\n... \'interpolated\' string\n'
+ tokens = [
+ (Token.Keyword, u'printline'),
+ (Token.Text, u' '),
+ (Token.Literal.String, u'string'),
+ (Token.Text, u'\n'),
+ (Token.Punctuation, u'...'),
+ (Token.Text, u' '),
+ (Token.Literal.String.Interpol, u"'"),
+ (Token.Literal.String.Interpol, u'interpolated'),
+ (Token.Literal.String.Interpol, u"'"),
+ (Token.Text, u' '),
+ (Token.Literal.String, u'string'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
+
+    def testInlineIf(self):
+ fragment = u'var = if true == 1 then -1 else 0 fi'
+ tokens = [
+ (Token.Text, u'var'),
+ (Token.Text, u' '),
+ (Token.Operator, u'='),
+ (Token.Text, u' '),
+ (Token.Keyword, u'if'),
+ (Token.Text, u' '),
+ (Token.Text, u'true'),
+ (Token.Text, u' '),
+ (Token.Operator, u'=='),
+ (Token.Text, u' '),
+ (Token.Literal.Number, u'1'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'then'),
+ (Token.Text, u' '),
+ (Token.Operator, u'-'),
+ (Token.Literal.Number, u'1'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'else'),
+ (Token.Text, u' '),
+ (Token.Literal.Number, u'0'),
+ (Token.Text, u' '),
+ (Token.Keyword, u'fi'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
diff --git a/tests/test_sql.py b/tests/test_sql.py
new file mode 100644
index 00000000..c5f5c758
--- /dev/null
+++ b/tests/test_sql.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+"""
+ Pygments SQL lexers tests
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import unittest
+
+from pygments.lexers.sql import TransactSqlLexer
+from pygments.token import Comment, Name, Number, Punctuation, Whitespace
+
+
+class TransactSqlLexerTest(unittest.TestCase):
+
+ def setUp(self):
+ self.lexer = TransactSqlLexer()
+
+ def _assertAreTokensOfType(self, examples, expected_token_type):
+ for test_number, example in enumerate(examples.split(), 1):
+ token_count = 0
+ for token_type, token_value in self.lexer.get_tokens(example):
+ if token_type != Whitespace:
+ token_count += 1
+ self.assertEqual(
+ token_type, expected_token_type,
+                    'token_type #%d for %s is %s but must be %s' %
+ (test_number, token_value, token_type, expected_token_type))
+ self.assertEqual(
+ token_count, 1,
+ '%s must yield exactly 1 token instead of %d' %
+ (example, token_count))
+
+ def _assertTokensMatch(self, text, expected_tokens_without_trailing_newline):
+ actual_tokens = tuple(self.lexer.get_tokens(text))
+ if (len(actual_tokens) >= 1) and (actual_tokens[-1] == (Whitespace, '\n')):
+ actual_tokens = tuple(actual_tokens[:-1])
+ self.assertEqual(
+ expected_tokens_without_trailing_newline, actual_tokens,
+ 'text must yield expected tokens: %s' % text)
+
+ def test_can_lex_float(self):
+ self._assertAreTokensOfType(
+ '1. 1.e1 .1 1.2 1.2e3 1.2e+3 1.2e-3 1e2', Number.Float)
+ self._assertTokensMatch(
+ '1e2.1e2',
+ ((Number.Float, '1e2'), (Number.Float, '.1e2'))
+ )
+
+ def test_can_reject_almost_float(self):
+ self._assertTokensMatch(
+ '.e1',
+ ((Punctuation, '.'), (Name, 'e1')))
+
+ def test_can_lex_integer(self):
+ self._assertAreTokensOfType(
+ '1 23 456', Number.Integer)
+
+ def test_can_lex_names(self):
+ self._assertAreTokensOfType(
+ u'thingy thingy123 _thingy _ _123 Ähnliches Müll #temp1 ##temp2', Name)
+
+ def test_can_lex_comments(self):
+ self._assertTokensMatch('--\n', ((Comment.Single, '--\n'),))
+ self._assertTokensMatch('/**/', (
+ (Comment.Multiline, '/*'), (Comment.Multiline, '*/')
+ ))
+ self._assertTokensMatch('/*/**/*/', (
+ (Comment.Multiline, '/*'),
+ (Comment.Multiline, '/*'),
+ (Comment.Multiline, '*/'),
+ (Comment.Multiline, '*/'),
+ ))
diff --git a/tests/test_terminal_formatter.py b/tests/test_terminal_formatter.py
index 07337cd5..cb5c6c44 100644
--- a/tests/test_terminal_formatter.py
+++ b/tests/test_terminal_formatter.py
@@ -14,7 +14,13 @@ import re
from pygments.util import StringIO
from pygments.lexers.sql import PlPgsqlLexer
-from pygments.formatters import TerminalFormatter
+from pygments.formatters import TerminalFormatter, Terminal256Formatter, \
+ HtmlFormatter, LatexFormatter
+
+from pygments.style import Style
+from pygments.token import Token
+from pygments.lexers import Python3Lexer
+from pygments import highlight
DEMO_TEXT = '''\
-- comment
@@ -26,9 +32,11 @@ DEMO_TOKENS = list(DEMO_LEXER().get_tokens(DEMO_TEXT))
ANSI_RE = re.compile(r'\x1b[\w\W]*?m')
+
def strip_ansi(x):
return ANSI_RE.sub('', x)
+
class TerminalFormatterTest(unittest.TestCase):
def test_reasonable_output(self):
out = StringIO()
@@ -49,3 +57,46 @@ class TerminalFormatterTest(unittest.TestCase):
for a, b in zip(DEMO_TEXT.splitlines(), plain.splitlines()):
self.assertTrue(a in b)
+
+
+class MyStyle(Style):
+ styles = {
+ Token.Comment: '#ansidarkgray',
+ Token.String: '#ansiblue bg:#ansidarkred',
+ Token.Number: '#ansigreen bg:#ansidarkgreen',
+ Token.Number.Hex: '#ansidarkgreen bg:#ansired',
+ }
+
+
+class Terminal256FormatterTest(unittest.TestCase):
+ code = '''
+# this should be a comment
+print("Hello World")
+async def function(a,b,c, *d, **kwarg:Bool)->Bool:
+ pass
+ return 123, 0xb3e3
+
+'''
+
+ def test_style_html(self):
+ style = HtmlFormatter(style=MyStyle).get_style_defs()
+ self.assertTrue('#555555' in style,
+ "ansigray for comment not html css style")
+
+ def test_others_work(self):
+ """check other formatters don't crash"""
+ highlight(self.code, Python3Lexer(), LatexFormatter(style=MyStyle))
+ highlight(self.code, Python3Lexer(), HtmlFormatter(style=MyStyle))
+
+ def test_256esc_seq(self):
+ """
+        test that a few escape sequences are actually used when using #ansi<> color codes
+ """
+ def termtest(x):
+ return highlight(x, Python3Lexer(),
+ Terminal256Formatter(style=MyStyle))
+
+ self.assertTrue('32;41' in termtest('0x123'))
+ self.assertTrue('32;42' in termtest('123'))
+ self.assertTrue('30;01' in termtest('#comment'))
+ self.assertTrue('34;41' in termtest('"String"'))
diff --git a/tests/test_token.py b/tests/test_token.py
index c96bd9ef..0c6b02bf 100644
--- a/tests/test_token.py
+++ b/tests/test_token.py
@@ -7,6 +7,7 @@
:license: BSD, see LICENSE for details.
"""
+import copy
import unittest
from pygments import token
@@ -44,3 +45,10 @@ class TokenTest(unittest.TestCase):
for k, v in t.items():
if len(v) > 1:
self.fail("%r has more than one key: %r" % (k, v))
+
+ def test_copying(self):
+ # Token instances are supposed to be singletons, so copying or even
+ # deepcopying should return themselves
+ t = token.String
+ self.assertIs(t, copy.copy(t))
+ self.assertIs(t, copy.deepcopy(t))
diff --git a/tests/test_whiley.py b/tests/test_whiley.py
new file mode 100644
index 00000000..2e957875
--- /dev/null
+++ b/tests/test_whiley.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+"""
+ Whiley Test
+ ~~~~~~~~~~~
+
+ :copyright: Copyright 2006-2016 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import unittest
+
+from pygments.lexers import WhileyLexer
+from pygments.token import Token
+
+
+class WhileyTest(unittest.TestCase):
+ def setUp(self):
+ self.lexer = WhileyLexer()
+
+ def testNeedsName(self):
+ fragment = u'123 \u2200 x\n'
+ tokens = [
+ (Token.Literal.Number.Integer, u'123'),
+ (Token.Text, u' '),
+ (Token.Operator, u'\u2200'),
+ (Token.Text, u' '),
+ (Token.Name, u'x'),
+ (Token.Text, u'\n'),
+ ]
+ self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))