author    Adrian Thurston <thurston@complang.org>    2011-03-30 19:04:01 +0000
committer Adrian Thurston <thurston@complang.org>    2011-03-30 19:04:01 +0000
commit    e2d471562ef5e0488c17fe8a874bb6d6f32c7bfe (patch)
tree      1dcbff982296e72e2bb2474dfeb9a70d5395c67e
parent    fb5430c9c2cf2ad1570150316e969ab142288b58 (diff)
download  colm-e2d471562ef5e0488c17fe8a874bb6d6f32c7bfe.tar.gz
Commas gone from expression lists. Now consistent with constructors, patterns,
and parameter lists. refs #246.
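
For illustration, a minimal before/after sketch of the affected call syntax, taken from the test-suite hunks below (only the commas change):

    # Before: expressions in a call were comma-separated.
    print( 'num items: ', Count.data.atoi(), '\n' )

    # After: expressions are juxtaposed, matching constructors, patterns,
    # and parameter lists.
    print( 'num items: ' Count.data.atoi() '\n' )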
-rw-r--r--  colm/lmparse.kl          8
-rw-r--r--  test/accum1.lm           3
-rw-r--r--  test/accum2.lm           2
-rw-r--r--  test/commitbt.lm         2
-rw-r--r--  test/construct2.lm       2
-rw-r--r--  test/construct3.lm       2
-rw-r--r--  test/constructex.lm      4
-rw-r--r--  test/context1.lm         6
-rw-r--r--  test/context2.lm        12
-rw-r--r--  test/context3.lm         6
-rw-r--r--  test/counting1.lm        4
-rw-r--r--  test/counting2.lm        4
-rw-r--r--  test/counting3.lm        8
-rw-r--r--  test/counting4.lm        6
-rw-r--r--  test/cxx/cxx.lm        162
-rw-r--r--  test/div.lm              2
-rw-r--r--  test/dns.lm             28
-rw-r--r--  test/func.lm             2
-rw-r--r--  test/heredoc.lm          4
-rw-r--r--  test/html/html.lm        4
-rw-r--r--  test/liftattrs.lm        2
-rw-r--r--  test/matchex.lm          4
-rw-r--r--  test/nestedcomm.lm       2
-rw-r--r--  test/python/python.lm   28
-rw-r--r--  test/rediv.lm            2
-rw-r--r--  test/reparse.lm          2
-rw-r--r--  test/repeat.lm           8
-rw-r--r--  test/rubyhere.lm         6
-rw-r--r--  test/sprintf.lm          2
-rw-r--r--  test/superid.lm         10
-rw-r--r--  test/til.lm              2
-rw-r--r--  test/translate1.lm       4
-rw-r--r--  test/translate2.lm       8
-rw-r--r--  test/travs1.lm           8
-rw-r--r--  test/travs2.lm           4
-rw-r--r--  test/treecmp1.lm         2
-rw-r--r--  test/undofrag1.lm        4
-rw-r--r--  test/xml/xml.lm          2
38 files changed, 185 insertions, 186 deletions
diff --git a/colm/lmparse.kl b/colm/lmparse.kl
index cb89db59..2c4b31b1 100644
--- a/colm/lmparse.kl
+++ b/colm/lmparse.kl
@@ -1440,10 +1440,10 @@ nonterm code_expr_list
ExprVect *exprVect;
};
-code_expr_list: code_expr_list ',' code_expr
+code_expr_list: code_expr_list code_expr
final {
$$->exprVect = $1->exprVect;
- $$->exprVect->append( $3->expr );
+ $$->exprVect->append( $2->expr );
};
code_expr_list: code_expr
final {
@@ -1829,10 +1829,10 @@ nonterm field_init_list
FieldInitVect *fieldInitVect;
};
-field_init_list: field_init_list ',' field_init
+field_init_list: field_init_list field_init
final {
$$->fieldInitVect = $1->fieldInitVect;
- $$->fieldInitVect->append( $3->fieldInit );
+ $$->fieldInitVect->append( $2->fieldInit );
};
field_init_list: field_init
final {
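
Dropping the ',' terminal shortens each right-hand side by one symbol, so the positional references in the reduction actions shift down by one ($3 becomes $2), as in the hunks above. A hypothetical minimal production in the same style, for illustration only (the nonterminal and field names are invented):

    # Old form: three symbols on the right, the appended element is $3.
    #   elem_list: elem_list ',' elem
    #       final { $$->vect = $1->vect; $$->vect->append( $3->elem ); };

    # New form: two symbols on the right, the appended element is $2.
    elem_list: elem_list elem
        final { $$->vect = $1->vect; $$->vect->append( $2->elem ); };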
diff --git a/test/accum1.lm b/test/accum1.lm
index d6ba73d8..47f8665c 100644
--- a/test/accum1.lm
+++ b/test/accum1.lm
@@ -26,5 +26,4 @@ for Id: id in Input {
S: start = Output.finish()
-print( S, '\n' )
-
+print( S '\n' )
diff --git a/test/accum2.lm b/test/accum2.lm
index 7570433f..139d6c20 100644
--- a/test/accum2.lm
+++ b/test/accum2.lm
@@ -31,6 +31,6 @@ SP: start_parser = cons start_parser[]
SP.ctx = cons ctx []
SP << stdin
Input: ctx::start = SP.finish()
-print( Input, '\n' )
+print( Input '\n' )
diff --git a/test/commitbt.lm b/test/commitbt.lm
index 42226e60..9131ebf4 100644
--- a/test/commitbt.lm
+++ b/test/commitbt.lm
@@ -100,7 +100,7 @@ token item
{
M: str = input.pull(match_length)
S: sub = parse_stop sub(input)
- input.push( make_token( typeid item, M, S ) )
+ input.push( make_token( typeid item M S ) )
}
token EOL /'\n'/
diff --git a/test/construct2.lm b/test/construct2.lm
index f21699b7..79bfe48f 100644
--- a/test/construct2.lm
+++ b/test/construct2.lm
@@ -8,4 +8,4 @@ lex start
def lang [id*]
-print( construct lang "a b c", '\n' )
+print( construct lang "a b c" '\n' )
diff --git a/test/construct3.lm b/test/construct3.lm
index 02409d9a..35c62221 100644
--- a/test/construct3.lm
+++ b/test/construct3.lm
@@ -13,4 +13,4 @@ def item [id] | [bigger]
def lang [item*]
B: bigger = construct bigger "( b1 b2 )"
-print( construct lang "a [B] c", '\n' )
+print( construct lang "a [B] c" '\n' )
diff --git a/test/constructex.lm b/test/constructex.lm
index dcf7c4d3..441f490e 100644
--- a/test/constructex.lm
+++ b/test/constructex.lm
@@ -32,6 +32,6 @@ NameTag1: tag = construct tag
NameTag2: tag = construct tag
"<name type=person>[Val]</name>"
-print( NameTag1, '\n' )
-print( NameTag2, '\n' )
+print( NameTag1 '\n' )
+print( NameTag2 '\n' )
diff --git a/test/context1.lm b/test/context1.lm
index c49b7f88..6f703956 100644
--- a/test/context1.lm
+++ b/test/context1.lm
@@ -21,7 +21,7 @@ context ctx
i = 0
j = i + 1
k = j + 1
- print( k, '\n' )
+ print( k '\n' )
}
def start
@@ -29,5 +29,5 @@ context ctx
}
CTX: ctx = cons ctx[]
-Input: ctx::start = parse ctx::start( CTX, stdin )
-print( Input, '\n' )
+Input: ctx::start = parse ctx::start( CTX stdin )
+print( Input '\n' )
diff --git a/test/context2.lm b/test/context2.lm
index 570cfa4e..4f6ba2be 100644
--- a/test/context2.lm
+++ b/test/context2.lm
@@ -26,16 +26,16 @@ context ruby_here
HereId = input.pull( match_length )
# Get the data up to the rest of the line.
- ROL: rest_of_line = parse_stop rest_of_line( ctx, input )
+ ROL: rest_of_line = parse_stop rest_of_line( ctx input )
# Parse the heredoc data.
- HereData: here_data = parse_stop here_data( ctx, input )
+ HereData: here_data = parse_stop here_data( ctx input )
# Push the rest-of-line data back to the input stream.
input.push( $ROL )
# Send the here_id token. Attach the heredoc data as an attribute.
- input.push( make_token( typeid here_id, HereId, HereData ) )
+ input.push( make_token( typeid here_id HereId HereData ) )
}
}
@@ -46,11 +46,11 @@ context ruby_here
{
if match_text == HereId + '\n' {
input.push( make_token(
- typeid here_close_id,
+ typeid here_close_id
input.pull( match_length ) ) )
}
else
- input.push( make_token( typeid here_line, input.pull(match_length) ) )
+ input.push( make_token( typeid here_line input.pull(match_length) ) )
}
token here_line
@@ -93,5 +93,5 @@ context ruby_here
CTX: ruby_here = cons ruby_here []
-S: ruby_here::start = parse ruby_here::start( CTX, stdin )
+S: ruby_here::start = parse ruby_here::start( CTX stdin )
print_xml(S)
diff --git a/test/context3.lm b/test/context3.lm
index f0a75335..8a6e8f57 100644
--- a/test/context3.lm
+++ b/test/context3.lm
@@ -27,7 +27,7 @@ context ctx
f()
f()
f()
- print( i, '\n' )
+ print( i '\n' )
}
@@ -36,7 +36,7 @@ context ctx
}
CTX: ctx = cons ctx []
-Input: ctx::start = parse ctx::start( CTX, stdin )
-print( Input, '\n' )
+Input: ctx::start = parse ctx::start( CTX stdin )
+print( Input '\n' )
diff --git a/test/counting1.lm b/test/counting1.lm
index af302c1b..c5a98d54 100644
--- a/test/counting1.lm
+++ b/test/counting1.lm
@@ -80,11 +80,11 @@ def start
{
for List:counted_list in lhs {
match List [Count:number Items:count_items]
- print( 'num items: ', Count.data.atoi(), '\n' )
+ print( 'num items: ' Count.data.atoi() '\n' )
i: int = 1
for Item:item in Items {
- print( ' item ', i, ': ', Item, '\n' )
+ print( ' item ' i ': ' Item '\n' )
i = i + 1
}
}
diff --git a/test/counting2.lm b/test/counting2.lm
index bc1a5f0c..2b3f880c 100644
--- a/test/counting2.lm
+++ b/test/counting2.lm
@@ -71,11 +71,11 @@ def start
{
for List: counted_list in lhs {
match List [CountItems:count_items]
- print( 'num items: ', CountItems.target, '\n' )
+ print( 'num items: ' CountItems.target '\n' )
i: int = 1
for Item:item in CountItems {
- print( ' item ', i, ': ', Item, '\n' )
+ print( ' item ' i ': ' Item '\n' )
i = i + 1
}
}
diff --git a/test/counting3.lm b/test/counting3.lm
index cb7ff7e6..2a497122 100644
--- a/test/counting3.lm
+++ b/test/counting3.lm
@@ -39,7 +39,7 @@ def get_target
{
count = 0
target = r1.data.atoi()
- print( 'target: ', target, '\n' )
+ print( 'target: ' target '\n' )
}
# Arbitrary item.
@@ -67,7 +67,7 @@ def one_item
def counted_list
[get_target count_items]
{
- print( 'trying: ', count, ' for: ', target, '\n' )
+ print( 'trying: ' count ' for: ' target '\n' )
if count < target {
reject
}
@@ -80,11 +80,11 @@ def start
for List: counted_list in lhs {
match List [Count: number Items: count_items]
- print( 'num items: ', Count.data.atoi(), '\n' )
+ print( 'num items: ' Count.data.atoi() '\n' )
i: int = 1
for Item: item in Items {
- print( ' item ', i, ': ', Item, '\n' )
+ print( ' item ' i ': ' Item '\n' )
i = i + 1
}
}
diff --git a/test/counting4.lm b/test/counting4.lm
index f7e43a17..db77b7fc 100644
--- a/test/counting4.lm
+++ b/test/counting4.lm
@@ -39,7 +39,7 @@ def get_target
{
count = 0
target = r1.data.atoi()
- print( 'target: ', target, '\n' )
+ print( 'target: ' target '\n' )
}
# Arbitrary item.
@@ -75,11 +75,11 @@ def start
{
for List: counted_list in lhs {
match List [Count: number Items: count_items]
- print( 'num items: ', Count.data.atoi(), '\n' )
+ print( 'num items: ' Count.data.atoi() '\n' )
i: int = 1
for Item: item in Items {
- print( ' item ', i, ': ', Item, '\n' )
+ print( ' item ' i ': ' Item '\n' )
i = i + 1
}
}
diff --git a/test/cxx/cxx.lm b/test/cxx/cxx.lm
index 1bc76c14..db2ba1ee 100644
--- a/test/cxx/cxx.lm
+++ b/test/cxx/cxx.lm
@@ -73,8 +73,8 @@ global declaratorData: declarator_data_list = construct declarator_data_list []
global templDecl: int_stack = construct int_stack []
# Root namespace object
-global rootNamespace: ptr lang_object = createLangObject( NamespaceType,
- '<root_namespace>', nil )
+global rootNamespace: ptr lang_object = createLangObject( NamespaceType
+ '<root_namespace>' nil )
# Initialize the namespace and declaration stacks with the root namespace
curNamespace.push( rootNamespace )
@@ -86,7 +86,7 @@ qualNs.push( nil )
templDecl.push( 0 )
declarationData.push( construct declaration_data(
- isTypedef: 0, isFriend: 0, isTemplate: 0 ) [] )
+ isTypedef: 0 isFriend: 0 isTemplate: 0 ) [] )
#
# Identifier lookup.
@@ -110,7 +110,7 @@ ptr lang_object lookupInObject( obj: ptr lang_object name: str )
# tree.
ptr lang_object lookupWithInheritance( obj: ptr lang_object name: str )
{
- found: ptr lang_object = lookupInObject( obj, name )
+ found: ptr lang_object = lookupInObject( obj name )
if found
return found
@@ -125,7 +125,7 @@ ptr lang_object lookupWithInheritance( obj: ptr lang_object name: str )
}
# Otherwise look inside the inherited object.
- found = lookupWithInheritance( inh, name )
+ found = lookupWithInheritance( inh name )
if found
return found
}
@@ -140,7 +140,7 @@ ptr lang_object unqualifiedLookup( name: str )
# Start with the objects in the templateParamNs.
localTemplateParamNs: object_list = templateParamNs
for TemplParaObjIter: ptr lang_object in rev_child(localTemplateParamNs) {
- found = lookupWithInheritance( TemplParaObjIter, name )
+ found = lookupWithInheritance( TemplParaObjIter name )
if found
break
}
@@ -150,7 +150,7 @@ ptr lang_object unqualifiedLookup( name: str )
# and going up through the lookup parents.
lookupIn: ptr lang_object = lookupNs.top
while lookupIn {
- found = lookupWithInheritance( lookupIn, name )
+ found = lookupWithInheritance( lookupIn name )
if found
break
lookupIn = lookupIn->lookupParent
@@ -220,7 +220,7 @@ lex start
qualNs.top = nil
# Lookup using the qualification.
- found = lookupWithInheritance( qualObj, name )
+ found = lookupWithInheritance( qualObj name )
}
else {
# No qualification, full search.
@@ -233,9 +233,9 @@ lex start
if found
id = found->typeId
- LookupId: any = make_token( typeid lookup_id,
- input.pull(match_length), found, qualObj )
- input.push( make_tree( id, LookupId ) )
+ LookupId: any = make_token( typeid lookup_id
+ input.pull(match_length) found qualObj )
+ input.push( make_tree( id LookupId ) )
}
@@ -268,10 +268,10 @@ lex start
ptr lang_object createLangObject( typeId: int name: str lookupParent: ptr lang_object )
{
obj: ptr lang_object = new construct lang_object(
- typeId: typeId,
- name: name,
- objectMap: construct object_map [],
- inherited: construct object_list [],
+ typeId: typeId
+ name: name
+ objectMap: construct object_map []
+ inherited: construct object_list []
lookupParent: lookupParent ) []
return obj
}
@@ -285,7 +285,7 @@ int insertObject( definedIn: ptr lang_object name: str obj: ptr lang_object )
ol = construct object_list []
}
ol.append( obj )
- definedIn->objectMap.store( name, ol )
+ definedIn->objectMap.store( name ol )
}
ptr lang_object findClass( inObj: ptr lang_object name: str )
@@ -496,7 +496,7 @@ def declaration_start
{
# LOG print( 'opening new declaration_data with templDecl: ', templDecl.top, '\n' )
declarationData.push( construct declaration_data (
- isTypedef: 0, isFriend: 0, isTemplate: 0 ) [] )
+ isTypedef: 0 isFriend: 0 isTemplate: 0 ) [] )
# Transfer the template flag and reset it.
declarationData.top.isTemplate = templDecl.top
@@ -671,9 +671,9 @@ def elaborated_type_specifier
parentObj = Id.qualObj
# Look for the class in the given scope.
- declaredClass: ptr lang_object = findClass( parentObj, name )
+ declaredClass: ptr lang_object = findClass( parentObj name )
if !declaredClass
- declaredClass = findTemplateClass( parentObj, name )
+ declaredClass = findTemplateClass( parentObj name )
if !declaredClass {
# LOG print( 'creating new class: ', name, '\n' )
@@ -681,13 +681,13 @@ def elaborated_type_specifier
# Class does not exist in the parent scope, create it.
nsType: int = declaredClassType()
- declaredClass = createLangObject( nsType, name, lookupNs.top )
+ declaredClass = createLangObject( nsType name lookupNs.top )
# FIXME: handle friends. Make the class visible only if we are NOT
# in a friend declaration. The new class object is necessary to
# properly process the body of the class.
if declarationData.top.isFriend == 0
- insertObject( parentObj, name, declaredClass )
+ insertObject( parentObj name declaredClass )
}
}
@@ -699,9 +699,9 @@ def elaborated_type_specifier
{
# TODO: should look for existing enums of the same name.
Id: lookup_id = lookup_id in r3
- # LOG print( 'creating enumeration ', Id.data, '\n' )
- enum: ptr lang_object = createLangObject( EnumType, Id.data, lookupNs.top )
- insertObject( declNs.top, Id.data, enum )
+ # LOG print( 'creating enumeration ' Id.data '\n' )
+ enum: ptr lang_object = createLangObject( EnumType Id.data lookupNs.top )
+ insertObject( declNs.top Id.data enum )
}
def decl_specifier_mult_seq_opt
@@ -769,9 +769,9 @@ def enum_specifier
{
# TODO: should look for existing enums of the same name.
Id: lookup_id = lookup_id in r3
- # LOG print( 'creating enumeration ', Id.data, '\n' )
- enum: ptr lang_object = createLangObject( EnumType, Id.data, lookupNs.top )
- insertObject( declNs.top, Id.data, enum )
+ # LOG print( 'creating enumeration ' Id.data '\n' )
+ enum: ptr lang_object = createLangObject( EnumType Id.data lookupNs.top )
+ insertObject( declNs.top Id.data enum )
}
| ['enum' '{' enumerator_list_opt '}']
@@ -799,15 +799,15 @@ def enumerator_definition
[enumerator_id]
{
Id: lookup_id = lookup_id in r1
- enumId: ptr lang_object = createLangObject( IdType, Id.data, lookupNs.top )
- insertObject( declNs.top, Id.data, enumId )
+ enumId: ptr lang_object = createLangObject( IdType Id.data lookupNs.top )
+ insertObject( declNs.top Id.data enumId )
}
| [enumerator_id '=' constant_expression]
{
Id: lookup_id = lookup_id in r1
- enumId: ptr lang_object = createLangObject( IdType, Id.data, lookupNs.top )
- insertObject( declNs.top, Id.data, enumId )
+ enumId: ptr lang_object = createLangObject( IdType Id.data lookupNs.top )
+ insertObject( declNs.top Id.data enumId )
}
def enumerator_id
@@ -1104,7 +1104,7 @@ def compound_statement
def compound_begin
[]
{
- newCompound: ptr lang_object = createLangObject( 0, '<compound_begin>', lookupNs.top )
+ newCompound: ptr lang_object = createLangObject( 0 '<compound_begin>' lookupNs.top )
lookupNs.push( newCompound )
declNs.push( newCompound )
# LOG print( 'opening <compound>\n' )
@@ -1227,49 +1227,49 @@ def declarator_id
isConstructor: bool
if parentObj == r1.lookupId.obj {
isConstructor = true
- # LOG print( 'making declarator ', name, ' a constructor/destructor\n' )
+ # LOG print( 'making declarator ' name ' a constructor/destructor\n' )
}
if parentObj->specializationOf &&
parentObj->specializationOf == r1.lookupId.obj
{
isConstructor = true
- # LOG print( 'making declarator ', name, ' a constructor/destructor\n' )
+ # LOG print( 'making declarator ' name ' a constructor/destructor\n' )
}
obj: ptr lang_object = nil
if name && !isConstructor && declarationData.top.isFriend == 0 {
if declarationData.top.isTypedef {
- obj = createLangObject( TypedefType, name, lookupNs.top )
+ obj = createLangObject( TypedefType name lookupNs.top )
obj->typedefOf = declarationData.top.typeObj
- insertObject( parentObj, name, obj )
+ insertObject( parentObj name obj )
- # LOG print( 'making declarator ', name, ' a typedef\n' )
+ # LOG print( 'making declarator ' name ' a typedef\n' )
}
else {
if !qualObj {
if declarationData.top.isTemplate {
# If in a template declaration and the name is not qualified then
# create the template id.
- obj = createLangObject( TemplateIdType, name, lookupNs.top )
+ obj = createLangObject( TemplateIdType name lookupNs.top )
#object->objType = declarationData.top.type
- insertObject( declNs.top, name, obj )
+ insertObject( declNs.top name obj )
- # LOG print( 'making declarator ', name, ' a template id\n' )
+ # LOG print( 'making declarator ' name ' a template id\n' )
}
else {
- obj = createLangObject( IdType, name, lookupNs.top )
+ obj = createLangObject( IdType name lookupNs.top )
#object->objType = declarationData.top().type;
- insertObject( declNs.top, name, obj )
+ insertObject( declNs.top name obj )
- # LOG print( 'making declarator ', name, ' an id\n' )
+ # LOG print( 'making declarator ' name ' an id\n' )
}
}
}
}
declaratorData.push( construct declarator_data (
- qualObj: qualObj, lookupObj: lookupNs.top ) [] )
+ qualObj: qualObj lookupObj: lookupNs.top ) [] )
# If the declarator is qualified, push the qualification to the lookup
# stack. Also save it in the declarator data so it can be passed to a
@@ -1279,7 +1279,7 @@ def declarator_id
declaratorData.top.lookupObj = qualObj
}
- # LOG print( 'reduced declarator_id: ', name, '\n' )
+ # LOG print( 'reduced declarator_id: ' name '\n' )
}
# Undoes the setup done by declarator_id and pdc_start.
@@ -1395,7 +1395,7 @@ def pdc_start
{
if !declaratorData.top.pdcScope {
# We are going to need a scope for the declarator.
- pdcScope: ptr lang_object = createLangObject( 0, '<pdc_scope>', lookupNs.top )
+ pdcScope: ptr lang_object = createLangObject( 0 '<pdc_scope>' lookupNs.top )
lookupNs.push( pdcScope )
declNs.push( pdcScope )
@@ -1479,8 +1479,8 @@ def function_body
def function_body_begin
[]
{
- newFunctionBody: ptr lang_object = createLangObject( 0,
- '<function_body_begin>', lookupNs.top )
+ newFunctionBody: ptr lang_object = createLangObject( 0
+ '<function_body_begin>' lookupNs.top )
lookupNs.push( newFunctionBody )
declNs.push( newFunctionBody )
templDecl.push( 0 )
@@ -1536,8 +1536,8 @@ def class_head
nsType: int = declaredClassType()
# LOG print( 'creating new anonymous class\n' )
- newClass: ptr lang_object = createLangObject( nsType,
- '<anon_class>', lookupNs.top )
+ newClass: ptr lang_object = createLangObject( nsType
+ '<anon_class>' lookupNs.top )
lookupNs.push( newClass )
declNs.push( newClass )
}
@@ -1553,23 +1553,23 @@ def class_head
parentObj = Id.qualObj
# Look for the class in the given scope.
- declaredClass: ptr lang_object = findClass( parentObj, name )
+ declaredClass: ptr lang_object = findClass( parentObj name )
if !declaredClass
- declaredClass = findTemplateClass( parentObj, name )
+ declaredClass = findTemplateClass( parentObj name )
if !declaredClass {
- # LOG print( 'creating new class: ', name, '\n' )
+ # LOG print( 'creating new class: ' name '\n' )
# Class does not exist in the parent scope, create it.
nsType: int = declaredClassType()
- declaredClass = createLangObject( nsType, name, lookupNs.top )
+ declaredClass = createLangObject( nsType name lookupNs.top )
# FIXME: handle friends. Make the class visible only if we are NOT
# in a friend declaration. The new class object is necessary to
# properly process the body of the class.
if declarationData.top.isFriend == 0
- insertObject( parentObj, name, declaredClass )
+ insertObject( parentObj name declaredClass )
}
# Push the found/new class.
@@ -1586,17 +1586,17 @@ def class_head
# TODO: Try to find the specializaition in the template class object.
# TypeList typeList;
- # makeTypeList( typeList, $6->last );
+ # makeTypeList( typeList $6->last );
declaredClass: ptr lang_object
#declaredClass = classObj->findSpecExact( typeList );
if !declaredClass {
# LOG print( 'making new template specialization\n' )
nsType: int = declaredClassType()
- declaredClass = createLangObject( nsType, id, lookupNs.top )
- # LOG print( 'declaredClass: ', declaredClass, '\n' )
+ declaredClass = createLangObject( nsType id lookupNs.top )
+ # LOG print( 'declaredClass: ' declaredClass '\n' )
declaredClass->specializationOf = classObj
- # $$->typeListMapEl = classObj->typeListMap.insert( typeList, declaredClass );
+ # $$->typeListMapEl = classObj->typeListMap.insert( typeList declaredClass );
}
# Push the found/new class.
@@ -1709,14 +1709,14 @@ def using_declaration
{
obj: ptr lang_object = r2.lookupId.obj
if obj
- insertObject( declNs.top, obj->name, obj )
+ insertObject( declNs.top obj->name obj )
}
| ['using' type_id ';']
{
obj: ptr lang_object = r2.lookupId.obj
if obj
- insertObject( declNs.top, obj->name, obj )
+ insertObject( declNs.top obj->name obj )
}
def using_directive
@@ -1761,17 +1761,17 @@ int addBaseSpecifier( inObject: ptr lang_object inheritedObject: ptr lang_object
def base_specifier
[root_qual_opt nested_name_specifier_opt type_name]
{
- addBaseSpecifier( declNs.top, r3.lookupId.obj )
+ addBaseSpecifier( declNs.top r3.lookupId.obj )
}
| ['virtual' access_specifier_opt root_qual_opt nested_name_specifier_opt type_name]
{
- addBaseSpecifier( declNs.top, r5.lookupId.obj )
+ addBaseSpecifier( declNs.top r5.lookupId.obj )
}
| [access_specifier virtual_opt root_qual_opt nested_name_specifier_opt type_name]
{
- addBaseSpecifier( declNs.top, r5.lookupId.obj )
+ addBaseSpecifier( declNs.top r5.lookupId.obj )
}
def virtual_opt
@@ -1893,7 +1893,7 @@ def tpl_start
{
# Create a new scope for the template parameters.
newTemplateParamScope: ptr lang_object =
- createLangObject( 0, '<tpl_start>', lookupNs.top )
+ createLangObject( 0 '<tpl_start>' lookupNs.top )
templateParamNs.push( newTemplateParamScope )
}
@@ -1936,8 +1936,8 @@ def type_parameter
if Id {
# The lookup ns should be a template param scope.
newClass: ptr lang_object =
- createLangObject( ClassType, Id.data, lookupNs.top )
- insertObject( templateParamNs.top, Id.data, newClass )
+ createLangObject( ClassType Id.data lookupNs.top )
+ insertObject( templateParamNs.top Id.data newClass )
}
}
@@ -1947,8 +1947,8 @@ def type_parameter
if Id {
# The lookup ns should be a template param scope.
newClass: ptr lang_object =
- createLangObject( ClassType, Id.data, lookupNs.top )
- insertObject( templateParamNs.top, Id.data, newClass )
+ createLangObject( ClassType Id.data lookupNs.top )
+ insertObject( templateParamNs.top Id.data newClass )
}
}
@@ -1958,8 +1958,8 @@ def type_parameter
Id: lookup_id = lookup_id in r7
if Id {
newClass: ptr lang_object =
- createLangObject( TemplateClassType, Id.data, lookupNs.top )
- insertObject( templateParamNs.top, Id.data, newClass )
+ createLangObject( TemplateClassType Id.data lookupNs.top )
+ insertObject( templateParamNs.top Id.data newClass )
}
}
@@ -2018,17 +2018,17 @@ def orig_namespace_def_name ['namespace' unknown_id]
{
match r2 [Id: lookup_id]
nspace: ptr lang_object = createLangObject(
- NamespaceType, Id.data, lookupNs.top )
+ NamespaceType Id.data lookupNs.top )
# Insert the new object into the dictionary of the parent.
- insertObject( curNamespace.top, Id.data, nspace )
+ insertObject( curNamespace.top Id.data nspace )
# Push the namespace
curNamespace.push( nspace )
declNs.push( nspace )
lookupNs.push( nspace )
- # LOG print( 'created original namespace: ', Id.data, '\n' )
+ # LOG print( 'created original namespace: ' Id.data '\n' )
}
def namespace_end []
@@ -2058,7 +2058,7 @@ def ext_namespace_def_name ['namespace' namespace_id]
declNs.push( nspace )
lookupNs.push( nspace )
- # LOG print( 'found extended namespace: ', Id.data, '\n' )
+ # LOG print( 'found extended namespace: ' Id.data '\n' )
}
#
@@ -2070,7 +2070,7 @@ def unnamed_namespace_definition
def unnamed_namespace_def_name ['namespace']
{
nspace: ptr lang_object = createLangObject(
- NamespaceType, '<unnamed_namespace>',
+ NamespaceType '<unnamed_namespace>'
lookupNs.top )
# Push the namespace
@@ -2136,7 +2136,7 @@ def start
int printObject( indent: str obj: ptr lang_object )
{
- print( indent, obj->name )
+ print( indent obj->name )
if obj->objectMap.length > 0
print( ' {\n' )
@@ -2144,18 +2144,18 @@ int printObject( indent: str obj: ptr lang_object )
ChildNames: object_map = obj->objectMap
for MapEl: object_list in child( ChildNames ) {
for Obj: ptr lang_object in MapEl
- printObject( indent + ' ', Obj )
+ printObject( indent + ' ' Obj )
}
if obj->objectMap.length > 0
- print( indent, '}' )
+ print( indent '}' )
print( '\n' )
}
int printNamespace()
{
- printObject( '', rootNamespace )
+ printObject( '' rootNamespace )
}
S: start = parse start( stdin )
@@ -2168,6 +2168,6 @@ for DI: declarator_id in S {
if match DI
[root_qual_opt nested_name_specifier_opt '~' UID: unknown_id]
{
- print( UID, '\n' )
+ print( UID '\n' )
}
}
diff --git a/test/div.lm b/test/div.lm
index 3c16a396..2444ca6b 100644
--- a/test/div.lm
+++ b/test/div.lm
@@ -1,6 +1,6 @@
i: int = 0
while ( i < 34 ) {
- print( i / 4, '\n' )
+ print( (i / 4) '\n' )
i = i + 1
}
diff --git a/test/dns.lm b/test/dns.lm
index 8b5efb45..f0fa305f 100644
--- a/test/dns.lm
+++ b/test/dns.lm
@@ -43,7 +43,7 @@ token RR_UNKNOWN
elsif rr_type_value == 16
id = typeid RR_TXT
- input.push( make_token( id, '' ) )
+ input.push( make_token( id '' ) )
}
# Convert two octets in network order into an unsigned 16 bit value.
@@ -97,7 +97,7 @@ def count
count: int
[octet octet]
{
- lhs.count = network_uord16( r1, r2 )
+ lhs.count = network_uord16( r1 r2 )
}
#
@@ -199,7 +199,7 @@ token nb_empty /''/
token nbytes_data
/''/
{
- input.push( make_token( typeid nbytes_data, input.pull(nbytes) ) )
+ input.push( make_token( typeid nbytes_data input.pull(nbytes) ) )
}
def nbytes
@@ -254,14 +254,14 @@ def resource_record
def rr_type
[octet octet]
{
- rr_type_value = network_uord16( r1, r2 )
+ rr_type_value = network_uord16( r1 r2 )
}
def rr_class
value: int
[octet octet]
{
- rr_class_value = network_uord16( r1, r2 )
+ rr_class_value = network_uord16( r1 r2 )
}
def ttl
@@ -270,13 +270,13 @@ def ttl
token rdata_bytes
/''/
{
- input.push( make_token( typeid rdata_bytes, input.pull(rdata_length) ) )
+ input.push( make_token( typeid rdata_bytes input.pull(rdata_length) ) )
}
def rdlength
[octet octet]
{
- rdata_length = network_uord16( r1, r2 )
+ rdata_length = network_uord16( r1 r2 )
}
global rdata_length: int
@@ -431,8 +431,8 @@ int print_RR_A( s: start )
{
for I:rdata in s {
if match I [RR_A o1:octet o2:octet o3:octet o4:octet] {
- print( 'RR_A: ', o1.data.uord8(), '.', o2.data.uord8(), '.',
- o3.data.uord8(), '.', o4.data.uord8(), '\n' )
+ print( 'RR_A: ' o1.data.uord8() '.' o2.data.uord8() '.'
+ o3.data.uord8() '.' o4.data.uord8() '\n' )
}
}
}
@@ -443,15 +443,15 @@ int print_name( n: name m: name_map )
{
for P: name_part in n {
match P [part_len D:nbytes]
- print( D, '.' )
+ print( D '.' )
}
for E:name_end in n {
if match E [o1:octet o2:octet] {
val: int = (o1.data.uord8() - 192) * 256 + o2.data.uord8()
- print( '[', val, ']' )
+ print( '[' val ']' )
nameInMap: name = m.find( val )
- print_name( nameInMap, m )
+ print_name( nameInMap m )
}
}
}
@@ -470,13 +470,13 @@ int print_all_names( s: start )
if match NP [L:octet nbytes name_part*] {
messageOffset: int = L.pos - O.pos
n: name = construct name [NP E]
- m.insert( messageOffset, n )
+ m.insert( messageOffset n )
}
}
}
for I: name in M {
- print_name( I, m )
+ print_name( I m )
print( '\n' )
}
}
diff --git a/test/func.lm b/test/func.lm
index 00aa05d4..52f6f3c5 100644
--- a/test/func.lm
+++ b/test/func.lm
@@ -24,7 +24,7 @@ int func( P: program )
int main()
{
- InputFile: stream = open( 'func.in', "r" )
+ InputFile: stream = open( 'func.in' "r" )
P: program = parse program( InputFile )
func( P )
print( P )
diff --git a/test/heredoc.lm b/test/heredoc.lm
index a8f7e149..59ae2bd8 100644
--- a/test/heredoc.lm
+++ b/test/heredoc.lm
@@ -11,11 +11,11 @@ lex start
{
if HereId && HereId == match_text {
input.push( make_token(
- typeid here_close,
+ typeid here_close
input.pull(match_length - 1) ) )
}
else {
- input.push( make_token( typeid id, input.pull(match_length) ) )
+ input.push( make_token( typeid id input.pull(match_length) ) )
}
}
diff --git a/test/html/html.lm b/test/html/html.lm
index 46789900..e680c439 100644
--- a/test/html/html.lm
+++ b/test/html/html.lm
@@ -73,7 +73,7 @@ lex close_id
}
}
- input.push( make_token( send_id, input.pull(match_length) ) )
+ input.push( make_token( send_id input.pull(match_length) ) )
}
}
@@ -226,7 +226,7 @@ int printLinks( Start: start )
for Attr: attr in AttrList {
if match Attr ["href = " AttrVal: attr_val]
- print( 'link: ', I, '\ntarget: ', AttrVal, '\n\n' )
+ print( 'link: ' I '\ntarget: ' AttrVal '\n\n' )
}
}
}
diff --git a/test/liftattrs.lm b/test/liftattrs.lm
index 5224e4fa..7019c41b 100644
--- a/test/liftattrs.lm
+++ b/test/liftattrs.lm
@@ -71,4 +71,4 @@ for AttrListIter:attr_list in RootItemList {
IL = construct item_list
["<wrapper" CollectedAttrs ">" RootItemList "</wrapper>"]
-print( IL, '\n' )
+print( IL '\n' )
diff --git a/test/matchex.lm b/test/matchex.lm
index ed7e9f56..2212f4bc 100644
--- a/test/matchex.lm
+++ b/test/matchex.lm
@@ -29,6 +29,6 @@ match Tag ["<person name=" Val1:id attr*">" item* "</person>"]
# Style: Literal text with embedded lists of types.
match Tag "<person name=[Val2:id attr*]>[item*]</person>"
-print( Val1, '\n' )
-print( Val2, '\n' )
+print( Val1 '\n' )
+print( Val2 '\n' )
diff --git a/test/nestedcomm.lm b/test/nestedcomm.lm
index 3ea26e2c..3249d543 100644
--- a/test/nestedcomm.lm
+++ b/test/nestedcomm.lm
@@ -39,4 +39,4 @@ def nested [id*]
P: nested = parse nested( stdin )
print_xml( P )
print_xml_ac( P )
-print( P, '\n' )
+print( P '\n' )
diff --git a/test/python/python.lm b/test/python/python.lm
index ad52ce63..c3284092 100644
--- a/test/python/python.lm
+++ b/test/python/python.lm
@@ -78,7 +78,7 @@ lex start
{
# Need to shorten to take off the newline.
# Turn it into ignore.
- input.push_ignore( make_token( typeid WS, input.pull(match_length - 1) ) )
+ input.push_ignore( make_token( typeid WS input.pull(match_length - 1) ) )
}
# Find and ignore comments.
@@ -86,7 +86,7 @@ lex start
/ '#' [^\n]* '\n' /
{
# Need to shorten to take off the newline. Turn it into ignore.
- input.push_ignore( make_token( typeid WS, input.pull(match_length - 1) ) )
+ input.push_ignore( make_token( typeid WS input.pull(match_length - 1) ) )
}
# These tokens are generated
@@ -99,7 +99,7 @@ lex start
/'\n' [ \t]*/
{
# We have squared up INDENTs and DEDENTs. Ignore the entire match.
- input.push_ignore( make_token( typeid WS, input.pull(match_length) ) )
+ input.push_ignore( make_token( typeid WS input.pull(match_length) ) )
# We have already sent the newline, compute the indentation level.
data_length: int = match_length - 1
@@ -107,7 +107,7 @@ lex start
if data_length > IndentStack.top {
# The indentation level is more than the level on the top
# of the stack. This is an indent event. Send as an INDENT.
- input.push( make_token( typeid INDENT, '' ) )
+ input.push( make_token( typeid INDENT '' ) )
# Push to the stack as per python manual.
IndentStack.push( data_length )
@@ -120,7 +120,7 @@ lex start
IndentStack.pop()
# Send as a DEDENT
- input.push( make_token( typeid DEDENT, '' ) )
+ input.push( make_token( typeid DEDENT '' ) )
}
}
@@ -128,7 +128,7 @@ lex start
# means the outdent does not match anything.
# First the newline.
- input.push( make_token( typeid NEWLINE, '' ) )
+ input.push( make_token( typeid NEWLINE '' ) )
}
}
@@ -139,15 +139,15 @@ int print_target_subscriptions_and_slicings( Start: start )
{
for TI: target_ext in Start {
if match TI [subscription] {
- print( 'TARGET SUBSCRIPTION: ', TI, '\n' )
+ print( 'TARGET SUBSCRIPTION: ' TI '\n' )
}
if match TI [simple_slicing] {
- print( 'TARGET SIMPLE SLICING: ', TI, '\n' )
+ print( 'TARGET SIMPLE SLICING: ' TI '\n' )
}
if match TI [extended_slicing] {
- print( 'TARGET EXTENDED SLICING: ', TI, '\n' )
+ print( 'TARGET EXTENDED SLICING: ' TI '\n' )
}
}
@@ -157,15 +157,15 @@ int print_primary_subscriptions_and_slicings( Start: start )
{
for PI: primary_ext in Start {
if match PI [subscription] {
- print( 'PRIMARY SUBSCRIPTION: ', PI, '\n' )
+ print( 'PRIMARY SUBSCRIPTION: ' PI '\n' )
}
if match PI [simple_slicing] {
- print( 'PRIMARY SIMPLE SLICING: ', PI, '\n' )
+ print( 'PRIMARY SIMPLE SLICING: ' PI '\n' )
}
if match PI [extended_slicing] {
- print( 'PRIMARY EXTENDED SLICING: ', PI, '\n' )
+ print( 'PRIMARY EXTENDED SLICING: ' PI '\n' )
}
}
}
@@ -722,12 +722,12 @@ def keyword_item
int print_stmts( S: start )
{
for Stmt: statement in S
- print( 'STMT: ', Stmt, '\n' )
+ print( 'STMT: ' Stmt '\n' )
}
S: start = parse start( stdin )
print_xml( S )
-print( S, '\n' )
+print( S '\n' )
print_stmts( S )
print_target_subscriptions_and_slicings( S )
print_primary_subscriptions_and_slicings( S )
diff --git a/test/rediv.lm b/test/rediv.lm
index cb78ce72..4162a4c0 100644
--- a/test/rediv.lm
+++ b/test/rediv.lm
@@ -88,7 +88,7 @@ S: start = parse start( stdin )
for I:orlit_item in S {
if match I [orlit_chr] {
- print( I, '\n' )
+ print( I '\n' )
}
}
print_xml( S )
diff --git a/test/reparse.lm b/test/reparse.lm
index 340327c9..454bc4e7 100644
--- a/test/reparse.lm
+++ b/test/reparse.lm
@@ -18,5 +18,5 @@ S: start = cons start[ Input ]
Again: start = parse start( Input )
-print( Again, '\n' )
+print( Again '\n' )
diff --git a/test/repeat.lm b/test/repeat.lm
index efde2957..20838ea4 100644
--- a/test/repeat.lm
+++ b/test/repeat.lm
@@ -16,8 +16,8 @@ Input: start = parse start( stdin )
match Input [ItemList: item*]
-for I1: item* in repeat( ItemList )
- print( I1, '\n' )
+for I: item* in repeat( ItemList )
+ print( I '\n' )
-for I2: item* in rev_repeat( ItemList )
- print( I2, '\n' )
+for I: item* in rev_repeat( ItemList )
+ print( I '\n' )
diff --git a/test/rubyhere.lm b/test/rubyhere.lm
index 7a39b96b..2587ecd9 100644
--- a/test/rubyhere.lm
+++ b/test/rubyhere.lm
@@ -33,7 +33,7 @@ lex here_start
input.push( $ROL )
# Send the here_id token. Attach the heredoc data as an attribute.
- input.push( make_token( typeid here_id, HereId, HereData ) )
+ input.push( make_token( typeid here_id HereId HereData ) )
}
}
@@ -44,11 +44,11 @@ lex here_data
{
if match_text == HereId + '\n' {
input.push( make_token(
- typeid here_close_id,
+ typeid here_close_id
input.pull( match_length ) ) )
}
else
- input.push( make_token( typeid here_line, input.pull(match_length) ) )
+ input.push( make_token( typeid here_line input.pull(match_length) ) )
}
token here_line
diff --git a/test/sprintf.lm b/test/sprintf.lm
index d9293093..9af9817c 100644
--- a/test/sprintf.lm
+++ b/test/sprintf.lm
@@ -1 +1 @@
-print( sprintf( "%08x\n", 256 + 11 * 16 ) )
+print( sprintf( "%08x\n" (256 + 11 * 16) ) )
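
The added parentheses here (and in test/div.lm above) follow from the same change: without commas, a compound arithmetic argument must be grouped so it parses as a single element of the space-separated expression list. A minimal sketch using the line from this hunk:

    # Comma form (before): the ',' delimited the arguments.
    #   print( sprintf( "%08x\n", 256 + 11 * 16 ) )

    # Space form (after): parentheses keep the arithmetic a single list element.
    print( sprintf( "%08x\n" (256 + 11 * 16) ) )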
diff --git a/test/superid.lm b/test/superid.lm
index 2dbb1bde..9ceecb53 100644
--- a/test/superid.lm
+++ b/test/superid.lm
@@ -5,7 +5,7 @@ lex start
token id /'a'|'b'/
{
- input.push( make_token( trans_id_to, input.pull(match_length) ) )
+ input.push( make_token( trans_id_to input.pull(match_length) ) )
}
token super_id //
@@ -19,9 +19,9 @@ global trans_id_to: int
def e1
[]
{
- print( 'old_id = ', trans_id_to, '\n' )
+ print( 'old_id = ' trans_id_to '\n' )
trans_id_to = typeid foo
- print( 'new_id = ', trans_id_to, '\n' )
+ print( 'new_id = ' trans_id_to '\n' )
}
def item1
@@ -35,9 +35,9 @@ def item1
def e2
[]
{
- print( 'old_id = ', trans_id_to, '\n' )
+ print( 'old_id = ' trans_id_to '\n' )
trans_id_to = typeid super_id
- print( 'new_id = ', trans_id_to, '\n' )
+ print( 'new_id = ' trans_id_to '\n' )
}
def item2
diff --git a/test/til.lm b/test/til.lm
index b2f63a9f..c969e3db 100644
--- a/test/til.lm
+++ b/test/til.lm
@@ -165,4 +165,4 @@ for S: statement* in P
}
}
-print(P, '\n')
+print(P '\n')
diff --git a/test/translate1.lm b/test/translate1.lm
index a87b7583..f7c5dc9b 100644
--- a/test/translate1.lm
+++ b/test/translate1.lm
@@ -5,7 +5,7 @@ lex start
token id /[a-zA-Z_]+/
{
t: str = input.pull( match_length )
- input.push( make_token( typeid id, t ) )
+ input.push( make_token( typeid id t ) )
}
}
@@ -20,5 +20,5 @@ def start
[item*]
Input: start = parse start( stdin )
-print( Input, '\n' )
+print( Input '\n' )
diff --git a/test/translate2.lm b/test/translate2.lm
index 74c31874..6c6947e1 100644
--- a/test/translate2.lm
+++ b/test/translate2.lm
@@ -24,9 +24,9 @@ context ctx
token ddd /'...'/ {
print('translating\n')
input.pull( match_length )
- input.push( make_token( typeid id, "dot" ) )
- input.push( make_token( typeid id, "dot" ) )
- input.push( make_token( typeid id, "dot" ) )
+ input.push( make_token( typeid id "dot" ) )
+ input.push( make_token( typeid id "dot" ) )
+ input.push( make_token( typeid id "dot" ) )
}
}
@@ -49,6 +49,6 @@ context ctx
}
CTX: ctx = cons ctx []
-Input: ctx::start = parse ctx::start( CTX, stdin )
+Input: ctx::start = parse ctx::start( CTX stdin )
print( Input )
diff --git a/test/travs1.lm b/test/travs1.lm
index d3e7bf6b..bae33e1b 100644
--- a/test/travs1.lm
+++ b/test/travs1.lm
@@ -141,23 +141,23 @@ iter bottomup_rightleft( T: ref any )
print( 'bottomup_leftright\n' )
for T1: any in bottomup_leftright( S )
{
- print( T1, '\n' )
+ print( T1 '\n' )
}
print( 'bottomup_rightleft\n' )
for T2: any in bottomup_rightleft( S )
{
- print( T2, '\n' )
+ print( T2 '\n' )
}
print( 'topdown_leftright\n' )
for T3: any in topdown_leftright( S )
{
- print( T3, '\n' )
+ print( T3 '\n' )
}
print( 'topdown_rightleft\n' )
for T4: any in topdown_rightleft( S )
{
- print( T4, '\n' )
+ print( T4 '\n' )
}
diff --git a/test/travs2.lm b/test/travs2.lm
index 452e808a..18c9902d 100644
--- a/test/travs2.lm
+++ b/test/travs2.lm
@@ -92,7 +92,7 @@ iter fixed_point( ref any T )
for T: any in fixed_point( S )
{
- print( T, '\n' )
+ print( T '\n' )
}
-print( S, '\n' )
+print( S '\n' )
diff --git a/test/treecmp1.lm b/test/treecmp1.lm
index 016beed3..debdf6b2 100644
--- a/test/treecmp1.lm
+++ b/test/treecmp1.lm
@@ -16,5 +16,5 @@ Input: four_ids = parse four_ids( stdin )
for Id: id in Input {
if ( Id == B )
- print( B, '\n' )
+ print( B '\n' )
}
diff --git a/test/undofrag1.lm b/test/undofrag1.lm
index 8821607d..1382199c 100644
--- a/test/undofrag1.lm
+++ b/test/undofrag1.lm
@@ -48,9 +48,9 @@ context ctx
SP << "a b{c}"
CTX: ctx = cons ctx []
-Input: ctx::start = parse ctx::start( CTX, stdin )
+Input: ctx::start = parse ctx::start( CTX stdin )
SP << "{e}f g"
print( Input )
-print( SP.finish(), '\n' )
+print( SP.finish() '\n' )
diff --git a/test/xml/xml.lm b/test/xml/xml.lm
index d7e90771..2cedcf21 100644
--- a/test/xml/xml.lm
+++ b/test/xml/xml.lm
@@ -161,7 +161,7 @@ for Switch:tag in S {
if match Text
["<text>" TextContent:content "</text>"]
{
- print( ' ', TextContent, '\n' )
+ print( ' ' TextContent '\n' )
}
}
}