summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--CHANGES444
-rw-r--r--CODE_OF_CONDUCT.rst180
-rw-r--r--docs/HowToUsePyparsing.rst374
-rw-r--r--examples/0README.html26
-rw-r--r--examples/LAparser.py124
-rw-r--r--examples/Setup.ini4
-rw-r--r--examples/SimpleCalc.py34
-rw-r--r--examples/SingleForm.dfm2
-rw-r--r--examples/TAP.py20
-rw-r--r--examples/adventureEngine.py120
-rw-r--r--examples/antlr_grammar.py16
-rw-r--r--examples/apicheck.py5
-rw-r--r--examples/cLibHeader.py4
-rw-r--r--examples/chemicalFormulas.py5
-rw-r--r--examples/configParse.py21
-rw-r--r--examples/cpp_enum_parser.py2
-rw-r--r--examples/datetimeParseActions.py18
-rw-r--r--examples/deltaTime.py48
-rw-r--r--examples/dfmparse.py12
-rw-r--r--examples/dhcpd_leases_parser.py4
-rw-r--r--examples/dictExample.py4
-rw-r--r--examples/dictExample2.py6
-rw-r--r--examples/ebnf.py2
-rw-r--r--examples/ebnftest.py6
-rw-r--r--examples/eval_arith.py108
-rw-r--r--examples/excelExpr.py24
-rw-r--r--examples/fourFn.py20
-rw-r--r--examples/gen_ctypes.py18
-rw-r--r--examples/getNTPserversNew.py4
-rw-r--r--examples/greetingInGreek.py5
-rw-r--r--examples/greetingInKorean.py2
-rw-r--r--examples/holaMundo.py43
-rw-r--r--examples/htmlStripper.py8
-rw-r--r--examples/httpServerLogParser.py28
-rw-r--r--examples/idlParse.py46
-rw-r--r--examples/indentedGrammarExample.py3
-rw-r--r--examples/invRegex.py20
-rw-r--r--examples/javascript_grammar.g156
-rw-r--r--examples/jsonParser.py32
-rw-r--r--examples/linenoExample.py6
-rw-r--r--examples/listAllMatches.py1
-rw-r--r--examples/lucene_grammar.py8
-rw-r--r--examples/macroExpander.py6
-rw-r--r--examples/matchPreviousDemo.py8
-rw-r--r--examples/nested.py13
-rw-r--r--examples/numerics.py48
-rw-r--r--examples/oc.py10
-rw-r--r--examples/parsePythonValue.py4
-rw-r--r--examples/parseResultsSumExample.py2
-rw-r--r--examples/parseTabularData.py14
-rw-r--r--examples/partial_gene_match.py130
-rw-r--r--examples/position.py16
-rw-r--r--examples/protobuf_parser.py18
-rw-r--r--examples/pymicko.py86
-rw-r--r--examples/pythonGrammarParser.py16
-rw-r--r--examples/rangeCheck.py6
-rw-r--r--examples/readJson.py11
-rw-r--r--examples/removeLineBreaks.py2
-rw-r--r--examples/romanNumerals.py14
-rw-r--r--examples/scanExamples.py8
-rw-r--r--examples/searchParserAppDemo.py2
-rw-r--r--examples/searchparser.py34
-rw-r--r--examples/select_parser.py28
-rw-r--r--examples/sexpParser.py32
-rw-r--r--examples/shapes.py4
-rw-r--r--examples/simpleArith.py7
-rw-r--r--examples/simpleBool.py2
-rw-r--r--examples/simpleSQL.py8
-rw-r--r--examples/simpleWiki.py4
-rw-r--r--examples/snmp_api.h10
-rw-r--r--examples/sparser.py84
-rw-r--r--examples/sql2dot.py20
-rw-r--r--examples/stackish.py48
-rw-r--r--examples/stateMachine2.py54
-rw-r--r--examples/urlExtractor.py7
-rw-r--r--examples/urlExtractorNew.py7
-rw-r--r--examples/verilogParse.py10
-rw-r--r--examples/withAttribute.py2
-rw-r--r--examples/wordsToNum.py8
-rw-r--r--makeRelease.bat1
-rwxr-xr-xscrutinizer-pyenv.sh4
-rw-r--r--simple_unit_tests.py22
-rw-r--r--test/jsonParserTests.py7
-rw-r--r--test/karthik.ini2
-rw-r--r--tox.ini1
-rw-r--r--unitTests.py446
-rw-r--r--update_pyparsing_timestamp.py1
87 files changed, 1617 insertions, 1663 deletions
diff --git a/CHANGES b/CHANGES
index ec877d8..c77b7ad 100644
--- a/CHANGES
+++ b/CHANGES
@@ -58,12 +58,12 @@ Version 2.3.0 - October, 2018
This namespace class also offers access to these sets using their
unicode identifiers.
-- POSSIBLE API CHANGE: Fixed bug where a parse action that explicitly
+- POSSIBLE API CHANGE: Fixed bug where a parse action that explicitly
returned the input ParseResults could add another nesting level in
the results if the current expression had a results name.
vals = pp.OneOrMore(pp.pyparsing_common.integer)("int_values")
-
+
def add_total(tokens):
tokens['total'] = sum(tokens)
return tokens # this line can be removed
@@ -72,7 +72,7 @@ Version 2.3.0 - October, 2018
print(vals.parseString("244 23 13 2343").dump())
Before the fix, this code would print (note the extra nesting level):
-
+
[244, 23, 13, 2343]
- int_values: [244, 23, 13, 2343]
- int_values: [244, 23, 13, 2343]
@@ -80,33 +80,33 @@ Version 2.3.0 - October, 2018
- total: 2623
With the fix, this code now prints:
-
+
[244, 23, 13, 2343]
- int_values: [244, 23, 13, 2343]
- total: 2623
- This fix will change the structure of ParseResults returned if a
- program defines a parse action that returns the tokens that were
- sent in. This is not necessary, and statements like "return tokens"
- in the example above can be safely deleted prior to upgrading to
+ This fix will change the structure of ParseResults returned if a
+ program defines a parse action that returns the tokens that were
+ sent in. This is not necessary, and statements like "return tokens"
+ in the example above can be safely deleted prior to upgrading to
this release, in order to avoid the bug and get the new behavior.
Reported by seron in Issue #22, nice catch!
-- POSSIBLE API CHANGE: Fixed a related bug where a results name
- erroneously created a second level of hierarchy in the returned
+- POSSIBLE API CHANGE: Fixed a related bug where a results name
+ erroneously created a second level of hierarchy in the returned
ParseResults. The intent for accumulating results names into ParseResults
is that, in the absence of Group'ing, all names get merged into a
common namespace. This allows us to write:
-
+
key_value_expr = (Word(alphas)("key") + '=' + Word(nums)("value"))
- result = key_value_expr.parseString("a = 100")
-
- and have result structured as {"key": "a", "value": "100"}
+ result = key_value_expr.parseString("a = 100")
+
+ and have result structured as {"key": "a", "value": "100"}
instead of [{"key": "a"}, {"value": "100"}].
-
- However, if a named expression is used in a higher-level non-Group
- expression that *also* has a name, a false sub-level would be created
+
+ However, if a named expression is used in a higher-level non-Group
+ expression that *also* has a name, a false sub-level would be created
in the namespace:
num = pp.Word(pp.nums)
@@ -116,14 +116,14 @@ Version 2.3.0 - October, 2018
Since there is no grouping, "A", "B", and "values" should all appear
at the same level in the results, as:
-
+
['[', '10', '20', ']']
- A: '10'
- B: '20'
- values: ['10', '20']
Instead, an extra level of "A" and "B" show up under "values":
-
+
['[', '10', '20', ']']
- A: '10'
- B: '20'
@@ -133,11 +133,11 @@ Version 2.3.0 - October, 2018
This bug has been fixed. Now, if this hierarchy is desired, then a
Group should be added:
-
+
num_pair = ("[" + pp.Group(num("A") + num("B"))("values") + "]")
Giving:
-
+
['[', ['10', '20'], ']']
- values: ['10', '20']
- A: '10'
@@ -145,52 +145,52 @@ Version 2.3.0 - October, 2018
But in no case should "A" and "B" appear in multiple levels. This bug-fix
fixes that.
-
+
If you have current code which relies on this behavior, then add or remove
Groups as necessary to get your intended results structure.
-
+
Reported by Athanasios Anastasiou.
-- IndexError's raised in parse actions will get explicitly reraised
- as ParseExceptions that wrap the original IndexError. Since
- IndexError sometimes occurs as part of pyparsing's normal parsing
+- IndexError's raised in parse actions will get explicitly reraised
+ as ParseExceptions that wrap the original IndexError. Since
+ IndexError sometimes occurs as part of pyparsing's normal parsing
logic, IndexErrors that are raised during a parse action may have
- gotten silently reinterpreted as parsing errors. To retain the
- information from the IndexError, these exceptions will now be
- raised as ParseExceptions that reference the original IndexError.
+ gotten silently reinterpreted as parsing errors. To retain the
+ information from the IndexError, these exceptions will now be
+ raised as ParseExceptions that reference the original IndexError.
This wrapping will only be visible when run under Python3, since it
- emulates "raise ... from ..." syntax.
-
+ emulates "raise ... from ..." syntax.
+
Addresses Issue #4, reported by guswns0528.
- Added Char class to simplify defining expressions of a single
character. (Char("abc") is equivalent to Word("abc", exact=1))
-- Added class PrecededBy to perform lookbehind tests. PrecededBy is
+- Added class PrecededBy to perform lookbehind tests. PrecededBy is
used in the same way as FollowedBy, passing in an expression that
must occur just prior to the current parse location.
-
- For fixed-length expressions like a Literal, Keyword, Char, or a
- Word with an `exact` or `maxLen` length given, `PrecededBy(expr)`
- is sufficient. For varying length expressions like a Word with no
- given maximum length, `PrecededBy` must be constructed with an
- integer `retreat` argument, as in
- `PrecededBy(Word(alphas, nums), retreat=10)`, to specify the maximum
- number of characters pyparsing must look backward to make a match.
- pyparsing will check all the values from 1 up to retreat characters
+
+ For fixed-length expressions like a Literal, Keyword, Char, or a
+ Word with an `exact` or `maxLen` length given, `PrecededBy(expr)`
+ is sufficient. For varying length expressions like a Word with no
+ given maximum length, `PrecededBy` must be constructed with an
+ integer `retreat` argument, as in
+ `PrecededBy(Word(alphas, nums), retreat=10)`, to specify the maximum
+ number of characters pyparsing must look backward to make a match.
+ pyparsing will check all the values from 1 up to retreat characters
back from the current parse location.
- When stepping backwards through the input string, PrecededBy does
+ When stepping backwards through the input string, PrecededBy does
*not* skip over whitespace.
-
+
PrecededBy can be created with a results name so that, even though
it always returns an empty parse result, the result *can* include
named results.
-
+
Idea first suggested in Issue #30 by Freakwill.
- Updated FollowedBy to accept expressions that contain named results,
- so that results names defined in the lookahead expression will be
+ so that results names defined in the lookahead expression will be
returned, even though FollowedBy always returns an empty list.
Inspired by the same feature implemented in PrecededBy.
@@ -198,14 +198,14 @@ Version 2.3.0 - October, 2018
Version 2.2.2 - September, 2018
-------------------------------
- Fixed bug in SkipTo, if a SkipTo expression that was skipping to
- an expression that returned a list (such as an And), and the
- SkipTo was saved as a named result, the named result could be
+ an expression that returned a list (such as an And), and the
+ SkipTo was saved as a named result, the named result could be
saved as a ParseResults - should always be saved as a string.
Issue #28, reported by seron.
-- Added simple_unit_tests.py, as a collection of easy-to-follow unit
- tests for various classes and features of the pyparsing library.
- Primary intent is more to be instructional than actually rigorous
+- Added simple_unit_tests.py, as a collection of easy-to-follow unit
+ tests for various classes and features of the pyparsing library.
+ Primary intent is more to be instructional than actually rigorous
testing. Complex tests can still be added in the unitTests.py file.
- New features added to the Regex class:
@@ -213,13 +213,13 @@ Version 2.2.2 - September, 2018
a list
- optional asMatch parameter, returns the raw re.match result
- new sub(repl) method, which adds a parse action calling
- re.sub(pattern, repl, parsed_result). Simplifies creating
+ re.sub(pattern, repl, parsed_result). Simplifies creating
Regex expressions to be used with transformString. Like re.sub,
- repl may be an ordinary string (similar to using pyparsing's
- replaceWith), or may contain references to capture groups by group
- number, or may be a callable that takes an re match group and
+ repl may be an ordinary string (similar to using pyparsing's
+ replaceWith), or may contain references to capture groups by group
+ number, or may be a callable that takes an re match group and
returns a string.
-
+
For instance:
expr = pp.Regex(r"([Hh]\d):\s*(.*)").sub(r"<\1>\2</\1>")
expr.transformString("h1: This is the title")
@@ -227,7 +227,7 @@ Version 2.2.2 - September, 2018
will return
<h1>This is the title</h1>
-- Fixed omission of LICENSE file in source tarball, also added
+- Fixed omission of LICENSE file in source tarball, also added
CODE_OF_CONDUCT.md per GitHub community standards.
@@ -248,7 +248,7 @@ Version 2.2.1 - September, 2018
- Fixed bug in select_parser.py example, group_by_terms was not
reported. Reported on SF bugs by Adam Groszer, thanks Adam!
-- Added "Getting Started" section to the module docstring, to
+- Added "Getting Started" section to the module docstring, to
guide new users to the most common starting points in pyparsing's
API.
@@ -309,7 +309,7 @@ Version 2.1.10 - October, 2016
Version 2.1.9 - September, 2016
-------------------------------
-- Added class CloseMatch, a variation on Literal which matches
+- Added class CloseMatch, a variation on Literal which matches
"close" matches, that is, strings with at most 'n' mismatching
characters.
@@ -317,13 +317,13 @@ Version 2.1.9 - September, 2016
Shinji - nice catch, thanks!
- Minor API change in pyparsing_common. Renamed some of the common
- expressions to PEP8 format (to be consistent with the other
+ expressions to PEP8 format (to be consistent with the other
pyparsing_common expressions):
. signedInteger -> signed_integer
. sciReal -> sci_real
-
+
Also, in trying to stem the API bloat of pyparsing, I've copied
- some of the global expressions and helper parse actions into
+ some of the global expressions and helper parse actions into
pyparsing_common, with the originals to be deprecated and removed
in a future release:
. commaSeparatedList -> pyparsing_common.comma_separated_list
@@ -334,15 +334,15 @@ Version 2.1.9 - September, 2016
quotedString, or the Word-helping strings like alphas, nums, etc.
to migrate to pyparsing_common - they are just too pervasive. As for
the PEP8 vs camelCase naming, all the expressions are PEP8, while
- the parse actions in pyparsing_common are still camelCase. It's a
+ the parse actions in pyparsing_common are still camelCase. It's a
small step - when pyparsing 3.0 comes around, everything will change
to PEP8 snake case.)
- Fixed Python3 compatibility bug when using dict keys() and values()
in ParseResults.getName().
-- After some prodding, I've reworked the unitTests.py file for
- pyparsing over the past few releases. It uses some variations on
+- After some prodding, I've reworked the unitTests.py file for
+ pyparsing over the past few releases. It uses some variations on
unittest to handle my testing style. The test now:
. auto-discovers its test classes (while maintining their order
of definition)
@@ -351,19 +351,19 @@ Version 2.1.9 - September, 2016
Version 2.1.8 - August, 2016
----------------------------
-- Fixed issue in the optimization to _trim_arity, when the full
+- Fixed issue in the optimization to _trim_arity, when the full
stacktrace is retrieved to determine if a TypeError is raised in
pyparsing or in the caller's parse action. Code was traversing
the full stacktrace, and potentially encountering UnicodeDecodeError.
-- Fixed bug in ParserElement.inlineLiteralsUsing, causing infinite
+- Fixed bug in ParserElement.inlineLiteralsUsing, causing infinite
loop with Suppress.
- Fixed bug in Each, when merging named results from multiple
expressions in a ZeroOrMore or OneOrMore. Also fixed bug when
- ZeroOrMore expressions were erroneously treated as required
+ ZeroOrMore expressions were erroneously treated as required
expressions in an Each expression.
-
+
- Added a few more inline doc examples.
- Improved use of runTests in several example scripts.
@@ -371,36 +371,36 @@ Version 2.1.8 - August, 2016
Version 2.1.7 - August, 2016
----------------------------
-- Fixed regression reported by Andrea Censi (surfaced in PyContracts
+- Fixed regression reported by Andrea Censi (surfaced in PyContracts
tests) when using ParseSyntaxExceptions (raised when using operator '-')
with packrat parsing.
- Minor fix to oneOf, to accept all iterables, not just space-delimited
- strings and lists. (If you have a list or set of strings, it is
+ strings and lists. (If you have a list or set of strings, it is
not necessary to concat them using ' '.join to pass them to oneOf,
oneOf will accept the list or set or generator directly.)
Version 2.1.6 - August, 2016
----------------------------
-- *Major packrat upgrade*, inspired by patch provided by Tal Einat -
- many, many, thanks to Tal for working on this! Tal's tests show
- faster parsing performance (2X in some tests), *and* memory reduction
- from 3GB down to ~100MB! Requires no changes to existing code using
+- *Major packrat upgrade*, inspired by patch provided by Tal Einat -
+ many, many, thanks to Tal for working on this! Tal's tests show
+ faster parsing performance (2X in some tests), *and* memory reduction
+ from 3GB down to ~100MB! Requires no changes to existing code using
packratting. (Uses OrderedDict, available in Python 2.7 and later.
For Python 2.6 users, will attempt to import from ordereddict
backport. If not present, will implement pure-Python Fifo dict.)
- Minor API change - to better distinguish between the flexible
- numeric types defined in pyparsing_common, I've changed "numeric"
- (which parsed numbers of different types and returned int for ints,
+ numeric types defined in pyparsing_common, I've changed "numeric"
+ (which parsed numbers of different types and returned int for ints,
float for floats, etc.) and "number" (which parsed numbers of int
or float type, and returned all floats) to "number" and "fnumber"
respectively. I hope the "f" prefix of "fnumber" will be a better
- indicator of its internal conversion of parsed values to floats,
+ indicator of its internal conversion of parsed values to floats,
while the generic "number" is similar to the flexible number syntax
in other languages. Also fixed a bug in pyparsing_common.numeric
- (now renamed to pyparsing_common.number), integers were parsed and
+ (now renamed to pyparsing_common.number), integers were parsed and
returned as floats instead of being retained as ints.
- Fixed bug in upcaseTokens and downcaseTokens introduced in 2.1.5,
@@ -408,71 +408,71 @@ Version 2.1.6 - August, 2016
Reported by Steven Arcangeli from the dql project, thanks for your
patience, Steven!
-- Major change to docs! After seeing some comments on reddit about
- general issue with docs of Python modules, and thinking that I'm a
+- Major change to docs! After seeing some comments on reddit about
+ general issue with docs of Python modules, and thinking that I'm a
little overdue in doing some doc tuneup on pyparsing, I decided to
following the suggestions of the redditor and add more inline examples
- to the pyparsing reference documentation. I hope this addition
+ to the pyparsing reference documentation. I hope this addition
will clarify some of the more common questions people have, especially
when first starting with pyparsing/Python.
- Deprecated ParseResults.asXML. I've never been too happy with this
method, and it usually forces some unnatural code in the parsers in
- order to get decent tag names. The amount of guesswork that asXML
+ order to get decent tag names. The amount of guesswork that asXML
has to do to try to match names with values should have been a red
- flag from day one. If you are using asXML, you will need to implement
+ flag from day one. If you are using asXML, you will need to implement
your own ParseResults->XML serialization. Or consider migrating to
- a more current format such as JSON (which is very easy to do:
+ a more current format such as JSON (which is very easy to do:
results_as_json = json.dumps(parse_result.asDict()) Hopefully, when
- I remove this code in a future version, I'll also be able to simplify
- some of the craziness in ParseResults, which IIRC was only there to try
+ I remove this code in a future version, I'll also be able to simplify
+ some of the craziness in ParseResults, which IIRC was only there to try
to make asXML work.
- Updated traceParseAction parse action decorator to show the repr
- of the input and output tokens, instead of the str format, since
- str has been simplified to just show the token list content.
-
+ of the input and output tokens, instead of the str format, since
+ str has been simplified to just show the token list content.
+
(The change to ParseResults.__str__ occurred in pyparsing 2.0.4, but
it seems that didn't make it into the release notes - sorry! Too
- many users, especially beginners, were confused by the
- "([token_list], {names_dict})" str format for ParseResults, thinking
- they were getting a tuple containing a list and a dict. The full form
+ many users, especially beginners, were confused by the
+ "([token_list], {names_dict})" str format for ParseResults, thinking
+ they were getting a tuple containing a list and a dict. The full form
can be seen if using repr().)
- For tracing tokens in and out of parse actions, the more complete
+ For tracing tokens in and out of parse actions, the more complete
repr form provides important information when debugging parse actions.
Verison 2.1.5 - June, 2016
------------------------------
-- Added ParserElement.split() generator method, similar to re.split().
+- Added ParserElement.split() generator method, similar to re.split().
Includes optional arguments maxsplit (to limit the number of splits),
- and includeSeparators (to include the separating matched text in the
+ and includeSeparators (to include the separating matched text in the
returned output, default=False).
- Added a new parse action construction helper tokenMap, which will
- apply a function and optional arguments to each element in a
+ apply a function and optional arguments to each element in a
ParseResults. So this parse action:
-
+
def lowercase_all(tokens):
return [str(t).lower() for t in tokens]
OneOrMore(Word(alphas)).setParseAction(lowercase_all)
can now be written:
-
+
OneOrMore(Word(alphas)).setParseAction(tokenMap(str.lower))
Also simplifies writing conversion parse actions like:
-
+
integer = Word(nums).setParseAction(lambda t: int(t[0]))
to just:
-
+
integer = Word(nums).setParseAction(tokenMap(int))
If additional arguments are necessary, they can be included in the
call to tokenMap, as in:
-
+
hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))
- Added more expressions to pyparsing_common:
@@ -492,38 +492,38 @@ Verison 2.1.5 - June, 2016
and an output list of each test and its output lines.
- Added failureTests argument (default=False) to runTests, so that
- tests can be run that are expected failures, and runTests' success
+ tests can be run that are expected failures, and runTests' success
value will return True only if all tests *fail* as expected. Also,
parseAll now defaults to True.
- New example numerics.py, shows samples of parsing integer and real
numbers using locale-dependent formats:
- 4.294.967.295,000
- 4 294 967 295,000
- 4,294,967,295.000
-
+ 4.294.967.295,000
+ 4 294 967 295,000
+ 4,294,967,295.000
+
Version 2.1.4 - May, 2016
------------------------------
- Split out the '==' behavior in ParserElement, now implemented
- as the ParserElement.matches() method. Using '==' for string test
+ as the ParserElement.matches() method. Using '==' for string test
purposes will be removed in a future release.
- Expanded capabilities of runTests(). Will now accept embedded
- comments (default is Python style, leading '#' character, but
+ comments (default is Python style, leading '#' character, but
customizable). Comments will be emitted along with the tests and
test output. Useful during test development, to create a test string
- consisting only of test case description comments separated by
+ consisting only of test case description comments separated by
blank lines, and then fill in the test cases. Will also highlight
ParseFatalExceptions with "(FATAL)".
-- Added a 'pyparsing_common' class containing common/helpful little
- expressions such as integer, float, identifier, etc. I used this
- class as a sort of embedded namespace, to contain these helpers
+- Added a 'pyparsing_common' class containing common/helpful little
+ expressions such as integer, float, identifier, etc. I used this
+ class as a sort of embedded namespace, to contain these helpers
without further adding to pyparsing's namespace bloat.
-- Minor enhancement to traceParseAction decorator, to retain the
+- Minor enhancement to traceParseAction decorator, to retain the
parse action's name for the trace output.
- Added optional 'fatal' keyword arg to addCondition, to indicate that
@@ -545,8 +545,8 @@ Version 2.1.2 - May, 2016
- Fixed catastrophic regex backtracking in implementation of the
quoted string expressions (dblQuotedString, sglQuotedString, and
quotedString). Reported on the pyparsing wiki by webpentest,
- good catch! (Also tuned up some other expressions susceptible to the
- same backtracking problem, such as cStyleComment, cppStyleComment,
+ good catch! (Also tuned up some other expressions susceptible to the
+ same backtracking problem, such as cStyleComment, cppStyleComment,
etc.)
@@ -564,7 +564,7 @@ Version 2.1.1 - March, 2016
unit test submitted by robyschek, well done!
- Removed use of functools.partial in replaceWith, as this creates
- an ambiguous signature for the generated parse action, which fails in
+ an ambiguous signature for the generated parse action, which fails in
PyPy. Reported by Evan Hubinger, thanks Evan!
- Added default behavior to QuotedString to convert embedded '\t', '\n',
@@ -574,47 +574,47 @@ Version 2.1.1 - March, 2016
Version 2.1.0 - February, 2016
------------------------------
-- Modified the internal _trim_arity method to distinguish between
+- Modified the internal _trim_arity method to distinguish between
TypeError's raised while trying to determine parse action arity and
those raised within the parse action itself. This will clear up those
- confusing "<lambda>() takes exactly 1 argument (0 given)" error
+ confusing "<lambda>() takes exactly 1 argument (0 given)" error
messages when there is an actual TypeError in the body of the parse
action. Thanks to all who have raised this issue in the past, and
most recently to Michael Cohen, who sent in a proposed patch, and got
me to finally tackle this problem.
-- Added compatibility for pickle protocols 2-4 when pickling ParseResults.
+- Added compatibility for pickle protocols 2-4 when pickling ParseResults.
In Python 2.x, protocol 0 was the default, and protocol 2 did not work.
- In Python 3.x, protocol 3 is the default, so explicitly naming
+ In Python 3.x, protocol 3 is the default, so explicitly naming
protocol 0 or 1 was required to pickle ParseResults. With this release,
all protocols 0-4 are supported. Thanks for reporting this on StackOverflow,
Arne Wolframm, and for providing a nice simple test case!
- Added optional 'stopOn' argument to ZeroOrMore and OneOrMore, to
- simplify breaking on stop tokens that would match the repetition
- expression.
-
- It is a common problem to fail to look ahead when matching repetitive
+ simplify breaking on stop tokens that would match the repetition
+ expression.
+
+ It is a common problem to fail to look ahead when matching repetitive
tokens if the sentinel at the end also matches the repetition
expression, as when parsing "BEGIN aaa bbb ccc END" with:
-
+
"BEGIN" + OneOrMore(Word(alphas)) + "END"
Since "END" matches the repetition expression "Word(alphas)", it will
- never get parsed as the terminating sentinel. Up until now, this has
+ never get parsed as the terminating sentinel. Up until now, this has
to be resolved by the user inserting their own negative lookahead:
-
+
"BEGIN" + OneOrMore(~Literal("END") + Word(alphas)) + "END"
-
+
Using stopOn, they can more easily write:
-
+
"BEGIN" + OneOrMore(Word(alphas), stopOn="END") + "END"
-
+
The stopOn argument can be a literal string or a pyparsing expression.
Inspired by a question by Lamakaha on StackOverflow (and many previous
questions with the same negative-lookahead resolution).
-- Added expression names for many internal and builtin expressions, to
+- Added expression names for many internal and builtin expressions, to
reduce name and error message overhead during parsing.
- Converted helper lambdas to functions to refactor and add docstring
@@ -622,7 +622,7 @@ Version 2.1.0 - February, 2016
- Fixed ParseResults.asDict() to correctly convert nested ParseResults
values to dicts.
-
+
- Cleaned up some examples, fixed typo in fourFn.py identified by
aristotle2600 on reddit.
@@ -651,10 +651,10 @@ Version 2.0.7 - December, 2015
- Fixed bug in ignore() that was introduced in pyparsing 1.5.3, that would
not accept a string literal as the ignore expression.
-- Added new example parseTabularData.py to illustrate parsing of data
+- Added new example parseTabularData.py to illustrate parsing of data
formatted in columns, with detection of empty cells.
-- Updated a number of examples to more current Python and pyparsing
+- Updated a number of examples to more current Python and pyparsing
forms.
@@ -667,18 +667,18 @@ Version 2.0.6 - November, 2015
or parse actions, reported by Max Rothman - thank you, Max!
- Added optional parseAll argument to runTests, whether tests should
- require the entire input string to be parsed or not (similar to
+ require the entire input string to be parsed or not (similar to
parseAll argument to parseString). Plus a little neaten-up of the
output on Python 2 (no stray ()'s).
- Modified exception messages from MatchFirst and Or expressions. These
were formerly misleading as they would only give the first or longest
exception mismatch error message. Now the error message includes all
- the alternatives that were possible matches. Originally proposed by
+ the alternatives that were possible matches. Originally proposed by
a pyparsing user, but I've lost the email thread - finally figured out
a fairly clean way to do this.
-- Fixed a bug in Or, when a parse action on an alternative raises an
+- Fixed a bug in Or, when a parse action on an alternative raises an
exception, other potentially matching alternatives were not always tried.
Reported by TheVeryOmni on the pyparsing wiki, thanks!
@@ -690,18 +690,18 @@ Version 2.0.5 - October, 2015
-----------------------------
- (&$(@#&$(@!!!! Some "print" statements snuck into pyparsing v2.0.4,
breaking Python 3 compatibility! Fixed. Reported by jenshn, thanks!
-
+
Version 2.0.4 - October, 2015
-----------------------------
-- Added ParserElement.addCondition, to simplify adding parse actions
+- Added ParserElement.addCondition, to simplify adding parse actions
that act primarily as filters. If the given condition evaluates False,
pyparsing will raise a ParseException. The condition should be a method
with the same method signature as a parse action, but should return a
boolean. Suggested by Victor Porton, nice idea Victor, thanks!
- Slight mod to srange to accept unicode literals for the input string,
- such as "[а-яА-Я]" instead of "[\u0430-\u044f\u0410-\u042f]". Thanks
+ such as "[а-яА-Я]" instead of "[\u0430-\u044f\u0410-\u042f]". Thanks
to Alexandr Suchkov for the patch!
- Enhanced implementation of replaceWith.
@@ -715,18 +715,18 @@ Version 2.0.4 - October, 2015
adding a new function 'exp', and the leading 'e' of 'exp' was accidentally
parsed as the mathematical constant 'e'. Nice catch, Tom Grydeland - thanks!
-- Adopt new-fangled Python features, like decorators and ternary expressions,
+- Adopt new-fangled Python features, like decorators and ternary expressions,
per suggestions from Williamzjc - thanks William! (Oh yeah, I'm not
supporting Python 2.3 with this code any more...) Plus, some additional
code fixes/cleanup - thanks again!
- Added ParserElement.runTests, a little test bench for quickly running
- an expression against a list of sample input strings. Basically, I got
+ an expression against a list of sample input strings. Basically, I got
tired of writing the same test code over and over, and finally added it
as a test point method on ParserElement.
- Added withClass helper method, a simplified version of withAttribute for
- the common but annoying case when defining a filter on a div's class -
+ the common but annoying case when defining a filter on a div's class -
made difficult because 'class' is a Python reserved word.
@@ -750,7 +750,7 @@ Version 2.0.3 - October, 2014
on Sourceforge by aldanor, thanks!
- Fixed bug in ParseResults __init__ method, when returning non-ParseResults
- types from parse actions that implement __eq__. Raised during discussion
+ types from parse actions that implement __eq__. Raised during discussion
on the pyparsing wiki with cyrfer.
@@ -767,8 +767,8 @@ Version 2.0.2 - April, 2014
and prettified output. Now instead of importing the pprint module
and then writing "pprint.pprint(result)", you can just write
   "result.pprint()". This method also accepts additional positional and
- keyword arguments (such as indent, width, etc.), which get passed
- through directly to the pprint method
+ keyword arguments (such as indent, width, etc.), which get passed
+ through directly to the pprint method
(see http://docs.python.org/2/library/pprint.html#pprint.pprint).
- Removed deprecation warnings when using '<<' for Forward expression
@@ -786,33 +786,33 @@ Version 2.0.2 - April, 2014
- ParseResults emulates the change in list vs. iterator semantics for
methods like keys(), values(), and items(). Under Python 2.x, these
- methods will return lists, under Python 3.x, these methods will
+ methods will return lists, under Python 3.x, these methods will
return iterators.
- ParseResults now has a method haskeys() which returns True or False
depending on whether any results names have been defined. This simplifies
- testing for the existence of results names under Python 3.x, which
+ testing for the existence of results names under Python 3.x, which
returns keys() as an iterator, not a list.
- ParseResults now supports both list and dict semantics for pop().
If passed no argument or an integer argument, it will use list semantics
and pop tokens from the list of parsed tokens. If passed a non-integer
- argument (most likely a string), it will use dict semantics and
+ argument (most likely a string), it will use dict semantics and
pop the corresponding value from any defined results names. A
- second default return value argument is supported, just as in
+ second default return value argument is supported, just as in
dict.pop().
- Fixed bug in markInputline, thanks for reporting this, Matt Grant!
-- Cleaned up my unit test environment, now runs with Python 2.6 and
+- Cleaned up my unit test environment, now runs with Python 2.6 and
3.3.
Version 2.0.1 - July, 2013
--------------------------
-- Removed use of "nonlocal" that prevented using this version of
- pyparsing with Python 2.6 and 2.7. This will make it easier to
- install for packages that depend on pyparsing, under Python
+- Removed use of "nonlocal" that prevented using this version of
+ pyparsing with Python 2.6 and 2.7. This will make it easier to
+ install for packages that depend on pyparsing, under Python
versions 2.6 and later. Those using older versions of Python
will have to manually install pyparsing 1.5.7.
@@ -824,7 +824,7 @@ Version 2.0.1 - July, 2013
Version 2.0.0 - November, 2012
------------------------------
- Rather than release another combined Python 2.x/3.x release
- I've decided to start a new major version that is only
+ I've decided to start a new major version that is only
compatible with Python 3.x (and consequently Python 2.7 as
well due to backporting of key features). This version will
be the main development path from now on, with little follow-on
@@ -838,7 +838,7 @@ Version 2.0.0 - November, 2012
Version 1.5.7 - November, 2012
-----------------------------
-- NOTE: This is the last release of pyparsing that will try to
+- NOTE: This is the last release of pyparsing that will try to
maintain compatibility with Python versions < 2.6. The next
release of pyparsing will be version 2.0.0, using new Python
syntax that will not be compatible for Python version 2.5 or
@@ -855,12 +855,12 @@ Version 1.5.7 - November, 2012
- Fixed bug in ParseResults.__dir__ under Python 3, reported by
Thomas Kluyver, thank you Thomas!
-- Added ParserElement.inlineLiteralsUsing static method, to
+- Added ParserElement.inlineLiteralsUsing static method, to
override pyparsing's default behavior of converting string
literals to Literal instances, to use other classes (such
as Suppress or CaselessLiteral).
-- Added new operator '<<=', which will eventually replace '<<' for
+- Added new operator '<<=', which will eventually replace '<<' for
storing the contents of a Forward(). '<<=' does not have the same
operator precedence problems that '<<' does.
@@ -868,11 +868,11 @@ Version 1.5.7 - November, 2012
description of what this helper function creates. 'operatorPrecedence'
is deprecated, and will be dropped entirely in a future release.
-- Added optional arguments lpar and rpar to operatorPrecedence, so that
+- Added optional arguments lpar and rpar to operatorPrecedence, so that
expressions that use it can override the default suppression of the
grouping characters.
-- Added support for using single argument builtin functions as parse
+- Added support for using single argument builtin functions as parse
actions. Now you can write 'expr.setParseAction(len)' and get back
the length of the list of matched tokens. Supported builtins are:
sum, len, sorted, reversed, list, tuple, set, any, all, min, and max.
@@ -882,25 +882,25 @@ Version 1.5.7 - November, 2012
- Improved linking in generated docs, proposed on the pyparsing wiki
by techtonik, thanks!
-- Fixed a bug in the definition of 'alphas', which was based on the
- string.uppercase and string.lowercase "constants", which in fact
- *aren't* constant, but vary with locale settings. This could make
+- Fixed a bug in the definition of 'alphas', which was based on the
+ string.uppercase and string.lowercase "constants", which in fact
+ *aren't* constant, but vary with locale settings. This could make
parsers locale-sensitive in a subtle way. Thanks to Kef Schecter for
- his diligence in following through on reporting and monitoring
+ his diligence in following through on reporting and monitoring
this bugfix!
- Fixed a bug in the Py3 version of pyparsing, during exception
- handling with packrat parsing enabled, reported by Catherine
+ handling with packrat parsing enabled, reported by Catherine
Devlin - thanks Catherine!
-- Fixed typo in ParseBaseException.__dir__, reported anonymously on
+- Fixed typo in ParseBaseException.__dir__, reported anonymously on
the SourceForge bug tracker, thank you Pyparsing User With No Name.
- Fixed bug in srange when using '\x###' hex character codes.
-- Addeed optional 'intExpr' argument to countedArray, so that you
+- Added optional 'intExpr' argument to countedArray, so that you
can define your own expression that will evaluate to an integer,
- to be used as the count for the following elements. Allows you
+ to be used as the count for the following elements. Allows you
to define a countedArray with the count given in hex, for example,
by defining intExpr as "Word(hexnums).setParseAction(int(t[0],16))".
@@ -908,7 +908,7 @@ Version 1.5.7 - November, 2012
Version 1.5.6 - June, 2011
----------------------------
- Cleanup of parse action normalizing code, to be more version-tolerant,
- and robust in the face of future Python versions - much thanks to
+ and robust in the face of future Python versions - much thanks to
Raymond Hettinger for this rewrite!
- Removal of exception caching, addressing a memory leak condition
@@ -916,7 +916,7 @@ Version 1.5.6 - June, 2011
their analysis and work on this problem!
- Fixed bug when using packrat parsing, where a previously parsed
- expression would duplicate subsequent tokens - reported by Frankie
+ expression would duplicate subsequent tokens - reported by Frankie
Ribery on stackoverflow, thanks!
- Added 'ungroup' helper method, to address token grouping done
@@ -924,7 +924,7 @@ Version 1.5.6 - June, 2011
And actually returns any text - also inspired by stackoverflow
discussion with Frankie Ribery!
-- Fixed bug in srange, which accepted escaped hex characters of the
+- Fixed bug in srange, which accepted escaped hex characters of the
form '\0x##', but should be '\x##'. Both forms will be supported
for backwards compatibility.
@@ -946,10 +946,10 @@ Version 1.5.6 - June, 2011
- Updated oneOf internal regular expression generation, with improved
parse time performance.
-
+
- Slight performance improvement in transformString, removing empty
strings from the list of string fragments built while scanning the
- source text, before calling ''.join. Especially useful when using
+ source text, before calling ''.join. Especially useful when using
transformString to strip out selected text.
- Enhanced form of using the "expr('name')" style of results naming,
@@ -963,7 +963,7 @@ Version 1.5.6 - June, 2011
. protobuf parser - parses Google's protobuf language
. btpyparse - a BibTex parser contributed by Matthew Brett,
with test suite test_bibparse.py (thanks, Matthew!)
- . groupUsingListAllMatches.py - demo using trailing '*' for results
+ . groupUsingListAllMatches.py - demo using trailing '*' for results
names
@@ -977,8 +977,8 @@ Version 1.5.5 - August, 2010
Version 1.5.4 - August, 2010
----------------------------
-- Fixed __builtins__ and file references in Python 3 code, thanks to
- Greg Watson, saulspatz, sminos, and Mark Summerfield for reporting
+- Fixed __builtins__ and file references in Python 3 code, thanks to
+ Greg Watson, saulspatz, sminos, and Mark Summerfield for reporting
their Python 3 experiences.
- Added new example, apicheck.py, as a sample of scanning a Tcl-like
@@ -995,13 +995,13 @@ Version 1.5.3 - June, 2010
--------------------------
- ======= NOTE: API CHANGE!!!!!!! ===============
- With this release, and henceforward, the pyparsing module is
+ With this release, and henceforward, the pyparsing module is
imported as "pyparsing" on both Python 2.x and Python 3.x versions.
-- Fixed up setup.py to auto-detect Python version and install the
- correct version of pyparsing - suggested by Alex Martelli,
- thanks, Alex! (and my apologies to all those who struggled with
- those spurious installation errors caused by my earlier
+- Fixed up setup.py to auto-detect Python version and install the
+ correct version of pyparsing - suggested by Alex Martelli,
+ thanks, Alex! (and my apologies to all those who struggled with
+ those spurious installation errors caused by my earlier
fumblings!)
- Fixed bug on Python3 when using parseFile, getting bytes instead of
@@ -1014,14 +1014,14 @@ Version 1.5.3 - June, 2010
- Fixed very sneaky bug in Each, in which Optional elements were
not completely recognized as optional - found by Tal Weiss, thanks
for your patience.
-
+
- Fixed off-by-1 bug in line() method when the first line of the
input text was an empty line. Thanks to John Krukoff for submitting
a patch!
-
+
- Fixed bug in transformString if grammar contains Group expressions,
thanks to patch submitted by barnabas79, nice work!
-
+
- Fixed bug in originalTextFor in which trailing comments or otherwise
ignored text got slurped in with the matched expression. Thanks to
michael_ramirez44 on the pyparsing wiki for reporting this just in
@@ -1029,8 +1029,8 @@ Version 1.5.3 - June, 2010
- Added better support for summing ParseResults, see the new example,
parseResultsSumExample.py.
-
-- Added support for composing a Regex using a compiled RE object;
+
+- Added support for composing a Regex using a compiled RE object;
thanks to my new colleague, Mike Thornton!
- In version 1.5.2, I changed the way exceptions are raised in order
@@ -1038,41 +1038,41 @@ Version 1.5.3 - June, 2010
user posted a bug report on SF that this behavior makes it difficult
to debug some complex parsers, or parsers nested within parsers. In
this release I've added a class attribute ParserElement.verbose_stacktrace,
- with a default value of False. If you set this to True, pyparsing will
+ with a default value of False. If you set this to True, pyparsing will
report stacktraces using the pre-1.5.2 behavior.
-- New examples:
+- New examples:
- . pymicko.py, a MicroC compiler submitted by Zarko Zivanov.
- (Note: this example is separately licensed under the GPLv3,
+ . pymicko.py, a MicroC compiler submitted by Zarko Zivanov.
+ (Note: this example is separately licensed under the GPLv3,
and requires Python 2.6 or higher.) Thank you, Zarko!
. oc.py, a subset C parser, using the BNF from the 1996 Obfuscated C
Contest.
- . stateMachine2.py, a modified version of stateMachine.py submitted
- by Matt Anderson, that is compatible with Python versions 2.7 and
+ . stateMachine2.py, a modified version of stateMachine.py submitted
+ by Matt Anderson, that is compatible with Python versions 2.7 and
above - thanks so much, Matt!
-
- . select_parser.py, a parser for reading SQLite SELECT statements,
+
+ . select_parser.py, a parser for reading SQLite SELECT statements,
as specified at http://www.sqlite.org/lang_select.html; this goes
into much more detail than the simple SQL parser included in pyparsing's
source code
-
- . excelExpr.py, a *simplistic* first-cut at a parser for Excel
- expressions, which I originally posted on comp.lang.python in January,
+
+ . excelExpr.py, a *simplistic* first-cut at a parser for Excel
+ expressions, which I originally posted on comp.lang.python in January,
2010; beware, this parser omits many common Excel cases (addition of
numbers represented as strings, references to named ranges)
-
+
   . cpp_enum_parser.py, a nice little parser posted by Mark Tolonen on
comp.lang.python in August, 2009 (redistributed here with Mark's
permission). Thanks a bunch, Mark!
-
+
. partial_gene_match.py, a sample I posted to Stackoverflow.com,
implementing a special variation on Literal that does "close" matching,
- up to a given number of allowed mismatches. The application was to
+ up to a given number of allowed mismatches. The application was to
find matching gene sequences, with allowance for one or two mismatches.
-
+
. tagCapture.py, a sample showing how to use a Forward placeholder to
enforce matching of text parsed in a previous expression.
@@ -1084,26 +1084,26 @@ Version 1.5.2 - April, 2009
------------------------------
- Added pyparsing_py3.py module, so that Python 3 users can use
pyparsing by changing their pyparsing import statement to:
-
+
import pyparsing_py3
- Thanks for help from Patrick Laban and his friend Geremy
+ Thanks for help from Patrick Laban and his friend Geremy
Condra on the pyparsing wiki.
-
+
- Removed __slots__ declaration on ParseBaseException, for
compatibility with IronPython 2.0.1. Raised by David
Lawler on the pyparsing wiki, thanks David!
-
-- Fixed bug in SkipTo/failOn handling - caught by eagle eye
+
+- Fixed bug in SkipTo/failOn handling - caught by eagle eye
cpennington on the pyparsing wiki!
- Fixed second bug in SkipTo when using the ignore constructor
argument, reported by Catherine Devlin, thanks!
-
+
- Fixed obscure bug reported by Eike Welk when using a class
as a ParseAction with an errant __getitem__ method.
-- Simplified exception stack traces when reporting parse
+- Simplified exception stack traces when reporting parse
exceptions back to caller of parseString or parseFile - thanks
to a tip from Peter Otten on comp.lang.python.
@@ -1111,16 +1111,16 @@ Version 1.5.2 - April, 2009
expressions that match zero-length strings. Prompted by a
question posted by ellisonbg on the wiki.
-- Enhanced classes that take a list of expressions (And, Or,
+- Enhanced classes that take a list of expressions (And, Or,
MatchFirst, and Each) to accept generator expressions also.
This can be useful when generating lists of alternative
expressions, as in this case, where the user wanted to match
any repetitions of '+', '*', '#', or '.', but not mixtures
of them (that is, match '+++', but not '+-+'):
-
+
codes = "+*#."
format = MatchFirst(Word(c) for c in codes)
-
+
Based on a problem posed by Denis Spir on the Python tutor
list.
@@ -1131,30 +1131,30 @@ Version 1.5.2 - April, 2009
Version 1.5.1 - October, 2008
-------------------------------
- Added new helper method originalTextFor, to replace the use of
- the current keepOriginalText parse action. Now instead of
+ the current keepOriginalText parse action. Now instead of
using the parse action, as in:
-
+
fullName = Word(alphas) + Word(alphas)
fullName.setParseAction(keepOriginalText)
-
+
(in this example, we used keepOriginalText to restore any white
space that may have been skipped between the first and last
names)
You can now write:
-
+
fullName = originalTextFor(Word(alphas) + Word(alphas))
-
+
The implementation of originalTextFor is simpler and faster than
keepOriginalText, and does not depend on using the inspect or
imp modules.
-
+
- Added optional parseAll argument to parseFile, to be consistent
with parseAll argument to parseString. Posted by pboucher on the
pyparsing wiki, thanks!
- Added failOn argument to SkipTo, so that grammars can define
literal strings or pyparsing expressions which, if found in the
- skipped text, will cause SkipTo to fail. Useful to prevent
+ skipped text, will cause SkipTo to fail. Useful to prevent
SkipTo from reading past terminating expression. Instigated by
question posed by Aki Niimura on the pyparsing wiki.
@@ -1178,13 +1178,13 @@ Version 1.5.1 - October, 2008
defined.
- Fixed bug in ParseResults.asXML(), in which the first named
- item within a ParseResults gets reported with an <ITEM> tag
+ item within a ParseResults gets reported with an <ITEM> tag
instead of with the correct results name.
-
-- Fixed bug in '-' error stop, when '-' operator is used inside a
+
+- Fixed bug in '-' error stop, when '-' operator is used inside a
Combine expression.
-- Reverted generator expression to use list comprehension, for
+- Reverted generator expression to use list comprehension, for
better compatibility with old versions of Python. Reported by
jester/artixdesign on the SourceForge pyparsing discussion list.
diff --git a/CODE_OF_CONDUCT.rst b/CODE_OF_CONDUCT.rst
index fe3f47a..4f84d2c 100644
--- a/CODE_OF_CONDUCT.rst
+++ b/CODE_OF_CONDUCT.rst
@@ -1,90 +1,90 @@
-Contributor Covenant Code of Conduct
-====================================
-
-Our Pledge
-----------
-
-In the interest of fostering an open and welcoming environment,
-we as contributors and maintainers pledge to making participation
-in our project and our community a harassment-free experience for
-everyone, regardless of age, body size, disability, ethnicity,
-sex characteristics, gender identity and expression, level of
-experience, education, socio-economic status, nationality,
-personal appearance, race, religion, or sexual identity and
-orientation.
-
-Our Standards
--------------
-
-Examples of behavior that contributes to creating a positive
-environment include:
-
-- Using welcoming and inclusive language
-- Being respectful of differing viewpoints and experiences
-- Gracefully accepting constructive criticism
-- Focusing on what is best for the community
-- Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-- The use of sexualized language or imagery and unwelcome sexual
- attention or advances
-- Trolling, insulting/derogatory comments, and personal or political
- attacks
-- Public or private harassment
-- Publishing others’ private information, such as a physical or
- electronic address, without explicit permission
-- Other conduct which could reasonably be considered
- inappropriate in a professional setting
-
-Our Responsibilities
---------------------
-
-Project maintainers are responsible for clarifying the standards
-of acceptable behavior and are expected to take appropriate and
-fair corrective action in response to any instances of
-unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove,
-edit, or reject comments, commits, code, wiki edits, issues, and
-other contributions that are not aligned to this Code of Conduct,
-or to ban temporarily or permanently any contributor for other
-behaviors that they deem inappropriate, threatening, offensive,
-or harmful.
-
-Scope
------
-
-This Code of Conduct applies both within project spaces and in
-public spaces when an individual is representing the project or
-its community. Examples of representing a project or community
-include using an official project e-mail address, posting via an
-official social media account, or acting as an appointed
-representative at an online or offline event. Representation of
-a project may be further defined and clarified by project
-maintainers.
-
-Enforcement
------------
-
-Instances of abusive, harassing, or otherwise unacceptable
-behavior may be reported by contacting the project team at
-pyparsing@mail.com. All complaints will be reviewed and
-investigated and will result in a response that is deemed
-necessary and appropriate to the circumstances. The project team
-is obligated to maintain confidentiality with regard to the
-reporter of an incident. Further details of specific enforcement
-policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of
-Conduct in good faith may face temporary or permanent
-repercussions as determined by other members of the project’s
-leadership.
-
-Attribution
------------
-
-This Code of Conduct is adapted from the `Contributor Covenant
-<https://www.contributor-covenant.org>`__, version 1.4, available
-at
-https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+Contributor Covenant Code of Conduct
+====================================
+
+Our Pledge
+----------
+
+In the interest of fostering an open and welcoming environment,
+we as contributors and maintainers pledge to making participation
+in our project and our community a harassment-free experience for
+everyone, regardless of age, body size, disability, ethnicity,
+sex characteristics, gender identity and expression, level of
+experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and
+orientation.
+
+Our Standards
+-------------
+
+Examples of behavior that contributes to creating a positive
+environment include:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+- The use of sexualized language or imagery and unwelcome sexual
+ attention or advances
+- Trolling, insulting/derogatory comments, and personal or political
+ attacks
+- Public or private harassment
+- Publishing others’ private information, such as a physical or
+ electronic address, without explicit permission
+- Other conduct which could reasonably be considered
+ inappropriate in a professional setting
+
+Our Responsibilities
+--------------------
+
+Project maintainers are responsible for clarifying the standards
+of acceptable behavior and are expected to take appropriate and
+fair corrective action in response to any instances of
+unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove,
+edit, or reject comments, commits, code, wiki edits, issues, and
+other contributions that are not aligned to this Code of Conduct,
+or to ban temporarily or permanently any contributor for other
+behaviors that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Scope
+-----
+
+This Code of Conduct applies both within project spaces and in
+public spaces when an individual is representing the project or
+its community. Examples of representing a project or community
+include using an official project e-mail address, posting via an
+official social media account, or acting as an appointed
+representative at an online or offline event. Representation of
+a project may be further defined and clarified by project
+maintainers.
+
+Enforcement
+-----------
+
+Instances of abusive, harassing, or otherwise unacceptable
+behavior may be reported by contacting the project team at
+pyparsing@mail.com. All complaints will be reviewed and
+investigated and will result in a response that is deemed
+necessary and appropriate to the circumstances. The project team
+is obligated to maintain confidentiality with regard to the
+reporter of an incident. Further details of specific enforcement
+policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of
+Conduct in good faith may face temporary or permanent
+repercussions as determined by other members of the project’s
+leadership.
+
+Attribution
+-----------
+
+This Code of Conduct is adapted from the `Contributor Covenant
+<https://www.contributor-covenant.org>`__, version 1.4, available
+at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/docs/HowToUsePyparsing.rst b/docs/HowToUsePyparsing.rst
index 01b5026..7d39e30 100644
--- a/docs/HowToUsePyparsing.rst
+++ b/docs/HowToUsePyparsing.rst
@@ -8,7 +8,7 @@ Using the pyparsing module
:revision: 2.0.1a
:date: July, 2013 (minor update August, 2018)
-:copyright: Copyright |copy| 2003-2013 Paul McGuire.
+:copyright: Copyright |copy| 2003-2013 Paul McGuire.
.. |copy| unicode:: 0xA9
@@ -23,7 +23,7 @@ Using the pyparsing module
.. contents:: :depth: 4
-Note: While this content is still valid, there are more detailed
+Note: While this content is still valid, there are more detailed
descriptions and examples at the online doc server at
https://pythonhosted.org/pyparsing/pyparsing-module.html
@@ -36,7 +36,7 @@ To parse an incoming data string, the client code must follow these steps:
this to a program variable. Optional results names or parsing
actions can also be defined at this time.
-2. Call ``parseString()`` or ``scanString()`` on this variable, passing in
+2. Call ``parseString()`` or ``scanString()`` on this variable, passing in
the string to
be parsed. During the matching process, whitespace between
tokens is skipped by default (although this can be changed).
@@ -56,15 +56,15 @@ The following complete Python program will parse the greeting "Hello, World!",
or any other greeting of the form "<salutation>, <addressee>!"::
from pyparsing import Word, alphas
-
+
greet = Word( alphas ) + "," + Word( alphas ) + "!"
greeting = greet.parseString( "Hello, World!" )
print greeting
-
+
The parsed tokens are returned in the following form::
['Hello', ',', 'World', '!']
-
+
Usage notes
-----------
@@ -74,13 +74,13 @@ Usage notes
from text reports with complicated format and structure ("screen
or report scraping"). However, it is possible that your defined
matching patterns may accept invalid inputs. Use pyparsing to
- extract data from strings assumed to be well-formatted.
+ extract data from strings assumed to be well-formatted.
-- To keep up the readability of your code, use operators_ such as ``+``, ``|``,
+- To keep up the readability of your code, use operators_ such as ``+``, ``|``,
``^``, and ``~`` to combine expressions. You can also combine
string literals with ParseExpressions - they will be
automatically converted to Literal objects. For example::
-
+
integer = Word( nums ) # simple unsigned integer
variable = Word( alphas, max=1 ) # single letter variable, such as x, z, m, etc.
arithOp = Word( "+-*/", max=1 ) # arithmetic operators
@@ -94,40 +94,40 @@ Usage notes
grammars, such as the above ``equation``, without having to clutter it up with
extraneous ``ws`` markers. The ``equation`` grammar will successfully parse all of the
following statements::
-
+
x=2+2
x = 2+2
a = 10 * 4
r= 1234/ 100000
-
+
Of course, it is quite simple to extend this example to support more elaborate expressions, with
- nesting with parentheses, floating point numbers, scientific notation, and named constants
+ nesting with parentheses, floating point numbers, scientific notation, and named constants
(such as ``e`` or ``pi``). See ``fourFn.py``, included in the examples directory.
- To modify pyparsing's default whitespace skipping, you can use one or
more of the following methods:
-
+
- use the static method ``ParserElement.setDefaultWhitespaceChars``
to override the normal set of whitespace chars (' \t\n'). For instance
when defining a grammar in which newlines are significant, you should
- call ``ParserElement.setDefaultWhitespaceChars(' \t')`` to remove
+ call ``ParserElement.setDefaultWhitespaceChars(' \t')`` to remove
newline from the set of skippable whitespace characters. Calling
this method will affect all pyparsing expressions defined afterward.
-
- - call ``leaveWhitespace()`` on individual expressions, to suppress the
+
+ - call ``leaveWhitespace()`` on individual expressions, to suppress the
skipping of whitespace before trying to match the expression
-
+
- use ``Combine`` to require that successive expressions must be
adjacent in the input string. For instance, this expression::
-
+
real = Word(nums) + '.' + Word(nums)
-
- will match "3.14159", but will also match "3 . 12". It will also
+
+ will match "3.14159", but will also match "3 . 12". It will also
return the matched results as ['3', '.', '14159']. By changing this
expression to::
-
+
real = Combine( Word(nums) + '.' + Word(nums) )
-
+
it will not match numbers with embedded spaces, and it will return a
single concatenated string '3.14159' as the parsed token.
@@ -136,21 +136,21 @@ Usage notes
repetition count), or by a tuple containing
two integers, or None and an integer, representing min and max repetitions
(with None representing no min or no max, depending whether it is the first or
- second tuple element). See the following examples, where n is used to
+ second tuple element). See the following examples, where n is used to
indicate an integer value:
- ``expr*3`` is equivalent to ``expr + expr + expr``
-
+
- ``expr*(2,3)`` is equivalent to ``expr + expr + Optional(expr)``
-
+
- ``expr*(n,None)`` or ``expr*(n,)`` is equivalent
to ``expr*n + ZeroOrMore(expr)`` (read as "at least n instances of expr")
-
+
- ``expr*(None,n)`` is equivalent to ``expr*(0,n)``
(read as "0 to n instances of expr")
-
+
- ``expr*(None,None)`` is equivalent to ``ZeroOrMore(expr)``
-
+
- ``expr*(1,None)`` is equivalent to ``OneOrMore(expr)``
Note that ``expr*(None,n)`` does not raise an exception if
@@ -158,7 +158,7 @@ Usage notes
``expr*(None,n)`` does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
``expr*(None,n) + ~expr``.
-
+
- ``MatchFirst`` expressions are matched left-to-right, and the first
match found will skip all later expressions within, so be sure
to define less-specific patterns after more-specific patterns.
@@ -166,7 +166,7 @@ Usage notes
expressions (defined using the ``^`` operator) - they will always
match the longest expression, although they are more
compute-intensive.
-
+
- ``Or`` expressions will evaluate all of the specified subexpressions
to determine which is the "best" match, that is, which matches
the longest string in the input data. In case of a tie, the
@@ -174,16 +174,16 @@ Usage notes
- If parsing the contents of an entire file, pass it to the
``parseFile`` method using::
-
+
expr.parseFile( sourceFile )
-
+
- ``ParseExceptions`` will report the location where an expected token
or expression failed to match. For example, if we tried to use our
"Hello, World!" parser to parse "Hello World!" (leaving out the separating
comma), we would get an exception, with the message::
-
+
pyparsing.ParseException: Expected "," (6), (1,7)
-
+
In the case of complex
expressions, the reported location may not be exactly where you
would expect. See more information under ParseException_ .
@@ -192,7 +192,7 @@ Usage notes
sublist. This will help organize your results into more
hierarchical form (the default behavior is to return matching
tokens as a flat list of matching input strings).
-
+
- Punctuation may be significant for matching, but is rarely of
much interest in the parsed results. Use the ``suppress()`` method
to keep these tokens from cluttering up your returned lists of
@@ -201,29 +201,29 @@ Usage notes
default), but only returns a list of the actual expressions -
the delimiters are used for parsing, but are suppressed from the
returned output.
-
+
- Parse actions can be used to convert values from strings to
other data types (ints, floats, booleans, etc.).
-
+
- Results names are recommended for retrieving tokens from complex
expressions. It is much easier to access a token using its field
- name than using a positional index, especially if the expression
+ name than using a positional index, especially if the expression
contains optional elements. You can also shortcut
the ``setResultsName`` call::
-
+
stats = "AVE:" + realNum.setResultsName("average") + \
"MIN:" + realNum.setResultsName("min") + \
- "MAX:" + realNum.setResultsName("max")
+ "MAX:" + realNum.setResultsName("max")
can now be written as this::
-
+
stats = "AVE:" + realNum("average") + \
"MIN:" + realNum("min") + \
- "MAX:" + realNum("max")
-
+ "MAX:" + realNum("max")
+
- Be careful when defining parse actions that modify global variables or
- data structures (as in ``fourFn.py``), especially for low level tokens
- or expressions that may occur within an ``And`` expression; an early element
+ data structures (as in ``fourFn.py``), especially for low level tokens
+ or expressions that may occur within an ``And`` expression; an early element
of an ``And`` may match, but the overall expression may fail.
- Performance of pyparsing may be slow for complex grammars and/or large
@@ -245,25 +245,25 @@ methods for code to use are:
- ``parseString( sourceString, parseAll=False )`` - only called once, on the overall
matching pattern; returns a ParseResults_ object that makes the
- matched tokens available as a list, and optionally as a dictionary,
+ matched tokens available as a list, and optionally as a dictionary,
or as an object with named attributes; if parseAll is set to True, then
parseString will raise a ParseException if the grammar does not process
the complete input string.
- ``parseFile( sourceFile )`` - a convenience function, that accepts an
- input file object or filename. The file contents are passed as a
+ input file object or filename. The file contents are passed as a
string to ``parseString()``. ``parseFile`` also supports the ``parseAll`` argument.
-
+
- ``scanString( sourceString )`` - generator function, used to find and
- extract matching text in the given source string; for each matched text,
+ extract matching text in the given source string; for each matched text,
returns a tuple of:
-
+
- matched tokens (packaged as a ParseResults_ object)
-
+
- start location of the matched text in the given source string
-
+
- end location in the given source string
-
+
``scanString`` allows you to scan through the input source string for
random matches, instead of exhaustively defining the grammar for the entire
source text (as would be required with ``parseString``).
@@ -280,15 +280,15 @@ methods for code to use are:
- ``setName( name )`` - associate a short descriptive name for this
element, useful in displaying exceptions and trace information
-- ``setResultsName( string, listAllMatches=False )`` - name to be given
+- ``setResultsName( string, listAllMatches=False )`` - name to be given
to tokens matching
the element; if multiple tokens within
a repetition group (such as ``ZeroOrMore`` or ``delimitedList``) the
default is to return only the last matching token - if listAllMatches
- is set to True, then a list of all the matching tokens is returned.
+ is set to True, then a list of all the matching tokens is returned.
(New in 1.5.6 - a results name with a trailing '*' character will be
interpreted as setting listAllMatches to True.)
- Note:
+ Note:
``setResultsName`` returns a *copy* of the element so that a single
basic element can be referenced multiple times and given
different names within a complex grammar.
@@ -298,21 +298,21 @@ methods for code to use are:
- ``setParseAction( *fn )`` - specify one or more functions to call after successful
matching of the element; each function is defined as ``fn( s,
loc, toks )``, where:
-
+
- ``s`` is the original parse string
-
+
- ``loc`` is the location in the string where matching started
-
+
- ``toks`` is the list of the matched tokens, packaged as a ParseResults_ object
-
+
Multiple functions can be attached to a ParserElement by specifying multiple
arguments to setParseAction, or by calling setParseAction multiple times.
-
+
Each parse action function can return a modified ``toks`` list, to perform conversion, or
string modifications. For brevity, ``fn`` may also be a
lambda - here is an example of using a parse action to convert matched
integer tokens from strings to integers::
-
+
intNumber = Word(nums).setParseAction( lambda s,l,t: [ int(t[0]) ] )
If ``fn`` does not modify the ``toks`` list, it does not need to return
@@ -326,7 +326,7 @@ methods for code to use are:
attached to each
- ``leaveWhitespace()`` - change default behavior of skipping
- whitespace before starting matching (mostly used internally to the
+ whitespace before starting matching (mostly used internally to the
pyparsing module, rarely used by client code)
- ``setWhitespaceChars( chars )`` - define the set of chars to be ignored
@@ -336,7 +336,7 @@ methods for code to use are:
- ``setDefaultWhitespaceChars( chars )`` - class-level method to override
the default set of whitespace chars for all subsequently created ParserElements
(including copies); useful when defining grammars that treat one or more of the
- default whitespace characters as significant (such as a line-sensitive grammar, to
+ default whitespace characters as significant (such as a line-sensitive grammar, to
omit newline from the list of ignorable whitespace)
- ``suppress()`` - convenience function to suppress the output of the
@@ -347,12 +347,12 @@ methods for code to use are:
repeatedly to specify multiple expressions; useful to specify
patterns of comment syntax, for example
-- ``setDebug( dbgFlag=True )`` - function to enable/disable tracing output
+- ``setDebug( dbgFlag=True )`` - function to enable/disable tracing output
when trying to match this element
- ``validate()`` - function to verify that the defined grammar does not
contain infinitely recursive constructs
-
+
.. _parseWithTabs:
- ``parseWithTabs()`` - function to override default behavior of converting
@@ -360,15 +360,15 @@ methods for code to use are:
specifying whitespace-significant grammars using the White_ class.
- ``enablePackrat()`` - a class-level static method to enable a memoizing
- performance enhancement, known as "packrat parsing". packrat parsing is
+ performance enhancement, known as "packrat parsing". packrat parsing is
disabled by default, since it may conflict with some user programs that use
parse actions. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
- your program uses psyco to "compile as you go", you must call
+ your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
-
+
Basic ParserElement subclasses
------------------------------
@@ -383,48 +383,48 @@ Basic ParserElement subclasses
whitespace, punctuation, or other non-keyword characters; prevents
accidental matching of a non-keyword that happens to begin with a
defined keyword
-
+
- ``CaselessKeyword`` - similar to Keyword, but with caseless matching
behavior
-
+
.. _Word:
- ``Word`` - one or more contiguous characters; construct with a
string containing the set of allowed initial characters, and an
optional second string of allowed body characters; for instance,
a common Word construct is to match a code identifier - in C, a
- valid identifier must start with an alphabetic character or an
+ valid identifier must start with an alphabetic character or an
underscore ('_'), followed by a body that can also include numeric
- digits. That is, ``a``, ``i``, ``MAX_LENGTH``, ``_a1``, ``b_109_``, and
+ digits. That is, ``a``, ``i``, ``MAX_LENGTH``, ``_a1``, ``b_109_``, and
``plan9FromOuterSpace``
are all valid identifiers; ``9b7z``, ``$a``, ``.section``, and ``0debug``
are not. To
define an identifier using a Word, use either of the following::
-
+
- Word( alphas+"_", alphanums+"_" )
- Word( srange("[a-zA-Z_]"), srange("[a-zA-Z0-9_]") )
-
+
If only one
string given, it specifies that the same character set defined
for the initial character is used for the word body; for instance, to
define an identifier that can only be composed of capital letters and
underscores, use::
-
+
- Word( "ABCDEFGHIJKLMNOPQRSTUVWXYZ_" )
- Word( srange("[A-Z_]") )
A Word may
also be constructed with any of the following optional parameters:
-
+
- ``min`` - indicating a minimum length of matching characters
-
+
- ``max`` - indicating a maximum length of matching characters
-
+
- ``exact`` - indicating an exact length of matching characters
If ``exact`` is specified, it will override any values for ``min`` or ``max``.
-
- New in 1.5.6 - Sometimes you want to define a word using all
+
+ New in 1.5.6 - Sometimes you want to define a word using all
characters in a range except for one or two of them; you can do this
with the new ``excludeChars`` argument. This is helpful if you want to define
a word with all printables except for a single delimiter character, such
@@ -439,51 +439,51 @@ Basic ParserElement subclasses
- ``Regex`` - a powerful construct, that accepts a regular expression
to be matched at the current parse position; accepts an optional
``flags`` parameter, corresponding to the flags parameter in the re.compile
- method; if the expression includes named sub-fields, they will be
+ method; if the expression includes named sub-fields, they will be
represented in the returned ParseResults_
- ``QuotedString`` - supports the definition of custom quoted string
formats, in addition to pyparsing's built-in ``dblQuotedString`` and
- ``sglQuotedString``. ``QuotedString`` allows you to specify the following
+ ``sglQuotedString``. ``QuotedString`` allows you to specify the following
parameters:
-
+
- ``quoteChar`` - string of one or more characters defining the quote delimiting string
-
+
- ``escChar`` - character to escape quotes, typically backslash (default=None)
-
+
- ``escQuote`` - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
-
+
- ``multiline`` - boolean indicating whether quotes can span multiple lines (default=False)
-
+
- ``unquoteResults`` - boolean indicating whether the matched text should be unquoted (default=True)
-
+
- ``endQuoteChar`` - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
-
+
- ``SkipTo`` - skips ahead in the input string, accepting any
- characters up to the specified pattern; may be constructed with
+ characters up to the specified pattern; may be constructed with
the following optional parameters:
-
+
- ``include`` - if set to true, also consumes the match expression
(default is false)
-
+
- ``ignore`` - allows the user to specify patterns to not be matched,
to prevent false matches
-
+
- ``failOn`` - if a literal string or expression is given for this argument, it defines an expression that
should cause the ``SkipTo`` expression to fail, and not skip over that expression
.. _White:
-- ``White`` - also similar to Word_, but matches whitespace
+- ``White`` - also similar to Word_, but matches whitespace
characters. Not usually needed, as whitespace is implicitly
ignored by pyparsing. However, some grammars are whitespace-sensitive,
such as those that use leading tabs or spaces to indicating grouping
- or hierarchy. (If matching on tab characters, be sure to call
+ or hierarchy. (If matching on tab characters, be sure to call
parseWithTabs_ on the top-level parse element.)
-
+
- ``Empty`` - a null expression, requiring no characters - will always
match; useful for debugging and for specialized grammars
-
+
- ``NoMatch`` - opposite of Empty, will never match; useful for debugging
and for specialized grammars
@@ -495,25 +495,25 @@ Expression subclasses
match for And to match; can also be created using the '+'
operator; multiple expressions can be Anded together using the '*'
operator as in::
-
+
ipAddress = Word(nums) + ('.'+Word(nums))*3
-
+
A tuple can be used as the multiplier, indicating a min/max::
-
+
usPhoneNumber = Word(nums) + ('-'+Word(nums))*(1,2)
- A special form of ``And`` is created if the '-' operator is used
+ A special form of ``And`` is created if the '-' operator is used
instead of the '+' operator. In the ipAddress example above, if
no trailing '.' and Word(nums) are found after matching the initial
Word(nums), then pyparsing will back up in the grammar and try other
alternatives to ipAddress. However, if ipAddress is defined as::
-
+
strictIpAddress = Word(nums) - ('.'+Word(nums))*3
-
+
then no backing up is done. If the first Word(nums) of strictIpAddress
is matched, then any mismatch after that will raise a ParseSyntaxException,
which will halt the parsing process immediately. By careful use of the
- '-' operator, grammars can provide meaningful error messages close to
+ '-' operator, grammars can provide meaningful error messages close to
the location where the incoming text does not match the specified
grammar.
@@ -530,7 +530,7 @@ Expression subclasses
- ``Each`` - similar to And, in that all of the provided expressions
must match; however, Each permits matching to be done in any order;
can also be created using the '&' operator
-
+
- ``Optional`` - construct with a ParserElement, but this element is
not required to match; can be constructed with an optional ``default`` argument,
containing a default string or object to be supplied if the given optional
@@ -546,7 +546,7 @@ Expression subclasses
expressions, but does not advance the parsing position within the input string
- ``NotAny`` - a negative lookahead expression, prevents matching of named
- expressions, does not advance the parsing position within the input string;
+ expressions, does not advance the parsing position within the input string;
can also be created using the unary '~' operator
@@ -568,16 +568,16 @@ Expression operators
- ``*`` - creates And by multiplying the expression by the integer operand; if
expression is multiplied by a 2-tuple, creates an And of (min,max)
expressions (similar to "{min,max}" form in regular expressions); if
- min is None, intepret as (0,max); if max is None, interpret as
+   min is None, interpret as (0,max); if max is None, interpret as
expr*min + ZeroOrMore(expr)
-
+
- ``-`` - like ``+`` but with no backup and retry of alternatives
- ``*`` - repetition of expression
- ``==`` - matching expression to string; returns True if the string matches the given expression
-- ``<<=`` - inserts the expression following the operator as the body of the
+- ``<<=`` - inserts the expression following the operator as the body of the
Forward expression before the operator
@@ -638,61 +638,61 @@ Other classes
.. _ParseResults:
- ``ParseResults`` - class used to contain and manage the lists of tokens
- created from parsing the input using the user-defined parse
+ created from parsing the input using the user-defined parse
expression. ParseResults can be accessed in a number of ways:
- as a list
-
+
- total list of elements can be found using len()
-
+
- individual elements can be found using [0], [1], [-1], etc.
-
+
- elements can be deleted using ``del``
-
+
- the -1th element can be extracted and removed in a single operation
- using ``pop()``, or any element can be extracted and removed
+ using ``pop()``, or any element can be extracted and removed
using ``pop(n)``
-
+
- as a dictionary
-
- - if ``setResultsName()`` is used to name elements within the
+
+ - if ``setResultsName()`` is used to name elements within the
overall parse expression, then these fields can be referenced
as dictionary elements or as attributes
-
+
- the Dict class generates dictionary entries using the data of the
input text - in addition to ParseResults listed as ``[ [ a1, b1, c1, ...], [ a2, b2, c2, ...] ]``
- it also acts as a dictionary with entries defined as ``{ a1 : [ b1, c1, ... ] }, { a2 : [ b2, c2, ... ] }``;
- this is especially useful when processing tabular data where the first column contains a key
+ it also acts as a dictionary with entries defined as ``{ a1 : [ b1, c1, ... ] }, { a2 : [ b2, c2, ... ] }``;
+ this is especially useful when processing tabular data where the first column contains a key
value for that line of data
-
+
- list elements that are deleted using ``del`` will still be accessible by their
dictionary keys
-
+
- supports ``get()``, ``items()`` and ``keys()`` methods, similar to a dictionary
-
+
- a keyed item can be extracted and removed using ``pop(key)``. Here
- key must be non-numeric (such as a string), in order to use dict
+ key must be non-numeric (such as a string), in order to use dict
extraction instead of list extraction.
-
+
- new named elements can be added (in a parse action, for instance), using the same
syntax as adding an item to a dict (``parseResults["X"]="new item"``); named elements can be removed using ``del parseResults["X"]``
-
+
- as a nested list
-
+
- results returned from the Group class are encapsulated within their
own list structure, so that the tokens can be handled as a hierarchical
tree
-
+
ParseResults can also be converted to an ordinary list of strings
by calling ``asList()``. Note that this will strip the results of any
field names that have been defined for any embedded parse elements.
(The ``pprint`` module is especially good at printing out the nested contents
given by ``asList()``.)
-
+
Finally, ParseResults can be viewed by calling ``dump()``. ``dump()` will first show
the ``asList()`` output, followed by an indented structure listing parsed tokens that
have been assigned results names.
-
+
Exception classes and Troubleshooting
-------------------------------------
@@ -700,17 +700,17 @@ Exception classes and Troubleshooting
.. _ParseException:
- ``ParseException`` - exception returned when a grammar parse fails;
- ParseExceptions have attributes loc, msg, line, lineno, and column; to view the
+ ParseExceptions have attributes loc, msg, line, lineno, and column; to view the
text line and location where the reported ParseException occurs, use::
-
+
except ParseException, err:
print err.line
print " "*(err.column-1) + "^"
print err
-
+
- ``RecursiveGrammarException`` - exception returned by ``validate()`` if
the grammar contains a recursive infinite loop, such as::
-
+
badGrammar = Forward()
goodToken = Literal("A")
badGrammar <<= Optional(goodToken) + badGrammar
@@ -724,7 +724,7 @@ Exception classes and Troubleshooting
a sequence of expressions in an ``And`` expression.
You can also get some insights into the parsing logic using diagnostic parse actions,
-and setDebug(), or test the matching of expression fragments by testing them using
+and setDebug(), or test the matching of expression fragments by testing them using
scanString().
@@ -739,7 +739,7 @@ Helper methods
By default, the delimiters are suppressed, so the returned results contain
only the separate list elements. Can optionally specify ``combine=True``,
indicating that the expressions and delimiters should be returned as one
- combined value (useful for scoped variables, such as ``"a.b.c"``, or
+ combined value (useful for scoped variables, such as ``"a.b.c"``, or
``"a::b::c"``, or paths such as ``"a/b/c"``).
- ``countedArray( expr )`` - convenience function for a pattern where an list of
@@ -750,13 +750,13 @@ Helper methods
is easily reconstructed by using len on the returned array).
- ``oneOf( string, caseless=False )`` - convenience function for quickly declaring an
- alternative set of ``Literal`` tokens, by splitting the given string on
+ alternative set of ``Literal`` tokens, by splitting the given string on
whitespace boundaries. The tokens are sorted so that longer
matches are attempted first; this ensures that a short token does
- not mask a longer one that starts with the same characters. If ``caseless=True``,
+ not mask a longer one that starts with the same characters. If ``caseless=True``,
will create an alternative set of CaselessLiteral tokens.
-- ``dictOf( key, value )`` - convenience function for quickly declaring a
+- ``dictOf( key, value )`` - convenience function for quickly declaring a
dictionary pattern of ``Dict( ZeroOrMore( Group( key + value ) ) )``.
- ``makeHTMLTags( tagName )`` and ``makeXMLTags( tagName )`` - convenience
@@ -766,69 +766,69 @@ Helper methods
are returned as keyed tokens in the returned ParseResults. ``makeHTMLTags`` is less
restrictive than ``makeXMLTags``, especially with respect to case sensitivity.
-- ``infixNotation(baseOperand, operatorList)`` - (formerly named ``operatorPrecedence``) convenience function to define a
- grammar for parsing infix notation
- expressions with a hierarchical precedence of operators. To use the ``infixNotation``
+- ``infixNotation(baseOperand, operatorList)`` - (formerly named ``operatorPrecedence``) convenience function to define a
+ grammar for parsing infix notation
+ expressions with a hierarchical precedence of operators. To use the ``infixNotation``
helper:
-
+
1. Define the base "atom" operand term of the grammar.
For this simple grammar, the smallest operand is either
and integer or a variable. This will be the first argument
to the ``infixNotation`` method.
-
+
2. Define a list of tuples for each level of operator
precendence. Each tuple is of the form
``(opExpr, numTerms, rightLeftAssoc, parseAction)``, where:
-
+
- ``opExpr`` - the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal; if
None, indicates an empty operator, such as the implied
multiplication operation between 'm' and 'x' in "y = mx + b".
-
+
- ``numTerms`` - the number of terms for this operator (must
be 1, 2, or 3)
-
+
- ``rightLeftAssoc`` is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
-
- - ``parseAction`` is the parse action to be associated with
+
+ - ``parseAction`` is the parse action to be associated with
expressions matching this operator expression (the
``parseAction`` tuple member may be omitted)
-
+
3. Call ``infixNotation`` passing the operand expression and
the operator precedence list, and save the returned value
as the generated pyparsing expression. You can then use
this expression to parse input strings, or incorporate it
into a larger, more complex grammar.
-
-- ``matchPreviousLiteral`` and ``matchPreviousExpr`` - function to define and
+
+- ``matchPreviousLiteral`` and ``matchPreviousExpr`` - function to define and
expression that matches the same content
as was parsed in a previous parse expression. For instance::
-
+
first = Word(nums)
matchExpr = first + ":" + matchPreviousLiteral(first)
-
+
will match "1:1", but not "1:2". Since this matches at the literal
level, this will also match the leading "1:1" in "1:10".
-
+
In contrast::
-
+
first = Word(nums)
matchExpr = first + ":" + matchPreviousExpr(first)
-
+
will *not* match the leading "1:1" in "1:10"; the expressions are
evaluated first, and then compared, so "1" is compared with "10".
-- ``nestedExpr(opener, closer, content=None, ignoreExpr=quotedString)`` - method for defining nested
+- ``nestedExpr(opener, closer, content=None, ignoreExpr=quotedString)`` - method for defining nested
lists enclosed in opening and closing delimiters.
- ``opener`` - opening character for a nested list (default="("); can also be a pyparsing expression
-
+
- ``closer`` - closing character for a nested list (default=")"); can also be a pyparsing expression
-
+
- ``content`` - expression for items within the nested lists (default=None)
-
+
- ``ignoreExpr`` - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
@@ -844,9 +844,9 @@ Helper methods
- ``indentedBlock( statementExpr, indentationStackVar, indent=True)`` -
- function to define an indented block of statements, similar to
+ function to define an indented block of statements, similar to
indentation-based blocking in Python source code:
-
+
- ``statementExpr`` - the expression defining a statement that
will be found in the indented block; a valid ``indentedBlock``
must contain at least 1 matching ``statementExpr``
@@ -855,7 +855,7 @@ Helper methods
should be common to all ``indentedBlock`` expressions defined
within the same grammar, and should be reinitialized to [1]
each time the grammar is to be used
-
+
- ``indent`` - a boolean flag indicating whether the expressions
within the block must be indented from the current parse
location; if using ``indentedBlock`` to define the left-most
@@ -865,12 +865,12 @@ Helper methods
- ``originalTextFor( expr )`` - helper function to preserve the originally parsed text, regardless of any
token processing or conversion done by the contained expression. For instance, the following expression::
-
+
fullName = Word(alphas) + Word(alphas)
will return the parse of "John Smith" as ['John', 'Smith']. In some applications, the actual name as it
was given in the input string is what is desired. To do this, use ``originalTextFor``::
-
+
fullName = originalTextFor(Word(alphas) + Word(alphas))
- ``ungroup( expr )`` - function to "ungroup" returned tokens; useful
@@ -889,49 +889,49 @@ Helper methods
representing ``lineno( loc, string )``; useful when printing out diagnostic
messages for exceptions
-- ``srange( rangeSpec )`` - function to define a string of characters,
- given a string of the form used by regexp string ranges, such as ``"[0-9]"`` for
- all numeric digits, ``"[A-Z_]"`` for uppercase characters plus underscore, and
- so on (note that rangeSpec does not include support for generic regular
+- ``srange( rangeSpec )`` - function to define a string of characters,
+ given a string of the form used by regexp string ranges, such as ``"[0-9]"`` for
+ all numeric digits, ``"[A-Z_]"`` for uppercase characters plus underscore, and
+ so on (note that rangeSpec does not include support for generic regular
expressions, just string range specs)
- ``getTokensEndLoc()`` - function to call from within a parse action to get
the ending location for the matched tokens
-
+
- ``traceParseAction(fn)`` - decorator function to debug parse actions. Lists
each call, called arguments, and return value or exception
-
-
+
+
Helper parse actions
--------------------
- ``removeQuotes`` - removes the first and last characters of a quoted string;
useful to remove the delimiting quotes from quoted strings
-
+
- ``replaceWith(replString)`` - returns a parse action that simply returns the
replString; useful when using transformString, or converting HTML entities, as in::
-
+
nbsp = Literal("&nbsp;").setParseAction( replaceWith("<BLANK>") )
-- ``keepOriginalText``- (deprecated, use originalTextFor_ instead) restores any internal whitespace or suppressed
+- ``keepOriginalText``- (deprecated, use originalTextFor_ instead) restores any internal whitespace or suppressed
text within the tokens for a matched parse
expression. This is especially useful when defining expressions
for scanString or transformString applications.
-- ``withAttribute( *args, **kwargs )`` - helper to create a validating parse action to be used with start tags created
- with ``makeXMLTags`` or ``makeHTMLTags``. Use ``withAttribute`` to qualify a starting tag
- with a required attribute value, to avoid false matches on common tags such as
+- ``withAttribute( *args, **kwargs )`` - helper to create a validating parse action to be used with start tags created
+ with ``makeXMLTags`` or ``makeHTMLTags``. Use ``withAttribute`` to qualify a starting tag
+ with a required attribute value, to avoid false matches on common tags such as
``<TD>`` or ``<DIV>``.
-
+
``withAttribute`` can be called with:
-
+
- keyword arguments, as in ``(class="Customer",align="right")``, or
-
+
- a list of name-value tuples, as in ``( ("ns1:class", "Customer"), ("ns2:align","right") )``
- An attribute can be specified to have the special value
- ``withAttribute.ANY_VALUE``, which will match any value - use this to
+ An attribute can be specified to have the special value
+ ``withAttribute.ANY_VALUE``, which will match any value - use this to
ensure that an attribute is present but any attribute value is
acceptable.
@@ -940,7 +940,7 @@ Helper parse actions
- ``upcaseTokens`` - converts all matched tokens to uppercase
- ``matchOnlyAtCol( columnNumber )`` - a parse action that verifies that
- an expression was matched at a particular column, raising a
+ an expression was matched at a particular column, raising a
ParseException if matching at a different column number; useful when parsing
tabular data
diff --git a/examples/0README.html b/examples/0README.html
index 303d44d..e566784 100644
--- a/examples/0README.html
+++ b/examples/0README.html
@@ -27,7 +27,7 @@ Unicode example to parse "Hello, World!" in Greek.
<p>
<li><a href="chemicalFormulas.py">chemicalFormulas.py</a><br>
-Simple example to demonstrate the use of ParseResults returned from parseString().
+Simple example to demonstrate the use of ParseResults returned from parseString().
Parses a chemical formula (such as "H2O" or "C6H5OH"), and walks the returned list of tokens to calculate the molecular weight.
</li>
<p>
@@ -124,7 +124,7 @@ Parser for CORBA IDL files.
</li>
<p>
-<li><a href="mozillaCalendarParser.py">mozillaCalendarParser.py</a>
+<li><a href="mozillaCalendarParser.py">mozillaCalendarParser.py</a>
<i>~ submission by Petri Savolainen</i><br>
Parser for Mozilla calendar (*.ics) files.
</li>
@@ -167,19 +167,19 @@ example configuration file.
<p>
<li><a href="romanNumerals.py">romanNumerals.py</a><br>
-A Roman numeral generator and parser example, showing the power of parse actions
+A Roman numeral generator and parser example, showing the power of parse actions
to compile Roman numerals into their integer values.
</li>
<p>
<li><a href="removeLineBreaks.py">removeLineBreaks.py</a><br>
A string transformer that converts text files with hard line-breaks into one with line breaks
-only between paragraphs. Useful when converting downloads from
-<a href="http://www.gutenberg.org">Project Gutenberg</a> to import to word processing apps
+only between paragraphs. Useful when converting downloads from
+<a href="http://www.gutenberg.org">Project Gutenberg</a> to import to word processing apps
that can reformat paragraphs once hard line-breaks are removed, or for loading into your Palm Pilot for portable perusal.
<p>
-See <a href="Successful Methods of Public Speaking.txt">Successful Methods of Public Speaking.txt</a> and
-<a href="Successful Methods of Public Speaking(2).txt">Successful Methods of Public Speaking(2).txt</a> for a sample
+See <a href="Successful Methods of Public Speaking.txt">Successful Methods of Public Speaking.txt</a> and
+<a href="Successful Methods of Public Speaking(2).txt">Successful Methods of Public Speaking(2).txt</a> for a sample
before and after (text file courtesy of Project Gutenberg).
</li>
<p>
@@ -190,13 +190,13 @@ An example program showing the utility of the listAllMatches option when specify
<p>
<li><a href="linenoExample.py">linenoExample.py</a><br>
-An example program showing how to use the string location to extract line and column numbers, or the
+An example program showing how to use the string location to extract line and column numbers, or the
source line of text.
</li>
<p>
<li><a href="parseListString.py">parseListString.py</a><br>
-An example program showing a progression of steps, how to parse a string representation of a Python
+An example program showing a progression of steps, how to parse a string representation of a Python
list back into a true list.
</li>
<p>
@@ -208,7 +208,7 @@ returning a Python value of the original type.
<p>
<li><a href="indentedGrammarExample.py">indentedGrammarExample.py</a><br>
-An example program showing how to parse a grammar using indentation for grouping,
+An example program showing how to parse a grammar using indentation for grouping,
such as is done in Python.
</li>
<p>
@@ -220,7 +220,7 @@ An example program showing how to use the new operatorPrecedence helper method t
<p>
<li><a href="simpleBool.py">simpleBool.py</a><br>
-An example program showing how to use the new operatorPrecedence helper method to define a
+An example program showing how to use the new operatorPrecedence helper method to define a
boolean expression parser, with parse actions associated with each operator to "compile" the expression
into a data structure that will evaluate the expression's boolean value.
</li>
@@ -290,7 +290,7 @@ domain objects instead of just strings.
<li><a href="datetimeParseActions.py">datetimeParseActions.py</a><br>
<b>New in version 1.5.7</b><br>
-Parse actions example showing a parse action returning a datetime object instead of
+Parse actions example showing a parse action returning a datetime object instead of
string tokens, and doing validation of the tokens, raising a ParseException if the
given YYYY/MM/DD string does not represent a valid date.
</li>
@@ -298,7 +298,7 @@ given YYYY/MM/DD string does not represent a valid date.
<li><a href="position.py">position.py</a><br>
<b>New in version 1.5.7</b><br>
-Demonstration of a couple of different ways to capture the location a particular
+Demonstration of a couple of different ways to capture the location a particular
expression was found within the overall input string.
</li>
<p>
diff --git a/examples/LAparser.py b/examples/LAparser.py
index ec75d6c..8ffe073 100644
--- a/examples/LAparser.py
+++ b/examples/LAparser.py
@@ -4,11 +4,11 @@ Based on: SimpleCalc.py example (author Paul McGuire) in pyparsing-1.3.3
Author: Mike Ellis
Copyright: Ellis & Grant, Inc. 2005
License: You may freely use, modify, and distribute this software.
-Warranty: THIS SOFTWARE HAS NO WARRANTY WHATSOEVER. USE AT YOUR OWN RISK.
+Warranty: THIS SOFTWARE HAS NO WARRANTY WHATSOEVER. USE AT YOUR OWN RISK.
Notes: Parses infix linear algebra (LA) notation for vectors, matrices, and scalars.
- Output is C code function calls. The parser can be run as an interactive
- interpreter or included as module to use for in-place substitution into C files
- containing LA equations.
+ Output is C code function calls. The parser can be run as an interactive
+ interpreter or included as module to use for in-place substitution into C files
+ containing LA equations.
Supported operations are:
OPERATION: INPUT OUTPUT
@@ -34,27 +34,27 @@ Notes: Parses infix linear algebra (LA) notation for vectors, matrices, and scal
Matrix determinant: "a = M3_b^Det" "a=mDeterminant(b)"
The parser requires the expression to be an equation. Each non-scalar variable
- must be prefixed with a type tag, 'M3_' for 3x3 matrices and 'V3_' for 3-vectors.
- For proper compilation of the C code, the variables need to be declared without
+ must be prefixed with a type tag, 'M3_' for 3x3 matrices and 'V3_' for 3-vectors.
+ For proper compilation of the C code, the variables need to be declared without
the prefix as float[3] for vectors and float[3][3] for matrices. The operations do
not modify any variables on the right-hand side of the equation.
- Equations may include nested expressions within parentheses. The allowed binary
+ Equations may include nested expressions within parentheses. The allowed binary
operators are '+-*/^' for scalars, and '+-*^@' for vectors and matrices with the
meanings defined in the table above.
Specifying an improper combination of operands, e.g. adding a vector to a matrix,
is detected by the parser and results in a Python TypeError Exception. The usual cause
- of this is omitting one or more tag prefixes. The parser knows nothing about a
- a variable's C declaration and relies entirely on the type tags. Errors in C
+ of this is omitting one or more tag prefixes. The parser knows nothing about a
+ a variable's C declaration and relies entirely on the type tags. Errors in C
declarations are not caught until compile time.
-Usage: To process LA equations embedded in source files, import this module and
+Usage: To process LA equations embedded in source files, import this module and
pass input and output file objects to the fprocess() function. You can
can also invoke the parser from the command line, e.g. 'python LAparser.py',
to run a small test suite and enter an interactive loop where you can enter
LA equations and see the resulting C code.
-
+
"""
import re,os,sys
@@ -87,7 +87,7 @@ def _assignVar( str, loc, toks ):
point = Literal('.')
e = CaselessLiteral('E')
plusorminus = Literal('+') | Literal('-')
-number = Word(nums)
+number = Word(nums)
integer = Combine( Optional(plusorminus) + number )
floatnumber = Combine( integer +
Optional( point + Optional(number) ) +
@@ -103,7 +103,7 @@ ident = Forward()
## a ParseException.
ident = Combine(Word(alphas + '-',alphanums + '_') + \
ZeroOrMore(lbracket + (Word(alphas + '-',alphanums + '_')|integer) + rbracket) \
- )
+ )
plus = Literal( "+" )
minus = Literal( "-" )
@@ -118,14 +118,14 @@ expop = Literal( "^" )
assignop = Literal( "=" )
expr = Forward()
-atom = ( ( e | floatnumber | integer | ident ).setParseAction(_pushFirst) |
+atom = ( ( e | floatnumber | integer | ident ).setParseAction(_pushFirst) |
( lpar + expr.suppress() + rpar )
)
factor = Forward()
factor << atom + ZeroOrMore( ( expop + factor ).setParseAction( _pushFirst ) )
-
+
term = factor + ZeroOrMore( ( multop + factor ).setParseAction( _pushFirst ) )
-expr << term + ZeroOrMore( ( addop + term ).setParseAction( _pushFirst ) )
+expr << term + ZeroOrMore( ( addop + term ).setParseAction( _pushFirst ) )
equation = (ident + assignop).setParseAction(_assignVar) + expr + StringEnd()
# End of grammar definition
@@ -143,12 +143,12 @@ class UnaryUnsupportedError(Exception): pass
def _isvec(ident):
if ident[0] == '-' and ident[1:vplen+1] == vprefix:
- raise UnaryUnsupportedError
+ raise UnaryUnsupportedError
else: return ident[0:vplen] == vprefix
-def _ismat(ident):
+def _ismat(ident):
if ident[0] == '-' and ident[1:mplen+1] == mprefix:
- raise UnaryUnsupportedError
+ raise UnaryUnsupportedError
else: return ident[0:mplen] == mprefix
def _isscalar(ident): return not (_isvec(ident) or _ismat(ident))
@@ -163,41 +163,41 @@ def _isscalar(ident): return not (_isvec(ident) or _ismat(ident))
## nested function calls, e.g. "V3_a + V3_b*5" yields "V3_vAdd(a,vScale(b,5)". Note that prefixes are
## stripped from operands and function names within the argument list to the outer function and
## the appropriate prefix is placed on the outer function for removal later as the stack evaluation
-## recurses toward the final assignment statement.
+## recurses toward the final assignment statement.
-def _addfunc(a,b):
+def _addfunc(a,b):
if _isscalar(a) and _isscalar(b): return "(%s+%s)"%(a,b)
- if _isvec(a) and _isvec(b): return "%svAdd(%s,%s)"%(vprefix,a[vplen:],b[vplen:])
- if _ismat(a) and _ismat(b): return "%smAdd(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
+ if _isvec(a) and _isvec(b): return "%svAdd(%s,%s)"%(vprefix,a[vplen:],b[vplen:])
+ if _ismat(a) and _ismat(b): return "%smAdd(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
else: raise TypeError
-def _subfunc(a,b):
+def _subfunc(a,b):
if _isscalar(a) and _isscalar(b): return "(%s-%s)"%(a,b)
- if _isvec(a) and _isvec(b): return "%svSubtract(%s,%s)"%(vprefix,a[vplen:],b[vplen:])
- if _ismat(a) and _ismat(b): return "%smSubtract(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
+ if _isvec(a) and _isvec(b): return "%svSubtract(%s,%s)"%(vprefix,a[vplen:],b[vplen:])
+ if _ismat(a) and _ismat(b): return "%smSubtract(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
else: raise TypeError
-def _mulfunc(a,b):
+def _mulfunc(a,b):
if _isscalar(a) and _isscalar(b): return "%s*%s"%(a,b)
- if _isvec(a) and _isvec(b): return "vDot(%s,%s)"%(a[vplen:],b[vplen:])
- if _ismat(a) and _ismat(b): return "%smMultiply(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
- if _ismat(a) and _isvec(b): return "%smvMultiply(%s,%s)"%(vprefix,a[mplen:],b[vplen:])
- if _ismat(a) and _isscalar(b): return "%smScale(%s,%s)"%(mprefix,a[mplen:],b)
- if _isvec(a) and _isscalar(b): return "%svScale(%s,%s)"%(vprefix,a[mplen:],b)
+ if _isvec(a) and _isvec(b): return "vDot(%s,%s)"%(a[vplen:],b[vplen:])
+ if _ismat(a) and _ismat(b): return "%smMultiply(%s,%s)"%(mprefix,a[mplen:],b[mplen:])
+ if _ismat(a) and _isvec(b): return "%smvMultiply(%s,%s)"%(vprefix,a[mplen:],b[vplen:])
+ if _ismat(a) and _isscalar(b): return "%smScale(%s,%s)"%(mprefix,a[mplen:],b)
+ if _isvec(a) and _isscalar(b): return "%svScale(%s,%s)"%(vprefix,a[mplen:],b)
else: raise TypeError
def _outermulfunc(a,b):
## The '@' operator is used for the vector outer product.
- if _isvec(a) and _isvec(b):
+ if _isvec(a) and _isvec(b):
return "%svOuterProduct(%s,%s)"%(mprefix,a[vplen:],b[vplen:])
else: raise TypeError
def _divfunc(a,b):
- ## The '/' operator is used only for scalar division
+ ## The '/' operator is used only for scalar division
if _isscalar(a) and _isscalar(b): return "%s/%s"%(a,b)
else: raise TypeError
-def _expfunc(a,b):
+def _expfunc(a,b):
## The '^' operator is used for exponentiation on scalars and
## as a marker for unary operations on vectors and matrices.
if _isscalar(a) and _isscalar(b): return "pow(%s,%s)"%(str(a),str(b))
@@ -208,17 +208,17 @@ def _expfunc(a,b):
if _isvec(a) and b=='Mag2': return "vMagnitude2(%s)"%(a[vplen:])
else: raise TypeError
-def _assignfunc(a,b):
+def _assignfunc(a,b):
## The '=' operator is used for assignment
if _isscalar(a) and _isscalar(b): return "%s=%s"%(a,b)
- if _isvec(a) and _isvec(b): return "vCopy(%s,%s)"%(a[vplen:],b[vplen:])
- if _ismat(a) and _ismat(b): return "mCopy(%s,%s)"%(a[mplen:],b[mplen:])
+ if _isvec(a) and _isvec(b): return "vCopy(%s,%s)"%(a[vplen:],b[vplen:])
+ if _ismat(a) and _ismat(b): return "mCopy(%s,%s)"%(a[mplen:],b[mplen:])
else: raise TypeError
## End of BIO func definitions
##----------------------------------------------------------------------------
-# Map operator symbols to corresponding BIO funcs
+# Map operator symbols to corresponding BIO funcs
opn = { "+" : ( _addfunc ),
"-" : ( _subfunc ),
"*" : ( _mulfunc ),
@@ -237,25 +237,25 @@ def _evaluateStack( s ):
result = opn[op]( op1, op2 )
if debug_flag: print(result)
return result
- else:
+ else:
return op
##----------------------------------------------------------------------------
# The parse function that invokes all of the above.
def parse(input_string):
"""
- Accepts an input string containing an LA equation, e.g.,
+ Accepts an input string containing an LA equation, e.g.,
"M3_mymatrix = M3_anothermatrix^-1" returns C code function
calls that implement the expression.
"""
-
+
global exprStack
global targetvar
# Start with a blank exprStack and a blank targetvar
exprStack = []
targetvar=None
-
+
if input_string != '':
# try parsing the input string
try:
@@ -266,14 +266,14 @@ def parse(input_string):
print(" "*(err.column-1) + "^", file=sys.stderr)
print(err, file=sys.stderr)
raise
-
+
# show result of parsing the input string
- if debug_flag:
+ if debug_flag:
print(input_string, "->", L)
print("exprStack=", exprStack)
-
+
# Evaluate the stack of parsed operands, emitting C code.
- try:
+ try:
result=_evaluateStack(exprStack)
except TypeError:
print("Unsupported operation on right side of '%s'.\nCheck for missing or incorrect tags on non-scalar operands."%input_string, file=sys.stderr)
@@ -281,8 +281,8 @@ def parse(input_string):
except UnaryUnsupportedError:
print("Unary negation is not supported for vectors and matrices: '%s'"%input_string, file=sys.stderr)
raise
-
- # Create final assignment and print it.
+
+ # Create final assignment and print it.
if debug_flag: print("var=",targetvar)
if targetvar != None:
try:
@@ -311,22 +311,22 @@ def fprocess(infilep,outfilep):
Other text in the file is unaltered.
- The arguments are file objects (NOT file names) opened for reading and
+ The arguments are file objects (NOT file names) opened for reading and
writing, respectively.
"""
pattern = r'\[\[\s*(.*?)\s*\]\]'
eqn = re.compile(pattern,re.DOTALL)
s = infilep.read()
- def parser(mo):
+ def parser(mo):
ccode = parse(mo.group(1))
return "/* %s */\n%s;\nLAParserBufferReset();\n"%(mo.group(1),ccode)
content = eqn.sub(parser,s)
outfilep.write(content)
-
+
##-----------------------------------------------------------------------------------
def test():
- """
+ """
Tests the parsing of various supported expressions. Raises
an AssertError if the output is not what is expected. Prints the
input, expected output, and actual output for all tests.
@@ -357,9 +357,9 @@ def test():
("Vector magnitude","a = V3_b^Mag","a=sqrt(vMagnitude2(b))"),
("Complicated expression", "myscalar = (M3_amatrix * V3_bvector)^Mag + 5*(-xyz[i] + 2.03^2)","myscalar=(sqrt(vMagnitude2(mvMultiply(amatrix,bvector)))+5*(-xyz[i]+pow(2.03,2)))"),
("Complicated Multiline", "myscalar = \n(M3_amatrix * V3_bvector)^Mag +\n 5*(xyz + 2.03^2)","myscalar=(sqrt(vMagnitude2(mvMultiply(amatrix,bvector)))+5*(xyz+pow(2.03,2)))")
-
+
]
-
+
for t in testcases:
name,input,expected = t
print(name)
@@ -369,13 +369,13 @@ def test():
print(" %s received"%result)
print("")
assert expected == result
-
+
##TODO: Write testcases with invalid expressions and test that the expected
## exceptions are raised.
print("Tests completed!")
##----------------------------------------------------------------------------
-## The following is executed only when this module is executed as
+## The following is executed only when this module is executed as
## command line script. It runs a small test suite (see above)
## and then enters an interactive loop where you
## can enter expressions and see the resulting C code as output.
@@ -386,7 +386,7 @@ if __name__ == '__main__':
# input_string
input_string=''
-
+
# Display instructions on how to use the program interactively
interactiveusage = """
Entering interactive mode:
@@ -396,7 +396,7 @@ if __name__ == '__main__':
"""
print(interactiveusage)
input_string = input("> ")
-
+
while input_string != 'quit':
if input_string == "debug on":
debug_flag = True
@@ -406,12 +406,10 @@ if __name__ == '__main__':
try:
print(parse(input_string))
except:
- pass
+ pass
# obtain new input string
input_string = input("> ")
-
+
# if user types 'quit' then say goodbye
print("Good bye!")
-
-
diff --git a/examples/Setup.ini b/examples/Setup.ini
index 4574b1c..a08016c 100644
--- a/examples/Setup.ini
+++ b/examples/Setup.ini
@@ -21,7 +21,7 @@ RemString1=Set up has finished remove ESS device driver and cleaned your system.
RemString2=ESS devices is removed completely.No need to reboot. If you want to reinstall, run the setup again with driver package.
stshowmsg1=Setup will clean the installed files and update registry.
stshowmsg2=Setup is updating system's registry ....
-stshowmsg3=Setup is starting
+stshowmsg3=Setup is starting
sysdriver=es56cvmp.sys
mdmzn=mdmm3com.inf
@@ -34,7 +34,7 @@ audiocat=allem3.cat
audioinf=M3i
sysaudio=es198xdl.sys
audiovxd=es198x.vxd
-
+
[Languages]
Default=0x0009
count=30
diff --git a/examples/SimpleCalc.py b/examples/SimpleCalc.py
index 0bf62e4..15a1817 100644
--- a/examples/SimpleCalc.py
+++ b/examples/SimpleCalc.py
@@ -1,31 +1,31 @@
# SimpleCalc.py
#
-# Demonstration of the parsing module,
+# Demonstration of the parsing module,
# Sample usage
#
-# $ python SimpleCalc.py
+# $ python SimpleCalc.py
# Type in the string to be parse or 'quit' to exit the program
-# > g=67.89 + 7/5
+# > g=67.89 + 7/5
# 69.29
# > g
# 69.29
-# > h=(6*g+8.8)-g
+# > h=(6*g+8.8)-g
# 355.25
-# > h + 1
+# > h + 1
# 356.25
-# > 87.89 + 7/5
+# > 87.89 + 7/5
# 89.29
# > ans+10
# 99.29
# > quit
# Good bye!
#
-#
+#
# Uncomment the line below for readline support on interactive terminal
-# import readline
+# import readline
from pyparsing import ParseException, Word, alphas, alphanums
import math
@@ -64,11 +64,11 @@ pattern = assignment | arithExpr
if __name__ == '__main__':
# input_string
input_string=''
-
+
# Display instructions on how to quit the program
print("Type in the string to be parsed or 'quit' to exit the program")
input_string = input("> ")
-
+
while input_string.strip().lower() != 'quit':
if input_string.strip().lower() == 'debug':
debug_flag=True
@@ -77,19 +77,19 @@ if __name__ == '__main__':
# Reset to an empty exprStack
del exprStack[:]
-
+
if input_string != '':
# try parsing the input string
try:
L=pattern.parseString(input_string, parseAll=True)
except ParseException as err:
L=['Parse Failure', input_string, (str(err), err.line, err.column)]
-
+
# show result of parsing the input string
if debug_flag: print(input_string, "->", L)
if len(L)==0 or L[0] != 'Parse Failure':
if debug_flag: print("exprStack=", exprStack)
-
+
# calculate result , store a copy in ans , display the result to user
try:
result=evaluateStack(exprStack)
@@ -98,7 +98,7 @@ if __name__ == '__main__':
else:
variables['ans']=result
print(result)
-
+
# Assign result to a variable if required
if L.varname:
variables[L.varname] = result
@@ -109,11 +109,9 @@ if __name__ == '__main__':
print(err_line)
print(" "*(err_col-1) + "^")
print(err_str)
-
+
# obtain new input string
input_string = input("> ")
-
+
# if user type 'quit' then say goodbye
print("Good bye!")
-
-
diff --git a/examples/SingleForm.dfm b/examples/SingleForm.dfm
index 7a52734..f036e5e 100644
--- a/examples/SingleForm.dfm
+++ b/examples/SingleForm.dfm
@@ -75,7 +75,7 @@ object Form1: TForm1
Connection.Params.Strings = (
'BlobSize=-1'
'CommitRetain=False'
-
+
'Database=C:\Program Files\Common Files\Borland Shared\Data\emplo' +
'yee.gdb'
'DriverName=Interbase'
diff --git a/examples/TAP.py b/examples/TAP.py
index 139e47c..3c642e4 100644
--- a/examples/TAP.py
+++ b/examples/TAP.py
@@ -43,8 +43,8 @@ description = Regex("[^#\n]+")
description.setParseAction(lambda t:t[0].lstrip('- '))
TODO,SKIP = map(CaselessLiteral,'TODO SKIP'.split())
-directive = Group(Suppress('#') + (TODO + restOfLine |
- FollowedBy(SKIP) +
+directive = Group(Suppress('#') + (TODO + restOfLine |
+ FollowedBy(SKIP) +
restOfLine.copy().setParseAction(lambda t:['SKIP',t[0]]) ))
commentLine = Suppress("#") + empty + restOfLine
@@ -52,11 +52,11 @@ commentLine = Suppress("#") + empty + restOfLine
testLine = Group(
Optional(OneOrMore(commentLine + NL))("comments") +
testStatus("passed") +
- Optional(integer)("testNumber") +
- Optional(description)("description") +
+ Optional(integer)("testNumber") +
+ Optional(description)("description") +
Optional(directive)("directive")
)
-bailLine = Group(Literal("Bail out!")("BAIL") +
+bailLine = Group(Literal("Bail out!")("BAIL") +
empty + Optional(restOfLine)("reason"))
tapOutputParser = Optional(Group(plan)("plan") + NL) & \
@@ -89,7 +89,7 @@ class TAPSummary(object):
expected = list(range(1, int(results.plan.ubound)+1))
else:
expected = list(range(1,len(results.tests)+1))
-
+
for i,res in enumerate(results.tests):
# test for bail out
if res.BAIL:
@@ -99,7 +99,7 @@ class TAPSummary(object):
self.skippedTests += [ TAPTest.bailedTest(ii) for ii in expected[i:] ]
self.bailReason = res.reason
break
-
+
#~ print res.dump()
testnum = i+1
if res.testNumber != "":
@@ -109,16 +109,16 @@ class TAPSummary(object):
res["testNumber"] = testnum
test = TAPTest(res)
- if test.passed:
+ if test.passed:
self.passedTests.append(test)
else:
self.failedTests.append(test)
if test.skipped: self.skippedTests.append(test)
if test.todo: self.todoTests.append(test)
if test.todo and test.passed: self.bonusTests.append(test)
-
+
self.passedSuite = not self.bail and (set(self.failedTests)-set(self.todoTests) == set())
-
+
def summary(self, showPassed=False, showAll=False):
testListStr = lambda tl : "[" + ",".join(str(t.num) for t in tl) + "]"
summaryText = []
diff --git a/examples/adventureEngine.py b/examples/adventureEngine.py
index cdc618e..259c03e 100644
--- a/examples/adventureEngine.py
+++ b/examples/adventureEngine.py
@@ -10,10 +10,10 @@ import string
def aOrAn( item ):
if item.desc[0] in "aeiou":
- return "an " + item.desc
+ return "an " + item.desc
else:
return "a " + item.desc
-
+
def enumerateItems(l):
if len(l) == 0: return "nothing"
out = []
@@ -31,33 +31,33 @@ def enumerateDoors(l):
out.append("and")
out.append(l[-1])
return " ".join(out)
-
+
class Room(object):
def __init__(self, desc):
self.desc = desc
self.inv = []
self.gameOver = False
self.doors = [None,None,None,None]
-
+
def __getattr__(self,attr):
return \
- {
+ {
"n":self.doors[0],
"s":self.doors[1],
"e":self.doors[2],
"w":self.doors[3],
}[attr]
-
+
def enter(self,player):
if self.gameOver:
player.gameOver = True
-
+
def addItem(self, it):
self.inv.append(it)
-
+
def removeItem(self,it):
self.inv.remove(it)
-
+
def describe(self):
print(self.desc)
visibleItems = [ it for it in self.inv if it.isVisible ]
@@ -69,12 +69,12 @@ class Room(object):
print("There {} {} here.".format(is_form, enumerateItems(visibleItems)))
else:
print("You see %s." % (enumerateItems(visibleItems)))
-
+
class Exit(Room):
def __init__(self):
super(Exit,self).__init__("")
-
+
def enter(self,player):
player.gameOver = True
@@ -94,10 +94,10 @@ class Item(object):
self.usableConditionTest = None
self.cantTakeMessage = "You can't take that!"
Item.items[desc] = self
-
+
def __str__(self):
return self.desc
-
+
def breakItem(self):
if not self.isBroken:
print("<Crash!>")
@@ -109,7 +109,7 @@ class Item(object):
return self.usableConditionTest( player, target )
else:
return False
-
+
def useItem(self, player, target):
if self.useAction:
self.useAction(player, self, target)
@@ -126,7 +126,7 @@ class OpenableItem(Item):
self.contents = contents
else:
self.contents = []
-
+
def openItem(self, player):
if not self.isOpened:
self.isOpened = not self.isOpened
@@ -135,7 +135,7 @@ class OpenableItem(Item):
player.room.addItem( item )
self.contents = []
self.desc = "open " + self.desc
-
+
def closeItem(self, player):
if self.isOpened:
self.isOpened = not self.isOpened
@@ -152,10 +152,10 @@ class Command(object):
@staticmethod
def helpDescription():
return ""
-
+
def _doCommand(self, player):
pass
-
+
def __call__(self, player ):
print(self.verbProg.capitalize()+"...")
self._doCommand(player)
@@ -168,12 +168,12 @@ class MoveCommand(Command):
@staticmethod
def helpDescription():
- return """MOVE or GO - go NORTH, SOUTH, EAST, or WEST
+ return """MOVE or GO - go NORTH, SOUTH, EAST, or WEST
(can abbreviate as 'GO N' and 'GO W', or even just 'E' and 'S')"""
-
+
def _doCommand(self, player):
rm = player.room
- nextRoom = rm.doors[
+ nextRoom = rm.doors[
{
"N":0,
"S":1,
@@ -195,7 +195,7 @@ class TakeCommand(Command):
@staticmethod
def helpDescription():
return "TAKE or PICKUP or PICK UP - pick up an object (but some are deadly)"
-
+
def _doCommand(self, player):
rm = player.room
subj = Item.items[self.subject]
@@ -217,7 +217,7 @@ class DropCommand(Command):
@staticmethod
def helpDescription():
return "DROP or LEAVE - drop an object (but fragile items may break)"
-
+
def _doCommand(self, player):
rm = player.room
subj = Item.items[self.subject]
@@ -234,7 +234,7 @@ class InventoryCommand(Command):
@staticmethod
def helpDescription():
return "INVENTORY or INV or I - lists what items you have"
-
+
def _doCommand(self, player):
print("You have %s." % enumerateItems( player.inv ))
@@ -245,7 +245,7 @@ class LookCommand(Command):
@staticmethod
def helpDescription():
return "LOOK or L - describes the current room and any objects in it"
-
+
def _doCommand(self, player):
player.room.describe()
@@ -256,7 +256,7 @@ class DoorsCommand(Command):
@staticmethod
def helpDescription():
return "DOORS - display what doors are visible from this room"
-
+
def _doCommand(self, player):
rm = player.room
numDoors = sum([1 for r in rm.doors if r is not None])
@@ -267,7 +267,7 @@ class DoorsCommand(Command):
reply = "There is a door to the "
else:
reply = "There are doors to the "
- doorNames = [ {0:"north", 1:"south", 2:"east", 3:"west"}[i]
+ doorNames = [ {0:"north", 1:"south", 2:"east", 3:"west"}[i]
for i,d in enumerate(rm.doors) if d is not None ]
#~ print doorNames
reply += enumerateDoors( doorNames )
@@ -286,7 +286,7 @@ class UseCommand(Command):
@staticmethod
def helpDescription():
return "USE or U - use an object, optionally IN or ON another object"
-
+
def _doCommand(self, player):
rm = player.room
availItems = rm.inv + player.inv
@@ -306,7 +306,7 @@ class OpenCommand(Command):
@staticmethod
def helpDescription():
return "OPEN or O - open an object"
-
+
def _doCommand(self, player):
rm = player.room
availItems = rm.inv+player.inv
@@ -329,7 +329,7 @@ class CloseCommand(Command):
@staticmethod
def helpDescription():
return "CLOSE or CL - close an object"
-
+
def _doCommand(self, player):
rm = player.room
availItems = rm.inv+player.inv
@@ -351,7 +351,7 @@ class QuitCommand(Command):
@staticmethod
def helpDescription():
return "QUIT or Q - ends the game"
-
+
def _doCommand(self, player):
print("Ok....")
player.gameOver = True
@@ -363,7 +363,7 @@ class HelpCommand(Command):
@staticmethod
def helpDescription():
return "HELP or H or ? - displays this help message"
-
+
def _doCommand(self, player):
print("Enter any of the following commands (not case sensitive):")
for cmd in [
@@ -388,28 +388,28 @@ class AppParseException(ParseException):
class Parser(object):
def __init__(self):
self.bnf = self.makeBNF()
-
+
def makeBNF(self):
- invVerb = oneOf("INV INVENTORY I", caseless=True)
- dropVerb = oneOf("DROP LEAVE", caseless=True)
+ invVerb = oneOf("INV INVENTORY I", caseless=True)
+ dropVerb = oneOf("DROP LEAVE", caseless=True)
takeVerb = oneOf("TAKE PICKUP", caseless=True) | \
(CaselessLiteral("PICK") + CaselessLiteral("UP") )
moveVerb = oneOf("MOVE GO", caseless=True) | empty
- useVerb = oneOf("USE U", caseless=True)
+ useVerb = oneOf("USE U", caseless=True)
openVerb = oneOf("OPEN O", caseless=True)
closeVerb = oneOf("CLOSE CL", caseless=True)
- quitVerb = oneOf("QUIT Q", caseless=True)
- lookVerb = oneOf("LOOK L", caseless=True)
+ quitVerb = oneOf("QUIT Q", caseless=True)
+ lookVerb = oneOf("LOOK L", caseless=True)
doorsVerb = CaselessLiteral("DOORS")
helpVerb = oneOf("H HELP ?",caseless=True)
-
+
itemRef = OneOrMore(Word(alphas)).setParseAction( self.validateItemName )
nDir = oneOf("N NORTH",caseless=True).setParseAction(replaceWith("N"))
sDir = oneOf("S SOUTH",caseless=True).setParseAction(replaceWith("S"))
eDir = oneOf("E EAST",caseless=True).setParseAction(replaceWith("E"))
wDir = oneOf("W WEST",caseless=True).setParseAction(replaceWith("W"))
moveDirection = nDir | sDir | eDir | wDir
-
+
invCommand = invVerb
dropCommand = dropVerb + itemRef("item")
takeCommand = takeVerb + itemRef("item")
@@ -423,7 +423,7 @@ class Parser(object):
lookCommand = lookVerb
doorsCommand = doorsVerb
helpCommand = helpVerb
-
+
# attach command classes to expressions
invCommand.setParseAction(InventoryCommand)
dropCommand.setParseAction(DropCommand)
@@ -438,18 +438,18 @@ class Parser(object):
helpCommand.setParseAction(HelpCommand)
# define parser using all command expressions
- return ( invCommand |
+ return ( invCommand |
useCommand |
- openCommand |
- closeCommand |
- dropCommand |
- takeCommand |
- moveCommand |
- lookCommand |
- doorsCommand |
+ openCommand |
+ closeCommand |
+ dropCommand |
+ takeCommand |
+ moveCommand |
+ lookCommand |
+ doorsCommand |
helpCommand |
quitCommand )("command") + LineEnd()
-
+
def validateItemName(self,s,l,t):
iname = " ".join(t)
if iname not in Item.items:
@@ -468,13 +468,13 @@ class Parser(object):
"Excuse me?",
"???",
"What?" ] ))
-
+
class Player(object):
def __init__(self, name):
self.name = name
self.gameOver = False
self.inv = []
-
+
def moveTo(self, rm):
self.room = rm
rm.enter(self)
@@ -484,33 +484,33 @@ class Player(object):
print("Game over!")
else:
rm.describe()
-
+
def take(self,it):
if it.isDeadly:
print("Aaaagh!...., the %s killed me!" % it)
self.gameOver = True
else:
self.inv.append(it)
-
+
def drop(self,it):
self.inv.remove(it)
if it.isFragile:
it.breakItem()
-
+
def createRooms( rm ):
"""
create rooms, using multiline string showing map layout
string contains symbols for the following:
- A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
+ A-Z, a-z indicate rooms, and rooms will be stored in a dictionary by
reference letter
-, | symbols indicate connection between rooms
<, >, ^, . symbols indicate one-way connection between rooms
"""
# start with empty dictionary of rooms
ret = {}
-
- # look for room symbols, and initialize dictionary
+
+ # look for room symbols, and initialize dictionary
# - exit room is always marked 'Z'
for c in rm:
if c in string.ascii_letters:
@@ -529,9 +529,9 @@ def createRooms( rm ):
s = None
e = None
w = None
-
+
# look in neighboring cells for connection symbols (must take
- # care to guard that neighboring cells exist before testing
+ # care to guard that neighboring cells exist before testing
# contents)
if col > 0 and line[col-1] in "<-":
other = line[col-2]
diff --git a/examples/antlr_grammar.py b/examples/antlr_grammar.py
index 76c681c..7863a77 100644
--- a/examples/antlr_grammar.py
+++ b/examples/antlr_grammar.py
@@ -23,14 +23,14 @@ INT = Word(nums)
ESC = Literal('\\') + (oneOf(list(r'nrtbf\">'+"'")) | ('u' + Word(hexnums, exact=4)) | Word(printables, exact=1))
LITERAL_CHAR = ESC | ~(Literal("'") | Literal('\\')) + Word(printables, exact=1)
CHAR_LITERAL = Suppress("'") + LITERAL_CHAR + Suppress("'")
-STRING_LITERAL = Suppress("'") + Combine(OneOrMore(LITERAL_CHAR)) + Suppress("'")
+STRING_LITERAL = Suppress("'") + Combine(OneOrMore(LITERAL_CHAR)) + Suppress("'")
DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"'
DOUBLE_ANGLE_STRING_LITERAL = '<<' + ZeroOrMore(Word(printables, exact=1)) + '>>'
TOKEN_REF = Word(alphas.upper(), alphanums+'_')
RULE_REF = Word(alphas.lower(), alphanums+'_')
ACTION_ESC = (Suppress("\\") + Suppress("'")) | Suppress('\\"') | Suppress('\\') + (~(Literal("'") | Literal('"')) + Word(printables, exact=1))
ACTION_CHAR_LITERAL = Suppress("'") + (ACTION_ESC | ~(Literal('\\') | Literal("'")) + Word(printables, exact=1)) + Suppress("'")
-ACTION_STRING_LITERAL = Suppress('"') + ZeroOrMore(ACTION_ESC | ~(Literal('\\') | Literal('"')) + Word(printables, exact=1)) + Suppress('"')
+ACTION_STRING_LITERAL = Suppress('"') + ZeroOrMore(ACTION_ESC | ~(Literal('\\') | Literal('"')) + Word(printables, exact=1)) + Suppress('"')
SRC = Suppress('src') + ACTION_STRING_LITERAL("file") + INT("line")
id = TOKEN_REF | RULE_REF
SL_COMMENT = Suppress('//') + Suppress('$ANTLR') + SRC | ZeroOrMore(~EOL + Word(printables)) + EOL
@@ -91,7 +91,7 @@ rewrite = Optional(Literal('TODO REWRITE RULES TODO'))
block << Suppress('(') + Optional(Optional(optionsSpec("opts")) + Suppress(':')) + Group(alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives"))("block") + Suppress(')')
altList = alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives")
exceptionHandler = Suppress('catch') + ARG_ACTION + ACTION
-finallyClause = Suppress('finally') + ACTION
+finallyClause = Suppress('finally') + ACTION
exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause
ruleHeading = Optional(ML_COMMENT)("ruleComment") + Optional(modifier)("modifier") + id("ruleName") + Optional("!") + Optional(ARG_ACTION("arg")) + Optional(Suppress('returns') + ARG_ACTION("rt")) + Optional(throwsSpec) + Optional(optionsSpec) + Optional(ruleScopeSpec) + ZeroOrMore(ruleAction)
@@ -125,7 +125,7 @@ def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
regex = r'['+str(element.atom.c1[0])+'-'+str(element.atom.c2[0]+']')
rule = Regex(regex)("anonymous_regex")
elif hasattr(element, 'block') and element.block != '':
- rule = __antlrAlternativesConverter(pyparsingRules, element.block)
+ rule = __antlrAlternativesConverter(pyparsingRules, element.block)
else:
ruleRef = element.atom
assert ruleRef in pyparsingRules
@@ -145,7 +145,7 @@ def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
rule = Group(And(elementList))("anonymous_and")
else:
rule = elementList[0]
- assert rule != None
+ assert rule != None
return rule
def __antlrRuleConverter(pyparsingRules, antlrRule):
@@ -169,11 +169,11 @@ def antlrConverter(antlrGrammarTree):
for antlrRuleName, antlrRule in list(antlrRules.items()):
pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule)
assert pyparsingRule != None
- pyparsingRules[antlrRuleName] << pyparsingRule
+ pyparsingRules[antlrRuleName] << pyparsingRule
return pyparsingRules
if __name__ == "__main__":
-
+
text = """grammar SimpleCalc;
options {
@@ -209,7 +209,7 @@ NUMBER : (DIGIT)+ ;
fragment DIGIT : '0'..'9' ;
"""
-
+
grammar().validate()
antlrGrammarTree = grammar().parseString(text)
print(antlrGrammarTree.asXML("antlrGrammarTree"))
diff --git a/examples/apicheck.py b/examples/apicheck.py
index 7bca41a..cd35a9a 100644
--- a/examples/apicheck.py
+++ b/examples/apicheck.py
@@ -13,8 +13,8 @@ LBRACK,RBRACK,LBRACE,RBRACE = map(Suppress,"[]{}")
ident = Word(alphas,alphanums+"_") | QuotedString("{",endQuoteChar="}")
arg = "$" + ident
-# define an API call with a specific number of arguments - using '-'
-# will ensure that after matching procname, an incorrect number of args will
+# define an API call with a specific number of arguments - using '-'
+# will ensure that after matching procname, an incorrect number of args will
# raise a ParseSyntaxException, which will interrupt the scanString
def apiProc(name, numargs):
return LBRACK + Keyword(name)("procname") - arg*numargs + RBRACK
@@ -55,4 +55,3 @@ while 1:
api_scanner = apiRef.scanString(test)
except StopIteration:
break
-
diff --git a/examples/cLibHeader.py b/examples/cLibHeader.py
index bb98521..6bb1c25 100644
--- a/examples/cLibHeader.py
+++ b/examples/cLibHeader.py
@@ -1,4 +1,4 @@
-#
+#
# cLibHeader.py
#
# A simple parser to extract API doc info from a C header file
@@ -12,7 +12,7 @@ testdata = """
int func1(float *vec, int len, double arg1);
int func2(float **arr, float *vec, int len, double arg1, double arg2);
"""
-
+
ident = Word(alphas, alphanums + "_")
vartype = Combine( oneOf("float double int char") + Optional(Word("*")), adjacent = False)
arglist = delimitedList(Group(vartype("type") + ident("name")))
diff --git a/examples/chemicalFormulas.py b/examples/chemicalFormulas.py
index ce66afd..31d77de 100644
--- a/examples/chemicalFormulas.py
+++ b/examples/chemicalFormulas.py
@@ -13,7 +13,7 @@ atomicWeight = {
"Cl" : 35.4527,
"C" : 12.0107
}
-
+
def test( bnf, strg, fn=None ):
try:
print(strg,"->", bnf.parseString( strg ), end=' ')
@@ -62,6 +62,3 @@ fn = lambda elemList : sum(atomicWeight[elem.symbol]*elem.qty for elem in elemLi
test( formula, "H2O", fn )
test( formula, "C6H5OH", fn )
test( formula, "NaCl", fn )
-
-
-
diff --git a/examples/configParse.py b/examples/configParse.py
index 769249c..db7b6c7 100644
--- a/examples/configParse.py
+++ b/examples/configParse.py
@@ -15,7 +15,7 @@ import pprint
inibnf = None
def inifile_BNF():
global inibnf
-
+
if not inibnf:
# punctuation
@@ -23,24 +23,24 @@ def inifile_BNF():
rbrack = Literal("]").suppress()
equals = Literal("=").suppress()
semi = Literal(";")
-
+
comment = semi + Optional( restOfLine )
-
+
nonrbrack = "".join( [ c for c in printables if c != "]" ] ) + " \t"
nonequals = "".join( [ c for c in printables if c != "=" ] ) + " \t"
-
+
sectionDef = lbrack + Word( nonrbrack ) + rbrack
keyDef = ~lbrack + Word( nonequals ) + equals + empty + restOfLine
# strip any leading or trailing blanks from key
def stripKey(tokens):
tokens[0] = tokens[0].strip()
keyDef.setParseAction(stripKey)
-
+
# using Dict will allow retrieval of named data fields as attributes of the parsed results
inibnf = Dict( ZeroOrMore( Group( sectionDef + Dict( ZeroOrMore( Group( keyDef ) ) ) ) ) )
-
+
inibnf.ignore( comment )
-
+
return inibnf
@@ -59,14 +59,13 @@ def test( strng ):
print(err.line)
print(" "*(err.column-1) + "^")
print(err)
-
+
iniFile.close()
print()
return tokens
-
+
if __name__ == "__main__":
ini = test("setup.ini")
- print("ini['Startup']['modemid'] =", ini['Startup']['modemid'])
+ print("ini['Startup']['modemid'] =", ini['Startup']['modemid'])
print("ini.Startup =", ini.Startup)
print("ini.Startup.modemid =", ini.Startup.modemid)
-
diff --git a/examples/cpp_enum_parser.py b/examples/cpp_enum_parser.py
index cd8f525..ca2c04b 100644
--- a/examples/cpp_enum_parser.py
+++ b/examples/cpp_enum_parser.py
@@ -3,7 +3,7 @@
#
# Posted by Mark Tolonen on comp.lang.python in August, 2009,
# Used with permission.
-#
+#
# Parser that scans through C or C++ code for enum definitions, and
# generates corresponding Python constant definitions.
#
diff --git a/examples/datetimeParseActions.py b/examples/datetimeParseActions.py
index e42d2c6..aa9e016 100644
--- a/examples/datetimeParseActions.py
+++ b/examples/datetimeParseActions.py
@@ -37,16 +37,16 @@ date_expr.setParseAction(convertToDatetime)
date_expr.runTests("""\
- 2000/1/1
+ 2000/1/1
# invalid month
- 2000/13/1
+ 2000/13/1
# 1900 was not a leap year
- 1900/2/29
+ 1900/2/29
# but 2000 was
- 2000/2/29
+ 2000/2/29
""")
@@ -55,14 +55,14 @@ date_expr = pyparsing_common.iso8601_date.setParseAction(pyparsing_common.conver
date_expr.ignore(pythonStyleComment)
date_expr.runTests("""\
- 2000-01-01
+ 2000-01-01
# invalid month
- 2000-13-01
+ 2000-13-01
# 1900 was not a leap year
- 1900-02-29
+ 1900-02-29
# but 2000 was
- 2000-02-29
- """) \ No newline at end of file
+ 2000-02-29
+ """)
diff --git a/examples/deltaTime.py b/examples/deltaTime.py
index e38da00..2fa8769 100644
--- a/examples/deltaTime.py
+++ b/examples/deltaTime.py
@@ -1,9 +1,9 @@
# deltaTime.py
#
-# Parser to convert a conversational time reference such as "in a minute" or
-# "noon tomorrow" and convert it to a Python datetime. The returned
+# Parser to convert a conversational time reference such as "in a minute" or
+# "noon tomorrow" and convert it to a Python datetime. The returned
# ParseResults object contains the results name "timeOffset" containing
-# the timedelta, and "calculatedTime" containing the computed time relative
+# the timedelta, and "calculatedTime" containing the computed time relative
# to datetime.now().
#
# Copyright 2010, by Paul McGuire
@@ -30,7 +30,7 @@ def convertToTimedelta(toks):
if toks.dir:
td *= toks.dir
toks["timeOffset"] = td
-
+
def convertToDay(toks):
now = datetime.now()
if "wkdayRef" in toks:
@@ -50,7 +50,7 @@ def convertToDay(toks):
"yesterday" : datetime(now.year, now.month, now.day)+timedelta(-1),
"tomorrow" : datetime(now.year, now.month, now.day)+timedelta(+1),
}[name]
-
+
def convertToAbsTime(toks):
now = datetime.now()
if "dayRef" in toks:
@@ -70,7 +70,7 @@ def convertToAbsTime(toks):
if hhmmss.miltime:
hh,mm = hhmmss.miltime
ss = 0
- else:
+ else:
hh,mm,ss = (hhmmss.HH % 12), hhmmss.MM, hhmmss.SS
if not mm: mm = 0
if not ss: ss = 0
@@ -80,7 +80,7 @@ def convertToAbsTime(toks):
else:
timeOfDay = timedelta(0, (now.hour*60+now.minute)*60+now.second, now.microsecond)
toks["absTime"] = day + timeOfDay
-
+
def calculateTime(toks):
if toks.absTime:
absTime = toks.absTime
@@ -89,7 +89,7 @@ def calculateTime(toks):
if toks.timeOffset:
absTime += toks.timeOffset
toks["calculatedTime"] = absTime
-
+
# grammar definitions
CL = CaselessLiteral
today, tomorrow, yesterday, noon, midnight, now = map( CL,
@@ -100,7 +100,7 @@ week, day, hour, minute, second = map( plural,
am = CL("am")
pm = CL("pm")
COLON = Suppress(':')
-
+
# are these actually operators?
in_ = CL("in").setParseAction(replaceWith(1))
from_ = CL("from").setParseAction(replaceWith(1))
@@ -123,7 +123,7 @@ def fill_timefields(t):
int4.addParseAction(fill_timefields)
qty = integer | couple | a_qty
dayName = oneOf( list(calendar.day_name) )
-
+
dayOffset = (qty("qty") + (week | day)("timeunit"))
dayFwdBack = (from_ + now.suppress() | ago)("dir")
weekdayRef = (Optional(next_ | last_,1)("dir") + dayName("day"))
@@ -132,37 +132,37 @@ dayRef = Optional( (dayOffset + (before | after | from_)("dir") ).setParseAction
weekdayRef("wkdayRef")).setParseAction(convertToDay)
todayRef = (dayOffset + dayFwdBack).setParseAction(convertToTimedelta) | \
(in_("dir") + qty("qty") + day("timeunit")).setParseAction(convertToTimedelta)
-
+
dayTimeSpec = dayRef | todayRef
dayTimeSpec.setParseAction(calculateTime)
-
+
relativeTimeUnit = (week | day | hour | minute | second)
-
+
timespec = Group(ungroup(int4) |
- integer("HH") +
- ungroup(Optional(COLON + integer,[0]))("MM") +
- ungroup(Optional(COLON + integer,[0]))("SS") +
+ integer("HH") +
+ ungroup(Optional(COLON + integer,[0]))("MM") +
+ ungroup(Optional(COLON + integer,[0]))("SS") +
(am | pm)("ampm")
)
-absTimeSpec = ((noon | midnight | now | timespec("timeparts"))("timeOfDay") +
+absTimeSpec = ((noon | midnight | now | timespec("timeparts"))("timeOfDay") +
Optional(on_) + Optional(dayRef)("dayRef") |
- dayRef("dayRef") + at_ +
+ dayRef("dayRef") + at_ +
(noon | midnight | now | timespec("timeparts"))("timeOfDay"))
absTimeSpec.setParseAction(convertToAbsTime,calculateTime)
-
+
relTimeSpec = qty("qty") + relativeTimeUnit("timeunit") + \
(from_ | before | after)("dir") + \
Optional(at_) + \
absTimeSpec("absTime") | \
qty("qty") + relativeTimeUnit("timeunit") + ago("dir") | \
- in_ + qty("qty") + relativeTimeUnit("timeunit")
+ in_ + qty("qty") + relativeTimeUnit("timeunit")
relTimeSpec.setParseAction(convertToTimedelta,calculateTime)
-
-nlTimeExpression = (absTimeSpec + Optional(dayTimeSpec) |
- dayTimeSpec + Optional(Optional(at_) + absTimeSpec) |
+
+nlTimeExpression = (absTimeSpec + Optional(dayTimeSpec) |
+ dayTimeSpec + Optional(Optional(at_) + absTimeSpec) |
relTimeSpec + Optional(absTimeSpec))
-
+
if __name__ == "__main__":
# test grammar
tests = """\
diff --git a/examples/dfmparse.py b/examples/dfmparse.py
index cf83814..28a3a9b 100644
--- a/examples/dfmparse.py
+++ b/examples/dfmparse.py
@@ -101,11 +101,11 @@ hint_attribute_value_pair = hint_attribute + EQUALS + value
layout_attribute_value_pair = layout_attribute + EQUALS + value
generic_attribute_value_pair = attribute + EQUALS + value
attribute_value_pair << Group(
- category_attribute_value_pair
- | event_attribute_value_pair
- | font_attribute_value_pair
- | hint_attribute_value_pair
- | layout_attribute_value_pair
+ category_attribute_value_pair
+ | event_attribute_value_pair
+ | font_attribute_value_pair
+ | hint_attribute_value_pair
+ | layout_attribute_value_pair
| generic_attribute_value_pair
)
@@ -176,4 +176,4 @@ def main(testfiles=None, action=printer):
return retval
if __name__ == "__main__":
- main() \ No newline at end of file
+ main()
diff --git a/examples/dhcpd_leases_parser.py b/examples/dhcpd_leases_parser.py
index 145e6ea..a885051 100644
--- a/examples/dhcpd_leases_parser.py
+++ b/examples/dhcpd_leases_parser.py
@@ -1,9 +1,9 @@
-#
+#
# dhcpd_leases_parser.py
#
# Copyright 2008, Paul McGuire
#
-# Sample parser to parse a dhcpd.leases file to extract leases
+# Sample parser to parse a dhcpd.leases file to extract leases
# and lease attributes
#
# format ref: http://www.linuxmanpages.com/man5/dhcpd.leases.5.php
diff --git a/examples/dictExample.py b/examples/dictExample.py
index 5085aed..043d18f 100644
--- a/examples/dictExample.py
+++ b/examples/dictExample.py
@@ -21,8 +21,8 @@ testData = """
# define grammar for datatable
heading = (Literal(
-"+-------+------+------+------+------+------+------+------+------+") +
-"| | A1 | B1 | C1 | D1 | A2 | B2 | C2 | D2 |" +
+"+-------+------+------+------+------+------+------+------+------+") +
+"| | A1 | B1 | C1 | D1 | A2 | B2 | C2 | D2 |" +
"+=======+======+======+======+======+======+======+======+======+").suppress()
vert = Literal("|").suppress()
number = Word(nums)
diff --git a/examples/dictExample2.py b/examples/dictExample2.py
index cae463b..0f243d3 100644
--- a/examples/dictExample2.py
+++ b/examples/dictExample2.py
@@ -44,9 +44,9 @@ print("sum(data['min']) =", sum(data['min']))
print("data.max =", data.max)
print("sum(data.max) =", sum(data.max))
-# now print transpose of data table, using column labels read from table header and
+# now print transpose of data table, using column labels read from table header and
# values from data lists
-print()
+print()
print(" " * 5, end=' ')
for i in range(1,len(data)):
print("|%5s" % data[i][0], end=' ')
@@ -56,4 +56,4 @@ for i in range(len(data.columns)):
print("%5s" % data.columns[i], end=' ')
for j in range(len(data) - 1):
print('|%5s' % data[j + 1][i + 1], end=' ')
- print()
+ print()
diff --git a/examples/ebnf.py b/examples/ebnf.py
index 242aed4..f94c755 100644
--- a/examples/ebnf.py
+++ b/examples/ebnf.py
@@ -55,7 +55,7 @@ syntax.ignore(ebnfComment)
def do_integer(str, loc, toks):
return int(toks[0])
-
+
def do_meta_identifier(str, loc, toks):
if toks[0] in symbol_table:
return symbol_table[toks[0]]
diff --git a/examples/ebnftest.py b/examples/ebnftest.py
index 253404f..40772ee 100644
--- a/examples/ebnftest.py
+++ b/examples/ebnftest.py
@@ -1,5 +1,5 @@
#
-# ebnftest.py
+# ebnftest.py
#
# Test script for ebnf.py
#
@@ -24,11 +24,11 @@ syntactic_primary = optional_sequence | repeated_sequence |
optional_sequence = '[', definitions_list, ']';
repeated_sequence = '{', definitions_list, '}';
grouped_sequence = '(', definitions_list, ')';
-(*
+(*
terminal_string = "'", character - "'", {character - "'"}, "'" |
'"', character - '"', {character - '"'}, '"';
meta_identifier = letter, {letter | digit};
-integer = digit, {digit};
+integer = digit, {digit};
*)
'''
diff --git a/examples/eval_arith.py b/examples/eval_arith.py
index 9562253..133f6c2 100644
--- a/examples/eval_arith.py
+++ b/examples/eval_arith.py
@@ -38,7 +38,7 @@ def operatorOperands(tokenlist):
yield (next(it), next(it))
except StopIteration:
break
-
+
class EvalPowerOp(object):
"Class to evaluate multiplication and division expressions"
def __init__(self, tokens):
@@ -48,7 +48,7 @@ class EvalPowerOp(object):
for val in self.value[-3::-2]:
res = val.eval()**res
return res
-
+
class EvalMultOp(object):
"Class to evaluate multiplication and division expressions"
def __init__(self, tokens):
@@ -61,7 +61,7 @@ class EvalMultOp(object):
if op == '/':
prod /= val.eval()
return prod
-
+
class EvalAddOp(object):
"Class to evaluate addition and subtraction expressions"
def __init__(self, tokens):
@@ -105,7 +105,7 @@ class EvalComparisonOp(object):
else:
return True
return False
-
+
# define the parser
integer = Word(nums)
@@ -137,57 +137,57 @@ comp_expr = infixNotation(arith_expr,
def main():
# sample expressions posted on comp.lang.python, asking for advice
# in safely evaluating them
- rules=[
- '( A - B ) = 0',
- '(A + B + C + D + E + F + G + H + I) = J',
- '(A + B + C + D + E + F + G + H) = I',
- '(A + B + C + D + E + F) = G',
- '(A + B + C + D + E) = (F + G + H + I + J)',
- '(A + B + C + D + E) = (F + G + H + I)',
- '(A + B + C + D + E) = F',
- '(A + B + C + D) = (E + F + G + H)',
- '(A + B + C) = (D + E + F)',
- '(A + B) = (C + D + E + F)',
- '(A + B) = (C + D)',
- '(A + B) = (C - D + E - F - G + H + I + J)',
- '(A + B) = C',
- '(A + B) = 0',
- '(A+B+C+D+E) = (F+G+H+I+J)',
- '(A+B+C+D) = (E+F+G+H)',
- '(A+B+C+D)=(E+F+G+H)',
- '(A+B+C)=(D+E+F)',
- '(A+B)=(C+D)',
- '(A+B)=C',
- '(A-B)=C',
- '(A/(B+C))',
- '(B/(C+D))',
- '(G + H) = I',
- '-0.99 LE ((A+B+C)-(D+E+F+G)) LE 0.99',
- '-0.99 LE (A-(B+C)) LE 0.99',
- '-1000.00 LE A LE 0.00',
- '-5000.00 LE A LE 0.00',
- 'A < B',
- 'A < 7000',
- 'A = -(B)',
- 'A = C',
- 'A = 0',
- 'A GT 0',
- 'A GT 0.00',
- 'A GT 7.00',
- 'A LE B',
- 'A LT -1000.00',
- 'A LT -5000',
- 'A LT 0',
- 'A=(B+C+D)',
- 'A=B',
- 'I = (G + H)',
- '0.00 LE A LE 4.00',
+ rules=[
+ '( A - B ) = 0',
+ '(A + B + C + D + E + F + G + H + I) = J',
+ '(A + B + C + D + E + F + G + H) = I',
+ '(A + B + C + D + E + F) = G',
+ '(A + B + C + D + E) = (F + G + H + I + J)',
+ '(A + B + C + D + E) = (F + G + H + I)',
+ '(A + B + C + D + E) = F',
+ '(A + B + C + D) = (E + F + G + H)',
+ '(A + B + C) = (D + E + F)',
+ '(A + B) = (C + D + E + F)',
+ '(A + B) = (C + D)',
+ '(A + B) = (C - D + E - F - G + H + I + J)',
+ '(A + B) = C',
+ '(A + B) = 0',
+ '(A+B+C+D+E) = (F+G+H+I+J)',
+ '(A+B+C+D) = (E+F+G+H)',
+ '(A+B+C+D)=(E+F+G+H)',
+ '(A+B+C)=(D+E+F)',
+ '(A+B)=(C+D)',
+ '(A+B)=C',
+ '(A-B)=C',
+ '(A/(B+C))',
+ '(B/(C+D))',
+ '(G + H) = I',
+ '-0.99 LE ((A+B+C)-(D+E+F+G)) LE 0.99',
+ '-0.99 LE (A-(B+C)) LE 0.99',
+ '-1000.00 LE A LE 0.00',
+ '-5000.00 LE A LE 0.00',
+ 'A < B',
+ 'A < 7000',
+ 'A = -(B)',
+ 'A = C',
+ 'A = 0',
+ 'A GT 0',
+ 'A GT 0.00',
+ 'A GT 7.00',
+ 'A LE B',
+ 'A LT -1000.00',
+ 'A LT -5000',
+ 'A LT 0',
+ 'A=(B+C+D)',
+ 'A=B',
+ 'I = (G + H)',
+ '0.00 LE A LE 4.00',
'4.00 LT A LE 7.00',
'0.00 LE A LE 4.00 LE E > D',
'2**2**(A+3)',
- ]
- vars_={'A': 0, 'B': 1.1, 'C': 2.2, 'D': 3.3, 'E': 4.4, 'F': 5.5, 'G':
- 6.6, 'H':7.7, 'I':8.8, 'J':9.9}
+ ]
+ vars_={'A': 0, 'B': 1.1, 'C': 2.2, 'D': 3.3, 'E': 4.4, 'F': 5.5, 'G':
+ 6.6, 'H':7.7, 'I':8.8, 'J':9.9}
# define tests from given rules
tests = []
@@ -216,12 +216,12 @@ def main():
failed += 1
else:
print('')
-
+
print('')
if failed:
print(failed, "tests FAILED")
else:
print("all tests PASSED")
-if __name__=='__main__':
+if __name__=='__main__':
main()
diff --git a/examples/excelExpr.py b/examples/excelExpr.py
index 7ce8db2..913d65b 100644
--- a/examples/excelExpr.py
+++ b/examples/excelExpr.py
@@ -1,12 +1,12 @@
# excelExpr.py
#
# Copyright 2010, Paul McGuire
-#
+#
# A partial implementation of a parser of Excel formula expressions.
#
-from pyparsing import (CaselessKeyword, Suppress, Word, alphas,
- alphanums, nums, Optional, Group, oneOf, Forward, Regex,
- infixNotation, opAssoc, dblQuotedString, delimitedList,
+from pyparsing import (CaselessKeyword, Suppress, Word, alphas,
+ alphanums, nums, Optional, Group, oneOf, Forward, Regex,
+ infixNotation, opAssoc, dblQuotedString, delimitedList,
Combine, Literal, QuotedString, ParserElement, pyparsing_common)
ParserElement.enablePackrat()
@@ -15,10 +15,10 @@ EXCL, DOLLAR = map(Literal,"!$")
sheetRef = Word(alphas, alphanums) | QuotedString("'",escQuote="''")
colRef = Optional(DOLLAR) + Word(alphas,max=2)
rowRef = Optional(DOLLAR) + Word(nums)
-cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
+cellRef = Combine(Group(Optional(sheetRef + EXCL)("sheet") + colRef("col") +
rowRef("row")))
-cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
+cellRange = (Group(cellRef("start") + COLON + cellRef("end"))("range")
| cellRef | Word(alphas,alphanums))
expr = Forward()
@@ -26,10 +26,10 @@ expr = Forward()
COMPARISON_OP = oneOf("< = > >= <= != <>")
condExpr = expr + COMPARISON_OP + expr
-ifFunc = (CaselessKeyword("if") -
- LPAR +
- Group(condExpr)("condition") +
- COMMA + Group(expr)("if_true") +
+ifFunc = (CaselessKeyword("if") -
+ LPAR +
+ Group(condExpr)("condition") +
+ COMMA + Group(expr)("if_true") +
COMMA + Group(expr)("if_false") + RPAR)
statFunc = lambda name : Group(CaselessKeyword(name) + Group(LPAR + delimitedList(expr) + RPAR))
@@ -42,7 +42,7 @@ funcCall = ifFunc | sumFunc | minFunc | maxFunc | aveFunc
multOp = oneOf("* /")
addOp = oneOf("+ -")
numericLiteral = pyparsing_common.number
-operand = numericLiteral | funcCall | cellRange | cellRef
+operand = numericLiteral | funcCall | cellRange | cellRef
arithExpr = infixNotation(operand,
[
(multOp, 2, opAssoc.LEFT),
@@ -66,4 +66,4 @@ expr << (arithExpr | textExpr)
=if(Sum(A1:A25)>42,Min(B1:B25),if(Sum(C1:C25)>3.14, (Min(C1:C25)+3)*18,Max(B1:B25)))
=sum(a1:a25,10,min(b1,c2,d3))
=if("T"&a2="TTime", "Ready", "Not ready")
-""") \ No newline at end of file
+""")
diff --git a/examples/fourFn.py b/examples/fourFn.py
index f485f53..484aa45 100644
--- a/examples/fourFn.py
+++ b/examples/fourFn.py
@@ -21,7 +21,7 @@ def pushFirst( strg, loc, toks ):
exprStack.append( toks[0] )
def pushUMinus( strg, loc, toks ):
for t in toks:
- if t == '-':
+ if t == '-':
exprStack.append( 'unary -' )
#~ exprStack.append( '-1' )
#~ exprStack.append( '*' )
@@ -48,27 +48,27 @@ def BNF():
# and CaselessKeyword only match whole words
e = CaselessKeyword( "E" )
pi = CaselessKeyword( "PI" )
- #~ fnumber = Combine( Word( "+-"+nums, nums ) +
+ #~ fnumber = Combine( Word( "+-"+nums, nums ) +
#~ Optional( point + Optional( Word( nums ) ) ) +
#~ Optional( e + Word( "+-"+nums, nums ) ) )
fnumber = Regex(r"[+-]?\d+(?:\.\d*)?(?:[eE][+-]?\d+)?")
ident = Word(alphas, alphanums+"_$")
-
+
plus, minus, mult, div = map(Literal, "+-*/")
lpar, rpar = map(Suppress, "()")
addop = plus | minus
multop = mult | div
expop = Literal( "^" )
-
+
expr = Forward()
- atom = ((0,None)*minus + ( pi | e | fnumber | ident + lpar + expr + rpar | ident ).setParseAction( pushFirst ) |
- Group( lpar + expr + rpar )).setParseAction(pushUMinus)
-
+ atom = ((0,None)*minus + ( pi | e | fnumber | ident + lpar + expr + rpar | ident ).setParseAction( pushFirst ) |
+ Group( lpar + expr + rpar )).setParseAction(pushUMinus)
+
# by defining exponentiation as "atom [ ^ factor ]..." instead of "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-righ
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + ZeroOrMore( ( expop + factor ).setParseAction( pushFirst ) )
-
+
term = factor + ZeroOrMore( ( multop + factor ).setParseAction( pushFirst ) )
expr << term + ZeroOrMore( ( addop + term ).setParseAction( pushFirst ) )
bnf = expr
@@ -109,7 +109,7 @@ def evaluateStack( s ):
return float( op )
if __name__ == "__main__":
-
+
def test( s, expVal ):
global exprStack
exprStack[:] = []
@@ -125,7 +125,7 @@ if __name__ == "__main__":
print(s, "=", val, results, "=>", exprStack)
else:
print(s+"!!!", val, "!=", expVal, results, "=>", exprStack)
-
+
test( "9", 9 )
test( "-9", -9 )
test( "--9", 9 )
diff --git a/examples/gen_ctypes.py b/examples/gen_ctypes.py
index e79798f..325aa28 100644
--- a/examples/gen_ctypes.py
+++ b/examples/gen_ctypes.py
@@ -43,7 +43,7 @@ typemap = {
"Bool" : "c_bool",
"void" : "None",
}
-
+
LPAR,RPAR,LBRACE,RBRACE,COMMA,SEMI = map(Suppress,"(){},;")
ident = Word(alphas, alphanums + "_")
integer = Regex(r"[+-]?\d+")
@@ -52,19 +52,19 @@ hexinteger = Regex(r"0x[0-9a-fA-F]+")
const = Suppress("const")
primitiveType = oneOf(t for t in typemap if not t.endswith("*"))
structType = Suppress("struct") + ident
-vartype = (Optional(const) +
- (primitiveType | structType | ident) +
+vartype = (Optional(const) +
+ (primitiveType | structType | ident) +
Optional(Word("*")("ptr")))
def normalizetype(t):
if isinstance(t, ParseResults):
return ' '.join(t)
#~ ret = ParseResults([' '.join(t)])
#~ return ret
-
+
vartype.setParseAction(normalizetype)
arg = Group(vartype("argtype") + Optional(ident("argname")))
-func_def = (vartype("fn_type") + ident("fn_name") +
+func_def = (vartype("fn_type") + ident("fn_name") +
LPAR + Optional(delimitedList(arg|"..."))("fn_args") + RPAR + SEMI)
def derivefields(t):
if t.fn_args and t.fn_args[-1] == "...":
@@ -74,7 +74,7 @@ func_def.setParseAction(derivefields)
fn_typedef = "typedef" + func_def
var_typedef = "typedef" + primitiveType("primType") + ident("name") + SEMI
-enum_def = (Keyword("enum") + LBRACE +
+enum_def = (Keyword("enum") + LBRACE +
delimitedList(Group(ident("name") + '=' + (hexinteger|integer)("value")))("evalues")
+ Optional(COMMA)
+ RBRACE)
@@ -131,7 +131,7 @@ for fn,_,_ in (cStyleComment.suppress() | fn_typedef.suppress() | func_def).scan
if arg != "...":
if arg.argtype not in typemap:
getUDType(arg.argtype)
- functions.append(fn)
+ functions.append(fn)
# scan input header text for enums
enum_def.ignore(cppStyleComment)
@@ -160,7 +160,7 @@ print()
print("# functions")
for fn in functions:
prefix = "{}.{}".format(module, fn.fn_name)
-
+
print("{}.restype = {}".format(prefix, typeAsCtypes(fn.fn_type)))
if fn.varargs:
print("# warning - %s takes variable argument list" % prefix)
@@ -170,5 +170,3 @@ for fn in functions:
print("{}.argtypes = ({},)".format(prefix, ','.join(typeAsCtypes(a.argtype) for a in fn.fn_args)))
else:
print("%s.argtypes = ()" % (prefix))
-
-
diff --git a/examples/getNTPserversNew.py b/examples/getNTPserversNew.py
index 6995b29..6eb653b 100644
--- a/examples/getNTPserversNew.py
+++ b/examples/getNTPserversNew.py
@@ -19,8 +19,8 @@ integer = Word(nums)
ipAddress = Combine( integer + "." + integer + "." + integer + "." + integer )
hostname = delimitedList(Word(alphas,alphanums+"-_"),".",combine=True)
tdStart,tdEnd = makeHTMLTags("td")
-timeServerPattern = (tdStart + hostname("hostname") + tdEnd +
- tdStart + ipAddress("ipAddr") + tdEnd +
+timeServerPattern = (tdStart + hostname("hostname") + tdEnd +
+ tdStart + ipAddress("ipAddr") + tdEnd +
tdStart + SkipTo(tdEnd)("loc") + tdEnd)
# get list of time servers
diff --git a/examples/greetingInGreek.py b/examples/greetingInGreek.py
index 921ac04..8dccd81 100644
--- a/examples/greetingInGreek.py
+++ b/examples/greetingInGreek.py
@@ -1,4 +1,4 @@
-# vim:fileencoding=utf-8
+# vim:fileencoding=utf-8
#
# greetingInGreek.py
#
@@ -10,11 +10,10 @@ from pyparsing import Word, pyparsing_unicode
# define grammar
alphas = pyparsing_unicode.Greek.alphas
-greet = Word(alphas) + ',' + Word(alphas) + '!'
+greet = Word(alphas) + ',' + Word(alphas) + '!'
# input string
hello = "Καλημέρα, κόσμε!"
# parse input string
print(greet.parseString(hello))
-
diff --git a/examples/greetingInKorean.py b/examples/greetingInKorean.py
index 2e9d4a1..7f5a17e 100644
--- a/examples/greetingInKorean.py
+++ b/examples/greetingInKorean.py
@@ -1,4 +1,4 @@
-# vim:fileencoding=utf-8
+# vim:fileencoding=utf-8
#
# greetingInKorean.py
#
diff --git a/examples/holaMundo.py b/examples/holaMundo.py
index 47ab6bb..93dd881 100644
--- a/examples/holaMundo.py
+++ b/examples/holaMundo.py
@@ -2,32 +2,32 @@
# escrito por Marco Alfonso, 2004 Noviembre
-# importamos el modulo
-from pyparsing import *
-saludo= Word(alphas) + ',' + Word(alphas) + '!'
-
-# Aqui decimos que la gramatica "saludo" DEBE contener
-# una palabra compuesta de caracteres alfanumericos
-# (Word(alphas)) mas una ',' mas otra palabra alfanumerica,
-# mas '!' y esos seian nuestros tokens
-tokens = saludo.parseString("Hola, Mundo !")
-
-# Ahora parseamos una cadena, "Hola, Mundo!",
-# el metodo parseString, nos devuelve una lista con los tokens
-# encontrados, en caso de no haber errores...
+# importamos el modulo
+from pyparsing import *
+saludo= Word(alphas) + ',' + Word(alphas) + '!'
+
+# Aqui decimos que la gramatica "saludo" DEBE contener
+# una palabra compuesta de caracteres alfanumericos
+# (Word(alphas)) mas una ',' mas otra palabra alfanumerica,
+# mas '!' y esos seian nuestros tokens
+tokens = saludo.parseString("Hola, Mundo !")
+
+# Ahora parseamos una cadena, "Hola, Mundo!",
+# el metodo parseString, nos devuelve una lista con los tokens
+# encontrados, en caso de no haber errores...
for i in range(len(tokens)):
print ("Token %d -> %s" % (i,tokens[i]))
-#imprimimos cada uno de los tokens Y listooo!!, he aquí a salida
+#imprimimos cada uno de los tokens Y listooo!!, he aquí a salida
# Token 0 -> Hola
# Token 1 -> ,
-# Token 2-> Mundo
-# Token 3 -> !
-
-# Por supuesto, se pueden "reutilizar" gramáticas, por ejemplo:
-numimag = Word(nums) + 'i'
-numreal = Word(nums)
-numcomplex = numreal + '+' + numimag
+# Token 2-> Mundo
+# Token 3 -> !
+
+# Por supuesto, se pueden "reutilizar" gramáticas, por ejemplo:
+numimag = Word(nums) + 'i'
+numreal = Word(nums)
+numcomplex = numreal + '+' + numimag
print (numcomplex.parseString("3+5i"))
# Cambiar a complejo numero durante parsear:
@@ -35,4 +35,3 @@ numcomplex.setParseAction(lambda t: complex(''.join(t).replace('i','j')))
print (numcomplex.parseString("3+5i"))
# Excelente!!, bueno, los dejo, me voy a seguir tirando código...
-
diff --git a/examples/htmlStripper.py b/examples/htmlStripper.py
index 1d7a0f0..6fc4aef 100644
--- a/examples/htmlStripper.py
+++ b/examples/htmlStripper.py
@@ -1,14 +1,14 @@
#
# htmlStripper.py
#
-# Sample code for stripping HTML markup tags and scripts from
+# Sample code for stripping HTML markup tags and scripts from
# HTML source files.
#
# Copyright (c) 2006, 2016, Paul McGuire
#
from contextlib import closing
import urllib.request, urllib.parse, urllib.error
-from pyparsing import (makeHTMLTags, SkipTo, commonHTMLEntity, replaceHTMLEntity,
+from pyparsing import (makeHTMLTags, SkipTo, commonHTMLEntity, replaceHTMLEntity,
htmlComment, anyOpenTag, anyCloseTag, LineEnd, OneOrMore, replaceWith)
scriptOpen,scriptClose = makeHTMLTags("script")
@@ -21,7 +21,7 @@ with closing(urllib.request.urlopen( targetURL )) as targetPage:
targetHTML = targetPage.read().decode("UTF-8")
# first pass, strip out tags and translate entities
-firstPass = (htmlComment | scriptBody | commonHTMLEntity |
+firstPass = (htmlComment | scriptBody | commonHTMLEntity |
anyOpenTag | anyCloseTag ).suppress().transformString(targetHTML)
# first pass leaves many blank lines, collapse these down
@@ -29,4 +29,4 @@ repeatedNewlines = LineEnd() + OneOrMore(LineEnd())
repeatedNewlines.setParseAction(replaceWith("\n\n"))
secondPass = repeatedNewlines.transformString(firstPass)
-print(secondPass) \ No newline at end of file
+print(secondPass)
diff --git a/examples/httpServerLogParser.py b/examples/httpServerLogParser.py
index a147a05..261cea3 100644
--- a/examples/httpServerLogParser.py
+++ b/examples/httpServerLogParser.py
@@ -5,11 +5,11 @@
"""
Parser for HTTP server log output, of the form:
-195.146.134.15 - - [20/Jan/2003:08:55:36 -0800]
-"GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html"
+195.146.134.15 - - [20/Jan/2003:08:55:36 -0800]
+"GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html"
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
-127.0.0.1 - u.surname@domain.com [12/Sep/2006:14:13:53 +0300]
-"GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css"
+127.0.0.1 - u.surname@domain.com [12/Sep/2006:14:13:53 +0300]
+"GET /skins/monobook/external.png HTTP/1.0" 304 - "http://wiki.mysite.com/skins/monobook/main.css"
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.6) Gecko/20060728 Firefox/1.5.0.6"
You can then break it up as follows:
@@ -32,30 +32,30 @@ def getCmdFields( s, l, t ):
logLineBNF = None
def getLogLineBNF():
global logLineBNF
-
+
if logLineBNF is None:
integer = Word( nums )
ipAddress = delimitedList( integer, ".", combine=True )
-
+
timeZoneOffset = Word("+-",nums)
month = Word(string.uppercase, string.lowercase, exact=3)
- serverDateTime = Group( Suppress("[") +
+ serverDateTime = Group( Suppress("[") +
Combine( integer + "/" + month + "/" + integer +
":" + integer + ":" + integer + ":" + integer ) +
- timeZoneOffset +
+ timeZoneOffset +
Suppress("]") )
-
- logLineBNF = ( ipAddress.setResultsName("ipAddr") +
+
+ logLineBNF = ( ipAddress.setResultsName("ipAddr") +
Suppress("-") +
("-" | Word( alphas+nums+"@._" )).setResultsName("auth") +
- serverDateTime.setResultsName("timestamp") +
+ serverDateTime.setResultsName("timestamp") +
dblQuotedString.setResultsName("cmd").setParseAction(getCmdFields) +
- (integer | "-").setResultsName("statusCode") +
- (integer | "-").setResultsName("numBytesSent") +
+ (integer | "-").setResultsName("statusCode") +
+ (integer | "-").setResultsName("numBytesSent") +
dblQuotedString.setResultsName("referrer").setParseAction(removeQuotes) +
dblQuotedString.setResultsName("clientSfw").setParseAction(removeQuotes) )
return logLineBNF
-
+
testdata = """
195.146.134.15 - - [20/Jan/2003:08:55:36 -0800] "GET /path/to/page.html HTTP/1.0" 200 4649 "http://www.somedomain.com/020602/page.html" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
111.111.111.11 - - [16/Feb/2004:04:09:49 -0800] "GET /ads/redirectads/336x280redirect.htm HTTP/1.1" 304 - "http://www.foobarp.org/theme_detail.php?type=vs&cat=0&mid=27512" "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
diff --git a/examples/idlParse.py b/examples/idlParse.py
index b94bac4..37cb363 100644
--- a/examples/idlParse.py
+++ b/examples/idlParse.py
@@ -15,30 +15,30 @@ import pprint
bnf = None
def CORBA_IDL_BNF():
global bnf
-
+
if not bnf:
# punctuation
(colon,lbrace,rbrace,lbrack,rbrack,lparen,rparen,
equals,comma,dot,slash,bslash,star,semi,langle,rangle) = map(Literal, r":{}[]()=,./\*;<>")
-
+
# keywords
- (any_, attribute_, boolean_, case_, char_, const_, context_, default_, double_, enum_, exception_,
- FALSE_, fixed_, float_, inout_, interface_, in_, long_, module_, Object_, octet_, oneway_, out_, raises_,
- readonly_, sequence_, short_, string_, struct_, switch_, TRUE_, typedef_, unsigned_, union_, void_,
- wchar_, wstring_) = map(Keyword, """any attribute boolean case char const context
- default double enum exception FALSE fixed float inout interface in long module
+ (any_, attribute_, boolean_, case_, char_, const_, context_, default_, double_, enum_, exception_,
+ FALSE_, fixed_, float_, inout_, interface_, in_, long_, module_, Object_, octet_, oneway_, out_, raises_,
+ readonly_, sequence_, short_, string_, struct_, switch_, TRUE_, typedef_, unsigned_, union_, void_,
+ wchar_, wstring_) = map(Keyword, """any attribute boolean case char const context
+ default double enum exception FALSE fixed float inout interface in long module
Object octet oneway out raises readonly sequence short string struct switch
TRUE typedef unsigned union void wchar wstring""".split())
-
+
identifier = Word( alphas, alphanums + "_" ).setName("identifier")
-
+
real = Regex(r"[+-]?\d+\.\d*([Ee][+-]?\d+)?").setName("real")
integer = Regex(r"0x[0-9a-fA-F]+|[+-]?\d+").setName("int")
udTypeName = delimitedList( identifier, "::", combine=True ).setName("udType")
- typeName = ( any_ | boolean_ | char_ | double_ | fixed_ |
- float_ | long_ | octet_ | short_ | string_ |
+ typeName = ( any_ | boolean_ | char_ | double_ | fixed_ |
+ float_ | long_ | octet_ | short_ | string_ |
wchar_ | wstring_ | udTypeName ).setName("type")
sequenceDef = Forward().setName("seq")
sequenceDef << Group( sequence_ + langle + ( sequenceDef | typeName ) + rangle )
@@ -60,11 +60,11 @@ def CORBA_IDL_BNF():
moduleDef << module_ + identifier + lbrace + ZeroOrMore( moduleItem ) + rbrace + semi
bnf = ( moduleDef | OneOrMore( moduleItem ) )
-
+
singleLineComment = "//" + restOfLine
bnf.ignore( singleLineComment )
bnf.ignore( cStyleComment )
-
+
return bnf
testnum = 1
@@ -84,7 +84,7 @@ def test( strng ):
print(" "*(err.column-1) + "^")
print(err)
print()
-
+
if __name__ == "__main__":
test(
"""
@@ -94,7 +94,7 @@ if __name__ == "__main__":
typedef string[10] tenStrings;
typedef sequence<string> stringSeq;
typedef sequence< sequence<string> > stringSeqSeq;
-
+
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
@@ -108,14 +108,14 @@ if __name__ == "__main__":
* a block comment *
*/
typedef string[10] tenStrings;
- typedef
+ typedef
/** ** *** **** *
* a block comment *
*/
sequence<string> /*comment inside an And */ stringSeq;
/* */ /**/ /***/ /****/
typedef sequence< sequence<string> > stringSeqSeq;
-
+
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
@@ -135,7 +135,7 @@ if __name__ == "__main__":
string msg;
sequence<string> dataStrings;
};
-
+
interface TestInterface
{
void method1( in string arg1, inout long arg2 );
@@ -144,16 +144,16 @@ if __name__ == "__main__":
)
test(
"""
- module Test1
+ module Test1
{
exception TestException
{
string msg;
];
-
+
interface TestInterface
{
- void method1( in string arg1, inout long arg2 )
+ void method1( in string arg1, inout long arg2 )
raises ( TestException );
};
};
@@ -161,13 +161,13 @@ if __name__ == "__main__":
)
test(
"""
- module Test1
+ module Test1
{
exception TestException
{
string msg;
};
-
+
};
"""
)
diff --git a/examples/indentedGrammarExample.py b/examples/indentedGrammarExample.py
index 442e6a4..0133a9e 100644
--- a/examples/indentedGrammarExample.py
+++ b/examples/indentedGrammarExample.py
@@ -2,7 +2,7 @@
#
# Copyright (c) 2006,2016 Paul McGuire
#
-# A sample of a pyparsing grammar using indentation for
+# A sample of a pyparsing grammar using indentation for
# grouping (like Python does).
#
# Updated to use indentedBlock helper method.
@@ -51,4 +51,3 @@ module_body = OneOrMore(stmt)
print(data)
parseTree = module_body.parseString(data)
parseTree.pprint()
-
diff --git a/examples/invRegex.py b/examples/invRegex.py
index aea3b55..e935b3b 100644
--- a/examples/invRegex.py
+++ b/examples/invRegex.py
@@ -1,4 +1,4 @@
-#
+#
# invRegex.py
#
# Copyright 2008, Paul McGuire
@@ -13,7 +13,7 @@
#
__all__ = ["count","invert"]
-from pyparsing import (Literal, oneOf, printables, ParserElement, Combine,
+from pyparsing import (Literal, oneOf, printables, ParserElement, Combine,
SkipTo, infixNotation, ParseFatalException, Word, nums, opAssoc,
Suppress, ParseResults, srange)
@@ -76,7 +76,7 @@ class AlternativeEmitter(object):
for s in e.makeGenerator()():
yield s
return altGen
-
+
class LiteralEmitter(object):
def __init__(self,lit):
self.lit = lit
@@ -91,7 +91,7 @@ class LiteralEmitter(object):
def handleRange(toks):
return CharacterRangeEmitter(srange(toks[0]))
-
+
def handleRepetition(toks):
toks=toks[0]
if toks[1] in "*+":
@@ -111,7 +111,7 @@ def handleRepetition(toks):
return GroupEmitter([toks[0]] * mincount + [opt])
else:
return [toks[0]] * mincount
-
+
def handleLiteral(toks):
lit = ""
for t in toks:
@@ -122,7 +122,7 @@ def handleLiteral(toks):
lit += t[1]
else:
lit += t
- return LiteralEmitter(lit)
+ return LiteralEmitter(lit)
def handleMacro(toks):
macroChar = toks[0][1]
@@ -163,14 +163,14 @@ def parser():
repetition = (
( lbrace + Word(nums)("count") + rbrace ) |
( lbrace + Word(nums)("minCount")+","+ Word(nums)("maxCount") + rbrace ) |
- oneOf(list("*+?"))
+ oneOf(list("*+?"))
)
reRange.setParseAction(handleRange)
reLiteral.setParseAction(handleLiteral)
reMacro.setParseAction(handleMacro)
reDot.setParseAction(handleDot)
-
+
reTerm = ( reLiteral | reRange | reMacro | reDot | reNonCaptureGroup)
reExpr = infixNotation( reTerm,
[
@@ -180,7 +180,7 @@ def parser():
]
)
_parser = reExpr
-
+
return _parser
def count(gen):
@@ -232,7 +232,7 @@ def main():
(Fri|Mon|S(atur|un)|T(hur|ue)s|Wednes)day
A(pril|ugust)|((Dec|Nov|Sept)em|Octo)ber|(Febr|Jan)uary|Ju(ly|ne)|Ma(rch|y)
""".split('\n')
-
+
for t in tests:
t = t.strip()
if not t: continue
diff --git a/examples/javascript_grammar.g b/examples/javascript_grammar.g
index c30eac2..49fc238 100644
--- a/examples/javascript_grammar.g
+++ b/examples/javascript_grammar.g
@@ -15,25 +15,25 @@ options
program
: LT!* sourceElements LT!* EOF!
;
-
+
sourceElements
: sourceElement (LT!* sourceElement)*
;
-
+
sourceElement
: functionDeclaration
| statement
;
-
+
// functions
functionDeclaration
: 'function' LT!* Identifier LT!* formalParameterList LT!* functionBody
;
-
+
functionExpression
: 'function' LT!* Identifier? LT!* formalParameterList LT!* functionBody
;
-
+
formalParameterList
: '(' (LT!* Identifier (LT!* ',' LT!* Identifier)*)? LT!* ')'
;
@@ -59,83 +59,83 @@ statement
| throwStatement
| tryStatement
;
-
+
statementBlock
: '{' LT!* statementList? LT!* '}'
;
-
+
statementList
: statement (LT!* statement)*
;
-
+
variableStatement
: 'var' LT!* variableDeclarationList (LT | ';')!
;
-
+
variableDeclarationList
: variableDeclaration (LT!* ',' LT!* variableDeclaration)*
;
-
+
variableDeclarationListNoIn
: variableDeclarationNoIn (LT!* ',' LT!* variableDeclarationNoIn)*
;
-
+
variableDeclaration
: Identifier LT!* initialiser?
;
-
+
variableDeclarationNoIn
: Identifier LT!* initialiserNoIn?
;
-
+
initialiser
: '=' LT!* assignmentExpression
;
-
+
initialiserNoIn
: '=' LT!* assignmentExpressionNoIn
;
-
+
emptyStatement
: ';'
;
-
+
expressionStatement
: expression (LT | ';')!
;
-
+
ifStatement
: 'if' LT!* '(' LT!* expression LT!* ')' LT!* statement (LT!* 'else' LT!* statement)?
;
-
+
iterationStatement
: doWhileStatement
| whileStatement
| forStatement
| forInStatement
;
-
+
doWhileStatement
: 'do' LT!* statement LT!* 'while' LT!* '(' expression ')' (LT | ';')!
;
-
+
whileStatement
: 'while' LT!* '(' LT!* expression LT!* ')' LT!* statement
;
-
+
forStatement
: 'for' LT!* '(' (LT!* forStatementInitialiserPart)? LT!* ';' (LT!* expression)? LT!* ';' (LT!* expression)? LT!* ')' LT!* statement
;
-
+
forStatementInitialiserPart
: expressionNoIn
| 'var' LT!* variableDeclarationListNoIn
;
-
+
forInStatement
: 'for' LT!* '(' LT!* forInStatementInitialiserPart LT!* 'in' LT!* expression LT!* ')' LT!* statement
;
-
+
forInStatementInitialiserPart
: leftHandSideExpression
| 'var' LT!* variableDeclarationNoIn
@@ -152,7 +152,7 @@ breakStatement
returnStatement
: 'return' expression? (LT | ';')!
;
-
+
withStatement
: 'with' LT!* '(' LT!* expression LT!* ')' LT!* statement
;
@@ -160,11 +160,11 @@ withStatement
labelledStatement
: Identifier LT!* ':' LT!* statement
;
-
+
switchStatement
: 'switch' LT!* '(' LT!* expression LT!* ')' LT!* caseBlock
;
-
+
caseBlock
: '{' (LT!* caseClause)* (LT!* defaultClause (LT!* caseClause)*)? LT!* '}'
;
@@ -172,11 +172,11 @@ caseBlock
caseClause
: 'case' LT!* expression LT!* ':' LT!* statementList?
;
-
+
defaultClause
: 'default' LT!* ':' LT!* statementList?
;
-
+
throwStatement
: 'throw' expression (LT | ';')!
;
@@ -184,11 +184,11 @@ throwStatement
tryStatement
: 'try' LT!* statementBlock LT!* (finallyClause | catchClause (LT!* finallyClause)?)
;
-
+
catchClause
: 'catch' LT!* '(' LT!* Identifier LT!* ')' LT!* statementBlock
;
-
+
finallyClause
: 'finally' LT!* statementBlock
;
@@ -197,35 +197,35 @@ finallyClause
expression
: assignmentExpression (LT!* ',' LT!* assignmentExpression)*
;
-
+
expressionNoIn
: assignmentExpressionNoIn (LT!* ',' LT!* assignmentExpressionNoIn)*
;
-
+
assignmentExpression
: conditionalExpression
| leftHandSideExpression LT!* assignmentOperator LT!* assignmentExpression
;
-
+
assignmentExpressionNoIn
: conditionalExpressionNoIn
| leftHandSideExpression LT!* assignmentOperator LT!* assignmentExpressionNoIn
;
-
+
leftHandSideExpression
: callExpression
| newExpression
;
-
+
newExpression
: memberExpression
| 'new' LT!* newExpression
;
-
+
memberExpression
: (primaryExpression | functionExpression | 'new' LT!* memberExpression LT!* arguments) (LT!* memberExpressionSuffix)*
;
-
+
memberExpressionSuffix
: indexSuffix
| propertyReferenceSuffix
@@ -234,7 +234,7 @@ memberExpressionSuffix
callExpression
: memberExpression LT!* arguments (LT!* callExpressionSuffix)*
;
-
+
callExpressionSuffix
: arguments
| indexSuffix
@@ -244,15 +244,15 @@ callExpressionSuffix
arguments
: '(' (LT!* assignmentExpression (LT!* ',' LT!* assignmentExpression)*)? LT!* ')'
;
-
+
indexSuffix
: '[' LT!* expression LT!* ']'
- ;
-
+ ;
+
propertyReferenceSuffix
: '.' LT!* Identifier
;
-
+
assignmentOperator
: '=' | '*=' | '/=' | '%=' | '+=' | '-=' | '<<=' | '>>=' | '>>>=' | '&=' | '^=' | '|='
;
@@ -268,43 +268,43 @@ conditionalExpressionNoIn
logicalORExpression
: logicalANDExpression (LT!* '||' LT!* logicalANDExpression)*
;
-
+
logicalORExpressionNoIn
: logicalANDExpressionNoIn (LT!* '||' LT!* logicalANDExpressionNoIn)*
;
-
+
logicalANDExpression
: bitwiseORExpression (LT!* '&&' LT!* bitwiseORExpression)*
;
-
+
logicalANDExpressionNoIn
: bitwiseORExpressionNoIn (LT!* '&&' LT!* bitwiseORExpressionNoIn)*
;
-
+
bitwiseORExpression
: bitwiseXORExpression (LT!* '|' LT!* bitwiseXORExpression)*
;
-
+
bitwiseORExpressionNoIn
: bitwiseXORExpressionNoIn (LT!* '|' LT!* bitwiseXORExpressionNoIn)*
;
-
+
bitwiseXORExpression
: bitwiseANDExpression (LT!* '^' LT!* bitwiseANDExpression)*
;
-
+
bitwiseXORExpressionNoIn
: bitwiseANDExpressionNoIn (LT!* '^' LT!* bitwiseANDExpressionNoIn)*
;
-
+
bitwiseANDExpression
: equalityExpression (LT!* '&' LT!* equalityExpression)*
;
-
+
bitwiseANDExpressionNoIn
: equalityExpressionNoIn (LT!* '&' LT!* equalityExpressionNoIn)*
;
-
+
equalityExpression
: relationalExpression (LT!* ('==' | '!=' | '===' | '!==') LT!* relationalExpression)*
;
@@ -312,7 +312,7 @@ equalityExpression
equalityExpressionNoIn
: relationalExpressionNoIn (LT!* ('==' | '!=' | '===' | '!==') LT!* relationalExpressionNoIn)*
;
-
+
relationalExpression
: shiftExpression (LT!* ('<' | '>' | '<=' | '>=' | 'instanceof' | 'in') LT!* shiftExpression)*
;
@@ -337,7 +337,7 @@ unaryExpression
: postfixExpression
| ('delete' | 'void' | 'typeof' | '++' | '--' | '+' | '-' | '~' | '!') unaryExpression
;
-
+
postfixExpression
: leftHandSideExpression ('++' | '--')?
;
@@ -350,17 +350,17 @@ primaryExpression
| objectLiteral
| '(' LT!* expression LT!* ')'
;
-
+
// arrayLiteral definition.
arrayLiteral
: '[' LT!* assignmentExpression? (LT!* ',' (LT!* assignmentExpression)?)* LT!* ']'
;
-
+
// objectLiteral definition.
objectLiteral
: '{' LT!* propertyNameAndValue (LT!* ',' LT!* propertyNameAndValue)* LT!* '}'
;
-
+
propertyNameAndValue
: propertyName LT!* ':' LT!* assignmentExpression
;
@@ -379,20 +379,20 @@ literal
| StringLiteral
| NumericLiteral
;
-
+
// lexer rules.
StringLiteral
: '"' DoubleStringCharacter* '"'
| '\'' SingleStringCharacter* '\''
;
-
+
fragment DoubleStringCharacter
- : ~('"' | '\\' | LT)
+ : ~('"' | '\\' | LT)
| '\\' EscapeSequence
;
fragment SingleStringCharacter
- : ~('\'' | '\\' | LT)
+ : ~('\'' | '\\' | LT)
| '\\' EscapeSequence
;
@@ -402,7 +402,7 @@ fragment EscapeSequence
| HexEscapeSequence
| UnicodeEscapeSequence
;
-
+
fragment CharacterEscapeSequence
: SingleEscapeCharacter
| NonEscapeCharacter
@@ -422,33 +422,33 @@ fragment EscapeCharacter
| 'x'
| 'u'
;
-
+
fragment HexEscapeSequence
: 'x' HexDigit HexDigit
;
-
+
fragment UnicodeEscapeSequence
: 'u' HexDigit HexDigit HexDigit HexDigit
;
-
+
NumericLiteral
: DecimalLiteral
| HexIntegerLiteral
;
-
+
fragment HexIntegerLiteral
: '0' ('x' | 'X') HexDigit+
;
-
+
fragment HexDigit
: DecimalDigit | ('a'..'f') | ('A'..'F')
;
-
+
fragment DecimalLiteral
: DecimalDigit+ '.' DecimalDigit* ExponentPart?
| '.'? DecimalDigit+ ExponentPart?
;
-
+
fragment DecimalDigit
: ('0'..'9')
;
@@ -460,21 +460,21 @@ fragment ExponentPart
Identifier
: IdentifierStart IdentifierPart*
;
-
+
fragment IdentifierStart
: UnicodeLetter
| '$'
| '_'
| '\\' UnicodeEscapeSequence
;
-
+
fragment IdentifierPart
: (IdentifierStart) => IdentifierStart // Avoids ambiguity, as some IdentifierStart chars also match following alternatives.
| UnicodeDigit
| UnicodeConnectorPunctuation
;
-
-fragment UnicodeLetter // Any character in the Unicode categories "Uppercase letter (Lu)",
+
+fragment UnicodeLetter // Any character in the Unicode categories "Uppercase letter (Lu)",
: '\u0041'..'\u005A' // "Lowercase letter (Ll)", "Titlecase letter (Lt)",
| '\u0061'..'\u007A' // "Modifier letter (Lm)", "Other letter (Lo)", or "Letter number (Nl)".
| '\u00AA'
@@ -737,7 +737,7 @@ fragment UnicodeLetter // Any character in the Unicode categories "Uppercase le
| '\uFFD2'..'\uFFD7'
| '\uFFDA'..'\uFFDC'
;
-
+
fragment UnicodeCombiningMark // Any character in the Unicode categories "Non-spacing mark (Mn)"
: '\u0300'..'\u034E' // or "Combining spacing mark (Mc)".
| '\u0360'..'\u0362'
@@ -745,7 +745,7 @@ fragment UnicodeCombiningMark // Any character in the Unicode categories "Non-sp
| '\u0591'..'\u05A1'
| '\u05A3'..'\u05B9'
| '\u05BB'..'\u05BD'
- | '\u05BF'
+ | '\u05BF'
| '\u05C1'..'\u05C2'
| '\u05C4'
| '\u064B'..'\u0655'
@@ -863,7 +863,7 @@ fragment UnicodeDigit // Any character in the Unicode category "Decimal number
| '\u1810'..'\u1819'
| '\uFF10'..'\uFF19'
;
-
+
fragment UnicodeConnectorPunctuation // Any character in the Unicode category "Connector punctuation (Pc)".
: '\u005F'
| '\u203F'..'\u2040'
@@ -873,7 +873,7 @@ fragment UnicodeConnectorPunctuation // Any character in the Unicode category "C
| '\uFF3F'
| '\uFF65'
;
-
+
Comment
: '/*' (options {greedy=false;} : .)* '*/' {$channel=HIDDEN;}
;
diff --git a/examples/jsonParser.py b/examples/jsonParser.py
index f080c6c..6319c36 100644
--- a/examples/jsonParser.py
+++ b/examples/jsonParser.py
@@ -11,19 +11,19 @@
# Updated 9 Aug 2016 - use more current pyparsing constructs/idioms
#
json_bnf = """
-object
- { members }
- {}
-members
- string : value
- members , string : value
-array
+object
+ { members }
+ {}
+members
+ string : value
+ members , string : value
+array
[ elements ]
- []
-elements
- value
- elements , value
-value
+ []
+elements
+ value
+ elements , value
+value
string
number
object
@@ -55,10 +55,10 @@ memberDef = Group(jsonString + COLON + jsonValue)
jsonMembers = delimitedList(memberDef)
jsonObject << Dict(LBRACE + Optional(jsonMembers) + RBRACE)
-jsonComment = cppStyleComment
+jsonComment = cppStyleComment
jsonObject.ignore(jsonComment)
-
+
if __name__ == "__main__":
testdata = """
{
@@ -66,7 +66,7 @@ if __name__ == "__main__":
"title": "example glossary",
"GlossDiv": {
"title": "S",
- "GlossList":
+ "GlossList":
{
"ID": "SGML",
"SortAs": "SGML",
@@ -103,5 +103,3 @@ if __name__ == "__main__":
testPrint( results.glossary.GlossDiv.GlossList.Acronym )
testPrint( results.glossary.GlossDiv.GlossList.EvenPrimesGreaterThan2 )
testPrint( results.glossary.GlossDiv.GlossList.PrimesLessThan10 )
-
-
diff --git a/examples/linenoExample.py b/examples/linenoExample.py
index 1186f48..0f84e10 100644
--- a/examples/linenoExample.py
+++ b/examples/linenoExample.py
@@ -22,8 +22,8 @@ def reportLongWords(st,locn,toks):
print("The full line of text was:")
print("'%s'" % line(locn,st))
print((" "*col(locn,st))+" ^")
- print()
-
+ print()
+
wd = Word(alphas).setParseAction( reportLongWords )
OneOrMore(wd).parseString(data)
@@ -39,7 +39,7 @@ class Token(object):
self.col = col(locn,st)
def __str__(self):
return "%(tokenString)s (line: %(lineNo)d, col: %(col)d)" % self.__dict__
-
+
def createTokenObject(st,locn,toks):
return Token(st,locn, toks[0])
diff --git a/examples/listAllMatches.py b/examples/listAllMatches.py
index 1b1bdd4..7301c84 100644
--- a/examples/listAllMatches.py
+++ b/examples/listAllMatches.py
@@ -49,4 +49,3 @@ print(sorted(set(results.cons)))
print('')
print(results.others)
print(sorted(set(results.others)))
-
diff --git a/examples/lucene_grammar.py b/examples/lucene_grammar.py
index 6e404d8..fc104e0 100644
--- a/examples/lucene_grammar.py
+++ b/examples/lucene_grammar.py
@@ -42,11 +42,11 @@ boost = (CARAT + number("boost"))
string_expr = Group(string + proximity_modifier) | string
word_expr = Group(valid_word + fuzzy_modifier) | valid_word
-term << (Optional(field_name("field") + COLON) +
+term << (Optional(field_name("field") + COLON) +
(word_expr | string_expr | range_search | Group(LPAR + expression + RPAR)) +
Optional(boost))
term.setParseAction(lambda t:[t] if 'field' in t or 'boost' in t else None)
-
+
expression << infixNotation(term,
[
(required_modifier | prohibit_modifier, 1, opAssoc.RIGHT),
@@ -302,7 +302,7 @@ failtests = r"""
XY\u005A
item:\ item:ABCD\
\
- a\ or b
+ a\ or b
a\:b\-c
a\:b\+c
a\:b\-c\*
@@ -315,5 +315,5 @@ failtests = r"""
success1, _ = expression.runTests(tests)
success2, _ = expression.runTests(failtests, failureTests=True)
-
+
print(("FAIL", "OK")[success1 and success2])
diff --git a/examples/macroExpander.py b/examples/macroExpander.py
index 327976c..c6b7034 100644
--- a/examples/macroExpander.py
+++ b/examples/macroExpander.py
@@ -1,6 +1,6 @@
# macroExpander.py
#
-# Example pyparsing program for performing macro expansion, similar to
+# Example pyparsing program for performing macro expansion, similar to
# the C pre-processor. This program is not as fully-featured, simply
# processing macros of the form:
# #def xxx yyyyy
@@ -14,11 +14,11 @@
#
from pyparsing import *
-# define the structure of a macro definition (the empty term is used
+# define the structure of a macro definition (the empty term is used
# to advance to the next non-whitespace character)
identifier = Word(alphas+"_",alphanums+"_")
macroDef = "#def" + identifier("macro") + empty + restOfLine("value")
-
+
# define a placeholder for defined macros - initially nothing
macroExpr = Forward()
macroExpr << NoMatch()
diff --git a/examples/matchPreviousDemo.py b/examples/matchPreviousDemo.py
index f0812e9..34991f8 100644
--- a/examples/matchPreviousDemo.py
+++ b/examples/matchPreviousDemo.py
@@ -8,11 +8,11 @@ src = """
class a
...
end a;
-
+
class b
...
end b;
-
+
class c
...
end d;"""
@@ -20,7 +20,7 @@ end d;"""
identifier = Word(alphas)
-classIdent = identifier("classname") # note that this also makes a copy of identifier
+classIdent = identifier("classname") # note that this also makes a copy of identifier
classHead = "class" + classIdent
classBody = "..."
classEnd = "end" + matchPreviousLiteral(classIdent) + ';'
@@ -30,4 +30,4 @@ classDefn = classHead + classBody + classEnd
# classDefn = classHead + classBody - classEnd
for tokens in classDefn.searchString(src):
- print(tokens.classname) \ No newline at end of file
+ print(tokens.classname)
diff --git a/examples/nested.py b/examples/nested.py
index 24cf2f4..7a0f803 100644
--- a/examples/nested.py
+++ b/examples/nested.py
@@ -10,12 +10,12 @@ from pyparsing import *
import pprint
data = """
-{
- { item1 "item with } in it" }
- {
- {item2a item2b }
- {item3}
- }
+{
+ { item1 "item with } in it" }
+ {
+ {item2a item2b }
+ {item3}
+ }
}
"""
@@ -27,4 +27,3 @@ print(( (nestedItems+stringEnd).parseString(data).asList() ))
# use default delimiters of ()'s
mathExpr = nestedExpr()
print(( mathExpr.parseString( "((( ax + by)*C) *(Z | (E^F) & D))") ))
-
diff --git a/examples/numerics.py b/examples/numerics.py
index 5ab99dd..0af3cae 100644
--- a/examples/numerics.py
+++ b/examples/numerics.py
@@ -9,41 +9,41 @@
# Format samples from https://docs.oracle.com/cd/E19455-01/806-0169/overview-9/index.html
#
tests = """\
-# Canadian (English and French)
-4 294 967 295,000
+# Canadian (English and French)
+4 294 967 295,000
-# Danish
-4 294 967 295,000
+# Danish
+4 294 967 295,000
-# Finnish
-4 294 967 295,000
+# Finnish
+4 294 967 295,000
-# French
-4 294 967 295,000
+# French
+4 294 967 295,000
-# German
-4 294 967 295,000
+# German
+4 294 967 295,000
-# Italian
-4.294.967.295,000
+# Italian
+4.294.967.295,000
-# Norwegian
-4.294.967.295,000
+# Norwegian
+4.294.967.295,000
-# Spanish
-4.294.967.295,000
+# Spanish
+4.294.967.295,000
-# Swedish
-4 294 967 295,000
+# Swedish
+4 294 967 295,000
-# GB-English
-4,294,967,295.000
+# GB-English
+4,294,967,295.000
-# US-English
-4,294,967,295.000
+# US-English
+4,294,967,295.000
-# Thai
-4,294,967,295.000
+# Thai
+4,294,967,295.000
"""
from pyparsing import Regex
diff --git a/examples/oc.py b/examples/oc.py
index cf656ec..fdf7bcd 100644
--- a/examples/oc.py
+++ b/examples/oc.py
@@ -74,7 +74,7 @@ from pyparsing import *
ParserElement.enablePackrat()
LPAR,RPAR,LBRACK,RBRACK,LBRACE,RBRACE,SEMI,COMMA = map(Suppress, "()[]{};,")
-INT, CHAR, WHILE, DO, IF, ELSE, RETURN = map(Keyword,
+INT, CHAR, WHILE, DO, IF, ELSE, RETURN = map(Keyword,
"int char while do if else return".split())
NAME = Word(alphas+"_", alphanums+"_")
@@ -86,7 +86,7 @@ TYPE = Group((INT | CHAR) + ZeroOrMore("*"))
expr = Forward()
func_call = Group(NAME + LPAR + Group(Optional(delimitedList(expr))) + RPAR)
operand = func_call | NAME | integer | char | string_
-expr <<= (infixNotation(operand,
+expr <<= (infixNotation(operand,
[
(oneOf('! - *'), 1, opAssoc.RIGHT),
(oneOf('++ --'), 1, opAssoc.RIGHT),
@@ -95,8 +95,8 @@ expr <<= (infixNotation(operand,
(oneOf('+ -'), 2, opAssoc.LEFT),
(oneOf('< == > <= >= !='), 2, opAssoc.LEFT),
(Regex(r'(?<!=)=(?!=)'), 2, opAssoc.LEFT),
- ]) +
- Optional( LBRACK + expr + RBRACK |
+ ]) +
+ Optional( LBRACK + expr + RBRACK |
LPAR + Group(Optional(delimitedList(expr))) + RPAR )
)
@@ -110,7 +110,7 @@ returnstmt = RETURN - expr + SEMI
stmt << Group( ifstmt |
whilestmt |
dowhilestmt |
- returnstmt |
+ returnstmt |
expr + SEMI |
LBRACE + ZeroOrMore(stmt) + RBRACE |
SEMI)
diff --git a/examples/parsePythonValue.py b/examples/parsePythonValue.py
index 53c61fc..351dad2 100644
--- a/examples/parsePythonValue.py
+++ b/examples/parsePythonValue.py
@@ -35,11 +35,11 @@ noneLiteral = Literal("None").setParseAction(replaceWith(None))
listItem = real|integer|quotedString|unicodeString|boolLiteral|noneLiteral| \
Group(listStr) | tupleStr | dictStr
-tupleStr << ( Suppress("(") + Optional(delimitedList(listItem)) +
+tupleStr << ( Suppress("(") + Optional(delimitedList(listItem)) +
Optional(Suppress(",")) + Suppress(")") )
tupleStr.setParseAction( cvtTuple )
-listStr << (lbrack + Optional(delimitedList(listItem) +
+listStr << (lbrack + Optional(delimitedList(listItem) +
Optional(Suppress(","))) + rbrack)
listStr.setParseAction(cvtList, lambda t: t[0])
diff --git a/examples/parseResultsSumExample.py b/examples/parseResultsSumExample.py
index a18ba29..2c0f9fc 100644
--- a/examples/parseResultsSumExample.py
+++ b/examples/parseResultsSumExample.py
@@ -21,5 +21,3 @@ for test in (samplestr1,samplestr2,samplestr3,samplestr4,):
print(person.id)
print(person.dump())
print()
-
-
diff --git a/examples/parseTabularData.py b/examples/parseTabularData.py
index 3846310..5aa6f09 100644
--- a/examples/parseTabularData.py
+++ b/examples/parseTabularData.py
@@ -2,7 +2,7 @@
# parseTabularData.py
#
# Example of parsing data that is formatted in a tabular listing, with
-# potential for missing values. Uses new addCondition method on
+# potential for missing values. Uses new addCondition method on
# ParserElements.
#
# Copyright 2015, Paul McGuire
@@ -27,20 +27,20 @@ def mustMatchCols(startloc,endloc):
def tableValue(expr, colstart, colend):
empty_cell_is_zero = False
if empty_cell_is_zero:
- return Optional(expr.copy().addCondition(mustMatchCols(colstart,colend),
- message="text not in expected columns"),
+ return Optional(expr.copy().addCondition(mustMatchCols(colstart,colend),
+ message="text not in expected columns"),
default=0)
else:
- return Optional(expr.copy().addCondition(mustMatchCols(colstart,colend),
+ return Optional(expr.copy().addCondition(mustMatchCols(colstart,colend),
message="text not in expected columns"))
# define the grammar for this simple table
colorname = Word(alphas)
integer = Word(nums).setParseAction(lambda t: int(t[0])).setName("integer")
-row = (colorname("name") +
- tableValue(integer, 11, 12)("S") +
- tableValue(integer, 15, 16)("M") +
+row = (colorname("name") +
+ tableValue(integer, 11, 12)("S") +
+ tableValue(integer, 15, 16)("M") +
tableValue(integer, 19, 20)("L"))
# parse the sample text - skip over the header and counter lines
diff --git a/examples/partial_gene_match.py b/examples/partial_gene_match.py
index 8bf5f7c..e19cf8b 100644
--- a/examples/partial_gene_match.py
+++ b/examples/partial_gene_match.py
@@ -1,6 +1,6 @@
# parital_gene_match.py
#
-# Example showing how to create a customized pyparsing Token, in this case,
+# Example showing how to create a customized pyparsing Token, in this case,
# one that is very much like Literal, but which tolerates up to 'n' character
# mismatches
from pyparsing import *
@@ -12,77 +12,77 @@ datafile = urllib.request.urlopen("http://toxodb.org/common/downloads/release-6.
fastasrc = datafile.read()
datafile.close()
-"""
-Sample header:
+"""
+Sample header:
>NC_001799-6-2978-2778 | organism=Toxoplasma_gondii_RH | location=NC_001799:2778-2978(-) | length=201
-"""
-integer = Word(nums).setParseAction(lambda t:int(t[0]))
-genebit = Group(">" + Word(alphanums.upper()+"-_") + "|" +
- Word(printables)("id") + SkipTo("length=", include=True) +
- integer("genelen") + LineEnd() +
- Combine(OneOrMore(Word("ACGTN")),adjacent=False)("gene"))
-
+"""
+integer = Word(nums).setParseAction(lambda t:int(t[0]))
+genebit = Group(">" + Word(alphanums.upper()+"-_") + "|" +
+ Word(printables)("id") + SkipTo("length=", include=True) +
+ integer("genelen") + LineEnd() +
+ Combine(OneOrMore(Word("ACGTN")),adjacent=False)("gene"))
+
# read gene data from .fasta file - takes just a few seconds
-genedata = OneOrMore(genebit).parseString(fastasrc)
+genedata = OneOrMore(genebit).parseString(fastasrc)
-class CloseMatch(Token):
+class CloseMatch(Token):
"""A special subclass of Token that does *close* matches. For each
close match of the given string, a tuple is returned giving the
found close match, and a list of mismatch positions."""
- def __init__(self, seq, maxMismatches=1):
- super(CloseMatch,self).__init__()
- self.name = seq
- self.sequence = seq
- self.maxMismatches = maxMismatches
- self.errmsg = "Expected " + self.sequence
- self.mayIndexError = False
- self.mayReturnEmpty = False
-
- def parseImpl( self, instring, loc, doActions=True ):
- start = loc
- instrlen = len(instring)
- maxloc = start + len(self.sequence)
-
- if maxloc <= instrlen:
- seq = self.sequence
- seqloc = 0
- mismatches = []
- throwException = False
- done = False
- while loc < maxloc and not done:
- if instring[loc] != seq[seqloc]:
- mismatches.append(seqloc)
- if len(mismatches) > self.maxMismatches:
- throwException = True
- done = True
- loc += 1
- seqloc += 1
- else:
- throwException = True
-
- if throwException:
- exc = self.myException
- exc.loc = loc
- exc.pstr = instring
- raise exc
-
- return loc, (instring[start:loc],mismatches)
+ def __init__(self, seq, maxMismatches=1):
+ super(CloseMatch,self).__init__()
+ self.name = seq
+ self.sequence = seq
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected " + self.sequence
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.sequence)
+
+ if maxloc <= instrlen:
+ seq = self.sequence
+ seqloc = 0
+ mismatches = []
+ throwException = False
+ done = False
+ while loc < maxloc and not done:
+ if instring[loc] != seq[seqloc]:
+ mismatches.append(seqloc)
+ if len(mismatches) > self.maxMismatches:
+ throwException = True
+ done = True
+ loc += 1
+ seqloc += 1
+ else:
+ throwException = True
+
+ if throwException:
+ exc = self.myException
+ exc.loc = loc
+ exc.pstr = instring
+ raise exc
+
+ return loc, (instring[start:loc],mismatches)
# using the genedata extracted above, look for close matches of a gene sequence
searchseq = CloseMatch("TTAAATCTAGAAGAT", 3)
-for g in genedata:
- print("%s (%d)" % (g.id, g.genelen))
- print("-"*24)
- for t,startLoc,endLoc in searchseq.scanString(g.gene, overlap=True):
- matched, mismatches = t[0]
- print("MATCH:", searchseq.sequence)
- print("FOUND:", matched)
- if mismatches:
- print(" ", ''.join(' ' if i not in mismatches else '*'
- for i,c in enumerate(searchseq.sequence)))
- else:
- print("<exact match>")
- print("at location", startLoc)
- print()
- print() \ No newline at end of file
+for g in genedata:
+ print("%s (%d)" % (g.id, g.genelen))
+ print("-"*24)
+ for t,startLoc,endLoc in searchseq.scanString(g.gene, overlap=True):
+ matched, mismatches = t[0]
+ print("MATCH:", searchseq.sequence)
+ print("FOUND:", matched)
+ if mismatches:
+ print(" ", ''.join(' ' if i not in mismatches else '*'
+ for i,c in enumerate(searchseq.sequence)))
+ else:
+ print("<exact match>")
+ print("at location", startLoc)
+ print()
+ print()
diff --git a/examples/position.py b/examples/position.py
index 984c018..d88c14a 100644
--- a/examples/position.py
+++ b/examples/position.py
@@ -1,12 +1,12 @@
from pyparsing import *
-text = """Lorem ipsum dolor sit amet, consectetur adipisicing
-elit, sed do eiusmod tempor incididunt ut labore et dolore magna
-aliqua. Ut enim ad minim veniam, quis nostrud exercitation
-ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis
-aute irure dolor in reprehenderit in voluptate velit esse cillum
-dolore eu fugiat nulla pariatur. Excepteur sint occaecat
-cupidatat non proident, sunt in culpa qui officia deserunt
+text = """Lorem ipsum dolor sit amet, consectetur adipisicing
+elit, sed do eiusmod tempor incididunt ut labore et dolore magna
+aliqua. Ut enim ad minim veniam, quis nostrud exercitation
+ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis
+aute irure dolor in reprehenderit in voluptate velit esse cillum
+dolore eu fugiat nulla pariatur. Excepteur sint occaecat
+cupidatat non proident, sunt in culpa qui officia deserunt
mollit anim id est laborum"""
# find all words beginning with a vowel
@@ -51,5 +51,3 @@ for ivowelInfo in (initialConsWord | locateInitialVowels).searchString(text):
if not ivowelInfo:
continue
print(ivowelInfo.locn, ':', ivowelInfo.word)
-
-
diff --git a/examples/protobuf_parser.py b/examples/protobuf_parser.py
index 8f8b12d..9be2c7e 100644
--- a/examples/protobuf_parser.py
+++ b/examples/protobuf_parser.py
@@ -14,7 +14,7 @@ integer = Regex(r"[+-]?\d+")
LBRACE,RBRACE,LBRACK,RBRACK,LPAR,RPAR,EQ,SEMI = map(Suppress,"{}[]()=;")
-kwds = """message required optional repeated enum extensions extends extend
+kwds = """message required optional repeated enum extensions extends extend
to package service rpc returns true false option import"""
for kw in kwds.split():
exec("{}_ = Keyword('{}')".format(kw.upper(), kw))
@@ -23,11 +23,11 @@ messageBody = Forward()
messageDefn = MESSAGE_ - ident("messageId") + LBRACE + messageBody("body") + RBRACE
-typespec = oneOf("""double float int32 int64 uint32 uint64 sint32 sint64
+typespec = oneOf("""double float int32 int64 uint32 uint64 sint32 sint64
fixed32 fixed64 sfixed32 sfixed64 bool string bytes""") | ident
rvalue = integer | TRUE_ | FALSE_ | ident
fieldDirective = LBRACK + Group(ident + EQ + rvalue) + RBRACK
-fieldDefn = (( REQUIRED_ | OPTIONAL_ | REPEATED_ )("fieldQualifier") -
+fieldDefn = (( REQUIRED_ | OPTIONAL_ | REPEATED_ )("fieldQualifier") -
typespec("typespec") + ident("ident") + EQ + integer("fieldint") + ZeroOrMore(fieldDirective) + SEMI)
# enumDefn ::= 'enum' ident '{' { ident '=' integer ';' }* '}'
@@ -43,8 +43,8 @@ messageExtension = EXTEND_ - ident + LBRACE + messageBody + RBRACE
messageBody << Group(ZeroOrMore( Group(fieldDefn | enumDefn | messageDefn | extensionsDefn | messageExtension) ))
# methodDefn ::= 'rpc' ident '(' [ ident ] ')' 'returns' '(' [ ident ] ')' ';'
-methodDefn = (RPC_ - ident("methodName") +
- LPAR + Optional(ident("methodParam")) + RPAR +
+methodDefn = (RPC_ - ident("methodName") +
+ LPAR + Optional(ident("methodParam")) + RPAR +
RETURNS_ + LPAR + Optional(ident("methodReturn")) + RPAR)
# serviceDefn ::= 'service' ident '{' methodDefn* '}'
@@ -66,10 +66,10 @@ parser = Optional(packageDirective) + ZeroOrMore(topLevelStatement)
parser.ignore(comment)
-test1 = """message Person {
- required int32 id = 1;
- required string name = 2;
- optional string email = 3;
+test1 = """message Person {
+ required int32 id = 1;
+ required string name = 2;
+ optional string email = 3;
}"""
test2 = """package tutorial;
diff --git a/examples/pymicko.py b/examples/pymicko.py
index 0db2938..e0fe205 100644
--- a/examples/pymicko.py
+++ b/examples/pymicko.py
@@ -62,7 +62,7 @@ DEBUG = 0
# (small subset of C made for compiler course at Faculty of Technical Sciences, Chair for Applied Computer Sciences, Novi Sad, Serbia)
# Patterns:
-
+
# letter
# -> "_" | "a" | "A" | "b" | "B" | "c" | "C" | "d" | "D" | "e" | "E" | "f"
# | "F" | "g" | "G" | "h" | "H" | "i" | "I" | "j" | "J" | "k" | "K" | "l"
@@ -72,56 +72,56 @@ DEBUG = 0
# digit
# -> "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"
-
+
# identifier
# -> letter ( letter | digit )*
-
+
# int_constant
# -> digit +
-
+
# unsigned_constant
# -> digit + ( "u" | "U" )
-
+
# Productions:
-
+
# program
# -> variable_list function_list
# -> function_list
-
+
# variable_list
# -> variable ";"
# -> variable_list variable ";"
-
+
# variable
# -> type identifier
-
+
# type
# -> "int"
# -> "unsigned"
-
+
# function_list
# -> function
# -> function_list function
-
+
# function
# -> type identifier "(" parameters ")" body
-
+
# parameters
# -> <empty>
# -> parameter_list
-
+
# parameter_list
# -> variable
# -> parameter_list "," variable
-
+
# body
# -> "{" variable_list statement_list "}"
# -> "{" statement_list "}"
-
+
# statement_list
# -> <empty>
# -> statement_list statement
-
+
# statement
# -> assignement_statement
# -> function_call_statement
@@ -129,20 +129,20 @@ DEBUG = 0
# -> while_statement
# -> return_statement
# -> compound_statement
-
+
# assignement_statement
# -> identifier "=" num_exp ";"
-
+
# num_exp
# -> mul_exp
# -> num_exp "+" mul_exp
# -> num_exp "-" mul_exp
-
+
# mul_exp
# -> exp
# -> mul_exp "*" exp
# -> mul_exp "/" exp
-
+
# exp
# -> constant
# -> identifier
@@ -150,38 +150,38 @@ DEBUG = 0
# -> "(" num_exp ")"
# -> "+" exp
# -> "-" exp
-
+
# constant
# -> int_constant
# -> unsigned_constant
-
+
# function_call
# -> identifier "(" arguments ")"
-
+
# arguments
# -> <empty>
# -> argument_list
-
+
# argument_list
# -> num_exp
# -> argument_list "," num_exp
-
+
# function_call_statement
# -> function_call ";"
-
+
# if_statement
# -> "if" "(" log_exp ")" statement
# -> "if" "(" log_exp ")" statement "else" statement
# -> -> -> -> -> -> -> -> 2
-
+
# log_exp
# -> and_exp
# -> log_exp "||" and_exp
-
+
# and_exp
# -> rel_exp
# -> and_exp "&&" rel_exp
-
+
# rel_exp
# -> num_exp "<" num_exp
# -> num_exp ">" num_exp
@@ -189,16 +189,16 @@ DEBUG = 0
# -> num_exp ">=" num_exp
# -> num_exp "==" num_exp
# -> num_exp "!=" num_exp
-
+
# while_statement
# -> "while" "(" log_exp ")" statement
-
+
# return_statement
# -> "return" num_exp ";"
-
+
# compound_statement
# -> "{" statement_list "}"
-
+
# Comment: /* a comment */
##########################################################################################
@@ -280,13 +280,13 @@ class SemanticException(Exception):
self.text = line(exshared.location, exshared.text)
else:
self.line = self.col = self.text = None
-
- def _get_message(self):
+
+ def _get_message(self):
return self._message
- def _set_message(self, message):
+ def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
-
+
def __str__(self):
"""String representation of the semantic error"""
msg = "Error"
@@ -322,7 +322,7 @@ class SymbolTableEntry(object):
"""Sets attribute's name and value"""
self.attribute_name = name
self.attribute = value
-
+
def attribute_str(self):
"""Returns attribute string (used only for table display)"""
return "{}={}".format(self.attribute_name, self.attribute) if self.attribute != None else "None"
@@ -468,7 +468,7 @@ class SymbolTable(object):
except Exception:
self.error()
return same
-
+
def same_type_as_argument(self, index, function_index, argument_number):
"""Returns True if index and function's argument are of the same type
index - index in symbol table
@@ -924,7 +924,7 @@ class MicroC(object):
if print_location and (exshared.location != None):
msg += "\n%s" % wtext
print(msg)
-
+
def data_begin_action(self):
"""Inserts text at start of data segment"""
@@ -1311,7 +1311,7 @@ if 0:
#main program
mc = MicroC()
output_file = "output.asm"
-
+
if len(argv) == 1:
input_file = stdin
elif len(argv) == 2:
@@ -1345,7 +1345,7 @@ if 0:
##########################################################################################
if __name__ == "__main__":
-
+
test_program_example = """
int a;
int b;
@@ -1384,4 +1384,4 @@ if __name__ == "__main__":
mc = MicroC()
mc.parse_text(test_program_example)
- print(mc.codegen.code) \ No newline at end of file
+ print(mc.codegen.code)
diff --git a/examples/pythonGrammarParser.py b/examples/pythonGrammarParser.py
index 706f758..aed6bbe 100644
--- a/examples/pythonGrammarParser.py
+++ b/examples/pythonGrammarParser.py
@@ -5,7 +5,7 @@
from pyparsing import *
-# should probably read this from the Grammar file provided with the Python source, but
+# should probably read this from the Grammar file provided with the Python source, but
# this just skips that step and inlines the bnf text directly - this grammar was taken from
# Python 2.4.1
#
@@ -135,15 +135,15 @@ class SemanticGroup(object):
self.contents = contents
while self.contents[-1].__class__ == self.__class__:
self.contents = self.contents[:-1] + self.contents[-1].contents
-
+
def __str__(self):
- return "{}({})".format(self.label,
+ return "{}({})".format(self.label,
" ".join([isinstance(c,str) and c or str(c) for c in self.contents]) )
-
+
class OrList(SemanticGroup):
label = "OR"
pass
-
+
class AndList(SemanticGroup):
label = "AND"
pass
@@ -151,7 +151,7 @@ class AndList(SemanticGroup):
class OptionalGroup(SemanticGroup):
label = "OPT"
pass
-
+
class Atom(SemanticGroup):
def __init__(self,contents):
if len(contents) > 1:
@@ -162,10 +162,10 @@ class Atom(SemanticGroup):
self.contents = contents
else:
self.contents = contents[0]
-
+
def __str__(self):
return "{}{}".format(self.rep, self.contents)
-
+
def makeGroupObject(cls):
def groupAction(s,l,t):
try:
diff --git a/examples/rangeCheck.py b/examples/rangeCheck.py
index b89a1e3..219defa 100644
--- a/examples/rangeCheck.py
+++ b/examples/rangeCheck.py
@@ -1,6 +1,6 @@
# rangeCheck.py
#
-# A sample program showing how parse actions can convert parsed
+# A sample program showing how parse actions can convert parsed
# strings into a data type or object, and to validate the parsed value.
#
# Updated to use new addCondition method and expr() copy.
@@ -38,7 +38,7 @@ integer.setParseAction(lambda t:int(t[0]))
month = ranged_value(integer, 1, 12)
day = ranged_value(integer, 1, 31)
year = ranged_value(integer, 2000, None)
-
+
SLASH = Suppress('/')
dateExpr = year("year") + SLASH + month("month") + Optional(SLASH + day("day"))
dateExpr.setName("date")
@@ -58,5 +58,3 @@ dateExpr.runTests("""
2004/2/29
2004/2
1999/12/31""")
-
-
diff --git a/examples/readJson.py b/examples/readJson.py
index deca53b..f691ea5 100644
--- a/examples/readJson.py
+++ b/examples/readJson.py
@@ -1,10 +1,9 @@
-
#~ url = "http://cmsdoc.cern.ch/cms/test/aprom/phedex/dev/gowri/datasvc/tbedi/requestDetails"
-#~ params = {'format':'json'}
-#~ import urllib
-#~ eparams = urllib.urlencode(params)
-#~ import urllib2
-#~ request = urllib2.Request(url,eparams)
+#~ params = {'format':'json'}
+#~ import urllib
+#~ eparams = urllib.urlencode(params)
+#~ import urllib2
+#~ request = urllib2.Request(url,eparams)
#~ response = urllib2.urlopen(request)
#~ s = response.read()
#~ response.close()
diff --git a/examples/removeLineBreaks.py b/examples/removeLineBreaks.py
index ba4b498..a94dd35 100644
--- a/examples/removeLineBreaks.py
+++ b/examples/removeLineBreaks.py
@@ -3,7 +3,7 @@
# Demonstration of the pyparsing module, converting text files
# with hard line-breaks to text files with line breaks only
# between paragraphs. (Helps when converting downloads from Project
-# Gutenberg - http://www.gutenberg.org - to import to word processing apps
+# Gutenberg - http://www.gutenberg.org - to import to word processing apps
# that can reformat paragraphs once hard line-breaks are removed.)
#
# Uses parse actions and transformString to remove unwanted line breaks,
diff --git a/examples/romanNumerals.py b/examples/romanNumerals.py
index 27361f0..536fbb0 100644
--- a/examples/romanNumerals.py
+++ b/examples/romanNumerals.py
@@ -7,7 +7,7 @@ from pyparsing import *
def romanNumeralLiteral(numeralString, value):
return Literal(numeralString).setParseAction(replaceWith(value))
-
+
one = romanNumeralLiteral("I",1)
four = romanNumeralLiteral("IV",4)
five = romanNumeralLiteral("V",5)
@@ -22,8 +22,8 @@ fivehundred = romanNumeralLiteral("D",500)
ninehundred = romanNumeralLiteral("CM",900)
onethousand = romanNumeralLiteral("M",1000)
-numeral = ( onethousand | ninehundred | fivehundred | fourhundred |
- onehundred | ninety | fifty | forty | ten | nine | five |
+numeral = ( onethousand | ninehundred | fivehundred | fourhundred |
+ onehundred | ninety | fifty | forty | ten | nine | five |
four | one ).leaveWhitespace()
romanNumeral = OneOrMore(numeral).setParseAction( lambda s,l,t : sum(t) )
@@ -34,7 +34,7 @@ def makeRomanNumeral(n):
n -= limit
s += c
return n,s
-
+
ret = ""
while n >= 1000: n,ret = addDigit(n,1000,"M",ret)
while n >= 900: n,ret = addDigit(n, 900,"CM",ret)
@@ -66,9 +66,3 @@ test("XIV")
test("XIX")
test("MCMLXXX")
test("MMVI")
-
-
-
-
-
-
diff --git a/examples/scanExamples.py b/examples/scanExamples.py
index 24ae0e7..4ee62a1 100644
--- a/examples/scanExamples.py
+++ b/examples/scanExamples.py
@@ -28,8 +28,8 @@ ident = Word(alphas, alphanums+"_")
macroDef = Literal("#define") + ident.setResultsName("name") + "=" + restOfLine.setResultsName("value")
for t,s,e in macroDef.scanString( testData ):
print(t.name,":", t.value)
-
-# or a quick way to make a dictionary of the names and values
+
+# or a quick way to make a dictionary of the names and values
# (return only key and value tokens, and construct dict from key-value pairs)
# - empty ahead of restOfLine advances past leading whitespace, does implicit lstrip during parsing
macroDef = Suppress("#define") + ident + Suppress("=") + empty + restOfLine
@@ -48,8 +48,8 @@ scopedIdent.setParseAction(lambda t: "_".join(t))
print("(replace namespace-scoped names with C-compatible names)")
print(scopedIdent.transformString( testData ))
-
-
+
+
# or a crude pre-processor (use parse actions to replace matching text)
def substituteMacro(s,l,t):
if t[0] in macros:
diff --git a/examples/searchParserAppDemo.py b/examples/searchParserAppDemo.py
index 021428d..d1bf8ba 100644
--- a/examples/searchParserAppDemo.py
+++ b/examples/searchParserAppDemo.py
@@ -3,7 +3,7 @@ from searchparser import SearchQueryParser
products = [ "grape juice", "grape jelly", "orange juice", "orange jujubees",
"strawberry jam", "prune juice", "prune butter", "orange marmalade",
"grapefruit juice" ]
-
+
class FruitSearchParser(SearchQueryParser):
def GetWord(self, word):
return { p for p in products if p.startswith(word + " ") }
diff --git a/examples/searchparser.py b/examples/searchparser.py
index 36e3cd7..d645a1f 100644
--- a/examples/searchparser.py
+++ b/examples/searchparser.py
@@ -2,7 +2,7 @@
version 2006-03-09
-This search query parser uses the excellent Pyparsing module
+This search query parser uses the excellent Pyparsing module
(http://pyparsing.sourceforge.net/) to parse search queries by users.
It handles:
@@ -30,7 +30,7 @@ are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
+ this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Estrate nor the names of its contributors may be used
to endorse or promote products derived from this software without specific
@@ -41,10 +41,10 @@ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
CONTRIBUTORS:
@@ -72,12 +72,12 @@ class SearchQueryParser:
'wordwildcard': self.evaluateWordWildcard,
}
self._parser = self.parser()
-
+
def parser(self):
"""
This function returns a parser.
The grammar should be like most full text search engines (Google, Tsearch, Lucene).
-
+
Grammar:
- a query consists of alphanumeric words, with an optional '*' wildcard
at the end of a word
@@ -89,19 +89,19 @@ class SearchQueryParser:
- if an operator is missing, use an 'and' operator
"""
operatorOr = Forward()
-
+
operatorWord = Group(Combine(Word(alphanums) + Suppress('*'))).setResultsName('wordwildcard') | \
Group(Word(alphanums)).setResultsName('word')
-
+
operatorQuotesContent = Forward()
operatorQuotesContent << (
(operatorWord + operatorQuotesContent) | operatorWord
)
-
+
operatorQuotes = Group(
Suppress('"') + operatorQuotesContent + Suppress('"')
).setResultsName("quotes") | operatorWord
-
+
operatorParenthesis = Group(
(Suppress("(") + operatorOr + Suppress(")"))
).setResultsName("parenthesis") | operatorQuotes
@@ -117,7 +117,7 @@ class SearchQueryParser:
).setResultsName("and") | Group(
operatorNot + OneOrMore(~oneOf("and or") + operatorAnd)
).setResultsName("and") | operatorNot)
-
+
operatorOr << (Group(
operatorAnd + Suppress(Keyword("or", caseless=True)) + operatorOr
).setResultsName("or") | operatorAnd)
@@ -158,7 +158,7 @@ class SearchQueryParser:
def evaluateWordWildcard(self, argument):
return self.GetWordWildcard(argument[0])
-
+
def evaluate(self, argument):
return self._methods[argument.getName()](argument)
@@ -230,7 +230,7 @@ class ParserTest(SearchQueryParser):
7: 'nothing',
8: 'helper',
}
-
+
index = {
'help': {1, 2, 4, 5},
'me': {2},
@@ -264,7 +264,7 @@ class ParserTest(SearchQueryParser):
if self.docs[item].count(search_string):
result.add(item)
return result
-
+
def GetNot(self, not_set):
all = set(list(self.docs.keys()))
return all.difference(not_set)
@@ -284,7 +284,7 @@ class ParserTest(SearchQueryParser):
print('>>>>>>>>>>>>>>>>>>>>>>Test ERROR<<<<<<<<<<<<<<<<<<<<<')
print('')
return all_ok
-
+
if __name__=='__main__':
if ParserTest().Test():
print('All tests OK')
diff --git a/examples/select_parser.py b/examples/select_parser.py
index 5925ad8..7b3727e 100644
--- a/examples/select_parser.py
+++ b/examples/select_parser.py
@@ -11,22 +11,22 @@ LPAR,RPAR,COMMA = map(Suppress,"(),")
select_stmt = Forward().setName("select statement")
# keywords
-(UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
+(UNION, ALL, AND, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
- HAVING, ORDER, BY, LIMIT, OFFSET, OR) = map(CaselessKeyword, """UNION, ALL, AND, INTERSECT,
- EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT,
+ HAVING, ORDER, BY, LIMIT, OFFSET, OR) = map(CaselessKeyword, """UNION, ALL, AND, INTERSECT,
+ EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER, CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT,
DISTINCT, FROM, WHERE, GROUP, BY, HAVING, ORDER, BY, LIMIT, OFFSET, OR""".replace(",","").split())
(CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
- COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
- CURRENT_TIMESTAMP) = map(CaselessKeyword, """CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE,
- END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE,
+ COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
+ CURRENT_TIMESTAMP) = map(CaselessKeyword, """CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE,
+ END, CASE, WHEN, THEN, EXISTS, COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE,
CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP""".replace(",","").split())
-keyword = MatchFirst((UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
+keyword = MatchFirst((UNION, ALL, INTERSECT, EXCEPT, COLLATE, ASC, DESC, ON, USING, NATURAL, INNER,
CROSS, LEFT, OUTER, JOIN, AS, INDEXED, NOT, SELECT, DISTINCT, FROM, WHERE, GROUP, BY,
HAVING, ORDER, BY, LIMIT, OFFSET, CAST, ISNULL, NOTNULL, NULL, IS, BETWEEN, ELSE, END, CASE, WHEN, THEN, EXISTS,
- COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
+ COLLATE, IN, LIKE, GLOB, REGEXP, MATCH, ESCAPE, CURRENT_TIME, CURRENT_DATE,
CURRENT_TIMESTAMP))
-
+
identifier = ~keyword + Word(alphas, alphanums+"_")
collation_name = identifier.copy()
column_name = identifier.copy()
@@ -88,20 +88,20 @@ join_constraint = Group(Optional(ON + expr | USING + LPAR + Group(delimitedList(
join_op = COMMA | Group(Optional(NATURAL) + Optional(INNER | CROSS | LEFT + OUTER | LEFT | OUTER) + JOIN)
join_source = Forward()
-single_source = ( (Group(database_name("database") + "." + table_name("table*")) | table_name("table*")) +
+single_source = ( (Group(database_name("database") + "." + table_name("table*")) | table_name("table*")) +
Optional(Optional(AS) + table_alias("table_alias*")) +
- Optional(INDEXED + BY + index_name("name") | NOT + INDEXED)("index") |
- (LPAR + select_stmt + RPAR + Optional(Optional(AS) + table_alias)) |
+ Optional(INDEXED + BY + index_name("name") | NOT + INDEXED)("index") |
+ (LPAR + select_stmt + RPAR + Optional(Optional(AS) + table_alias)) |
(LPAR + join_source + RPAR) )
-join_source << (Group(single_source + OneOrMore(join_op + single_source + join_constraint)) |
+join_source << (Group(single_source + OneOrMore(join_op + single_source + join_constraint)) |
single_source)
result_column = "*" | table_name + "." + "*" | Group(expr + Optional(Optional(AS) + column_alias))
select_core = (SELECT + Optional(DISTINCT | ALL) + Group(delimitedList(result_column))("columns") +
Optional(FROM + join_source("from*")) +
Optional(WHERE + expr("where_expr")) +
- Optional(GROUP + BY + Group(delimitedList(ordering_term))("group_by_terms") +
+ Optional(GROUP + BY + Group(delimitedList(ordering_term))("group_by_terms") +
Optional(HAVING + expr("having_expr"))))
select_stmt << (select_core + ZeroOrMore(compound_operator + select_core) +
diff --git a/examples/sexpParser.py b/examples/sexpParser.py
index 963d153..86fb089 100644
--- a/examples/sexpParser.py
+++ b/examples/sexpParser.py
@@ -5,8 +5,8 @@
#
# Updates:
# November, 2011 - fixed errors in precedence of alternatives in simpleString;
-# fixed exception raised in verifyLen to properly signal the input string
-# and exception location so that markInputline works correctly; fixed
+# fixed exception raised in verifyLen to properly signal the input string
+# and exception location so that markInputline works correctly; fixed
# definition of decimal to accept a single '0' and optional leading '-'
# sign; updated tests to improve parser coverage
#
@@ -17,7 +17,7 @@ BNF reference: http://theory.lcs.mit.edu/~rivest/sexp.txt
<sexp> :: <string> | <list>
<string> :: <display>? <simple-string> ;
-<simple-string> :: <raw> | <token> | <base-64> | <hexadecimal> |
+<simple-string> :: <raw> | <token> | <base-64> | <hexadecimal> |
<quoted-string> ;
<display> :: "[" <simple-string> "]" ;
<raw> :: <decimal> ":" <bytes> ;
@@ -27,7 +27,7 @@ BNF reference: http://theory.lcs.mit.edu/~rivest/sexp.txt
<token> :: <tokenchar>+ ;
<base-64> :: <decimal>? "|" ( <base-64-char> | <whitespace> )* "|" ;
<hexadecimal> :: "#" ( <hex-digit> | <white-space> )* "#" ;
-<quoted-string> :: <decimal>? <quoted-string-body>
+<quoted-string> :: <decimal>? <quoted-string-body>
<quoted-string-body> :: "\"" <bytes> "\""
<list> :: "(" ( <sexp> | <whitespace> )* ")" ;
<whitespace> :: <whitespace-char>* ;
@@ -65,11 +65,11 @@ hexadecimal = ("#" + OneOrMore(Word(hexnums)) + "#")\
bytes = Word(printables)
raw = Group(decimal("len") + Suppress(":") + bytes).setParseAction(verifyLen)
token = Word(alphanums + "-./_:*+=")
-base64_ = Group(Optional(decimal|hexadecimal,default=None)("len") + VBAR
+base64_ = Group(Optional(decimal|hexadecimal,default=None)("len") + VBAR
+ OneOrMore(Word( alphanums +"+/=" )).setParseAction(lambda t: b64decode("".join(t)))
+ VBAR).setParseAction(verifyLen)
-
-qString = Group(Optional(decimal,default=None)("len") +
+
+qString = Group(Optional(decimal,default=None)("len") +
dblQuotedString.setParseAction(removeQuotes)).setParseAction(verifyLen)
simpleString = base64_ | raw | decimal | token | hexadecimal | qString
@@ -86,7 +86,7 @@ string_ = Optional(display) + simpleString
sexp = Forward()
sexpList = Group(LPAR + ZeroOrMore(sexp) + RPAR)
sexp << ( string_ | sexpList )
-
+
######### Test data ###########
test00 = """(snicker "abc" (#03# |YWJj|))"""
test01 = """(certificate
@@ -142,14 +142,14 @@ test07 = """(defun factorial (x)
test51 = """(2:XX "abc" (#30# |YWJj|))"""
test51error = """(3:XX "abc" (#30# |YWJj|))"""
-test52 = """
- (and
- (or (> uid 1000)
- (!= gid 20)
- )
- (> quota 5.0e+03)
- )
- """
+test52 = """
+ (and
+ (or (> uid 1000)
+ (!= gid 20)
+ )
+ (> quota 5.0e+03)
+ )
+ """
# Run tests
t = None
diff --git a/examples/shapes.py b/examples/shapes.py
index c5f4867..73ed334 100644
--- a/examples/shapes.py
+++ b/examples/shapes.py
@@ -1,6 +1,6 @@
# shapes.py
#
-# A sample program showing how parse actions can convert parsed
+# A sample program showing how parse actions can convert parsed
# strings into a data type or object.
#
# Copyright 2012, Paul T. McGuire
@@ -13,7 +13,7 @@ class Shape(object):
def area(self):
raise NotImplementedException()
-
+
def __str__(self):
return "<{}>: {}".format(self.__class__.__name__, self.__dict__)
diff --git a/examples/simpleArith.py b/examples/simpleArith.py
index 825956b..af05373 100644
--- a/examples/simpleArith.py
+++ b/examples/simpleArith.py
@@ -34,7 +34,7 @@ factop = Literal('!')
# - rightLeftAssoc is the indicator whether the operator is
# right or left associative, using the pyparsing-defined
# constants opAssoc.RIGHT and opAssoc.LEFT.
-# - parseAction is the parse action to be associated with
+# - parseAction is the parse action to be associated with
# expressions matching this operator expression (the
# parse action tuple member may be omitted)
# 3. Call infixNotation passing the operand expression and
@@ -42,7 +42,7 @@ factop = Literal('!')
# as the generated pyparsing expression. You can then use
# this expression to parse input strings, or incorporate it
# into a larger, more complex grammar.
-#
+#
expr = infixNotation( operand,
[("!", 1, opAssoc.LEFT),
("^", 2, opAssoc.RIGHT),
@@ -63,5 +63,4 @@ test = ["9 + 2 + 3",
for t in test:
print(t)
print(expr.parseString(t))
- print('')
-
+ print('')
diff --git a/examples/simpleBool.py b/examples/simpleBool.py
index 5f355b7..d1df9a5 100644
--- a/examples/simpleBool.py
+++ b/examples/simpleBool.py
@@ -98,5 +98,3 @@ if __name__ == "__main__":
res = boolExpr.parseString(t)[0]
success = "PASS" if bool(res) == expected else "FAIL"
print (t,'\n', res, '=', bool(res),'\n', success, '\n')
-
-
diff --git a/examples/simpleSQL.py b/examples/simpleSQL.py
index 6cde6ce..18e93a0 100644
--- a/examples/simpleSQL.py
+++ b/examples/simpleSQL.py
@@ -35,11 +35,11 @@ whereCondition = Group(
( columnName + in_ + "(" + selectStmt + ")" ) |
( "(" + whereExpression + ")" )
)
-whereExpression << whereCondition + ZeroOrMore( ( and_ | or_ ) + whereExpression )
+whereExpression << whereCondition + ZeroOrMore( ( and_ | or_ ) + whereExpression )
# define the grammar
-selectStmt <<= (SELECT + ('*' | columnNameList)("columns") +
- FROM + tableNameList( "tables" ) +
+selectStmt <<= (SELECT + ('*' | columnNameList)("columns") +
+ FROM + tableNameList( "tables" ) +
Optional(Group(WHERE + whereExpression), "")("where"))
simpleSQL = selectStmt
@@ -61,7 +61,7 @@ if __name__ == "__main__":
Select A,B,C from Sys.dual
- Select A, B, C from Sys.dual, Table2
+ Select A, B, C from Sys.dual, Table2
# FAIL - invalid SELECT keyword
Xelect A, B, C from Sys.dual
diff --git a/examples/simpleWiki.py b/examples/simpleWiki.py
index 1813432..4ec3b71 100644
--- a/examples/simpleWiki.py
+++ b/examples/simpleWiki.py
@@ -12,7 +12,7 @@ def convertToHTML(opening,closing):
def conversionParseAction(s,l,t):
return opening + t[0] + closing
return conversionParseAction
-
+
italicized = QuotedString("*").setParseAction(convertToHTML("<I>","</I>"))
bolded = QuotedString("**").setParseAction(convertToHTML("<B>","</B>"))
boldItalicized = QuotedString("***").setParseAction(convertToHTML("<B><I>","</I></B>"))
@@ -22,7 +22,7 @@ def convertToHTML_A(s,l,t):
except ValueError:
raise ParseFatalException(s,l,"invalid URL link reference: " + t[0])
return '<A href="{}">{}</A>'.format(url,text)
-
+
urlRef = QuotedString("{{",endQuoteChar="}}").setParseAction(convertToHTML_A)
wikiMarkup = urlRef | boldItalicized | bolded | italicized
diff --git a/examples/snmp_api.h b/examples/snmp_api.h
index d75cb12..fc802d1 100644
--- a/examples/snmp_api.h
+++ b/examples/snmp_api.h
@@ -13,10 +13,10 @@
#ifndef DONT_SHARE_ERROR_WITH_OTHER_THREADS
#define SET_SNMP_ERROR(x) snmp_errno=(x)
#else
-#define SET_SNMP_ERROR(x)
+#define SET_SNMP_ERROR(x)
#endif
-
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -109,7 +109,7 @@ struct snmp_pdu {
long version;
int command; /* Type of this PDU */
long reqid; /* Request id - note: not incremented on retries */
- long msgid; /* Message id for V3 messages
+ long msgid; /* Message id for V3 messages
* note: incremented for each retry */
long transid; /* Unique ID for incoming transactions */
long sessid; /* Session id for AgentX messages */
@@ -291,7 +291,7 @@ extern void snmp_set_detail (const char *);
#define REPORT_usmStatsUnsupportedSecLevels_NUM 1
#define REPORT_usmStatsNotInTimeWindows_NUM 2
#define REPORT_usmStatsUnknownUserNames_NUM 3
-#define REPORT_usmStatsUnknownEngineIDs_NUM 4
+#define REPORT_usmStatsUnknownEngineIDs_NUM 4
#define REPORT_usmStatsWrongDigests_NUM 5
#define REPORT_usmStatsDecryptionErrors_NUM 6
@@ -478,7 +478,7 @@ int snmp_send (struct snmp_session *, struct snmp_pdu *);
* On any error, 0 is returned.
* The pdu is freed by snmp_send() unless a failure occured.
*/
-int snmp_async_send (struct snmp_session *, struct snmp_pdu *,
+int snmp_async_send (struct snmp_session *, struct snmp_pdu *,
snmp_callback, void *);
diff --git a/examples/sparser.py b/examples/sparser.py
index c3c3da0..68d8a52 100644
--- a/examples/sparser.py
+++ b/examples/sparser.py
@@ -2,7 +2,7 @@
"""
NAME:
- sparser.py
+ sparser.py
SYNOPSIS:
sparser.py [options] filename
@@ -64,7 +64,7 @@ debug_p = 0
#---positional args, default is empty---
-pargs = []
+pargs = []
#---other---
@@ -86,7 +86,7 @@ def fatal(ftn, txt):
"""If can't continue."""
msg = "{}.{}:FATAL:{}\n".format(modname, ftn, txt)
raise SystemExit(msg)
-
+
def usage():
"""Prints the docstring."""
print(__doc__)
@@ -129,10 +129,10 @@ class ParseFileLineByLine:
or '~user' to indicate a home directory, as well as URLs (for reading
only).
- Constructor:
+ Constructor:
ParseFileLineByLine(|filename|, |mode|='"r"'), where |filename| is the name
of the file (or a URL) and |mode| is one of '"r"' (read), '"w"' (write) or
- '"a"' (append, not supported for .Z files).
+ '"a"' (append, not supported for .Z files).
"""
def __init__(self, filename, mode = 'r'):
@@ -154,27 +154,27 @@ class ParseFileLineByLine:
raise IOError(2, 'No such file or directory: ' + filename)
filen, file_extension = os.path.splitext(filename)
command_dict = {
- ('.Z', 'r'):
+ ('.Z', 'r'):
"self.file = os.popen('uncompress -c ' + filename, mode)",
- ('.gz', 'r'):
+ ('.gz', 'r'):
"self.file = gzip.GzipFile(filename, 'rb')",
- ('.bz2', 'r'):
+ ('.bz2', 'r'):
"self.file = os.popen('bzip2 -dc ' + filename, mode)",
- ('.Z', 'w'):
+ ('.Z', 'w'):
"self.file = os.popen('compress > ' + filename, mode)",
- ('.gz', 'w'):
+ ('.gz', 'w'):
"self.file = gzip.GzipFile(filename, 'wb')",
- ('.bz2', 'w'):
+ ('.bz2', 'w'):
"self.file = os.popen('bzip2 > ' + filename, mode)",
- ('.Z', 'a'):
+ ('.Z', 'a'):
"raise IOError, (0, 'Can\'t append to .Z files')",
- ('.gz', 'a'):
+ ('.gz', 'a'):
"self.file = gzip.GzipFile(filename, 'ab')",
- ('.bz2', 'a'):
+ ('.bz2', 'a'):
"raise IOError, (0, 'Can\'t append to .bz2 files')",
}
- exec(command_dict.get((file_extension, mode),
+ exec(command_dict.get((file_extension, mode),
'self.file = open(filename, mode)'))
self.grammar = None
@@ -211,54 +211,54 @@ class ParseFileLineByLine:
decimal_sep = "."
sign = oneOf("+ -")
# part of printables without decimal_sep, +, -
- special_chars = string.replace('!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~',
- decimal_sep, "")
+ special_chars = string.replace('!"#$%&\'()*,./:;<=>?@[\\]^_`{|}~',
+ decimal_sep, "")
integer = ToInteger(
- Combine(Optional(sign) +
+ Combine(Optional(sign) +
Word(nums))).setName("integer")
positive_integer = ToInteger(
- Combine(Optional("+") +
+ Combine(Optional("+") +
Word(nums))).setName("integer")
negative_integer = ToInteger(
- Combine("-" +
+ Combine("-" +
Word(nums))).setName("integer")
real = ToFloat(
- Combine(Optional(sign) +
- Word(nums) +
- decimal_sep +
- Optional(Word(nums)) +
- Optional(oneOf("E e") +
+ Combine(Optional(sign) +
+ Word(nums) +
+ decimal_sep +
+ Optional(Word(nums)) +
+ Optional(oneOf("E e") +
Word(nums)))).setName("real")
positive_real = ToFloat(
- Combine(Optional("+") +
- Word(nums) +
- decimal_sep +
- Optional(Word(nums)) +
- Optional(oneOf("E e") +
+ Combine(Optional("+") +
+ Word(nums) +
+ decimal_sep +
+ Optional(Word(nums)) +
+ Optional(oneOf("E e") +
Word(nums)))).setName("real")
negative_real = ToFloat(
- Combine("-" +
- Word(nums) +
- decimal_sep +
- Optional(Word(nums)) +
- Optional(oneOf("E e") +
+ Combine("-" +
+ Word(nums) +
+ decimal_sep +
+ Optional(Word(nums)) +
+ Optional(oneOf("E e") +
Word(nums)))).setName("real")
qString = ( sglQuotedString | dblQuotedString ).setName("qString")
-
+
# add other characters we should skip over between interesting fields
integer_junk = Optional(
Suppress(
- Word(alphas +
- special_chars +
+ Word(alphas +
+ special_chars +
decimal_sep))).setName("integer_junk")
real_junk = Optional(
Suppress(
- Word(alphas +
+ Word(alphas +
special_chars))).setName("real_junk")
qString_junk = SkipTo(qString).setName("qString_junk")
# Now that 'integer', 'real', and 'qString' have been assigned I can
- # execute the definition file.
+ # execute the definition file.
exec(compile(open(self.parsedef).read(), self.parsedef, 'exec'))
# Build the grammar, combination of the 'integer', 'real, 'qString',
@@ -337,7 +337,7 @@ def main(pargs):
for i in fp:
print(i)
-
+
#-------------------------
if __name__ == '__main__':
ftn = "main"
@@ -361,5 +361,5 @@ if __name__ == '__main__':
#===Revision Log===
#Created by mkpythonproj:
-#2006-02-06 Tim Cera
+#2006-02-06 Tim Cera
#
diff --git a/examples/sql2dot.py b/examples/sql2dot.py
index 1156207..aa86c40 100644
--- a/examples/sql2dot.py
+++ b/examples/sql2dot.py
@@ -2,7 +2,7 @@
# sql2dot.py
#
-# Creates table graphics by parsing SQL table DML commands and
+# Creates table graphics by parsing SQL table DML commands and
# generating DOT language output.
#
# Adapted from a post at http://energyblog.blogspot.com/2006/04/blog-post_20.html.
@@ -38,15 +38,15 @@ class_id integer
alter table only student_registrations
add constraint students_link
- foreign key
+ foreign key
(student_id) references students(student_id);
alter table only student_registrations
- add constraint classes_link
- foreign key
+ add constraint classes_link
+ foreign key
(class_id) references classes(class_id);
""".upper()
-
+
from pyparsing import Literal, CaselessLiteral, Word, delimitedList \
,Optional, Combine, Group, alphas, nums, alphanums, Forward \
, oneOf, sglQuotedString, OneOrMore, ZeroOrMore, CharsNotIn \
@@ -55,7 +55,7 @@ from pyparsing import Literal, CaselessLiteral, Word, delimitedList \
skobki = "(" + ZeroOrMore(CharsNotIn(")")) + ")"
field_def = OneOrMore(Word(alphas,alphanums+"_\"':-") | skobki)
-def field_act(s,loc,tok):
+def field_act(s,loc,tok):
return ("<"+tok[0]+"> " + " ".join(tok)).replace("\"","\\\"")
field_def.setParseAction(field_act)
@@ -75,7 +75,7 @@ create_table_def.setParseAction(create_table_act)
add_fkey_def=Literal("ALTER")+"TABLE"+"ONLY" + Word(alphanums+"_").setResultsName("fromtable") + "ADD" \
+ "CONSTRAINT" + Word(alphanums+"_") + "FOREIGN"+"KEY"+"("+Word(alphanums+"_").setResultsName("fromcolumn")+")" \
- +"REFERENCES"+Word(alphanums+"_").setResultsName("totable")+"("+Word(alphanums+"_").setResultsName("tocolumn")+")"+";"
+ +"REFERENCES"+Word(alphanums+"_").setResultsName("totable")+"("+Word(alphanums+"_").setResultsName("tocolumn")+")"+";"
def add_fkey_act(toks):
return """ "%(fromtable)s":%(fromcolumn)s -> "%(totable)s":%(tocolumn)s """ % toks
@@ -90,7 +90,7 @@ statement_def = comment_def | create_table_def | add_fkey_def | other_statement
defs = OneOrMore(statement_def)
print("""digraph g { graph [ rankdir = "LR" ]; """)
-for i in defs.parseString(sampleSQL):
- if i!="":
+for i in defs.parseString(sampleSQL):
+ if i!="":
print(i)
-print("}") \ No newline at end of file
+print("}")
diff --git a/examples/stackish.py b/examples/stackish.py
index f80b4d6..3fa98f4 100644
--- a/examples/stackish.py
+++ b/examples/stackish.py
@@ -1,31 +1,31 @@
# stackish.py
#
-# Stackish is a data representation syntax, similar to JSON or YAML. For more info on
+# Stackish is a data representation syntax, similar to JSON or YAML. For more info on
# stackish, see http://www.savingtheinternetwithhate.com/stackish.html
#
# Copyright 2008, Paul McGuire
#
"""
-NUMBER A simple integer type that's just any series of digits.
-FLOAT A simple floating point type.
-STRING A string is double quotes with anything inside that's not a " or
- newline character. You can include \n and \" to include these
- characters.
-MARK Marks a point in the stack that demarcates the boundary for a nested
- group.
-WORD Marks the root node of a group, with the other end being the nearest
- MARK.
-GROUP Acts as the root node of an anonymous group.
-ATTRIBUTE Assigns an attribute name to the previously processed node.
- This means that just about anything can be an attribute, unlike in XML.
-BLOB A BLOB is unique to Stackish and allows you to record any content
+NUMBER A simple integer type that's just any series of digits.
+FLOAT A simple floating point type.
+STRING A string is double quotes with anything inside that's not a " or
+ newline character. You can include \n and \" to include these
+ characters.
+MARK Marks a point in the stack that demarcates the boundary for a nested
+ group.
+WORD Marks the root node of a group, with the other end being the nearest
+ MARK.
+GROUP Acts as the root node of an anonymous group.
+ATTRIBUTE Assigns an attribute name to the previously processed node.
+ This means that just about anything can be an attribute, unlike in XML.
+BLOB A BLOB is unique to Stackish and allows you to record any content
(even binary content) inside the structure. This is done by pre-
- sizing the data with the NUMBER similar to Dan Bernstein's netstrings
- setup.
-SPACE White space is basically ignored. This is interesting because since
- Stackish is serialized consistently this means you can use \n as the
- separation character and perform reasonable diffs on two structures.
+ sizing the data with the NUMBER similar to Dan Bernstein's netstrings
+ setup.
+SPACE White space is basically ignored. This is interesting because since
+ Stackish is serialized consistently this means you can use \n as the
+ separation character and perform reasonable diffs on two structures.
"""
from pyparsing import Suppress,Word,nums,alphas,alphanums,Combine,oneOf,\
@@ -45,7 +45,7 @@ strBody = Forward()
def setBodyLength(tokens):
strBody << Word(srange(r'[\0x00-\0xffff]'), exact=int(tokens[0]))
return ""
-BLOB = Combine(QUOTE + Word(nums).setParseAction(setBodyLength) +
+BLOB = Combine(QUOTE + Word(nums).setParseAction(setBodyLength) +
COLON + strBody + QUOTE)
item = Forward()
@@ -55,13 +55,13 @@ def assignUsing(s):
tokens[tokens[s]] = tokens[0]
del tokens[s]
return assignPA
-GROUP = (MARK +
- Group( ZeroOrMore(
- (item +
+GROUP = (MARK +
+ Group( ZeroOrMore(
+ (item +
Optional(ATTRIBUTE)("attr")
).setParseAction(assignUsing("attr"))
)
- ) +
+ ) +
( WORD("name") | UNMARK )
).setParseAction(assignUsing("name"))
item << (NUMBER | FLOAT | STRING | BLOB | GROUP )
diff --git a/examples/stateMachine2.py b/examples/stateMachine2.py
index 748bb8b..21e0545 100644
--- a/examples/stateMachine2.py
+++ b/examples/stateMachine2.py
@@ -1,5 +1,5 @@
# stateMachine.py
-#
+#
# module to define .pystate import handler
#
#import imputil
@@ -18,9 +18,9 @@ from pyparsing import Word, Group, ZeroOrMore, alphas, \
ident = Word(alphas+"_", alphanums+"_$")
-pythonKeywords = """and as assert break class continue def
- del elif else except exec finally for from global if import
- in is lambda None not or pass print raise return try while with
+pythonKeywords = """and as assert break class continue def
+ del elif else except exec finally for from global if import
+ in is lambda None not or pass print raise return try while with
yield True False"""
pythonKeywords = set(pythonKeywords.split())
def no_keywords_allowed(s,l,t):
@@ -46,7 +46,7 @@ namedStateMachine = Keyword("statemachine") + \
def expand_state_definition(source, loc, tokens):
indent = " " * (col(loc,source)-1)
statedef = []
-
+
# build list of states
states = set()
fromTo = {}
@@ -54,7 +54,7 @@ def expand_state_definition(source, loc, tokens):
states.add(tn.fromState)
states.add(tn.toState)
fromTo[tn.fromState] = tn.toState
-
+
# define base class for state classes
baseStateClass = tokens.name + "State"
statedef.extend([
@@ -63,17 +63,17 @@ def expand_state_definition(source, loc, tokens):
" return self.__class__.__name__",
" def next_state(self):",
" return self._next_state_class()" ])
-
+
# define all state classes
statedef.extend(
- "class {}({}): pass".format(s,baseStateClass)
+ "class {}({}): pass".format(s,baseStateClass)
for s in states )
statedef.extend(
- "{}._next_state_class = {}".format(s,fromTo[s])
+ "{}._next_state_class = {}".format(s,fromTo[s])
for s in states if s in fromTo )
-
+
return indent + ("\n"+indent).join(statedef)+"\n"
-
+
stateMachine.setParseAction(expand_state_definition)
def expand_named_state_definition(source,loc,tokens):
@@ -82,9 +82,9 @@ def expand_named_state_definition(source,loc,tokens):
# build list of states and transitions
states = set()
transitions = set()
-
+
baseStateClass = tokens.name + "State"
-
+
fromTo = {}
for tn in tokens.transitions:
states.add(tn.fromState)
@@ -99,7 +99,7 @@ def expand_named_state_definition(source,loc,tokens):
for s in states:
if s not in fromTo:
fromTo[s] = {}
-
+
# define state transition class
statedef.extend([
"class %sTransition:" % baseStateClass,
@@ -107,9 +107,9 @@ def expand_named_state_definition(source,loc,tokens):
" return self.transitionName",
])
statedef.extend(
- "{} = {}Transition()".format(tn,baseStateClass)
+ "{} = {}Transition()".format(tn,baseStateClass)
for tn in transitions)
- statedef.extend("{}.transitionName = '{}'".format(tn,tn)
+ statedef.extend("{}.transitionName = '{}'".format(tn,tn)
for tn in transitions)
# define base class for state classes
@@ -128,19 +128,19 @@ def expand_named_state_definition(source,loc,tokens):
" def __getattr__(self,name):",
" raise Exception(%s)" % excmsg,
])
-
+
# define all state classes
for s in states:
- statedef.append("class %s(%s): pass" %
+ statedef.append("class %s(%s): pass" %
(s,baseStateClass))
# define state transition maps and transition methods
for s in states:
trns = list(fromTo[s].items())
- statedef.append("%s.tnmap = {%s}" %
+ statedef.append("%s.tnmap = {%s}" %
(s, ",".join("%s:%s" % tn for tn in trns)) )
statedef.extend([
- "%s.%s = staticmethod(lambda : %s())" %
+ "%s.%s = staticmethod(lambda : %s())" %
(s,tn_,to_)
for tn_,to_ in trns
])
@@ -159,7 +159,7 @@ class SuffixImporter(object):
the PEP, and also used Doug Hellmann's PyMOTW article `Modules and
Imports`_, as a pattern.
- .. _`Modules and Imports`: http://www.doughellmann.com/PyMOTW/sys/imports.html
+ .. _`Modules and Imports`: http://www.doughellmann.com/PyMOTW/sys/imports.html
Define a subclass that specifies a :attr:`suffix` attribute, and
implements a :meth:`process_filedata` method. Then call the classmethod
@@ -199,7 +199,7 @@ class SuffixImporter(object):
checkpath = os.path.join(
dirpath,'{}.{}'.format(fullname,self.suffix))
yield checkpath
-
+
def find_module(self, fullname, path=None):
for checkpath in self.checkpath_iter(fullname):
if os.path.isfile(checkpath):
@@ -237,20 +237,20 @@ class PystateImporter(SuffixImporter):
# MATT-NOTE: re-worked :func:`get_state_machine`
# convert any statemachine expressions
- stateMachineExpr = (stateMachine |
+ stateMachineExpr = (stateMachine |
namedStateMachine).ignore(
pythonStyleComment)
generated_code = stateMachineExpr.transformString(data)
if DEBUG: print(generated_code)
- # compile code object from generated code
- # (strip trailing spaces and tabs, compile doesn't like
+ # compile code object from generated code
+ # (strip trailing spaces and tabs, compile doesn't like
# dangling whitespace)
COMPILE_MODE = 'exec'
- codeobj = compile(generated_code.rstrip(" \t"),
- module.__file__,
+ codeobj = compile(generated_code.rstrip(" \t"),
+ module.__file__,
COMPILE_MODE)
exec(codeobj, module.__dict__)
diff --git a/examples/urlExtractor.py b/examples/urlExtractor.py
index 10783de..58e74ae 100644
--- a/examples/urlExtractor.py
+++ b/examples/urlExtractor.py
@@ -23,11 +23,8 @@ with closing(urllib.request.urlopen("http://www.yahoo.com")) as serverListPage:
for toks,strt,end in link.scanString(htmlText):
print(toks.asList())
-# Create dictionary from list comprehension, assembled from each pair of tokens returned
+# Create dictionary from list comprehension, assembled from each pair of tokens returned
# from a matched URL.
-pprint.pprint(
+pprint.pprint(
{toks.body: toks.href for toks,strt,end in link.scanString(htmlText)}
)
-
-
-
diff --git a/examples/urlExtractorNew.py b/examples/urlExtractorNew.py
index 7d6a1b5..e258408 100644
--- a/examples/urlExtractorNew.py
+++ b/examples/urlExtractorNew.py
@@ -25,11 +25,8 @@ serverListPage.close()
for toks,strt,end in link.scanString(htmlText):
print(toks.startA.href,"->",toks.body)
-# Create dictionary from list comprehension, assembled from each pair of tokens returned
+# Create dictionary from list comprehension, assembled from each pair of tokens returned
# from a matched URL.
-pprint.pprint(
+pprint.pprint(
{ toks.body:toks.startA.href for toks,strt,end in link.scanString(htmlText) }
)
-
-
-
diff --git a/examples/verilogParse.py b/examples/verilogParse.py
index 05650df..0c63f19 100644
--- a/examples/verilogParse.py
+++ b/examples/verilogParse.py
@@ -125,7 +125,7 @@ def Verilog_BNF():
identifier2 = Regex(r"\\\S+").setParseAction(lambda t:t[0][1:]).setName("escapedIdent")#.setDebug()
identifier = identifier1 | identifier2
assert(identifier2 == r'\abc')
-
+
hexnums = nums + "abcdefABCDEF" + "_?"
base = Regex("'[bBoOdDhH]").setName("base")
basedNumber = Combine( Optional( Word(nums + "_") ) + base + Word(hexnums+"xXzZ"),
@@ -557,10 +557,10 @@ def Verilog_BNF():
port = portExpr | Group( ( DOT + identifier + LPAR + portExpr + RPAR ) )
moduleHdr = Group ( oneOf("module macromodule") + identifier +
- Optional( LPAR + Group( Optional( delimitedList(
- Group(oneOf("input output") +
+ Optional( LPAR + Group( Optional( delimitedList(
+ Group(oneOf("input output") +
(netDecl1Arg | netDecl2Arg | netDecl3Arg) ) |
- port ) ) ) +
+ port ) ) ) +
RPAR ) + SEMI ).setName("moduleHdr")
module = Group( moduleHdr +
@@ -710,7 +710,7 @@ else:
#~ lp = LineProfiler(ParseResults.__init__)
main()
-
+
#~ lp.print_stats()
#~ import hotshot
#~ p = hotshot.Profile("vparse.prof",1,1)
diff --git a/examples/withAttribute.py b/examples/withAttribute.py
index 062c9ae..7fa2bc8 100644
--- a/examples/withAttribute.py
+++ b/examples/withAttribute.py
@@ -3,7 +3,7 @@
# Copyright, 2007 - Paul McGuire
#
# Simple example of using withAttribute parse action helper
-# to define
+# to define
#
data = """\
<td align=right width=80><font size=2 face="New Times Roman,Times,Serif">&nbsp;49.950&nbsp;</font></td>
diff --git a/examples/wordsToNum.py b/examples/wordsToNum.py
index 7cebbff..3d5c4b7 100644
--- a/examples/wordsToNum.py
+++ b/examples/wordsToNum.py
@@ -68,11 +68,11 @@ mag = Or(makeLit(s,v) for s,v in majorDefinitions)
wordprod = lambda t: reduce(mul,t)
wordsum = lambda t: sum(t)
-numPart = (((( units + Optional(hundreds) ).setParseAction(wordprod) +
- Optional(tens)).setParseAction(wordsum)
+numPart = (((( units + Optional(hundreds) ).setParseAction(wordprod) +
+ Optional(tens)).setParseAction(wordsum)
^ tens )
+ Optional(units) ).setParseAction(wordsum)
-numWords = OneOrMore( (numPart + Optional(mag)).setParseAction(wordprod)
+numWords = OneOrMore( (numPart + Optional(mag)).setParseAction(wordprod)
).setParseAction(wordsum) + StringEnd()
numWords.ignore(Literal("-"))
numWords.ignore(CaselessLiteral("and"))
@@ -103,4 +103,4 @@ test("seventy-seven thousand eight hundred and nineteen", 77819)
test("seven hundred seventy-seven thousand seven hundred and seventy-seven", 777777)
test("zero", 0)
test("forty two", 42)
-test("fourty two", 42) \ No newline at end of file
+test("fourty two", 42)
diff --git a/makeRelease.bat b/makeRelease.bat
index 692e572..7c07a3a 100644
--- a/makeRelease.bat
+++ b/makeRelease.bat
@@ -21,4 +21,3 @@ python setup.py bdist_wininst --target-version=2.7 --plat-name=win32
python setup.py bdist_wininst --target-version=3.3 --plat-name=win32
python setup.py bdist_wininst --target-version=3.4 --plat-name=win32
python setup.py bdist_wininst --target-version=3.5 --plat-name=win32
-
diff --git a/scrutinizer-pyenv.sh b/scrutinizer-pyenv.sh
index f6fa468..7da72fb 100755
--- a/scrutinizer-pyenv.sh
+++ b/scrutinizer-pyenv.sh
@@ -2,12 +2,10 @@
set -e
-pushd .
+pushd .
cd .pyenv
git fetch --tags
git checkout v1.2.7
popd
echo $PYTHON_VERSIONS | xargs -n1 pyenv install
-
-
diff --git a/simple_unit_tests.py b/simple_unit_tests.py
index c8bd9b6..875ded7 100644
--- a/simple_unit_tests.py
+++ b/simple_unit_tests.py
@@ -36,15 +36,15 @@ class PyparsingExpressionTestCase(unittest.TestCase):
for test_spec in self.tests:
# for each spec in the class's tests list, create a subtest
# that will either:
- # - parse the string with expected success, display the
+ # - parse the string with expected success, display the
# results, and validate the returned ParseResults
- # - or parse the string with expected failure, display the
+ # - or parse the string with expected failure, display the
# error message and mark the error location, and validate
# the location against an expected value
with self.subTest(test_spec=test_spec):
test_spec.expr.streamline()
- print("\n{} - {}({})".format(test_spec.desc,
- type(test_spec.expr).__name__,
+ print("\n{} - {}({})".format(test_spec.desc,
+ type(test_spec.expr).__name__,
test_spec.expr))
parsefn = getattr(test_spec.expr, test_spec.parse_fn)
@@ -218,7 +218,7 @@ class TestGroups(PyparsingExpressionTestCase):
tests = [
PpTestSpec(
desc = "Define multiple results names in groups",
- expr = pp.OneOrMore(pp.Group(pp.Word(pp.alphas)("key")
+ expr = pp.OneOrMore(pp.Group(pp.Word(pp.alphas)("key")
+ EQ
+ pp.pyparsing_common.number("value"))),
text = "range=5280 long=-138.52 lat=46.91",
@@ -226,7 +226,7 @@ class TestGroups(PyparsingExpressionTestCase):
),
PpTestSpec(
desc = "Define multiple results names in groups - use Dict to define results names using parsed keys",
- expr = pp.Dict(pp.OneOrMore(pp.Group(pp.Word(pp.alphas)
+ expr = pp.Dict(pp.OneOrMore(pp.Group(pp.Word(pp.alphas)
+ EQ
+ pp.pyparsing_common.number))),
text = "range=5280 long=-138.52 lat=46.91",
@@ -251,18 +251,18 @@ class TestParseAction(PyparsingExpressionTestCase):
desc = "Match with numeric string converted to int",
expr = pp.Word("0123456789").addParseAction(lambda t: int(t[0])),
text = "12345",
- expected_list = [12345], # note - result is type int, not str
+ expected_list = [12345], # note - result is type int, not str
),
PpTestSpec(
desc = "Use two parse actions to convert numeric string, then convert to datetime",
- expr = pp.Word(pp.nums).addParseAction(lambda t: int(t[0]),
+ expr = pp.Word(pp.nums).addParseAction(lambda t: int(t[0]),
lambda t: datetime.utcfromtimestamp(t[0])),
text = "1537415628",
expected_list = [datetime(2018, 9, 20, 3, 53, 48)],
),
PpTestSpec(
desc = "Use tokenMap for parse actions that operate on a single-length token",
- expr = pp.Word(pp.nums).addParseAction(pp.tokenMap(int),
+ expr = pp.Word(pp.nums).addParseAction(pp.tokenMap(int),
pp.tokenMap(datetime.utcfromtimestamp)),
text = "1537415628",
expected_list = [datetime(2018, 9, 20, 3, 53, 48)],
@@ -405,7 +405,7 @@ if __name__ == '__main__':
if sys.version_info[0] < 3:
print("simple_unit_tests.py runs on Python 3 only")
sys.exit(0)
-
+
import inspect
def get_decl_line_no(cls):
return inspect.getsourcelines(cls)[1]
@@ -413,7 +413,7 @@ if __name__ == '__main__':
# get all test case classes defined in this module and sort them by decl line no
test_case_classes = list(PyparsingExpressionTestCase.__subclasses__())
test_case_classes.sort(key=get_decl_line_no)
-
+
# make into a suite and run it - this will run the tests in the same order
# they are declared in this module
suite = unittest.TestSuite(cls() for cls in test_case_classes)
diff --git a/test/jsonParserTests.py b/test/jsonParserTests.py
index c8a42bb..61c6eb8 100644
--- a/test/jsonParserTests.py
+++ b/test/jsonParserTests.py
@@ -21,7 +21,7 @@ test1 = """
"WMDsFound" : false,
"IraqAlQaedaConnections" : null,
"Abbrev": "ISO 8879:1986",
- "GlossDef":
+ "GlossDef":
"A meta-markup language, used to create markup languages such as DocBook.",
"GlossSeeAlso": ["GML", "XML", "markup"],
"EmptyDict" : {},
@@ -50,7 +50,7 @@ test3 = """
"debug": "on",
"window": {
"title": "Sample Konfabulator Widget", "name": "main_window", "width": 500, "height": 500
- }, "image": {
+ }, "image": {
"src": "Images/Sun.png",
"name": "sun1", "hOffset": 250, "vOffset": 250, "alignment": "center"
}, "text": {
@@ -59,7 +59,7 @@ test3 = """
"style": "bold", "name": "text1", "hOffset": 250, "vOffset": 100, "alignment": "center",
"onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
}
-}}
+}}
"""
test4 = """
{"web-app": {
@@ -357,4 +357,3 @@ test5 = """
]
}}
"""
-
diff --git a/test/karthik.ini b/test/karthik.ini
index 0a7f594..785d0ea 100644
--- a/test/karthik.ini
+++ b/test/karthik.ini
@@ -5,7 +5,7 @@ result_dir = '/home/karthik/Projects/Results'
param_file = $result_dir/param_file
res_file = $result_dir/result_file
comment = 'this is a comment'
-; a line starting with ';' is a comment
+; a line starting with ';' is a comment
K = 8
simulate_K = 0
N = 4000
diff --git a/tox.ini b/tox.ini
index d9f614e..1058efd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,4 +6,3 @@ deps=-rrequirements-dev.txt
commands=
coverage run --parallel --branch simple_unit_tests.py
coverage run --parallel --branch unitTests.py
-
diff --git a/unitTests.py b/unitTests.py
index 0845451..fa7c0e5 100644
--- a/unitTests.py
+++ b/unitTests.py
@@ -10,7 +10,7 @@
from unittest import TestCase, TestSuite, TextTestRunner
import datetime
from pyparsing import ParseException
-import pyparsing as pp
+import pyparsing as pp
import sys
import pdb
@@ -79,11 +79,11 @@ BUFFER_OUTPUT = True
class ParseTestCase(TestCase):
def __init__(self):
super(ParseTestCase, self).__init__(methodName='_runTest')
-
+
def _runTest(self):
buffered_stdout = StringIO()
-
+
try:
with AutoReset(sys, 'stdout', 'stderr'):
try:
@@ -95,7 +95,7 @@ class ParseTestCase(TestCase):
finally:
print_("<<<< End of test",str(self))
- print_()
+ print_()
except Exception as exc:
if BUFFER_OUTPUT:
@@ -105,10 +105,10 @@ class ParseTestCase(TestCase):
def runTest(self):
pass
-
+
def __str__(self):
return self.__class__.__name__
-
+
class PyparsingTestInit(ParseTestCase):
def setUp(self):
from pyparsing import __version__ as pyparsingVersion
@@ -154,7 +154,7 @@ if 0:
#~ print min([wd.procBegin for wd in results.waferData])
#~ print max([results.waferData[k].procEnd for k in range(len(results.waferData))])
#~ print sum(results.levelStatsIV['MAX.'])
-
+
class ParseFourFnTest(ParseTestCase):
def runTest(self):
@@ -165,7 +165,7 @@ class ParseFourFnTest(ParseTestCase):
resultValue = fourFn.evaluateStack( fourFn.exprStack )
self.assertTrue(resultValue == ans, "failed to evaluate %s, got %f" % ( s, resultValue ))
print_(s, "->", resultValue)
-
+
from math import pi,exp
e = exp(1)
@@ -194,7 +194,7 @@ class ParseFourFnTest(ParseTestCase):
class ParseSQLTest(ParseTestCase):
def runTest(self):
import examples.simpleSQL as simpleSQL
-
+
def test(s, numToks, errloc=-1):
try:
sqlToks = flatten(simpleSQL.simpleSQL.parseString(s).asList())
@@ -224,7 +224,7 @@ class ParseSQLTest(ParseTestCase):
class ParseConfigFileTest(ParseTestCase):
def runTest(self):
from examples import configParse
-
+
def test(fnam,numToks,resCheckList):
print_("Parsing",fnam,"...", end=' ')
with open(fnam) as infile:
@@ -248,13 +248,13 @@ class ParseConfigFileTest(ParseTestCase):
chk[1],
var))
print_("OK")
-
- test("test/karthik.ini", 23,
- [ ("users.K","8"),
+
+ test("test/karthik.ini", 23,
+ [ ("users.K","8"),
("users.mod_scheme","'QPSK'"),
("users.Na", "K+2") ]
)
- test("examples/Setup.ini", 125,
+ test("examples/Setup.ini", 125,
[ ("Startup.audioinf", "M3i"),
("Languages.key1", "0x0003"),
("test.foo","bar") ] )
@@ -264,7 +264,7 @@ class ParseJSONDataTest(ParseTestCase):
from examples.jsonParser import jsonObject
from test.jsonParserTests import test1,test2,test3,test4,test5
from test.jsonParserTests import test1,test2,test3,test4,test5
-
+
expected = [
[],
[],
@@ -272,7 +272,7 @@ class ParseJSONDataTest(ParseTestCase):
[],
[],
]
-
+
import pprint
for t,exp in zip((test1,test2,test3,test4,test5),expected):
result = jsonObject.parseString(t)
@@ -286,7 +286,7 @@ class ParseCommaSeparatedValuesTest(ParseTestCase):
def runTest(self):
from pyparsing import commaSeparatedList
import string
-
+
testData = [
"a,b,c,100.2,,3",
"d, e, j k , m ",
@@ -299,7 +299,7 @@ class ParseCommaSeparatedValuesTest(ParseTestCase):
[ (3,'100.2'), (4,''), (5, '3') ],
[ (2, 'j k'), (3, 'm') ],
[ (0, "'Hello, World'"), (2, 'g'), (3, '') ],
- [ (0,'John Doe'), (1, '123 Main St.'), (2, 'Cleveland'), (3, 'Ohio') ],
+ [ (0,'John Doe'), (1, '123 Main St.'), (2, 'Cleveland'), (3, 'Ohio') ],
[ (0,'Jane Doe'), (1, '456 St. James St.'), (2, 'Los Angeles'), (3, 'California') ]
]
for line,tests in zip(testData, testVals):
@@ -317,9 +317,9 @@ class ParseEBNFTest(ParseTestCase):
def runTest(self):
from examples import ebnf
from pyparsing import Word, quotedString, alphas, nums,ParserElement
-
+
print_('Constructing EBNF parser with pyparsing...')
-
+
grammar = '''
syntax = (syntax_rule), {(syntax_rule)};
syntax_rule = meta_identifier, '=', definitions_list, ';';
@@ -332,19 +332,19 @@ class ParseEBNFTest(ParseTestCase):
optional_sequence = '[', definitions_list, ']';
repeated_sequence = '{', definitions_list, '}';
grouped_sequence = '(', definitions_list, ')';
- (*
+ (*
terminal_string = "'", character - "'", {character - "'"}, "'" |
'"', character - '"', {character - '"'}, '"';
meta_identifier = letter, {letter | digit};
- integer = digit, {digit};
+ integer = digit, {digit};
*)
'''
-
+
table = {}
table['terminal_string'] = quotedString
table['meta_identifier'] = Word(alphas+"_", alphas+"_"+nums)
table['integer'] = Word(nums)
-
+
print_('Parsing EBNF grammar with EBNF parser...')
parsers = ebnf.parse(grammar, table)
ebnf_parser = parsers['syntax']
@@ -355,7 +355,7 @@ class ParseEBNFTest(ParseTestCase):
print_('Parsing EBNF grammar with generated EBNF parser...')
parsed_chars = ebnf_parser.parseString(grammar)
parsed_char_len = len(parsed_chars)
-
+
print_("],\n".join(str( parsed_chars.asList() ).split("],")))
self.assertEqual(len(flatten(parsed_chars.asList())), 98, "failed to tokenize grammar correctly")
@@ -390,7 +390,7 @@ class ParseIDLTest(ParseTestCase):
typedef string[10] tenStrings;
typedef sequence<string> stringSeq;
typedef sequence< sequence<string> > stringSeqSeq;
-
+
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
@@ -404,14 +404,14 @@ class ParseIDLTest(ParseTestCase):
* a block comment *
*/
typedef string[10] tenStrings;
- typedef
+ typedef
/** ** *** **** *
* a block comment *
*/
sequence<string> /*comment inside an And */ stringSeq;
/* */ /**/ /***/ /****/
typedef sequence< sequence<string> > stringSeqSeq;
-
+
interface QoSAdmin {
stringSeq method1( in string arg1, inout long arg2 );
stringSeqSeq method2( in string arg1, inout long arg2, inout long arg3);
@@ -431,7 +431,7 @@ class ParseIDLTest(ParseTestCase):
string msg;
sequence<string> dataStrings;
};
-
+
interface TestInterface
{
void method1( in string arg1, inout long arg2 );
@@ -440,34 +440,34 @@ class ParseIDLTest(ParseTestCase):
)
test(
"""
- module Test1
+ module Test1
{
exception TestException
{
string msg;
];
-
+
interface TestInterface
{
- void method1( in string arg1, inout long arg2 )
+ void method1( in string arg1, inout long arg2 )
raises ( TestException );
};
};
- """, 0, 57
+ """, 0, 56
)
test(
"""
- module Test1
+ module Test1
{
exception TestException
{
string msg;
};
-
+
};
""", 13
)
-
+
class ParseVerilogTest(ParseTestCase):
def runTest(self):
pass
@@ -475,7 +475,7 @@ class ParseVerilogTest(ParseTestCase):
class RunExamplesTest(ParseTestCase):
def runTest(self):
pass
-
+
class ScanStringTest(ParseTestCase):
def runTest(self):
from pyparsing import Word, Combine, Suppress, CharsNotIn, nums, StringEnd
@@ -520,12 +520,12 @@ class ScanStringTest(ParseTestCase):
timeServerPattern = (tdStart + ipAddress.setResultsName("ipAddr") + tdEnd
+ tdStart + CharsNotIn("<").setResultsName("loc") + tdEnd)
servers = [srvr.ipAddr for srvr,startloc,endloc in timeServerPattern.scanString( testdata )]
-
+
print_(servers)
self.assertEqual(servers,
['129.6.15.28', '129.6.15.29', '132.163.4.101', '132.163.4.102', '132.163.4.103'],
"failed scanString()")
-
+
# test for stringEnd detection in scanString
foundStringEnds = [ r for r in StringEnd().scanString("xyzzy") ]
print_(foundStringEnds)
@@ -625,11 +625,11 @@ class QuotedStringsTest(ParseTestCase):
expr.parseString(test_string)
except Exception:
continue
-
+
class CaselessOneOfTest(ParseTestCase):
def runTest(self):
from pyparsing import oneOf,ZeroOrMore
-
+
caseless1 = oneOf("d a b c aA B A C", caseless=True)
caseless1str = str( caseless1 )
print_(caseless1str)
@@ -643,7 +643,7 @@ class CaselessOneOfTest(ParseTestCase):
print_(res)
self.assertEqual(len(res), 4, "caseless1 oneOf failed")
self.assertEqual("".join(res), "aA"*4,"caseless1 CaselessLiteral return failed")
-
+
res = ZeroOrMore(caseless2).parseString("AAaaAaaA")
print_(res)
self.assertEqual(len(res), 4, "caseless2 oneOf failed")
@@ -652,9 +652,9 @@ class CaselessOneOfTest(ParseTestCase):
class AsXMLTest(ParseTestCase):
def runTest(self):
-
+
# test asXML()
-
+
aaa = pp.Word("a").setResultsName("A")
bbb = pp.Group(pp.Word("b")).setResultsName("B")
ccc = pp.Combine(":" + pp.Word("c")).setResultsName("C")
@@ -717,7 +717,7 @@ class AsXMLTest(ParseTestCase):
" <ITEM>b</ITEM>",
" </B>",
" <A>a</A>",
- "</TEST>",
+ "</TEST>",
] ), \
"failed to generate XML correctly, filtering unnamed items: " + xml
@@ -725,7 +725,7 @@ class AsXMLTest2(ParseTestCase):
def runTest(self):
from pyparsing import Suppress,Optional,CharsNotIn,Combine,ZeroOrMore,Word,\
Group,Literal,alphas,alphanums,delimitedList,OneOrMore
-
+
EndOfLine = Word("\n").setParseAction(lambda s,l,t: [' '])
whiteSpace=Word('\t ')
Mexpr = Suppress(Optional(whiteSpace)) + CharsNotIn('\\"\t \n') + Optional(" ") + \
@@ -741,9 +741,9 @@ class AsXMLTest2(ParseTestCase):
QuotedReducedString = Combine( Suppress(_dblQuote) + ZeroOrMore( reducedString |
_escapedChar ) + \
Suppress(_dblQuote )).streamline()
-
+
Manifest_string = QuotedReducedString.setResultsName('manifest_string')
-
+
Identifier = Word( alphas, alphanums+ '_$' ).setResultsName("identifier")
Index_string = CharsNotIn('\\";\n')
Index_string.setName('index_string')
@@ -751,15 +751,15 @@ class AsXMLTest2(ParseTestCase):
Group(delimitedList(Manifest_string, delim=',')) | \
Index_string
).setResultsName('value')
-
+
IndexKey = Identifier.setResultsName('key')
IndexKey.setName('key')
Index_clause = Group(IndexKey + Suppress(':') + Optional(Index_term_list))
Index_clause.setName('index_clause')
- Index_list = Index_clause.setResultsName('index')
+ Index_list = Index_clause.setResultsName('index')
Index_list.setName('index_list')
Index_block = Group('indexing' + Group(OneOrMore(Index_list + Suppress(';')))).setResultsName('indexes')
-
+
class CommentParserTest(ParseTestCase):
def runTest(self):
@@ -774,7 +774,7 @@ class CommentParserTest(ParseTestCase):
/* /*/
/** /*/
/*** /*/
- /*
+ /*
ablsjdflj
*/
"""
@@ -792,7 +792,7 @@ class CommentParserTest(ParseTestCase):
<!---- /-->
<!---- /- ->
<!---- / -- >
- <!--
+ <!--
ablsjdflj
-->
"""
@@ -820,10 +820,10 @@ class ParseExpressionResultsTest(ParseTestCase):
ab = (a + b).setName("AB")
abc = (ab + c).setName("ABC")
word = Word(alphas).setName("word")
-
+
#~ words = OneOrMore(word).setName("words")
words = Group(OneOrMore(~a + word)).setName("words")
-
+
#~ phrase = words.setResultsName("Head") + \
#~ ( abc ^ ab ^ a ).setResultsName("ABC") + \
#~ words.setResultsName("Tail")
@@ -833,7 +833,7 @@ class ParseExpressionResultsTest(ParseTestCase):
phrase = words.setResultsName("Head") + \
Group( a + Optional(b + Optional(c)) ).setResultsName("ABC") + \
words.setResultsName("Tail")
-
+
results = phrase.parseString("xavier yeti alpha beta charlie will beaver")
print_(results,results.Head, results.ABC,results.Tail)
for key,ln in [("Head",2), ("ABC",3), ("Tail",2)]:
@@ -844,10 +844,10 @@ class ParseExpressionResultsTest(ParseTestCase):
class ParseKeywordTest(ParseTestCase):
def runTest(self):
from pyparsing import Literal,Keyword
-
+
kw = Keyword("if")
lit = Literal("if")
-
+
def test(s,litShouldPass,kwShouldPass):
print_("Test",s)
print_("Match Literal", end=' ')
@@ -860,7 +860,7 @@ class ParseKeywordTest(ParseTestCase):
else:
if not litShouldPass:
self.assertTrue(False, "Literal matched %s, should not have" % s)
-
+
print_("Match Keyword", end=' ')
try:
print_(kw.parseString(s))
@@ -875,9 +875,9 @@ class ParseKeywordTest(ParseTestCase):
test("ifOnlyIfOnly", True, False)
test("if(OnlyIfOnly)", True, True)
test("if (OnlyIf Only)", True, True)
-
+
kw = Keyword("if",caseless=True)
-
+
test("IFOnlyIfOnly", False, False)
test("If(OnlyIfOnly)", False, True)
test("iF (OnlyIf Only)", False, True)
@@ -923,7 +923,7 @@ class ParseExpressionResultsAccumulateTest(ParseTestCase):
Query = Goal.setResultsName("head") + ":-" + delimitedList(Goal | Comparison_Predicate)
test="""Q(x,y,z):-Bloo(x,"Mitsis",y),Foo(y,z,1243),y>28,x<12,x>3"""
-
+
queryRes = Query.parseString(test)
print_("pred",queryRes.pred)
self.assertEqual(queryRes.pred.asList(), [['y', '>', '28'], ['x', '<', '12'], ['x', '>', '3']],
@@ -988,12 +988,12 @@ class ReStringRangeTest(ParseTestCase):
class SkipToParserTests(ParseTestCase):
def runTest(self):
-
+
from pyparsing import Literal, SkipTo, NotAny, cStyleComment, ParseBaseException
-
+
thingToFind = Literal('working')
testExpr = SkipTo(Literal(';'), include=True, ignore=cStyleComment) + thingToFind
-
+
def tryToParse (someText, fail_expected=False):
try:
print_(testExpr.parseString(someText))
@@ -1002,7 +1002,7 @@ class SkipToParserTests(ParseTestCase):
print_("Exception %s while parsing string %s" % (e,repr(someText)))
self.assertTrue(fail_expected and isinstance(e,ParseBaseException),
"Exception %s while parsing string %s" % (e,repr(someText)))
-
+
# This first test works, as the SkipTo expression is immediately following the ignore expression (cStyleComment)
tryToParse('some text /* comment with ; in */; working')
# This second test previously failed, as there is text following the ignore expression, and before the SkipTo expression.
@@ -1038,7 +1038,7 @@ class CustomQuotesTest(ParseTestCase):
hatQuotes = QuotedString('^','\\')
hatQuotes1 = QuotedString('^','\\','^^')
dblEqQuotes = QuotedString('==','\\')
-
+
def test(quoteExpr, expected):
print_(quoteExpr.pattern)
print_(quoteExpr.searchString(testString))
@@ -1049,7 +1049,7 @@ class CustomQuotesTest(ParseTestCase):
"failed to match %s, expected '%s', got '%s'" % (quoteExpr, expected,
quoteExpr.searchString(testString)[0]))
print_()
-
+
test(colonQuotes, r"sdf:jls:djf")
test(dashQuotes, r"sdf:jls::-djf: sl")
test(hatQuotes, r"sdf:jls")
@@ -1206,7 +1206,7 @@ class InfixNotationGrammarTest1(ParseTestCase):
def runTest(self):
from pyparsing import Word,nums,alphas,Literal,oneOf,infixNotation,opAssoc
import ast
-
+
integer = Word(nums).setParseAction(lambda t:int(t[0]))
variable = Word(alphas,exact=1)
operand = integer | variable
@@ -1257,7 +1257,7 @@ class InfixNotationGrammarTest2(ParseTestCase):
def runTest(self):
from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc
-
+
boolVars = { "True":True, "False":False }
class BoolOperand(object):
reprsymbol = ''
@@ -1266,7 +1266,7 @@ class InfixNotationGrammarTest2(ParseTestCase):
def __str__(self):
sep = " %s " % self.reprsymbol
return "(" + sep.join(map(str,self.args)) + ")"
-
+
class BoolAnd(BoolOperand):
reprsymbol = '&'
def __bool__(self):
@@ -1280,7 +1280,7 @@ class InfixNotationGrammarTest2(ParseTestCase):
return True
class BoolOr(BoolOperand):
- reprsymbol = '|'
+ reprsymbol = '|'
def __bool__(self):
for a in self.args:
if isinstance(a,str):
@@ -1332,15 +1332,15 @@ class InfixNotationGrammarTest2(ParseTestCase):
res = boolExpr.parseString(t)[0]
print_(t,'\n', res, '=', bool(res),'\n')
-
+
class InfixNotationGrammarTest3(ParseTestCase):
def runTest(self):
from pyparsing import infixNotation, Word, alphas, oneOf, opAssoc, nums, Literal
-
+
global count
count = 0
-
+
def evaluate_int(t):
global count
value = int(t[0])
@@ -1481,16 +1481,16 @@ class PickleTest_Greeting():
def __init__(self, toks):
self.salutation = toks[0]
self.greetee = toks[1]
-
+
def __repr__(self):
- return "%s: {%s}" % (self.__class__.__name__,
+ return "%s: {%s}" % (self.__class__.__name__,
', '.join('%r: %r' % (k, getattr(self,k)) for k in sorted(self.__dict__)))
-
+
class ParseResultsPickleTest(ParseTestCase):
def runTest(self):
from pyparsing import makeHTMLTags, ParseResults
import pickle
-
+
# test 1
body = makeHTMLTags("BODY")[0]
result = body.parseString("<BODY BGCOLOR='#00FFBB' FGCOLOR=black>")
@@ -1528,7 +1528,7 @@ class ParseResultsPickleTest(ParseTestCase):
string = 'Good morning, Miss Crabtree!'
result = greeting.parseString(string)
-
+
for protocol in range(pickle.HIGHEST_PROTOCOL+1):
print_("Test pickle dump protocol", protocol)
try:
@@ -1567,7 +1567,7 @@ class ParseHTMLTagsTest(ParseTestCase):
<BODY/>
</BODY>
"""
- results = [
+ results = [
("startBody", False, "", ""),
("startBody", False, "#00FFCC", ""),
("startBody", True, "#00FFAA", ""),
@@ -1575,14 +1575,14 @@ class ParseHTMLTagsTest(ParseTestCase):
("startBody", True, "", ""),
("endBody", False, "", ""),
]
-
+
bodyStart, bodyEnd = pp.makeHTMLTags("BODY")
resIter = iter(results)
for t,s,e in (bodyStart | bodyEnd).scanString( test ):
print_(test[s:e], "->", t.asList())
(expectedType, expectedEmpty, expectedBG, expectedFG) = next(resIter)
-
- tType = t.getName()
+
+ tType = t.getName()
#~ print tType,"==",expectedType,"?"
self.assertTrue(tType in "startBody endBody".split(), "parsed token of unknown type '%s'" % tType)
self.assertEqual(tType, expectedType, "expected token of type %s, got %s" % (expectedType, tType))
@@ -1602,7 +1602,7 @@ class ParseHTMLTagsTest(ParseTestCase):
class UpcaseDowncaseUnicode(ParseTestCase):
def runTest(self):
-
+
import pyparsing as pp
import sys
if PY_3:
@@ -1625,7 +1625,7 @@ class UpcaseDowncaseUnicode(ParseTestCase):
uword = pp.Word(ualphas).setParseAction(pp.downcaseTokens)
print_(uword.searchString(a))
-
+
kw = pp.Keyword('mykey', caseless=True).setParseAction(pp.upcaseTokens).setResultsName('rname')
ret = kw.parseString('mykey')
print(ret.rname)
@@ -1662,15 +1662,15 @@ class UpcaseDowncaseUnicode(ParseTestCase):
class ParseUsingRegex(ParseTestCase):
def runTest(self):
-
+
import re
-
+
signedInt = pp.Regex(r'[-+][0-9]+')
unsignedInt = pp.Regex(r'[0-9]+')
simpleString = pp.Regex(r'("[^\"]*")|(\'[^\']*\')')
namedGrouping = pp.Regex(r'("(?P<content>[^\"]*)")')
compiledRE = pp.Regex(re.compile(r'[A-Z]+'))
-
+
def testMatch (expression, instring, shouldPass, expectedString=None):
if shouldPass:
try:
@@ -1694,7 +1694,7 @@ class ParseUsingRegex(ParseTestCase):
(repr(expression), repr(instring)))
return True
return False
-
+
# These should fail
self.assertTrue(testMatch(signedInt, '1234 foo', False), "Re: (1) passed, expected fail")
self.assertTrue(testMatch(signedInt, ' +foo', False), "Re: (2) passed, expected fail")
@@ -1734,7 +1734,7 @@ class ParseUsingRegex(ParseTestCase):
print_(e)
else:
self.assertTrue(False, "failed to reject invalid RE")
-
+
invRe = pp.Regex('')
class RegexAsTypeTest(ParseTestCase):
@@ -1771,7 +1771,7 @@ class RegexSubTest(ParseTestCase):
result = expr.transformString("This is the title: <title>")
print_(result)
self.assertEqual(result, "This is the title: 'Richard III'", "incorrect Regex.sub result with simple string")
-
+
print_("test sub with re string")
expr = pp.Regex(r"([Hh]\d):\s*(.*)").sub(r"<\1>\2</\1>")
result = expr.transformString("h1: This is the main heading\nh2: This is the sub-heading")
@@ -1831,8 +1831,8 @@ class PrecededByTest(ParseTestCase):
(interesting_num, [384, 8324], {'prefix': ['c', 'b']}),
(semi_interesting_num, [9293874, 293], {}),
(boring_num, [404], {}),
- (crazy_num, [2939], {'prefix': ['^%$']}),
- (finicky_num, [2939], {}),
+ (crazy_num, [2939], {'prefix': ['^%$']}),
+ (finicky_num, [2939], {}),
(very_boring_num, [404], {}),
]:
print(expr.searchString(s))
@@ -1851,16 +1851,16 @@ class PrecededByTest(ParseTestCase):
class CountedArrayTest(ParseTestCase):
def runTest(self):
from pyparsing import Word,nums,OneOrMore,countedArray
-
+
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer)
-
+
r = OneOrMore(countedField).parseString( testString )
print_(testString)
print_(r.asList())
-
+
self.assertEqual(r.asList(), [[5,7],[0,1,2,3,4,5],[],[5,4,3]],
"Failed matching countedArray, got " + str(r.asList()))
@@ -1868,17 +1868,17 @@ class CountedArrayTest2(ParseTestCase):
# addresses bug raised by Ralf Vosseler
def runTest(self):
from pyparsing import Word,nums,OneOrMore,countedArray
-
+
testString = "2 5 7 6 0 1 2 3 4 5 0 3 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer)
-
+
dummy = Word("A")
r = OneOrMore(dummy ^ countedField).parseString( testString )
print_(testString)
print_(r.asList())
-
+
self.assertEqual(r.asList(), [[5,7],[0,1,2,3,4,5],[],[5,4,3]],
"Failed matching countedArray, got " + str(r.asList()))
@@ -1888,17 +1888,17 @@ class CountedArrayTest3(ParseTestCase):
from pyparsing import Word,nums,OneOrMore,countedArray,alphas
int_chars = "_"+alphas
array_counter = Word(int_chars).setParseAction(lambda t: int_chars.index(t[0]))
-
+
# 123456789012345678901234567890
testString = "B 5 7 F 0 1 2 3 4 5 _ C 5 4 3"
integer = Word(nums).setParseAction(lambda t: int(t[0]))
countedField = countedArray(integer, intExpr=array_counter)
-
+
r = OneOrMore(countedField).parseString( testString )
print_(testString)
print_(r.asList())
-
+
self.assertEqual(r.asList(), [[5,7],[0,1,2,3,4,5],[],[5,4,3]],
"Failed matching countedArray, got " + str(r.asList()))
@@ -2026,7 +2026,7 @@ class LineAndStringEndTest(ParseTestCase):
k = Regex(r'a+',flags=re.S+re.M)
k = k.parseWithTabs()
k = k.leaveWhitespace()
-
+
tests = [
(r'aaa',['aaa']),
(r'\naaa',None),
@@ -2044,7 +2044,7 @@ class LineAndStringEndTest(ParseTestCase):
class VariableParseActionArgsTest(ParseTestCase):
def runTest(self):
-
+
pa3 = lambda s,l,t: t
pa2 = lambda l,t: t
pa1 = lambda t: t
@@ -2060,7 +2060,7 @@ class VariableParseActionArgsTest(ParseTestCase):
return t
class Callable0(object):
def __call__(self):
- return
+ return
class CallableS3(object):
#~ @staticmethod
def __call__(s,l,t):
@@ -2101,7 +2101,7 @@ class VariableParseActionArgsTest(ParseTestCase):
def __call__(cls):
return
__call__=classmethod(__call__)
-
+
class parseActionHolder(object):
#~ @staticmethod
def pa3(s,l,t):
@@ -2119,7 +2119,7 @@ class VariableParseActionArgsTest(ParseTestCase):
def pa0():
return
pa0=staticmethod(pa0)
-
+
def paArgs(*args):
print_(args)
return args[2]
@@ -2129,26 +2129,26 @@ class VariableParseActionArgsTest(ParseTestCase):
pass
def __str__(self):
return "A"
-
+
class ClassAsPA1(object):
def __init__(self,t):
print_("making a ClassAsPA1")
self.t = t
def __str__(self):
return self.t[0]
-
+
class ClassAsPA2(object):
def __init__(self,l,t):
self.t = t
def __str__(self):
return self.t[0]
-
+
class ClassAsPA3(object):
def __init__(self,s,l,t):
self.t = t
def __str__(self):
return self.t[0]
-
+
class ClassAsPAStarNew(tuple):
def __new__(cls, *args):
print_("make a ClassAsPAStarNew", args)
@@ -2165,7 +2165,7 @@ class VariableParseActionArgsTest(ParseTestCase):
#~ return self.t
from pyparsing import Literal,OneOrMore
-
+
A = Literal("A").setParseAction(pa0)
B = Literal("B").setParseAction(pa1)
C = Literal("C").setParseAction(pa2)
@@ -2188,7 +2188,7 @@ class VariableParseActionArgsTest(ParseTestCase):
T = Literal("T").setParseAction(parseActionHolder.pa1)
U = Literal("U").setParseAction(parseActionHolder.pa0)
V = Literal("V")
-
+
gg = OneOrMore( A | C | D | E | F | G | H |
I | J | K | L | M | N | O | P | Q | R | S | U | V | B | T)
testString = "VUTSRQPONMLKJIHGFEDCBA"
@@ -2219,7 +2219,7 @@ class EnablePackratParsing(ParseTestCase):
class SingleArgExceptionTest(ParseTestCase):
def runTest(self):
from pyparsing import ParseBaseException,ParseFatalException
-
+
msg = ""
raisedMsg = ""
testMessage = "just one arg"
@@ -2234,17 +2234,17 @@ class SingleArgExceptionTest(ParseTestCase):
class OriginalTextForTest(ParseTestCase):
def runTest(self):
from pyparsing import makeHTMLTags, originalTextFor
-
+
def rfn(t):
return "%s:%d" % (t.src, len("".join(t)))
makeHTMLStartTag = lambda tag: originalTextFor(makeHTMLTags(tag)[0], asString=False)
-
+
# use the lambda, Luke
- #~ start, imge = makeHTMLTags('IMG')
+ #~ start, imge = makeHTMLTags('IMG')
start = makeHTMLStartTag('IMG')
- # don't replace our fancy parse action with rfn,
+ # don't replace our fancy parse action with rfn,
# append rfn to the list of parse actions
#~ start.setParseAction(rfn)
start.addParseAction(rfn)
@@ -2275,11 +2275,11 @@ class PackratParsingCacheCopyTest(ParseTestCase):
arrayType= simpleType+ZeroOrMore('['+delimitedList(integer)+']')
varType = arrayType | simpleType
varDec = varType + delimitedList(id + Optional('='+integer))+';'
-
+
codeBlock = Literal('{}')
-
+
funcDef = Optional(varType | 'void')+id+'('+(delimitedList(varType+id)|'void'|empty)+')'+codeBlock
-
+
program = varDec | funcDef
input = 'int f(){}'
results = program.parseString(input)
@@ -2309,7 +2309,7 @@ class PackratParsingCacheCopyTest2(ParseTestCase):
class ParseResultsDelTest(ParseTestCase):
def runTest(self):
from pyparsing import OneOrMore, Word, alphas, nums
-
+
grammar = OneOrMore(Word(nums))("ints") + OneOrMore(Word(alphas))("words")
res = grammar.parseString("123 456 ABC DEF")
print_(res.dump())
@@ -2327,17 +2327,17 @@ class WithAttributeParseActionTest(ParseTestCase):
def runTest(self):
"""
This unit test checks withAttribute in these ways:
-
+
* Argument forms as keywords and tuples
* Selecting matching tags by attribute
* Case-insensitive attribute matching
* Correctly matching tags having the attribute, and rejecting tags not having the attribute
-
+
(Unit test written by voigts as part of the Google Highly Open Participation Contest)
"""
-
+
from pyparsing import makeHTMLTags, Word, withAttribute, withClass, nums
-
+
data = """
<a>1</a>
<a b="x">2</a>
@@ -2347,16 +2347,16 @@ class WithAttributeParseActionTest(ParseTestCase):
<a class="boo">8</a>
"""
tagStart, tagEnd = makeHTMLTags("a")
-
+
expr = tagStart + Word(nums).setResultsName("value") + tagEnd
-
- expected = ([['a', ['b', 'x'], False, '2', '</a>'],
+
+ expected = ([['a', ['b', 'x'], False, '2', '</a>'],
['a', ['b', 'x'], False, '3', '</a>']],
- [['a', ['b', 'x'], False, '2', '</a>'],
+ [['a', ['b', 'x'], False, '2', '</a>'],
['a', ['b', 'x'], False, '3', '</a>']],
[['a', ['class', 'boo'], False, '8', '</a>']],
)
-
+
for attrib, exp in zip([
withAttribute(b="x"),
#withAttribute(B="x"),
@@ -2364,10 +2364,10 @@ class WithAttributeParseActionTest(ParseTestCase):
#withAttribute(("B","x")),
withClass("boo"),
], expected):
-
+
tagStart.setParseAction(attrib)
result = expr.searchString(data)
-
+
print_(result.dump())
self.assertEqual(result.asList(), exp, "Failed test, expected %s, got %s" % (expected, result.asList()))
@@ -2383,7 +2383,7 @@ class NestedExpressionsTest(ParseTestCase):
- use of input data containing nesting delimiters
- correct grouping of parsed tokens according to nesting of opening
and closing delimiters in the input string
-
+
(Unit test written by christoph... as part of the Google Highly Open Participation Contest)
"""
from pyparsing import nestedExpr, Literal, Regex, restOfLine, quotedString
@@ -2430,9 +2430,9 @@ class NestedExpressionsTest(ParseTestCase):
print_("\nLiteral expressions for opener and closer")
opener,closer = list(map(Literal, "bar baz".split()))
- expr = nestedExpr(opener, closer,
- content=Regex(r"([^b ]|b(?!a)|ba(?![rz]))+"))
-
+ expr = nestedExpr(opener, closer,
+ content=Regex(r"([^b ]|b(?!a)|ba(?![rz]))+"))
+
teststring = "barbar ax + bybaz*Cbaz"
expected = [[['ax', '+', 'by'], '*C']]
# expr = nestedExpr(opener, closer)
@@ -2459,7 +2459,7 @@ class NestedExpressionsTest(ParseTestCase):
#Lisp-ish comments, using a standard bit of pyparsing, and an Or.
print_("\nUse ignore expression (2)")
- comment = ';;' + restOfLine
+ comment = ';;' + restOfLine
teststring = \
"""
@@ -2467,7 +2467,7 @@ class NestedExpressionsTest(ParseTestCase):
(display greeting))
"""
- expected = [['let', [['greeting', '"Hello, )world!"']], ';;', '(foo bar',
+ expected = [['let', [['greeting', '"Hello, )world!"']], ';;', '(foo bar',
['display', 'greeting']]]
expr = nestedExpr(ignoreExpr=(comment ^ quotedString))
result = expr.parseString(teststring)
@@ -2479,7 +2479,7 @@ class WordExcludeTest(ParseTestCase):
def runTest(self):
from pyparsing import Word, printables
allButPunc = Word(printables, excludeChars=".,:;-_!?")
-
+
test = "Hello, Mr. Ed, it's Wilbur!"
result = allButPunc.searchString(test).asList()
print_(result)
@@ -2488,9 +2488,9 @@ class WordExcludeTest(ParseTestCase):
class ParseAllTest(ParseTestCase):
def runTest(self):
from pyparsing import Word, cppStyleComment
-
+
testExpr = Word("A")
-
+
tests = [
("AAAAA", False, True),
("AAAAA", True, True),
@@ -2525,14 +2525,14 @@ class ParseAllTest(ParseTestCase):
class GreedyQuotedStringsTest(ParseTestCase):
def runTest(self):
from pyparsing import QuotedString, sglQuotedString, dblQuotedString, quotedString, delimitedList
-
+
src = """\
"string1", "strin""g2"
'string1', 'string2'
^string1^, ^string2^
<string1>, <string2>"""
-
- testExprs = (sglQuotedString, dblQuotedString, quotedString,
+
+ testExprs = (sglQuotedString, dblQuotedString, quotedString,
QuotedString('"', escQuote='""'), QuotedString("'", escQuote="''"),
QuotedString("^"), QuotedString("<",endQuoteChar=">"))
for expr in testExprs:
@@ -2545,11 +2545,11 @@ class GreedyQuotedStringsTest(ParseTestCase):
from pyparsing import alphas, nums, Word
src = """'ms1',1,0,'2009-12-22','2009-12-22 10:41:22') ON DUPLICATE KEY UPDATE sent_count = sent_count + 1, mtime = '2009-12-22 10:41:22';"""
tok_sql_quoted_value = (
- QuotedString("'", "\\", "''", True, False) ^
+ QuotedString("'", "\\", "''", True, False) ^
QuotedString('"', "\\", '""', True, False))
tok_sql_computed_value = Word(nums)
tok_sql_identifier = Word(alphas)
-
+
val = tok_sql_quoted_value | tok_sql_computed_value | tok_sql_identifier
vals = delimitedList(val)
print_(vals.parseString(src))
@@ -2583,14 +2583,14 @@ class WordBoundaryExpressionsTest(ParseTestCase):
[['D', 'G'], ['A'], ['C', 'F'], ['I'], ['E'], ['A', 'I']],
[['J', 'M', 'P'], [], ['L', 'R'], ['O'], [], ['O']],
[['S', 'V'], ['Y'], ['X', 'Z'], ['U'], [], ['U', 'Y']],
- [['D', 'G', 'J', 'M', 'P', 'S', 'V'],
- ['A', 'Y'],
- ['C', 'F', 'L', 'R', 'X', 'Z'],
- ['I', 'O', 'U'],
+ [['D', 'G', 'J', 'M', 'P', 'S', 'V'],
+ ['A', 'Y'],
+ ['C', 'F', 'L', 'R', 'X', 'Z'],
+ ['I', 'O', 'U'],
['E'],
['A', 'I', 'O', 'U', 'Y']],
]
-
+
for t,expected in zip(tests, expectedResult):
print_(t)
results = [flatten(e.searchString(t).asList()) for e in [
@@ -2628,7 +2628,7 @@ class RequiredEachTest(ParseTestCase):
class OptionalEachTest(ParseTestCase):
def runTest1(self):
from pyparsing import Optional, Keyword
-
+
the_input = "Major Tal Weiss"
parser1 = (Optional('Tal') + Optional('Weiss')) & Keyword('Major')
parser2 = Optional(Optional('Tal') + Optional('Weiss')) & Keyword('Major')
@@ -2678,19 +2678,19 @@ class OptionalEachTest(ParseTestCase):
self.assertTrue(False, "failed to raise exception when required element is missing")
except ParseException as pe:
pass
-
+
def runTest4(self):
from pyparsing import pyparsing_common, ZeroOrMore, Group
-
- expr = ((~pyparsing_common.iso8601_date + pyparsing_common.integer("id"))
+
+ expr = ((~pyparsing_common.iso8601_date + pyparsing_common.integer("id"))
& ZeroOrMore(Group(pyparsing_common.iso8601_date)("date*")))
-
+
expr.runTests("""
1999-12-31 100 2001-01-01
42
""")
-
-
+
+
def runTest(self):
self.runTest1()
self.runTest2()
@@ -2735,7 +2735,7 @@ class MarkInputLineTest(ParseTestCase):
def runTest(self):
samplestr1 = "DOB 100-10-2010;more garbage\nID PARI12345678;more garbage"
-
+
from pyparsing import Regex, Word, alphanums, restOfLine
dob_ref = "DOB" + Regex(r"\d{2}-\d{2}-\d{4}")("dob")
@@ -2753,7 +2753,7 @@ class LocatedExprTest(ParseTestCase):
# 012345678901234567890123456789012345678901234567890
samplestr1 = "DOB 10-10-2010;more garbage;ID PARI12345678 ;more garbage"
-
+
from pyparsing import Regex, Word, alphanums, restOfLine, locatedExpr
id_ref = locatedExpr("ID" + Word(alphanums,exact=12)("id"))
@@ -2808,7 +2808,7 @@ class AddConditionTest(ParseTestCase):
numParser.addParseAction(lambda s,l,t: int(t[0]))
numParser.addCondition(lambda s,l,t: t[0] % 2)
numParser.addCondition(lambda s,l,t: t[0] >= 7)
-
+
result = numParser.searchString("1 2 3 4 5 6 7 8 9 10")
print_(result.asList())
self.assertEqual(result.asList(), [[7],[9]], "failed to properly process conditions")
@@ -2838,8 +2838,8 @@ class PatientOrTest(ParseTestCase):
def runTest(self):
import pyparsing as pp
- # Two expressions and a input string which could - syntactically - be matched against
- # both expressions. The "Literal" expression is considered invalid though, so this PE
+ # Two expressions and a input string which could - syntactically - be matched against
+ # both expressions. The "Literal" expression is considered invalid though, so this PE
# should always detect the "Word" expression.
def validate(token):
if token[0] == "def":
@@ -2850,9 +2850,9 @@ class PatientOrTest(ParseTestCase):
b = pp.Literal("def").setName("Literal").setParseAction(validate)#.setDebug()
c = pp.Literal("d").setName("d")#.setDebug()
- # The "Literal" expressions's ParseAction is not executed directly after syntactically
- # detecting the "Literal" Expression but only after the Or-decision has been made
- # (which is too late)...
+ # The "Literal" expressions's ParseAction is not executed directly after syntactically
+ # detecting the "Literal" Expression but only after the Or-decision has been made
+ # (which is too late)...
try:
result = (a ^ b ^ c).parseString("def")
self.assertEqual(result.asList(), ['de'], "failed to select longest match, chose %s" % result)
@@ -2865,7 +2865,7 @@ class PatientOrTest(ParseTestCase):
class EachWithOptionalWithResultsNameTest(ParseTestCase):
def runTest(self):
from pyparsing import Optional
-
+
result = (Optional('foo')('one') & Optional('bar')('two')).parseString('bar foo')
print_(result.dump())
self.assertEqual(sorted(result.keys()), ['one','two'])
@@ -2873,7 +2873,7 @@ class EachWithOptionalWithResultsNameTest(ParseTestCase):
class UnicodeExpressionTest(ParseTestCase):
def runTest(self):
from pyparsing import Literal, ParseException
-
+
z = 'a' | Literal(u'\u1111')
z.streamline()
try:
@@ -2916,7 +2916,7 @@ class SetNameTest(ParseTestCase):
commonHTMLEntity,
commonHTMLEntity.setParseAction(replaceHTMLEntity).transformString("lsdjkf &lt;lsdjkf&gt;&amp;&apos;&quot;&xyzzy;"),
]
-
+
expected = map(str.strip, """\
a | b | c
d | e | f
@@ -2932,7 +2932,7 @@ class SetNameTest(ParseTestCase):
(<any tag>, </any tag>)
common HTML entity
lsdjkf <lsdjkf>&'"&xyzzy;""".splitlines())
-
+
for t,e in zip(tests, expected):
tname = str(t)
self.assertEqual(tname, e, "expression name mismatch, expected {} got {}".format(e, tname))
@@ -2940,7 +2940,7 @@ class SetNameTest(ParseTestCase):
class TrimArityExceptionMaskingTest(ParseTestCase):
def runTest(self):
from pyparsing import Word
-
+
invalid_message = [
"<lambda>() takes exactly 1 argument (0 given)",
"<lambda>() missing 1 required positional argument: 't'"
@@ -2953,16 +2953,16 @@ class TrimArityExceptionMaskingTest(ParseTestCase):
class TrimArityExceptionMaskingTest2(ParseTestCase):
def runTest(self):
-
-
+
+
# construct deep call tree
def A():
import traceback
-
+
traceback.print_stack(limit=2)
from pyparsing import Word
-
+
invalid_message = [
"<lambda>() takes exactly 1 argument (0 given)",
"<lambda>() missing 1 required positional argument: 't'"
@@ -2976,28 +2976,28 @@ class TrimArityExceptionMaskingTest2(ParseTestCase):
def B():
A()
-
+
def C():
B()
-
+
def D():
C()
-
+
def E():
D()
-
+
def F():
E()
-
+
def G():
F()
-
+
def H():
G()
-
+
def J():
H()
-
+
def K():
J()
@@ -3007,7 +3007,7 @@ class OneOrMoreStopTest(ParseTestCase):
def runTest(self):
from pyparsing import (Word, OneOrMore, alphas, Keyword, CaselessKeyword,
nums, alphanums)
-
+
test = "BEGIN aaa bbb ccc END"
BEGIN,END = map(Keyword, "BEGIN,END".split(','))
body_word = Word(alphas).setName("word")
@@ -3016,7 +3016,7 @@ class OneOrMoreStopTest(ParseTestCase):
self.assertEqual(test, expr, "Did not successfully stop on ending expression %r" % ender)
number = Word(nums+',.()').setName("number with optional commas")
- parser= (OneOrMore(Word(alphanums+'-/.'), stopOn=number)('id').setParseAction(' '.join)
+ parser= (OneOrMore(Word(alphanums+'-/.'), stopOn=number)('id').setParseAction(' '.join)
+ number('data'))
result = parser.parseString(' XXX Y/123 1,234.567890')
self.assertEqual(result.asList(), ['XXX Y/123', '1,234.567890'],
@@ -3025,14 +3025,14 @@ class OneOrMoreStopTest(ParseTestCase):
class ZeroOrMoreStopTest(ParseTestCase):
def runTest(self):
from pyparsing import (Word, ZeroOrMore, alphas, Keyword, CaselessKeyword)
-
+
test = "BEGIN END"
BEGIN,END = map(Keyword, "BEGIN,END".split(','))
body_word = Word(alphas).setName("word")
for ender in (END, "END", CaselessKeyword("END")):
expr = BEGIN + ZeroOrMore(body_word, stopOn=ender) + END
self.assertEqual(test, expr, "Did not successfully stop on ending expression %r" % ender)
-
+
class NestedAsDictTest(ParseTestCase):
def runTest(self):
from pyparsing import Literal, Forward, alphanums, Group, delimitedList, Dict, Word, Optional
@@ -3100,7 +3100,7 @@ class RunTestsTest(ParseTestCase):
tests = """\
# normal data
1-3,2-4,6,8-10,16
-
+
# lone integer
11"""
results = indices.runTests(tests, printResults=False)[1]
@@ -3148,7 +3148,7 @@ class CommonExpressionsTest(ParseTestCase):
def runTest(self):
from pyparsing import pyparsing_common
import ast
-
+
success = pyparsing_common.mac_address.runTests("""
AA:BB:CC:DD:EE:FF
AA.BB.CC.DD.EE.FF
@@ -3183,13 +3183,13 @@ class CommonExpressionsTest(ParseTestCase):
0:0:0:0:0:0:A00:1
1080::8:800:200C:417A
::A00:1
-
+
# loopback address
::1
-
+
# the null address
::
-
+
# ipv4 compatibility form
::ffff:192.168.0.1
""")[0]
@@ -3198,7 +3198,7 @@ class CommonExpressionsTest(ParseTestCase):
success = pyparsing_common.ipv6_address.runTests("""
# too few values
1080:0:0:0:8:800:200C
-
+
# too many ::'s, only 1 allowed
2134::1234:4567::2444:2106
""", failureTests=True)[0]
@@ -3350,18 +3350,18 @@ class HTMLStripperTest(ParseTestCase):
class ExprSplitterTest(ParseTestCase):
def runTest(self):
-
+
from pyparsing import Literal, quotedString, pythonStyleComment, Empty
-
+
expr = Literal(';') + Empty()
expr.ignore(quotedString)
expr.ignore(pythonStyleComment)
-
-
+
+
sample = """
def main():
this_semi_does_nothing();
- neither_does_this_but_there_are_spaces_afterward();
+ neither_does_this_but_there_are_spaces_afterward();
a = "a;b"; return a # this is a comment; it has a semicolon!
def b():
@@ -3373,7 +3373,7 @@ class ExprSplitterTest(ParseTestCase):
def bar(self):
'''a docstring; with a semicolon'''
a = 10; b = 11; c = 12
-
+
# this comment; has several; semicolons
if self.spam:
x = 12; return x # so; does; this; one
@@ -3447,7 +3447,7 @@ class ExprSplitterTest(ParseTestCase):
class ParseFatalExceptionTest(ParseTestCase):
def runTest(self):
-
+
from pyparsing import Word, nums, ParseFatalException
success = False
@@ -3460,14 +3460,14 @@ class ParseFatalExceptionTest(ParseTestCase):
except Exception as e:
print(type(e))
print(e)
-
+
self.assertTrue(success, "bad handling of syntax error")
class InlineLiteralsUsingTest(ParseTestCase):
def runTest(self):
-
+
from pyparsing import ParserElement, Suppress, Literal, CaselessLiteral, Word, alphas, oneOf, CaselessKeyword, nums
-
+
with AutoReset(ParserElement, "_literalStringClass"):
ParserElement.inlineLiteralsUsing(Suppress)
wd = Word(alphas)
@@ -3490,13 +3490,13 @@ class InlineLiteralsUsingTest(ParseTestCase):
integer = Word(nums)
ParserElement.inlineLiteralsUsing(Literal)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
- result = date_str.parseString("1999/12/31")
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ result = date_str.parseString("1999/12/31")
self.assertEqual(result.asList(), ['1999', '/', '12', '/', '31'], "inlineLiteralsUsing(example 1) failed!")
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
- date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
self.assertEqual(result.asList(), ['1999', '12', '31'], "inlineLiteralsUsing(example 2) failed!")
@@ -3504,7 +3504,7 @@ class InlineLiteralsUsingTest(ParseTestCase):
class CloseMatchTest(ParseTestCase):
def runTest(self):
import pyparsing as pp
-
+
searchseq = pp.CloseMatch("ATCATCGAATGGA", 2)
_, results = searchseq.runTests("""
@@ -3528,7 +3528,7 @@ class CloseMatchTest(ParseTestCase):
if exp is not None:
self.assertEquals(r[1].mismatches, exp,
"fail CloseMatch between %r and %r" % (searchseq.match_string, r[0]))
- print(r[0], 'exc: %s' % r[1] if exp is None and isinstance(r[1], Exception)
+ print(r[0], 'exc: %s' % r[1] if exp is None and isinstance(r[1], Exception)
else ("no match", "match")[r[1].mismatches == exp])
class DefaultKeywordCharsTest(ParseTestCase):
@@ -3608,7 +3608,7 @@ class ParseActionExceptionTest(ParseTestCase):
def runTest(self):
import pyparsing as pp
import traceback
-
+
number = pp.Word(pp.nums)
def number_action():
raise IndexError # this is the important line!
@@ -3630,7 +3630,7 @@ class ParseActionExceptionTest(ParseTestCase):
class ParseActionNestingTest(ParseTestCase):
# tests Issue #22
def runTest(self):
-
+
vals = pp.OneOrMore(pp.pyparsing_common.integer)("int_values")
def add_total(tokens):
tokens['total'] = sum(tokens)
@@ -3639,7 +3639,7 @@ class ParseActionNestingTest(ParseTestCase):
results = vals.parseString("244 23 13 2343")
print(results.dump())
self.assertEqual(results.int_values.asDict(), {}, "noop parse action changed ParseResults structure")
-
+
name = pp.Word(pp.alphas)('name')
score = pp.Word(pp.nums + '.')('score')
nameScore = pp.Group(name + score)
@@ -3665,8 +3665,8 @@ class ParseResultsNameBelowUngroupedNameTest(ParseTestCase):
import pyparsing as pp
rule_num = pp.Regex("[0-9]+")("LIT_NUM*")
- list_num = pp.Group(pp.Literal("[")("START_LIST")
- + pp.delimitedList(rule_num)("LIST_VALUES")
+ list_num = pp.Group(pp.Literal("[")("START_LIST")
+ + pp.delimitedList(rule_num)("LIST_VALUES")
+ pp.Literal("]")("END_LIST"))("LIST")
test_string = "[ 1,2,3,4,5,6 ]"
@@ -3831,11 +3831,11 @@ class IndentedBlockTest(ParseTestCase):
class MiscellaneousParserTests(ParseTestCase):
def runTest(self):
-
+
runtests = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if IRON_PYTHON_ENV:
runtests = "ABCDEGHIJKLMNOPQRSTUVWXYZ"
-
+
# test making oneOf with duplicate symbols
if "A" in runtests:
print_("verify oneOf handles duplicate symbols")
@@ -3919,11 +3919,11 @@ class MiscellaneousParserTests(ParseTestCase):
g2 = pp.ZeroOrMore("C" + g1)
fwd << pp.Group(g2)
testValidation( fwd, "fwd", isValid=True )
-
+
fwd2 = pp.Forward()
fwd2 << pp.Group("A" | fwd2)
testValidation( fwd2, "fwd2", isValid=False )
-
+
fwd3 = pp.Forward()
fwd3 << pp.Optional("A") + fwd3
testValidation( fwd3, "fwd3", isValid=False )
@@ -3954,13 +3954,13 @@ class MiscellaneousParserTests(ParseTestCase):
from pyparsing import Keyword, Word, alphas, OneOrMore
IF,AND,BUT = map(Keyword, "if and but".split())
- ident = ~(IF | AND | BUT) + Word(alphas)("non-key")
+ ident = ~(IF | AND | BUT) + Word(alphas)("non-key")
scanner = OneOrMore(IF | AND | BUT | ident)
def getNameTester(s,l,t):
print(t, t.getName())
ident.addParseAction(getNameTester)
scanner.parseString("lsjd sldkjf IF Saslkj AND lsdjf")
-
+
# test ParseResults.get() method
if "H" in runtests:
print_("verify behavior of ParseResults.get()")
@@ -3975,7 +3975,7 @@ class MiscellaneousParserTests(ParseTestCase):
testGrammar = "A" + pp.Optional("B") + pp.Optional("C") + pp.Optional("D")
testGrammar.parseString("A")
testGrammar.parseString("AB")
-
+
# test creating Literal with empty string
if "J" in runtests:
print_('verify non-fatal usage of Literal("")')
@@ -4011,7 +4011,7 @@ class MiscellaneousParserTests(ParseTestCase):
grammar = abb | abc | aba
self.assertEqual(''.join(grammar.parseString( "aba" )), 'aba', "Packrat ABA failure!")
-
+
if "M" in runtests:
print_('verify behavior of setResultsName with OneOrMore and ZeroOrMore')
diff --git a/update_pyparsing_timestamp.py b/update_pyparsing_timestamp.py
index 4a82bd1..841b8ec 100644
--- a/update_pyparsing_timestamp.py
+++ b/update_pyparsing_timestamp.py
@@ -15,4 +15,3 @@ with open('pyparsing.py', encoding='utf-8') as oldpp:
with open('pyparsing.py','w', encoding='utf-8') as newpp:
newpp.write(new_code)
-