Diffstat (limited to 'examples/lucene_grammar.py')
-rw-r--r--  examples/lucene_grammar.py  42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/examples/lucene_grammar.py b/examples/lucene_grammar.py
index bf92509..ee4633c 100644
--- a/examples/lucene_grammar.py
+++ b/examples/lucene_grammar.py
@@ -9,19 +9,22 @@
import pyparsing as pp
from pyparsing import pyparsing_common as ppc
+
pp.ParserElement.enablePackrat()
-COLON,LBRACK,RBRACK,LBRACE,RBRACE,TILDE,CARAT = map(pp.Literal,":[]{}~^")
-LPAR,RPAR = map(pp.Suppress,"()")
+COLON, LBRACK, RBRACK, LBRACE, RBRACE, TILDE, CARAT = map(pp.Literal, ":[]{}~^")
+LPAR, RPAR = map(pp.Suppress, "()")
and_, or_, not_, to_ = map(pp.CaselessKeyword, "AND OR NOT TO".split())
keyword = and_ | or_ | not_ | to_
expression = pp.Forward()
-valid_word = pp.Regex(r'([a-zA-Z0-9*_+.-]|\\\\|\\([+\-!(){}\[\]^"~*?:]|\|\||&&))+').setName("word")
+valid_word = pp.Regex(
+ r'([a-zA-Z0-9*_+.-]|\\\\|\\([+\-!(){}\[\]^"~*?:]|\|\||&&))+'
+).setName("word")
valid_word.setParseAction(
- lambda t : t[0].replace('\\\\',chr(127)).replace('\\','').replace(chr(127),'\\')
- )
+ lambda t: t[0].replace("\\\\", chr(127)).replace("\\", "").replace(chr(127), "\\")
+)
string = pp.QuotedString('"')
@@ -37,24 +40,28 @@ field_name = valid_word().setName("fieldname")
incl_range_search = pp.Group(LBRACK - term("lower") + to_ + term("upper") + RBRACK)
excl_range_search = pp.Group(LBRACE - term("lower") + to_ + term("upper") + RBRACE)
range_search = incl_range_search("incl_range") | excl_range_search("excl_range")
-boost = (CARAT - number("boost"))
+boost = CARAT - number("boost")
string_expr = pp.Group(string + proximity_modifier) | string
word_expr = pp.Group(valid_word + fuzzy_modifier) | valid_word
-term << (pp.Optional(field_name("field") + COLON)
- + (word_expr | string_expr | range_search | pp.Group(LPAR + expression + RPAR))
- + pp.Optional(boost))
-term.setParseAction(lambda t:[t] if 'field' in t or 'boost' in t else None)
+term << (
+ pp.Optional(field_name("field") + COLON)
+ + (word_expr | string_expr | range_search | pp.Group(LPAR + expression + RPAR))
+ + pp.Optional(boost)
+)
+term.setParseAction(lambda t: [t] if "field" in t or "boost" in t else None)
-expression << pp.infixNotation(term,
+expression << pp.infixNotation(
+ term,
[
- (required_modifier | prohibit_modifier, 1, pp.opAssoc.RIGHT),
- ((not_ | '!').setParseAction(lambda: "NOT"), 1, pp.opAssoc.RIGHT),
- ((and_ | '&&').setParseAction(lambda: "AND"), 2, pp.opAssoc.LEFT),
- (pp.Optional(or_ | '||').setParseAction(lambda: "OR"), 2, pp.opAssoc.LEFT),
- ])
+ (required_modifier | prohibit_modifier, 1, pp.opAssoc.RIGHT),
+ ((not_ | "!").setParseAction(lambda: "NOT"), 1, pp.opAssoc.RIGHT),
+ ((and_ | "&&").setParseAction(lambda: "AND"), 2, pp.opAssoc.LEFT),
+ (pp.Optional(or_ | "||").setParseAction(lambda: "OR"), 2, pp.opAssoc.LEFT),
+ ],
+)
-if __name__ == '__main__':
+if __name__ == "__main__":
# test strings taken from grammar description doc, and TestQueryParser.java
tests = r"""
@@ -329,4 +336,5 @@ if __name__ == '__main__':
if not (success1 and success2):
import sys
+
sys.exit(1)
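
Below is a minimal usage sketch of the grammar after this reformatting. It assumes the pyparsing examples directory is on sys.path so the module imports as plain lucene_grammar; the sample queries are illustrative and are not taken from the file's own test suite.

# Minimal usage sketch (assumption: the pyparsing examples directory is on
# sys.path, so the module imports as plain `lucene_grammar`).
import lucene_grammar

# Parse a few illustrative Lucene-style queries with the top-level
# `expression` element defined in the example.
queries = [
    'title:"The Right Way" AND text:go',
    "roam~0.8",
    "mod_date:[20020101 TO 20030101]",
]
for query in queries:
    result = lucene_grammar.expression.parseString(query, parseAll=True)
    print(query, "->", result.asList())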